Diffstat (limited to 'deps/v8/src'): 324 files changed, 56340 insertions, 42217 deletions
diff --git a/deps/v8/src/SConscript b/deps/v8/src/SConscript index 16bfb55b38..772ac4eb2c 100755 --- a/deps/v8/src/SConscript +++ b/deps/v8/src/SConscript @@ -84,12 +84,12 @@ SOURCES = { global-handles.cc handles.cc heap-profiler.cc + heap-snapshot-generator.cc heap.cc hydrogen-instructions.cc hydrogen.cc ic.cc incremental-marking.cc - inspector.cc interface.cc interpreter-irregexp.cc isolate.cc @@ -97,7 +97,6 @@ SOURCES = { lithium-allocator.cc lithium.cc liveedit.cc - liveobjectlist.cc log-utils.cc log.cc mark-compact.cc @@ -328,6 +327,7 @@ debug-debugger.js EXPERIMENTAL_LIBRARY_FILES = ''' +symbol.js proxy.js collection.js '''.split() diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc index 1bc9221a20..57062be41c 100644 --- a/deps/v8/src/accessors.cc +++ b/deps/v8/src/accessors.cc @@ -42,8 +42,8 @@ namespace internal { template <class C> -static C* FindInstanceOf(Object* obj) { - for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype()) { +static C* FindInstanceOf(Isolate* isolate, Object* obj) { + for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) { if (Is<C>(cur)) return C::cast(cur); } return NULL; @@ -77,7 +77,7 @@ MaybeObject* Accessors::ReadOnlySetAccessor(JSObject*, Object* value, void*) { MaybeObject* Accessors::ArrayGetLength(Object* object, void*) { // Traverse the prototype chain until we reach an array. - JSArray* holder = FindInstanceOf<JSArray>(object); + JSArray* holder = FindInstanceOf<JSArray>(Isolate::Current(), object); return holder == NULL ? Smi::FromInt(0) : holder->length(); } @@ -103,7 +103,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) { // causes an infinite loop. if (!object->IsJSArray()) { return object->SetLocalPropertyIgnoreAttributes( - isolate->heap()->length_symbol(), value, NONE); + isolate->heap()->length_string(), value, NONE); } value = FlattenNumber(value); @@ -112,7 +112,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) { HandleScope scope(isolate); // Protect raw pointers. - Handle<JSObject> object_handle(object, isolate); + Handle<JSArray> array_handle(JSArray::cast(object), isolate); Handle<Object> value_handle(value, isolate); bool has_exception; @@ -122,7 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) { if (has_exception) return Failure::Exception(); if (uint32_v->Number() == number_v->Number()) { - return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v); + return array_handle->SetElementsLength(*uint32_v); } return isolate->Throw( *isolate->factory()->NewRangeError("invalid_array_length", @@ -383,13 +383,14 @@ const AccessorDescriptor Accessors::ScriptEvalFromScript = { MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) { - HandleScope scope; - Handle<Script> script(Script::cast(JSValue::cast(object)->value())); + Script* raw_script = Script::cast(JSValue::cast(object)->value()); + HandleScope scope(raw_script->GetIsolate()); + Handle<Script> script(raw_script); // If this is not a script compiled through eval there is no eval position. 
int compilation_type = Smi::cast(script->compilation_type())->value(); if (compilation_type != Script::COMPILATION_TYPE_EVAL) { - return HEAP->undefined_value(); + return script->GetHeap()->undefined_value(); } // Get the function from where eval was called and find the source position @@ -441,18 +442,19 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = { MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { - Heap* heap = Isolate::Current()->heap(); - JSFunction* function = FindInstanceOf<JSFunction>(object); - if (function == NULL) return heap->undefined_value(); + Isolate* isolate = Isolate::Current(); + JSFunction* function = FindInstanceOf<JSFunction>(isolate, object); + if (function == NULL) return isolate->heap()->undefined_value(); while (!function->should_have_prototype()) { - function = FindInstanceOf<JSFunction>(function->GetPrototype()); + function = FindInstanceOf<JSFunction>(isolate, function->GetPrototype()); // There has to be one because we hit the getter. ASSERT(function != NULL); } if (!function->has_prototype()) { Object* prototype; - { MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function); + { MaybeObject* maybe_prototype + = isolate->heap()->AllocateFunctionPrototype(function); if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; } Object* result; @@ -465,24 +467,46 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) { MaybeObject* Accessors::FunctionSetPrototype(JSObject* object, - Object* value, + Object* value_raw, void*) { - Heap* heap = object->GetHeap(); - JSFunction* function = FindInstanceOf<JSFunction>(object); - if (function == NULL) return heap->undefined_value(); - if (!function->should_have_prototype()) { + Isolate* isolate = object->GetIsolate(); + Heap* heap = isolate->heap(); + JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, object); + if (function_raw == NULL) return heap->undefined_value(); + if (!function_raw->should_have_prototype()) { // Since we hit this accessor, object will have no prototype property. 
- return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(), - value, + return object->SetLocalPropertyIgnoreAttributes(heap->prototype_string(), + value_raw, NONE); } - Object* prototype; - { MaybeObject* maybe_prototype = function->SetPrototype(value); - if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; + HandleScope scope(isolate); + Handle<JSFunction> function(function_raw, isolate); + Handle<Object> value(value_raw, isolate); + + Handle<Object> old_value; + bool is_observed = + FLAG_harmony_observation && + *function == object && + function->map()->is_observed(); + if (is_observed) { + if (function->has_prototype()) + old_value = handle(function->prototype(), isolate); + else + old_value = isolate->factory()->NewFunctionPrototype(function); + } + + Handle<Object> result; + MaybeObject* maybe_result = function->SetPrototype(*value); + if (!maybe_result->ToHandle(&result, isolate)) return maybe_result; + ASSERT(function->prototype() == *value); + + if (is_observed && !old_value->SameValue(*value)) { + JSObject::EnqueueChangeRecord( + function, "updated", isolate->factory()->prototype_string(), old_value); } - ASSERT(function->prototype() == value); - return function; + + return *function; } @@ -499,7 +523,8 @@ const AccessorDescriptor Accessors::FunctionPrototype = { MaybeObject* Accessors::FunctionGetLength(Object* object, void*) { - JSFunction* function = FindInstanceOf<JSFunction>(object); + Isolate* isolate = Isolate::Current(); + JSFunction* function = FindInstanceOf<JSFunction>(isolate, object); if (function == NULL) return Smi::FromInt(0); // Check if already compiled. if (function->shared()->is_compiled()) { @@ -507,7 +532,7 @@ MaybeObject* Accessors::FunctionGetLength(Object* object, void*) { } // If the function isn't compiled yet, the length is not computed correctly // yet. Compile it now and return the right length. - HandleScope scope; + HandleScope scope(isolate); Handle<JSFunction> handle(function); if (JSFunction::CompileLazy(handle, KEEP_EXCEPTION)) { return Smi::FromInt(handle->shared()->length()); @@ -529,8 +554,11 @@ const AccessorDescriptor Accessors::FunctionLength = { MaybeObject* Accessors::FunctionGetName(Object* object, void*) { - JSFunction* holder = FindInstanceOf<JSFunction>(object); - return holder == NULL ? HEAP->undefined_value() : holder->shared()->name(); + Isolate* isolate = Isolate::Current(); + JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object); + return holder == NULL + ? 
isolate->heap()->undefined_value() + : holder->shared()->name(); } @@ -550,7 +578,8 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction( JavaScriptFrame* frame, Handle<JSFunction> inlined_function, int inlined_frame_index) { - Factory* factory = Isolate::Current()->factory(); + Isolate* isolate = inlined_function->GetIsolate(); + Factory* factory = isolate->factory(); Vector<SlotRef> args_slots = SlotRef::ComputeSlotMappingForArguments( frame, @@ -561,7 +590,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction( factory->NewArgumentsObject(inlined_function, args_count); Handle<FixedArray> array = factory->NewFixedArray(args_count); for (int i = 0; i < args_count; ++i) { - Handle<Object> value = args_slots[i].GetValue(); + Handle<Object> value = args_slots[i].GetValue(isolate); array->set(i, *value); } arguments->set_elements(*array); @@ -575,7 +604,7 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction( MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) { Isolate* isolate = Isolate::Current(); HandleScope scope(isolate); - JSFunction* holder = FindInstanceOf<JSFunction>(object); + JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object); if (holder == NULL) return isolate->heap()->undefined_value(); Handle<JSFunction> function(holder, isolate); @@ -601,7 +630,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) { // If there is an arguments variable in the stack, we return that. Handle<ScopeInfo> scope_info(function->shared()->scope_info()); int index = scope_info->StackSlotIndex( - isolate->heap()->arguments_symbol()); + isolate->heap()->arguments_string()); if (index >= 0) { Handle<Object> arguments(frame->GetExpression(index), isolate); if (!arguments->IsArgumentsMarker()) return *arguments; @@ -649,19 +678,6 @@ const AccessorDescriptor Accessors::FunctionArguments = { // -static MaybeObject* CheckNonStrictCallerOrThrow( - Isolate* isolate, - JSFunction* caller) { - DisableAssertNoAllocation enable_allocation; - if (!caller->shared()->is_classic_mode()) { - return isolate->Throw( - *isolate->factory()->NewTypeError("strict_caller", - HandleVector<Object>(NULL, 0))); - } - return caller; -} - - class FrameFunctionIterator { public: FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise) @@ -712,7 +728,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) { Isolate* isolate = Isolate::Current(); HandleScope scope(isolate); AssertNoAllocation no_alloc; - JSFunction* holder = FindInstanceOf<JSFunction>(object); + JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object); if (holder == NULL) return isolate->heap()->undefined_value(); if (holder->shared()->native()) return isolate->heap()->null_value(); Handle<JSFunction> function(holder, isolate); @@ -748,7 +764,14 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) { if (caller->shared()->bound()) { return isolate->heap()->null_value(); } - return CheckNonStrictCallerOrThrow(isolate, caller); + // Censor if the caller is not a classic mode function. 
+ // Change from ES5, which used to throw, see: + // https://bugs.ecmascript.org/show_bug.cgi?id=310 + if (!caller->shared()->is_classic_mode()) { + return isolate->heap()->null_value(); + } + + return caller; } @@ -764,22 +787,49 @@ const AccessorDescriptor Accessors::FunctionCaller = { // -MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) { - Object* current = receiver->GetPrototype(); +static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate, + Object* receiver) { + Object* current = receiver->GetPrototype(isolate); while (current->IsJSObject() && JSObject::cast(current)->map()->is_hidden_prototype()) { - current = current->GetPrototype(); + current = current->GetPrototype(isolate); } return current; } -MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver, - Object* value, +MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) { + return GetPrototypeSkipHiddenPrototypes(Isolate::Current(), receiver); +} + + +MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw, + Object* value_raw, void*) { - const bool skip_hidden_prototypes = true; + const bool kSkipHiddenPrototypes = true; // To be consistent with other Set functions, return the value. - return receiver->SetPrototype(value, skip_hidden_prototypes); + if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed())) + return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes); + + Isolate* isolate = receiver_raw->GetIsolate(); + HandleScope scope(isolate); + Handle<JSObject> receiver(receiver_raw); + Handle<Object> value(value_raw, isolate); + Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver), + isolate); + + MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes); + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver), + isolate); + if (!new_value->SameValue(*old_value)) { + JSObject::EnqueueChangeRecord(receiver, "prototype", + isolate->factory()->proto_string(), + old_value); + } + return *hresult; } @@ -802,15 +852,15 @@ static v8::Handle<v8::Value> ModuleGetExport( ASSERT(context->IsModuleContext()); int slot = info.Data()->Int32Value(); Object* value = context->get(slot); + Isolate* isolate = instance->GetIsolate(); if (value->IsTheHole()) { Handle<String> name = v8::Utils::OpenHandle(*property); - Isolate* isolate = instance->GetIsolate(); isolate->ScheduleThrow( *isolate->factory()->NewReferenceError("not_defined", HandleVector(&name, 1))); return v8::Handle<v8::Value>(); } - return v8::Utils::ToLocal(Handle<Object>(value)); + return v8::Utils::ToLocal(Handle<Object>(value, isolate)); } @@ -840,7 +890,7 @@ Handle<AccessorInfo> Accessors::MakeModuleExport( int index, PropertyAttributes attributes) { Factory* factory = name->GetIsolate()->factory(); - Handle<AccessorInfo> info = factory->NewAccessorInfo(); + Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo(); info->set_property_attributes(attributes); info->set_all_can_read(true); info->set_all_can_write(true); diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc index f1683984e1..dfa57654db 100644 --- a/deps/v8/src/api.cc +++ b/deps/v8/src/api.cc @@ -42,6 +42,7 @@ #include "execution.h" #include "global-handles.h" #include "heap-profiler.h" +#include "heap-snapshot-generator-inl.h" #include "messages.h" #ifdef COMPRESS_STARTUP_DATA_BZ2 #include "natives.h" @@ -128,8 +129,13 @@ namespace v8 { static void 
DefaultFatalErrorHandler(const char* location, const char* message) { - i::VMState __state__(i::Isolate::Current(), i::OTHER); - API_Fatal(location, message); + i::Isolate* isolate = i::Isolate::Current(); + if (isolate->IsInitialized()) { + i::VMState __state__(isolate, i::OTHER); + API_Fatal(location, message); + } else { + API_Fatal(location, message); + } } @@ -202,15 +208,21 @@ void i::V8::FatalProcessOutOfMemory(const char* location, bool take_snapshot) { int end_marker; heap_stats.end_marker = &end_marker; i::Isolate* isolate = i::Isolate::Current(); - // BUG(1718): - // Don't use the take_snapshot since we don't support HeapIterator here - // without doing a special GC. - isolate->heap()->RecordStats(&heap_stats, false); + if (isolate->heap()->HasBeenSetUp()) { + // BUG(1718): Don't use the take_snapshot since we don't support + // HeapIterator here without doing a special GC. + isolate->heap()->RecordStats(&heap_stats, false); + } i::V8::SetFatalError(); FatalErrorCallback callback = GetFatalErrorHandler(); + const char* message = "Allocation failed - process out of memory"; { - LEAVE_V8(isolate); - callback(location, "Allocation failed - process out of memory"); + if (isolate->IsInitialized()) { + LEAVE_V8(isolate); + callback(location, message); + } else { + callback(location, message); + } } // If the callback returns, we stop execution. UNREACHABLE(); @@ -615,79 +627,60 @@ bool SetResourceConstraints(ResourceConstraints* constraints) { } -i::Object** V8::GlobalizeReference(i::Object** obj) { - i::Isolate* isolate = i::Isolate::Current(); +i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) { if (IsDeadCheck(isolate, "V8::Persistent::New")) return NULL; LOG_API(isolate, "Persistent::New"); - i::Handle<i::Object> result = - isolate->global_handles()->Create(*obj); + i::Handle<i::Object> result = isolate->global_handles()->Create(*obj); return result.location(); } -void V8::MakeWeak(i::Object** object, void* parameters, - WeakReferenceCallback callback) { - i::Isolate* isolate = i::Isolate::Current(); +void V8::MakeWeak(i::Isolate* isolate, + i::Object** object, + void* parameters, + WeakReferenceCallback weak_reference_callback, + NearDeathCallback near_death_callback) { + ASSERT(isolate == i::Isolate::Current()); LOG_API(isolate, "MakeWeak"); - isolate->global_handles()->MakeWeak(object, parameters, - callback); + isolate->global_handles()->MakeWeak(object, + parameters, + weak_reference_callback, + near_death_callback); } -void V8::ClearWeak(i::Object** obj) { - i::Isolate* isolate = i::Isolate::Current(); +void V8::ClearWeak(i::Isolate* isolate, i::Object** obj) { LOG_API(isolate, "ClearWeak"); isolate->global_handles()->ClearWeakness(obj); } -void V8::MarkIndependent(i::Object** object) { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "MakeIndependent"); - isolate->global_handles()->MarkIndependent(object); -} - - -bool V8::IsGlobalIndependent(i::Object** obj) { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "IsGlobalIndependent"); - if (!isolate->IsInitialized()) return false; - return i::GlobalHandles::IsIndependent(obj); +void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) { + ASSERT(isolate == i::Isolate::Current()); + LOG_API(isolate, "DisposeGlobal"); + if (!isolate->IsInitialized()) return; + isolate->global_handles()->Destroy(obj); } - -bool V8::IsGlobalNearDeath(i::Object** obj) { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "IsGlobalNearDeath"); - if (!isolate->IsInitialized()) 
return false; - return i::GlobalHandles::IsNearDeath(obj); -} +// --- H a n d l e s --- -bool V8::IsGlobalWeak(i::Object** obj) { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "IsGlobalWeak"); - if (!isolate->IsInitialized()) return false; - return i::GlobalHandles::IsWeak(obj); +HandleScope::HandleScope() { + Initialize(reinterpret_cast<Isolate*>(i::Isolate::Current())); } -void V8::DisposeGlobal(i::Object** obj) { - i::Isolate* isolate = i::Isolate::Current(); - LOG_API(isolate, "DisposeGlobal"); - if (!isolate->IsInitialized()) return; - isolate->global_handles()->Destroy(obj); +HandleScope::HandleScope(Isolate* isolate) { + Initialize(isolate); } -// --- H a n d l e s --- - -HandleScope::HandleScope() { - i::Isolate* isolate = i::Isolate::Current(); - API_ENTRY_CHECK(isolate, "HandleScope::HandleScope"); +void HandleScope::Initialize(Isolate* isolate) { + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); + API_ENTRY_CHECK(internal_isolate, "HandleScope::HandleScope"); v8::ImplementationUtilities::HandleScopeData* current = - isolate->handle_scope_data(); - isolate_ = isolate; + internal_isolate->handle_scope_data(); + isolate_ = internal_isolate; prev_next_ = current->next; prev_limit_ = current->limit; is_closed_ = false; @@ -703,7 +696,6 @@ HandleScope::~HandleScope() { void HandleScope::Leave() { - ASSERT(isolate_ == i::Isolate::Current()); v8::ImplementationUtilities::HandleScopeData* current = isolate_->handle_scope_data(); current->level--; @@ -721,21 +713,29 @@ void HandleScope::Leave() { int HandleScope::NumberOfHandles() { - EnsureInitializedForIsolate( - i::Isolate::Current(), "HandleScope::NumberOfHandles"); - return i::HandleScope::NumberOfHandles(); + i::Isolate* isolate = i::Isolate::Current(); + if (!EnsureInitializedForIsolate(isolate, "HandleScope::NumberOfHandles")) { + return 0; + } + return i::HandleScope::NumberOfHandles(isolate); } i::Object** HandleScope::CreateHandle(i::Object* value) { - return i::HandleScope::CreateHandle(value, i::Isolate::Current()); + return i::HandleScope::CreateHandle(i::Isolate::Current(), value); +} + + +i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) { + ASSERT(isolate == i::Isolate::Current()); + return i::HandleScope::CreateHandle(isolate, value); } i::Object** HandleScope::CreateHandle(i::HeapObject* value) { ASSERT(value->IsHeapObject()); return reinterpret_cast<i::Object**>( - i::HandleScope::CreateHandle(value, value->GetIsolate())); + i::HandleScope::CreateHandle(value->GetIsolate(), value)); } @@ -773,33 +773,77 @@ void Context::Exit() { } -void Context::SetData(v8::Handle<Value> data) { - i::Handle<i::Context> env = Utils::OpenHandle(this); - i::Isolate* isolate = env->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Context::SetData()")) return; - i::Handle<i::Object> raw_data = Utils::OpenHandle(*data); - ASSERT(env->IsNativeContext()); - if (env->IsNativeContext()) { - env->set_data(*raw_data); - } +static void* DecodeSmiToAligned(i::Object* value, const char* location) { + ApiCheck(value->IsSmi(), location, "Not a Smi"); + return reinterpret_cast<void*>(value); } -v8::Local<v8::Value> Context::GetData() { - i::Handle<i::Context> env = Utils::OpenHandle(this); - i::Isolate* isolate = env->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Context::GetData()")) { - return Local<Value>(); - } - ASSERT(env->IsNativeContext()); - if (!env->IsNativeContext()) { - return Local<Value>(); +static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) { + i::Smi* 
smi = reinterpret_cast<i::Smi*>(value); + ApiCheck(smi->IsSmi(), location, "Pointer is not aligned"); + return smi; +} + + +static i::Handle<i::FixedArray> EmbedderDataFor(Context* context, + int index, + bool can_grow, + const char* location) { + i::Handle<i::Context> env = Utils::OpenHandle(context); + bool ok = !IsDeadCheck(env->GetIsolate(), location) && + ApiCheck(env->IsNativeContext(), location, "Not a native context") && + ApiCheck(index >= 0, location, "Negative index"); + if (!ok) return i::Handle<i::FixedArray>(); + i::Handle<i::FixedArray> data(env->embedder_data()); + if (index < data->length()) return data; + if (!can_grow) { + Utils::ReportApiFailure(location, "Index too large"); + return i::Handle<i::FixedArray>(); } - i::Handle<i::Object> result(env->data(), isolate); + int new_size = i::Max(index, data->length() << 1) + 1; + data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size); + env->set_embedder_data(*data); + return data; +} + + +v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) { + const char* location = "v8::Context::GetEmbedderData()"; + i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location); + if (data.is_null()) return Local<Value>(); + i::Handle<i::Object> result(data->get(index), data->GetIsolate()); return Utils::ToLocal(result); } +void Context::SetEmbedderData(int index, v8::Handle<Value> value) { + const char* location = "v8::Context::SetEmbedderData()"; + i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location); + if (data.is_null()) return; + i::Handle<i::Object> val = Utils::OpenHandle(*value); + data->set(index, *val); + ASSERT_EQ(*Utils::OpenHandle(*value), + *Utils::OpenHandle(*GetEmbedderData(index))); +} + + +void* Context::SlowGetAlignedPointerFromEmbedderData(int index) { + const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()"; + i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location); + if (data.is_null()) return NULL; + return DecodeSmiToAligned(data->get(index), location); +} + + +void Context::SetAlignedPointerInEmbedderData(int index, void* value) { + const char* location = "v8::Context::SetAlignedPointerInEmbedderData()"; + i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location); + data->set(index, EncodeAlignedAsSmi(value, location)); + ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index)); +} + + i::Object** v8::HandleScope::RawClose(i::Object** value) { if (!ApiCheck(!is_closed_, "v8::HandleScope::Close()", @@ -821,7 +865,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) { } // Allocate a new handle on the previous handle block. 
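The embedder-data hunks in api.cc above replace Context::SetData()/GetData() with indexed slots backed by a growable FixedArray on the context (grown by EmbedderDataFor(), with aligned pointers encoded as Smis). A minimal usage sketch of the new public API follows; the slot indices, the string payload, the helper name UseEmbedderData, and the `payload` variable are illustrative assumptions, not part of the patch:

    #include <v8.h>

    // Sketch only: assumes a live v8::Isolate* and an entered context handle.
    void UseEmbedderData(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
      v8::HandleScope scope(isolate);  // new HandleScope(Isolate*) constructor added in this diff
      // Object-valued slot 0: any V8 value can be stored and read back.
      context->SetEmbedderData(0, v8::String::New("per-context data"));
      v8::Local<v8::Value> data = context->GetEmbedderData(0);
      // Pointer-valued slot 1: the pointer must be Smi-aligned (low tag bit clear),
      // because SetAlignedPointerInEmbedderData() encodes it via EncodeAlignedAsSmi().
      static int payload;
      context->SetAlignedPointerInEmbedderData(1, &payload);
      void* raw = context->GetAlignedPointerFromEmbedderData(1);
      (void) data; (void) raw;
    }
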
- i::Handle<i::Object> handle(result); + i::Handle<i::Object> handle(result, isolate_); return handle.location(); } @@ -905,7 +949,7 @@ void Template::Set(v8::Handle<String> name, v8::Handle<Data> value, if (IsDeadCheck(isolate, "v8::Template::Set()")) return; ENTER_V8(isolate); i::HandleScope scope(isolate); - i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list()); + i::Handle<i::Object> list(Utils::OpenHandle(this)->property_list(), isolate); if (list->IsUndefined()) { list = NeanderArray().value(); Utils::OpenHandle(this)->set_property_list(*list); @@ -931,7 +975,8 @@ Local<ObjectTemplate> FunctionTemplate::PrototypeTemplate() { return Local<ObjectTemplate>(); } ENTER_V8(isolate); - i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template()); + i::Handle<i::Object> result(Utils::OpenHandle(this)->prototype_template(), + isolate); if (result->IsUndefined()) { result = Utils::OpenHandle(*ObjectTemplate::New()); Utils::OpenHandle(this)->set_prototype_template(*result); @@ -949,7 +994,7 @@ void FunctionTemplate::Inherit(v8::Handle<FunctionTemplate> value) { Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback, - v8::Handle<Value> data, v8::Handle<Signature> signature) { + v8::Handle<Value> data, v8::Handle<Signature> signature, int length) { i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::FunctionTemplate::New()"); LOG_API(isolate, "FunctionTemplate::New"); @@ -966,6 +1011,7 @@ Local<FunctionTemplate> FunctionTemplate::New(InvocationCallback callback, if (data.IsEmpty()) data = v8::Undefined(); Utils::ToLocal(obj)->SetCallHandler(callback, data); } + obj->set_length(length); obj->set_undetectable(false); obj->set_needs_access_check(false); @@ -1004,6 +1050,124 @@ Local<AccessorSignature> AccessorSignature::New( } +template<typename Operation> +static Local<Operation> NewDescriptor( + Isolate* isolate, + const i::DeclaredAccessorDescriptorData& data, + Data* previous_descriptor + ) { + i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); + i::Handle<i::DeclaredAccessorDescriptor> previous = + i::Handle<i::DeclaredAccessorDescriptor>(); + if (previous_descriptor != NULL) { + previous = Utils::OpenHandle( + static_cast<DeclaredAccessorDescriptor*>(previous_descriptor)); + } + i::Handle<i::DeclaredAccessorDescriptor> descriptor = + i::DeclaredAccessorDescriptor::Create(internal_isolate, data, previous); + return Local<Operation>( + reinterpret_cast<Operation*>(*Utils::ToLocal(descriptor))); +} + + +Local<RawOperationDescriptor> + ObjectOperationDescriptor::NewInternalFieldDereference( + Isolate* isolate, + int internal_field) { + i::DeclaredAccessorDescriptorData data; + data.type = i::kDescriptorObjectDereference; + data.object_dereference_descriptor.internal_field = internal_field; + return NewDescriptor<RawOperationDescriptor>(isolate, data, NULL); +} + + +Local<RawOperationDescriptor> RawOperationDescriptor::NewRawShift( + Isolate* isolate, + int16_t byte_offset) { + i::DeclaredAccessorDescriptorData data; + data.type = i::kDescriptorPointerShift; + data.pointer_shift_descriptor.byte_offset = byte_offset; + return NewDescriptor<RawOperationDescriptor>(isolate, data, this); +} + + +Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewHandleDereference( + Isolate* isolate) { + i::DeclaredAccessorDescriptorData data; + data.type = i::kDescriptorReturnObject; + return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this); +} + + +Local<RawOperationDescriptor> 
RawOperationDescriptor::NewRawDereference( + Isolate* isolate) { + i::DeclaredAccessorDescriptorData data; + data.type = i::kDescriptorPointerDereference; + return NewDescriptor<RawOperationDescriptor>(isolate, data, this); +} + + +Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewPointerCompare( + Isolate* isolate, + void* compare_value) { + i::DeclaredAccessorDescriptorData data; + data.type = i::kDescriptorPointerCompare; + data.pointer_compare_descriptor.compare_value = compare_value; + return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this); +} + + +Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewPrimitiveValue( + Isolate* isolate, + DeclaredAccessorDescriptorDataType data_type, + uint8_t bool_offset) { + i::DeclaredAccessorDescriptorData data; + data.type = i::kDescriptorPrimitiveValue; + data.primitive_value_descriptor.data_type = data_type; + data.primitive_value_descriptor.bool_offset = bool_offset; + return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, this); +} + + +template<typename T> +static Local<DeclaredAccessorDescriptor> NewBitmaskCompare( + Isolate* isolate, + T bitmask, + T compare_value, + RawOperationDescriptor* operation) { + i::DeclaredAccessorDescriptorData data; + data.type = i::kDescriptorBitmaskCompare; + data.bitmask_compare_descriptor.bitmask = bitmask; + data.bitmask_compare_descriptor.compare_value = compare_value; + data.bitmask_compare_descriptor.size = sizeof(T); + return NewDescriptor<DeclaredAccessorDescriptor>(isolate, data, operation); +} + + +Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare8( + Isolate* isolate, + uint8_t bitmask, + uint8_t compare_value) { + return NewBitmaskCompare(isolate, bitmask, compare_value, this); +} + + +Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare16( + Isolate* isolate, + uint16_t bitmask, + uint16_t compare_value) { + return NewBitmaskCompare(isolate, bitmask, compare_value, this); +} + + +Local<DeclaredAccessorDescriptor> RawOperationDescriptor::NewBitmaskCompare32( + Isolate* isolate, + uint32_t bitmask, + uint32_t compare_value) { + return NewBitmaskCompare(isolate, bitmask, compare_value, this); +} + + Local<TypeSwitch> TypeSwitch::New(Handle<FunctionTemplate> type) { Handle<FunctionTemplate> types[1] = { type }; return TypeSwitch::New(1, types); @@ -1065,19 +1229,12 @@ void FunctionTemplate::SetCallHandler(InvocationCallback callback, } -static i::Handle<i::AccessorInfo> MakeAccessorInfo( - v8::Handle<String> name, - AccessorGetter getter, - AccessorSetter setter, - v8::Handle<Value> data, - v8::AccessControl settings, - v8::PropertyAttribute attributes, - v8::Handle<AccessorSignature> signature) { - i::Handle<i::AccessorInfo> obj = FACTORY->NewAccessorInfo(); - SET_FIELD_WRAPPED(obj, set_getter, getter); - SET_FIELD_WRAPPED(obj, set_setter, setter); - if (data.IsEmpty()) data = v8::Undefined(); - obj->set_data(*Utils::OpenHandle(*data)); +static i::Handle<i::AccessorInfo> SetAccessorInfoProperties( + i::Handle<i::AccessorInfo> obj, + v8::Handle<String> name, + v8::AccessControl settings, + v8::PropertyAttribute attributes, + v8::Handle<AccessorSignature> signature) { obj->set_name(*Utils::OpenHandle(*name)); if (settings & ALL_CAN_READ) obj->set_all_can_read(true); if (settings & ALL_CAN_WRITE) obj->set_all_can_write(true); @@ -1090,7 +1247,7 @@ static i::Handle<i::AccessorInfo> MakeAccessorInfo( } -void FunctionTemplate::AddInstancePropertyAccessor( +static i::Handle<i::AccessorInfo> MakeAccessorInfo( 
v8::Handle<String> name, AccessorGetter getter, AccessorSetter setter, @@ -1098,24 +1255,29 @@ void FunctionTemplate::AddInstancePropertyAccessor( v8::AccessControl settings, v8::PropertyAttribute attributes, v8::Handle<AccessorSignature> signature) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, - "v8::FunctionTemplate::AddInstancePropertyAccessor()")) { - return; - } - ENTER_V8(isolate); - i::HandleScope scope(isolate); + i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate(); + i::Handle<i::ExecutableAccessorInfo> obj = + isolate->factory()->NewExecutableAccessorInfo(); + SET_FIELD_WRAPPED(obj, set_getter, getter); + SET_FIELD_WRAPPED(obj, set_setter, setter); + if (data.IsEmpty()) data = v8::Undefined(); + obj->set_data(*Utils::OpenHandle(*data)); + return SetAccessorInfoProperties(obj, name, settings, attributes, signature); +} - i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data, - settings, attributes, - signature); - i::Handle<i::Object> list(Utils::OpenHandle(this)->property_accessors()); - if (list->IsUndefined()) { - list = NeanderArray().value(); - Utils::OpenHandle(this)->set_property_accessors(*list); - } - NeanderArray array(list); - array.add(obj); + +static i::Handle<i::AccessorInfo> MakeAccessorInfo( + v8::Handle<String> name, + v8::Handle<v8::DeclaredAccessorDescriptor> descriptor, + v8::AccessControl settings, + v8::PropertyAttribute attributes, + v8::Handle<AccessorSignature> signature) { + i::Isolate* isolate = Utils::OpenHandle(*name)->GetIsolate(); + if (descriptor.IsEmpty()) return i::Handle<i::DeclaredAccessorInfo>(); + i::Handle<i::DeclaredAccessorInfo> obj = + isolate->factory()->NewDeclaredAccessorInfo(); + obj->set_descriptor(*Utils::OpenHandle(*descriptor)); + return SetAccessorInfoProperties(obj, name, settings, attributes, signature); } @@ -1136,6 +1298,14 @@ Local<ObjectTemplate> FunctionTemplate::InstanceTemplate() { } +void FunctionTemplate::SetLength(int length) { + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetLength()")) return; + ENTER_V8(isolate); + Utils::OpenHandle(this)->set_length(length); +} + + void FunctionTemplate::SetClassName(Handle<String> name) { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetClassName()")) return; @@ -1156,7 +1326,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) { void FunctionTemplate::ReadOnlyPrototype() { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) { + if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) { return; } ENTER_V8(isolate); @@ -1288,6 +1458,19 @@ static void EnsureConstructor(ObjectTemplate* object_template) { } +static inline void AddPropertyToFunctionTemplate( + i::Handle<i::FunctionTemplateInfo> cons, + i::Handle<i::AccessorInfo> obj) { + i::Handle<i::Object> list(cons->property_accessors(), cons->GetIsolate()); + if (list->IsUndefined()) { + list = NeanderArray().value(); + cons->set_property_accessors(*list); + } + NeanderArray array(list); + array.add(obj); +} + + void ObjectTemplate::SetAccessor(v8::Handle<String> name, AccessorGetter getter, AccessorSetter setter, @@ -1303,13 +1486,31 @@ void ObjectTemplate::SetAccessor(v8::Handle<String> name, i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor()); 
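The FunctionTemplate changes above add an `int length` parameter to FunctionTemplate::New() and a separate FunctionTemplate::SetLength() setter, both stored on the template via set_length(). A hedged usage sketch; `MyCallback`, `InstallTemplate`, and the value 2 are illustrative assumptions:

    #include <v8.h>

    // InvocationCallback as used by this API version.
    v8::Handle<v8::Value> MyCallback(const v8::Arguments& args) {
      return v8::Undefined();
    }

    void InstallTemplate() {  // hypothetical setup code
      v8::HandleScope scope;
      // Declare the callback and a length of 2 when creating the template...
      v8::Handle<v8::FunctionTemplate> t = v8::FunctionTemplate::New(
          MyCallback, v8::Handle<v8::Value>(), v8::Handle<v8::Signature>(), 2);
      // ...or set the length after construction.
      t->SetLength(2);
    }
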
i::Handle<i::FunctionTemplateInfo> cons(constructor); - Utils::ToLocal(cons)->AddInstancePropertyAccessor(name, - getter, - setter, - data, - settings, - attribute, - signature); + i::Handle<i::AccessorInfo> obj = MakeAccessorInfo(name, getter, setter, data, + settings, attribute, + signature); + AddPropertyToFunctionTemplate(cons, obj); +} + + +bool ObjectTemplate::SetAccessor(Handle<String> name, + Handle<DeclaredAccessorDescriptor> descriptor, + AccessControl settings, + PropertyAttribute attribute, + Handle<AccessorSignature> signature) { + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + if (IsDeadCheck(isolate, "v8::ObjectTemplate::SetAccessor()")) return false; + ENTER_V8(isolate); + i::HandleScope scope(isolate); + EnsureConstructor(this); + i::FunctionTemplateInfo* constructor = + i::FunctionTemplateInfo::cast(Utils::OpenHandle(this)->constructor()); + i::Handle<i::FunctionTemplateInfo> cons(constructor); + i::Handle<i::AccessorInfo> obj = MakeAccessorInfo( + name, descriptor, settings, attribute, signature); + if (obj.is_null()) return false; + AddPropertyToFunctionTemplate(cons, obj); + return true; } @@ -1600,6 +1801,8 @@ Local<Value> Script::Run() { ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>()); LOG_API(isolate, "Script::Run"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::Object* raw_result = NULL; { i::HandleScope scope(isolate); @@ -1649,10 +1852,10 @@ Local<Value> Script::Id() { i::HandleScope scope(isolate); i::Handle<i::SharedFunctionInfo> function_info = OpenScript(this); i::Handle<i::Script> script(i::Script::cast(function_info->script())); - i::Handle<i::Object> id(script->id()); + i::Handle<i::Object> id(script->id(), isolate); raw_id = *id; } - i::Handle<i::Object> id(raw_id); + i::Handle<i::Object> id(raw_id, isolate); return Utils::ToLocal(id); } @@ -1690,7 +1893,7 @@ v8::TryCatch::TryCatch() v8::TryCatch::~TryCatch() { ASSERT(isolate_ == i::Isolate::Current()); if (rethrow_) { - v8::HandleScope scope; + v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate_)); v8::Local<v8::Value> exc = v8::Local<v8::Value>::New(Exception()); isolate_->UnregisterTryCatchHandler(this); v8::ThrowException(exc); @@ -1736,9 +1939,9 @@ v8::Local<Value> v8::TryCatch::StackTrace() const { if (!raw_obj->IsJSObject()) return v8::Local<Value>(); i::HandleScope scope(isolate_); i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_); - i::Handle<i::String> name = isolate_->factory()->LookupAsciiSymbol("stack"); + i::Handle<i::String> name = isolate_->factory()->stack_string(); if (!obj->HasProperty(*name)) return v8::Local<Value>(); - i::Handle<i::Object> value = i::GetProperty(obj, name); + i::Handle<i::Object> value = i::GetProperty(isolate_, obj, name); if (value.is_null()) return v8::Local<Value>(); return v8::Utils::ToLocal(scope.CloseAndEscape(value)); } else { @@ -1782,9 +1985,9 @@ Local<String> Message::Get() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Message::Get()", return Local<String>()); ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::Object> obj = Utils::OpenHandle(this); - i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(obj); + i::Handle<i::String> raw_result = i::MessageHandler::GetMessage(isolate, obj); Local<String> result = Utils::ToLocal(raw_result); return scope.Close(result); } @@ -1796,13 +1999,15 @@ v8::Handle<Value> 
Message::GetScriptResourceName() const { return Local<String>(); } ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSMessageObject> message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this)); // Return this.script.name. i::Handle<i::JSValue> script = - i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script())); - i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name()); + i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(), + isolate)); + i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(), + isolate); return scope.Close(Utils::ToLocal(resource_name)); } @@ -1813,13 +2018,14 @@ v8::Handle<Value> Message::GetScriptData() const { return Local<Value>(); } ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSMessageObject> message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this)); // Return this.script.data. i::Handle<i::JSValue> script = - i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script())); - i::Handle<i::Object> data(i::Script::cast(script->value())->data()); + i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(), + isolate)); + i::Handle<i::Object> data(i::Script::cast(script->value())->data(), isolate); return scope.Close(Utils::ToLocal(data)); } @@ -1830,10 +2036,10 @@ v8::Handle<v8::StackTrace> Message::GetStackTrace() const { return Local<v8::StackTrace>(); } ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSMessageObject> message = i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this)); - i::Handle<i::Object> stackFramesObj(message->stack_frames()); + i::Handle<i::Object> stackFramesObj(message->stack_frames(), isolate); if (!stackFramesObj->IsJSArray()) return v8::Handle<v8::StackTrace>(); i::Handle<i::JSArray> stackTrace = i::Handle<i::JSArray>::cast(stackFramesObj); @@ -1847,7 +2053,8 @@ static i::Handle<i::Object> CallV8HeapFunction(const char* name, i::Handle<i::Object> argv[], bool* has_pending_exception) { i::Isolate* isolate = i::Isolate::Current(); - i::Handle<i::String> fmt_str = isolate->factory()->LookupAsciiSymbol(name); + i::Handle<i::String> fmt_str = + isolate->factory()->InternalizeUtf8String(name); i::Object* object_fun = isolate->js_builtins_object()->GetPropertyNoExceptionThrown(*fmt_str); i::Handle<i::JSFunction> fun = @@ -1949,7 +2156,7 @@ Local<String> Message::GetSourceLine() const { i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); ON_BAILOUT(isolate, "v8::Message::GetSourceLine()", return Local<String>()); ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> result = CallV8HeapFunction("GetSourceLine", Utils::OpenHandle(this), @@ -1979,7 +2186,7 @@ Local<StackFrame> StackTrace::GetFrame(uint32_t index) const { return Local<StackFrame>(); } ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSArray> self = Utils::OpenHandle(this); i::Object* raw_object = self->GetElementNoExceptionThrown(index); i::Handle<i::JSObject> obj(i::JSObject::cast(raw_object)); @@ -2056,7 +2263,7 @@ Local<String> StackFrame::GetScriptName() const { return Local<String>(); } ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSObject> self = 
Utils::OpenHandle(this); i::Handle<i::Object> name = GetProperty(self, "scriptName"); if (!name->IsString()) { @@ -2072,7 +2279,7 @@ Local<String> StackFrame::GetScriptNameOrSourceURL() const { return Local<String>(); } ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::Object> name = GetProperty(self, "scriptNameOrSourceURL"); if (!name->IsString()) { @@ -2088,7 +2295,7 @@ Local<String> StackFrame::GetFunctionName() const { return Local<String>(); } ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::Object> name = GetProperty(self, "functionName"); if (!name->IsString()) { @@ -2198,7 +2405,7 @@ bool Value::IsExternal() const { if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) { return false; } - return Utils::OpenHandle(this)->IsForeign(); + return Utils::OpenHandle(this)->IsExternal(); } @@ -2240,7 +2447,7 @@ bool Value::IsDate() const { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::Value::IsDate()")) return false; i::Handle<i::Object> obj = Utils::OpenHandle(this); - return obj->HasSpecificClassOf(isolate->heap()->Date_symbol()); + return obj->HasSpecificClassOf(isolate->heap()->Date_string()); } @@ -2248,7 +2455,7 @@ bool Value::IsStringObject() const { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::Value::IsStringObject()")) return false; i::Handle<i::Object> obj = Utils::OpenHandle(this); - return obj->HasSpecificClassOf(isolate->heap()->String_symbol()); + return obj->HasSpecificClassOf(isolate->heap()->String_string()); } @@ -2256,23 +2463,27 @@ bool Value::IsNumberObject() const { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false; i::Handle<i::Object> obj = Utils::OpenHandle(this); - return obj->HasSpecificClassOf(isolate->heap()->Number_symbol()); + return obj->HasSpecificClassOf(isolate->heap()->Number_string()); } static i::Object* LookupBuiltin(i::Isolate* isolate, const char* builtin_name) { - i::Handle<i::String> symbol = - isolate->factory()->LookupAsciiSymbol(builtin_name); + i::Handle<i::String> string = + isolate->factory()->InternalizeUtf8String(builtin_name); i::Handle<i::JSBuiltinsObject> builtins = isolate->js_builtins_object(); - return builtins->GetPropertyNoExceptionThrown(*symbol); + return builtins->GetPropertyNoExceptionThrown(*string); } static bool CheckConstructor(i::Isolate* isolate, i::Handle<i::JSObject> obj, const char* class_name) { - return obj->map()->constructor() == LookupBuiltin(isolate, class_name); + i::Object* constr = obj->map()->constructor(); + if (!constr->IsJSFunction()) return false; + i::JSFunction* func = i::JSFunction::cast(constr); + return func->shared()->native() && + constr == LookupBuiltin(isolate, class_name); } @@ -2299,7 +2510,7 @@ bool Value::IsBooleanObject() const { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::Value::IsBooleanObject()")) return false; i::Handle<i::Object> obj = Utils::OpenHandle(this); - return obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol()); + return obj->HasSpecificClassOf(isolate->heap()->Boolean_string()); } @@ -2381,7 +2592,8 @@ Local<Boolean> Value::ToBoolean() const { } LOG_API(isolate, "ToBoolean"); ENTER_V8(isolate); - i::Handle<i::Object> val = i::Execution::ToBoolean(obj); + i::Handle<i::Object> 
val = + isolate->factory()->ToBoolean(obj->BooleanValue()); return Local<Boolean>(ToApi<Boolean>(val)); } } @@ -2427,8 +2639,7 @@ Local<Integer> Value::ToInteger() const { void External::CheckCast(v8::Value* that) { if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return; - i::Handle<i::Object> obj = Utils::OpenHandle(that); - ApiCheck(obj->IsForeign(), + ApiCheck(Utils::OpenHandle(that)->IsExternal(), "v8::External::Cast()", "Could not convert to external"); } @@ -2492,7 +2703,7 @@ void v8::Date::CheckCast(v8::Value* that) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::Date::Cast()")) return; i::Handle<i::Object> obj = Utils::OpenHandle(that); - ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_symbol()), + ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Date_string()), "v8::Date::Cast()", "Could not convert to date"); } @@ -2502,7 +2713,7 @@ void v8::StringObject::CheckCast(v8::Value* that) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::StringObject::Cast()")) return; i::Handle<i::Object> obj = Utils::OpenHandle(that); - ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_symbol()), + ApiCheck(obj->HasSpecificClassOf(isolate->heap()->String_string()), "v8::StringObject::Cast()", "Could not convert to StringObject"); } @@ -2512,7 +2723,7 @@ void v8::NumberObject::CheckCast(v8::Value* that) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return; i::Handle<i::Object> obj = Utils::OpenHandle(that); - ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_symbol()), + ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Number_string()), "v8::NumberObject::Cast()", "Could not convert to NumberObject"); } @@ -2522,7 +2733,7 @@ void v8::BooleanObject::CheckCast(v8::Value* that) { i::Isolate* isolate = i::Isolate::Current(); if (IsDeadCheck(isolate, "v8::BooleanObject::Cast()")) return; i::Handle<i::Object> obj = Utils::OpenHandle(that); - ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_symbol()), + ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Boolean_string()), "v8::BooleanObject::Cast()", "Could not convert to BooleanObject"); } @@ -2538,17 +2749,7 @@ void v8::RegExp::CheckCast(v8::Value* that) { bool Value::BooleanValue() const { - i::Handle<i::Object> obj = Utils::OpenHandle(this); - if (obj->IsBoolean()) { - return obj->IsTrue(); - } else { - i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::Value::BooleanValue()")) return false; - LOG_API(isolate, "BooleanValue"); - ENTER_V8(isolate); - i::Handle<i::Object> value = i::Execution::ToBoolean(obj); - return value->IsTrue(); - } + return Utils::OpenHandle(this)->BooleanValue(); } @@ -2649,7 +2850,7 @@ Local<Uint32> Value::ToArrayIndex() const { if (str->AsArrayIndex(&index)) { i::Handle<i::Object> value; if (index <= static_cast<uint32_t>(i::Smi::kMaxValue)) { - value = i::Handle<i::Object>(i::Smi::FromInt(index)); + value = i::Handle<i::Object>(i::Smi::FromInt(index), isolate); } else { value = isolate->factory()->NewNumber(index); } @@ -2773,6 +2974,7 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value, i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); EXCEPTION_PREAMBLE(isolate); i::Handle<i::Object> obj = i::SetProperty( + isolate, self, key_obj, value_obj, @@ -2839,7 +3041,7 @@ bool v8::Object::ForceDelete(v8::Handle<Value> key) { // value with DontDelete properties. 
We have to deoptimize all contexts // because of possible cross-context inlined functions. if (self->IsJSGlobalProxy() || self->IsGlobalObject()) { - i::Deoptimizer::DeoptimizeAll(); + i::Deoptimizer::DeoptimizeAll(isolate); } EXCEPTION_PREAMBLE(isolate); @@ -2857,7 +3059,7 @@ Local<Value> v8::Object::Get(v8::Handle<Value> key) { i::Handle<i::Object> self = Utils::OpenHandle(this); i::Handle<i::Object> key_obj = Utils::OpenHandle(*key); EXCEPTION_PREAMBLE(isolate); - i::Handle<i::Object> result = i::GetProperty(self, key_obj); + i::Handle<i::Object> result = i::GetProperty(isolate, self, key_obj); has_pending_exception = result.is_null(); EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>()); return Utils::ToLocal(result); @@ -2903,7 +3105,7 @@ Local<Value> v8::Object::GetPrototype() { return Local<v8::Value>()); ENTER_V8(isolate); i::Handle<i::Object> self = Utils::OpenHandle(this); - i::Handle<i::Object> result(self->GetPrototype()); + i::Handle<i::Object> result(self->GetPrototype(isolate), isolate); return Utils::ToLocal(result); } @@ -2992,7 +3194,7 @@ Local<String> v8::Object::ObjectProtoToString() { ENTER_V8(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> name(self->class_name()); + i::Handle<i::Object> name(self->class_name(), isolate); // Native implementation of Object.prototype.toString (v8natives.js): // var c = %ClassOf(this); @@ -3004,7 +3206,7 @@ Local<String> v8::Object::ObjectProtoToString() { } else { i::Handle<i::String> class_name = i::Handle<i::String>::cast(name); - if (class_name->IsEqualTo(i::CStrVector("Arguments"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) { return v8::String::New("[object Object]"); } else { @@ -3045,7 +3247,7 @@ Local<Value> v8::Object::GetConstructor() { return Local<v8::Function>()); ENTER_V8(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); - i::Handle<i::Object> constructor(self->GetConstructor()); + i::Handle<i::Object> constructor(self->GetConstructor(), isolate); return Utils::ToLocal(constructor); } @@ -3087,7 +3289,7 @@ bool v8::Object::Delete(uint32_t index) { ON_BAILOUT(isolate, "v8::Object::DeleteProperty()", return false); ENTER_V8(isolate); - HandleScope scope; + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSObject> self = Utils::OpenHandle(this); return i::JSObject::DeleteElement(self, index)->IsTrue(); } @@ -3101,6 +3303,16 @@ bool v8::Object::Has(uint32_t index) { } +static inline bool SetAccessor(Object* obj, i::Handle<i::AccessorInfo> info) { + if (info.is_null()) return false; + bool fast = Utils::OpenHandle(obj)->HasFastProperties(); + i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(obj), info); + if (result.is_null() || result->IsUndefined()) return false; + if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(obj), 0); + return true; +} + + bool Object::SetAccessor(Handle<String> name, AccessorGetter getter, AccessorSetter setter, @@ -3115,11 +3327,22 @@ bool Object::SetAccessor(Handle<String> name, i::Handle<i::AccessorInfo> info = MakeAccessorInfo(name, getter, setter, data, settings, attributes, signature); - bool fast = Utils::OpenHandle(this)->HasFastProperties(); - i::Handle<i::Object> result = i::SetAccessor(Utils::OpenHandle(this), info); - if (result.is_null() || result->IsUndefined()) return false; - if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(this), 0); - return true; + return v8::SetAccessor(this, info); +} + + +bool Object::SetAccessor(Handle<String> 
name, + Handle<DeclaredAccessorDescriptor> descriptor, + AccessControl settings, + PropertyAttribute attributes) { + i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); + ON_BAILOUT(isolate, "v8::Object::SetAccessor()", return false); + ENTER_V8(isolate); + i::HandleScope scope(isolate); + v8::Handle<AccessorSignature> signature; + i::Handle<i::AccessorInfo> info = MakeAccessorInfo( + name, descriptor, settings, attributes, signature); + return v8::SetAccessor(this, info); } @@ -3311,10 +3534,11 @@ bool v8::Object::SetHiddenValue(v8::Handle<v8::String> key, i::HandleScope scope(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::String> key_obj = Utils::OpenHandle(*key); - i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj); + i::Handle<i::String> key_string = + isolate->factory()->InternalizeString(key_obj); i::Handle<i::Object> value_obj = Utils::OpenHandle(*value); i::Handle<i::Object> result = - i::JSObject::SetHiddenProperty(self, key_symbol, value_obj); + i::JSObject::SetHiddenProperty(self, key_string, value_obj); return *result == *self; } @@ -3326,8 +3550,8 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) { ENTER_V8(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::String> key_obj = Utils::OpenHandle(*key); - i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj); - i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol)); + i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj); + i::Handle<i::Object> result(self->GetHiddenProperty(*key_string), isolate); if (result->IsUndefined()) return v8::Local<v8::Value>(); return Utils::ToLocal(result); } @@ -3340,8 +3564,8 @@ bool v8::Object::DeleteHiddenValue(v8::Handle<v8::String> key) { i::HandleScope scope(isolate); i::Handle<i::JSObject> self = Utils::OpenHandle(this); i::Handle<i::String> key_obj = Utils::OpenHandle(*key); - i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj); - self->DeleteHiddenProperty(*key_symbol); + i::Handle<i::String> key_string = FACTORY->InternalizeString(key_obj); + self->DeleteHiddenProperty(*key_string); return true; } @@ -3564,6 +3788,8 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv, return Local<v8::Value>()); LOG_API(isolate, "Object::CallAsFunction"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::HandleScope scope(isolate); i::Handle<i::JSObject> obj = Utils::OpenHandle(this); i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv); @@ -3595,6 +3821,8 @@ Local<v8::Value> Object::CallAsConstructor(int argc, return Local<v8::Object>()); LOG_API(isolate, "Object::CallAsConstructor"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::HandleScope scope(isolate); i::Handle<i::JSObject> obj = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); @@ -3637,7 +3865,9 @@ Local<v8::Object> Function::NewInstance(int argc, return Local<v8::Object>()); LOG_API(isolate, "Function::NewInstance"); ENTER_V8(isolate); - HandleScope scope; + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); + HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Handle<i::JSFunction> function = Utils::OpenHandle(this); STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**)); i::Handle<i::Object>* args = 
reinterpret_cast<i::Handle<i::Object>*>(argv); @@ -3655,6 +3885,8 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc, ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>()); LOG_API(isolate, "Function::Call"); ENTER_V8(isolate); + i::Logger::TimerEventScope timer_scope( + isolate, i::Logger::TimerEventScope::v8_execute); i::Object* raw_result = NULL; { i::HandleScope scope(isolate); @@ -3668,7 +3900,7 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc, EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<Object>()); raw_result = *returned; } - i::Handle<i::Object> result(raw_result); + i::Handle<i::Object> result(raw_result, isolate); return Utils::ToLocal(result); } @@ -3684,13 +3916,15 @@ void Function::SetName(v8::Handle<v8::String> name) { Handle<Value> Function::GetName() const { i::Handle<i::JSFunction> func = Utils::OpenHandle(this); - return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name())); + return Utils::ToLocal(i::Handle<i::Object>(func->shared()->name(), + func->GetIsolate())); } Handle<Value> Function::GetInferredName() const { i::Handle<i::JSFunction> func = Utils::OpenHandle(this); - return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name())); + return Utils::ToLocal(i::Handle<i::Object>(func->shared()->inferred_name(), + func->GetIsolate())); } @@ -3698,8 +3932,9 @@ ScriptOrigin Function::GetScriptOrigin() const { i::Handle<i::JSFunction> func = Utils::OpenHandle(this); if (func->shared()->script()->IsScript()) { i::Handle<i::Script> script(i::Script::cast(func->shared()->script())); + i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script); v8::ScriptOrigin origin( - Utils::ToLocal(i::Handle<i::Object>(script->name())), + Utils::ToLocal(scriptName), v8::Integer::New(script->line_offset()->value()), v8::Integer::New(script->column_offset()->value())); return origin; @@ -3735,7 +3970,7 @@ Handle<Value> Function::GetScriptId() const { if (!func->shared()->script()->IsScript()) return v8::Undefined(); i::Handle<i::Script> script(i::Script::cast(func->shared()->script())); - return Utils::ToLocal(i::Handle<i::Object>(script->id())); + return Utils::ToLocal(i::Handle<i::Object>(script->id(), func->GetIsolate())); } int String::Length() const { @@ -3744,108 +3979,399 @@ int String::Length() const { return str->length(); } +bool String::MayContainNonAscii() const { + i::Handle<i::String> str = Utils::OpenHandle(this); + if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) { + return false; + } + return !str->HasOnlyAsciiChars(); +} + -int String::Utf8Length() const { +bool String::IsOneByte() const { i::Handle<i::String> str = Utils::OpenHandle(this); - if (IsDeadCheck(str->GetIsolate(), "v8::String::Utf8Length()")) return 0; - return i::Utf8Length(str); -} - - -// Will fail with a negative answer if the recursion depth is too high. 
-static int RecursivelySerializeToUtf8(i::String* string, - char* buffer, - int start, - int end, - int recursion_budget, - int32_t previous_character, - int32_t* last_character) { - int utf8_bytes = 0; - while (true) { - if (string->IsAsciiRepresentation()) { - i::String::WriteToFlat(string, buffer, start, end); - *last_character = unibrow::Utf16::kNoPreviousCharacter; - return utf8_bytes + end - start; + if (IsDeadCheck(str->GetIsolate(), "v8::String::IsOneByte()")) { + return false; + } + return str->IsOneByteConvertible(); +} + + +class Utf8LengthHelper : public i::AllStatic { + public: + enum State { + kEndsWithLeadingSurrogate = 1 << 0, + kStartsWithTrailingSurrogate = 1 << 1, + kLeftmostEdgeIsCalculated = 1 << 2, + kRightmostEdgeIsCalculated = 1 << 3, + kLeftmostEdgeIsSurrogate = 1 << 4, + kRightmostEdgeIsSurrogate = 1 << 5 + }; + + static const uint8_t kInitialState = 0; + + static inline bool EndsWithSurrogate(uint8_t state) { + return state & kEndsWithLeadingSurrogate; + } + + static inline bool StartsWithSurrogate(uint8_t state) { + return state & kStartsWithTrailingSurrogate; + } + + class Visitor { + public: + inline explicit Visitor() + : utf8_length_(0), + state_(kInitialState) {} + + void VisitOneByteString(const uint8_t* chars, int length) { + int utf8_length = 0; + // Add in length 1 for each non-ASCII character. + for (int i = 0; i < length; i++) { + utf8_length += *chars++ >> 7; + } + // Add in length 1 for each character. + utf8_length_ = utf8_length + length; + state_ = kInitialState; } - switch (i::StringShape(string).representation_tag()) { - case i::kExternalStringTag: { - const uint16_t* data = i::ExternalTwoByteString::cast(string)-> - ExternalTwoByteStringGetData(0); - char* current = buffer; - for (int i = start; i < end; i++) { - uint16_t character = data[i]; - current += - unibrow::Utf8::Encode(current, character, previous_character); - previous_character = character; - } - *last_character = previous_character; - return static_cast<int>(utf8_bytes + current - buffer); + + void VisitTwoByteString(const uint16_t* chars, int length) { + int utf8_length = 0; + int last_character = unibrow::Utf16::kNoPreviousCharacter; + for (int i = 0; i < length; i++) { + uint16_t c = chars[i]; + utf8_length += unibrow::Utf8::Length(c, last_character); + last_character = c; } - case i::kSeqStringTag: { - const uint16_t* data = - i::SeqTwoByteString::cast(string)->SeqTwoByteStringGetData(0); - char* current = buffer; - for (int i = start; i < end; i++) { - uint16_t character = data[i]; - current += - unibrow::Utf8::Encode(current, character, previous_character); - previous_character = character; - } - *last_character = previous_character; - return static_cast<int>(utf8_bytes + current - buffer); + utf8_length_ = utf8_length; + uint8_t state = 0; + if (unibrow::Utf16::IsTrailSurrogate(chars[0])) { + state |= kStartsWithTrailingSurrogate; } - case i::kSlicedStringTag: { - i::SlicedString* slice = i::SlicedString::cast(string); - unsigned offset = slice->offset(); - string = slice->parent(); - start += offset; - end += offset; - continue; + if (unibrow::Utf16::IsLeadSurrogate(chars[length-1])) { + state |= kEndsWithLeadingSurrogate; } - case i::kConsStringTag: { - i::ConsString* cons_string = i::ConsString::cast(string); - i::String* first = cons_string->first(); - int boundary = first->length(); - if (start >= boundary) { - // Only need RHS. 
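The one-byte visitor above counts the UTF-8 length of Latin-1 data with a branch-free trick: every byte with the high bit set needs two UTF-8 bytes, everything else needs one, so the total is the character count plus the sum of the top bits. A standalone equivalent (illustrative, not V8 code):

#include <stdint.h>

int OneByteUtf8Length(const uint8_t* chars, int length) {
  int utf8_length = length;
  for (int i = 0; i < length; i++) {
    utf8_length += chars[i] >> 7;  // one extra byte for each non-ASCII character
  }
  return utf8_length;
}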
- string = cons_string->second(); - start -= boundary; - end -= boundary; + state_ = state; + } + + static i::ConsString* VisitFlat(i::String* string, + int* length, + uint8_t* state) { + Visitor visitor; + i::ConsString* cons_string = i::String::VisitFlat(&visitor, string); + *length = visitor.utf8_length_; + *state = visitor.state_; + return cons_string; + } + + private: + int utf8_length_; + uint8_t state_; + DISALLOW_COPY_AND_ASSIGN(Visitor); + }; + + static inline void MergeLeafLeft(int* length, + uint8_t* state, + uint8_t leaf_state) { + bool edge_surrogate = StartsWithSurrogate(leaf_state); + if (!(*state & kLeftmostEdgeIsCalculated)) { + ASSERT(!(*state & kLeftmostEdgeIsSurrogate)); + *state |= kLeftmostEdgeIsCalculated + | (edge_surrogate ? kLeftmostEdgeIsSurrogate : 0); + } else if (EndsWithSurrogate(*state) && edge_surrogate) { + *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; + } + if (EndsWithSurrogate(leaf_state)) { + *state |= kEndsWithLeadingSurrogate; + } else { + *state &= ~kEndsWithLeadingSurrogate; + } + } + + static inline void MergeLeafRight(int* length, + uint8_t* state, + uint8_t leaf_state) { + bool edge_surrogate = EndsWithSurrogate(leaf_state); + if (!(*state & kRightmostEdgeIsCalculated)) { + ASSERT(!(*state & kRightmostEdgeIsSurrogate)); + *state |= (kRightmostEdgeIsCalculated + | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0)); + } else if (edge_surrogate && StartsWithSurrogate(*state)) { + *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; + } + if (StartsWithSurrogate(leaf_state)) { + *state |= kStartsWithTrailingSurrogate; + } else { + *state &= ~kStartsWithTrailingSurrogate; + } + } + + static inline void MergeTerminal(int* length, + uint8_t state, + uint8_t* state_out) { + ASSERT((state & kLeftmostEdgeIsCalculated) && + (state & kRightmostEdgeIsCalculated)); + if (EndsWithSurrogate(state) && StartsWithSurrogate(state)) { + *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; + } + *state_out = kInitialState | + (state & kLeftmostEdgeIsSurrogate ? kStartsWithTrailingSurrogate : 0) | + (state & kRightmostEdgeIsSurrogate ? kEndsWithLeadingSurrogate : 0); + } + + static int Calculate(i::ConsString* current, uint8_t* state_out) { + using namespace internal; + int total_length = 0; + uint8_t state = kInitialState; + while (true) { + i::String* left = current->first(); + i::String* right = current->second(); + uint8_t right_leaf_state; + uint8_t left_leaf_state; + int leaf_length; + ConsString* left_as_cons = + Visitor::VisitFlat(left, &leaf_length, &left_leaf_state); + if (left_as_cons == NULL) { + total_length += leaf_length; + MergeLeafLeft(&total_length, &state, left_leaf_state); + } + ConsString* right_as_cons = + Visitor::VisitFlat(right, &leaf_length, &right_leaf_state); + if (right_as_cons == NULL) { + total_length += leaf_length; + MergeLeafRight(&total_length, &state, right_leaf_state); + if (left_as_cons != NULL) { + // 1 Leaf node. Descend in place. + current = left_as_cons; continue; - } else if (end <= boundary) { - // Only need LHS. - string = first; } else { - if (recursion_budget == 0) return -1; - int extra_utf8_bytes = - RecursivelySerializeToUtf8(first, - buffer, - start, - boundary, - recursion_budget - 1, - previous_character, - &previous_character); - if (extra_utf8_bytes < 0) return extra_utf8_bytes; - buffer += extra_utf8_bytes; - utf8_bytes += extra_utf8_bytes; - string = cons_string->second(); - start = 0; - end -= boundary; + // Terminal node. 
+ MergeTerminal(&total_length, state, state_out); + return total_length; } + } else if (left_as_cons == NULL) { + // 1 Leaf node. Descend in place. + current = right_as_cons; + continue; + } + // Both strings are ConsStrings. + // Recurse on smallest. + if (left->length() < right->length()) { + total_length += Calculate(left_as_cons, &left_leaf_state); + MergeLeafLeft(&total_length, &state, left_leaf_state); + current = right_as_cons; + } else { + total_length += Calculate(right_as_cons, &right_leaf_state); + MergeLeafRight(&total_length, &state, right_leaf_state); + current = left_as_cons; } } + UNREACHABLE(); + return 0; } - UNREACHABLE(); - return 0; + + static inline int Calculate(i::ConsString* current) { + uint8_t state = kInitialState; + return Calculate(current, &state); + } + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8LengthHelper); +}; + + +static int Utf8Length(i::String* str, i::Isolate* isolate) { + int length = str->length(); + if (length == 0) return 0; + uint8_t state; + i::ConsString* cons_string = + Utf8LengthHelper::Visitor::VisitFlat(str, &length, &state); + if (cons_string == NULL) return length; + return Utf8LengthHelper::Calculate(cons_string); } -bool String::MayContainNonAscii() const { +int String::Utf8Length() const { i::Handle<i::String> str = Utils::OpenHandle(this); - if (IsDeadCheck(str->GetIsolate(), "v8::String::MayContainNonAscii()")) { - return false; + i::Isolate* isolate = str->GetIsolate(); + if (IsDeadCheck(isolate, "v8::String::Utf8Length()")) return 0; + return v8::Utf8Length(*str, isolate); +} + + +class Utf8WriterVisitor { + public: + Utf8WriterVisitor( + char* buffer, int capacity, bool skip_capacity_check) + : early_termination_(false), + last_character_(unibrow::Utf16::kNoPreviousCharacter), + buffer_(buffer), + start_(buffer), + capacity_(capacity), + skip_capacity_check_(capacity == -1 || skip_capacity_check), + utf16_chars_read_(0) { + } + + static int WriteEndCharacter(uint16_t character, + int last_character, + int remaining, + char* const buffer) { + using namespace unibrow; + ASSERT(remaining > 0); + // We can't use a local buffer here because Encode needs to modify + // previous characters in the stream. We know, however, that + // exactly one character will be advanced. + if (Utf16::IsTrailSurrogate(character) && + Utf16::IsLeadSurrogate(last_character)) { + int written = Utf8::Encode(buffer, character, last_character); + ASSERT(written == 1); + return written; + } + // Use a scratch buffer to check the required characters. + char temp_buffer[Utf8::kMaxEncodedSize]; + // Can't encode using last_character as gcc has array bounds issues. + int written = Utf8::Encode(temp_buffer, + character, + Utf16::kNoPreviousCharacter); + // Won't fit. + if (written > remaining) return 0; + // Copy over the character from temp_buffer. + for (int j = 0; j < written; j++) { + buffer[j] = temp_buffer[j]; + } + return written; + } + + template<typename Char> + void Visit(const Char* chars, const int length) { + using namespace unibrow; + ASSERT(!early_termination_); + if (length == 0) return; + // Copy state to stack. + char* buffer = buffer_; + int last_character = + sizeof(Char) == 1 ? Utf16::kNoPreviousCharacter : last_character_; + int i = 0; + // Do a fast loop where there is no exit capacity check. + while (true) { + int fast_length; + if (skip_capacity_check_) { + fast_length = length; + } else { + int remaining_capacity = capacity_ - static_cast<int>(buffer - start_); + // Need enough space to write everything but one character. 
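The merging above hinges on one piece of UTF-8 arithmetic: a lone UTF-16 surrogate encodes to three bytes, while a correctly paired lead/trail surrogate becomes a single four-byte sequence, so joining two leaves that split a pair saves exactly two bytes. A standalone sanity check of that arithmetic (hypothetical helper, not V8 code):

#include <assert.h>

void CheckSurrogateSaving() {
  const int kLoneSurrogateUtf8Size = 3;     // unpaired surrogate -> 3 bytes
  const int kPairedSurrogatesUtf8Size = 4;  // lead + trail pair  -> 4 bytes
  const int saving = 2 * kLoneSurrogateUtf8Size - kPairedSurrogatesUtf8Size;
  // Matches unibrow::Utf8::kBytesSavedByCombiningSurrogates used above.
  assert(saving == 2);
}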
+ STATIC_ASSERT(Utf16::kMaxExtraUtf8BytesForOneUtf16CodeUnit == 3); + int max_size_per_char = sizeof(Char) == 1 ? 2 : 3; + int writable_length = + (remaining_capacity - max_size_per_char)/max_size_per_char; + // Need to drop into slow loop. + if (writable_length <= 0) break; + fast_length = i + writable_length; + if (fast_length > length) fast_length = length; + } + // Write the characters to the stream. + if (sizeof(Char) == 1) { + for (; i < fast_length; i++) { + buffer += + Utf8::EncodeOneByte(buffer, static_cast<uint8_t>(*chars++)); + ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_); + } + } else { + for (; i < fast_length; i++) { + uint16_t character = *chars++; + buffer += Utf8::Encode(buffer, character, last_character); + last_character = character; + ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_); + } + } + // Array is fully written. Exit. + if (fast_length == length) { + // Write state back out to object. + last_character_ = last_character; + buffer_ = buffer; + utf16_chars_read_ += length; + return; + } + } + ASSERT(!skip_capacity_check_); + // Slow loop. Must check capacity on each iteration. + int remaining_capacity = capacity_ - static_cast<int>(buffer - start_); + ASSERT(remaining_capacity >= 0); + for (; i < length && remaining_capacity > 0; i++) { + uint16_t character = *chars++; + int written = WriteEndCharacter(character, + last_character, + remaining_capacity, + buffer); + if (written == 0) { + early_termination_ = true; + break; + } + buffer += written; + remaining_capacity -= written; + last_character = character; + } + // Write state back out to object. + last_character_ = last_character; + buffer_ = buffer; + utf16_chars_read_ += i; } - return !str->HasOnlyAsciiChars(); + + inline bool IsDone() { + return early_termination_; + } + + inline void VisitOneByteString(const uint8_t* chars, int length) { + Visit(chars, length); + } + + inline void VisitTwoByteString(const uint16_t* chars, int length) { + Visit(chars, length); + } + + int CompleteWrite(bool write_null, int* utf16_chars_read_out) { + // Write out number of utf16 characters written to the stream. + if (utf16_chars_read_out != NULL) { + *utf16_chars_read_out = utf16_chars_read_; + } + // Only null terminate if all of the string was written and there's space. + if (write_null && + !early_termination_ && + (capacity_ == -1 || (buffer_ - start_) < capacity_)) { + *buffer_++ = '\0'; + } + return static_cast<int>(buffer_ - start_); + } + + private: + bool early_termination_; + int last_character_; + char* buffer_; + char* const start_; + int capacity_; + bool const skip_capacity_check_; + int utf16_chars_read_; + DISALLOW_IMPLICIT_CONSTRUCTORS(Utf8WriterVisitor); +}; + + +static bool RecursivelySerializeToUtf8(i::String* current, + Utf8WriterVisitor* writer, + int recursion_budget) { + while (!writer->IsDone()) { + i::ConsString* cons_string = i::String::VisitFlat(writer, current); + if (cons_string == NULL) return true; // Leaf node. + if (recursion_budget <= 0) return false; + // Must write the left branch first. + i::String* first = cons_string->first(); + bool success = RecursivelySerializeToUtf8(first, + writer, + recursion_budget - 1); + if (!success) return false; + // Inline tail recurse for right branch. + current = cons_string->second(); + } + return true; } @@ -3861,123 +4387,41 @@ int String::WriteUtf8(char* buffer, if (options & HINT_MANY_WRITES_EXPECTED) { FlattenString(str); // Flatten the string for efficiency. 
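Illustrative embedder usage of the rewritten WriteUtf8 path (not from the diff; the helper name is invented): size the buffer with Utf8Length() and skip the terminating NUL when the byte count is tracked separately.

#include <string>
#include "v8.h"

std::string ToUtf8(v8::Handle<v8::String> str) {
  std::string out(str->Utf8Length(), '\0');
  if (out.empty()) return out;
  int nchars = 0;  // Receives the number of UTF-16 code units converted.
  str->WriteUtf8(&out[0], static_cast<int>(out.size()), &nchars,
                 v8::String::NO_NULL_TERMINATION);
  return out;
}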
} - int string_length = str->length(); - if (str->IsAsciiRepresentation()) { - int len; - if (capacity == -1) { - capacity = str->length() + 1; - len = string_length; - } else { - len = i::Min(capacity, str->length()); - } - i::String::WriteToFlat(*str, buffer, 0, len); - if (nchars_ref != NULL) *nchars_ref = len; - if (!(options & NO_NULL_TERMINATION) && capacity > len) { - buffer[len] = '\0'; - return len + 1; - } - return len; - } - + const int string_length = str->length(); + bool write_null = !(options & NO_NULL_TERMINATION); + // First check if we can just write the string without checking capacity. if (capacity == -1 || capacity / 3 >= string_length) { - int32_t previous = unibrow::Utf16::kNoPreviousCharacter; + Utf8WriterVisitor writer(buffer, capacity, true); const int kMaxRecursion = 100; - int utf8_bytes = - RecursivelySerializeToUtf8(*str, - buffer, - 0, - string_length, - kMaxRecursion, - previous, - &previous); - if (utf8_bytes >= 0) { - // Success serializing with recursion. - if ((options & NO_NULL_TERMINATION) == 0 && - (capacity > utf8_bytes || capacity == -1)) { - buffer[utf8_bytes++] = '\0'; - } - if (nchars_ref != NULL) *nchars_ref = string_length; - return utf8_bytes; - } - FlattenString(str); - // Recurse once. This time around the string is flat and the serializing - // with recursion will certainly succeed. - return WriteUtf8(buffer, capacity, nchars_ref, options); + bool success = RecursivelySerializeToUtf8(*str, &writer, kMaxRecursion); + if (success) return writer.CompleteWrite(write_null, nchars_ref); } else if (capacity >= string_length) { - // First check that the buffer is large enough. If it is, then recurse - // once without a capacity limit, which will get into the other branch of - // this 'if'. - int utf8_bytes = i::Utf8Length(str); - if ((options & NO_NULL_TERMINATION) == 0) utf8_bytes++; + // First check that the buffer is large enough. + int utf8_bytes = v8::Utf8Length(*str, str->GetIsolate()); if (utf8_bytes <= capacity) { - return WriteUtf8(buffer, -1, nchars_ref, options); - } - } - - // Slow case. - i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer(); - isolate->string_tracker()->RecordWrite(str); - - write_input_buffer.Reset(0, *str); - int len = str->length(); - // Encode the first K - 3 bytes directly into the buffer since we - // know there's room for them. If no capacity is given we copy all - // of them here. - int fast_end = capacity - (unibrow::Utf8::kMaxEncodedSize - 1); - int i; - int pos = 0; - int nchars = 0; - int previous = unibrow::Utf16::kNoPreviousCharacter; - for (i = 0; i < len && (capacity == -1 || pos < fast_end); i++) { - i::uc32 c = write_input_buffer.GetNext(); - int written = unibrow::Utf8::Encode(buffer + pos, c, previous); - pos += written; - nchars++; - previous = c; - } - if (i < len) { - // For the last characters we need to check the length for each one - // because they may be longer than the remaining space in the - // buffer. - char intermediate[unibrow::Utf8::kMaxEncodedSize]; - for (; i < len && pos < capacity; i++) { - i::uc32 c = write_input_buffer.GetNext(); - if (unibrow::Utf16::IsTrailSurrogate(c) && - unibrow::Utf16::IsLeadSurrogate(previous)) { - // We can't use the intermediate buffer here because the encoding - // of surrogate pairs is done under assumption that you can step - // back and fix the UTF8 stream. Luckily we only need space for one - // more byte, so there is always space. 
- ASSERT(pos < capacity); - int written = unibrow::Utf8::Encode(buffer + pos, c, previous); - ASSERT(written == 1); - pos += written; - nchars++; - } else { - int written = - unibrow::Utf8::Encode(intermediate, - c, - unibrow::Utf16::kNoPreviousCharacter); - if (pos + written <= capacity) { - for (int j = 0; j < written; j++) { - buffer[pos + j] = intermediate[j]; - } - pos += written; - nchars++; - } else { - // We've reached the end of the buffer - break; + // ASCII fast path. + if (utf8_bytes == string_length) { + WriteOneByte(reinterpret_cast<uint8_t*>(buffer), 0, capacity, options); + if (nchars_ref != NULL) *nchars_ref = string_length; + if (write_null && (utf8_bytes+1 <= capacity)) { + return string_length + 1; } + return string_length; + } + if (write_null && (utf8_bytes+1 > capacity)) { + options |= NO_NULL_TERMINATION; } - previous = c; + // Recurse once without a capacity limit. + // This will get into the first branch above. + // TODO(dcarney) Check max left rec. in Utf8Length and fall through. + return WriteUtf8(buffer, -1, nchars_ref, options); } } - if (nchars_ref != NULL) *nchars_ref = nchars; - if (!(options & NO_NULL_TERMINATION) && - (i == len && (capacity == -1 || pos < capacity))) { - buffer[pos++] = '\0'; - } - return pos; + // Recursive slow path can potentially be unreasonable slow. Flatten. + str = FlattenGetString(str); + Utf8WriterVisitor writer(buffer, capacity, false); + i::String::VisitFlat(&writer, *str); + return writer.CompleteWrite(write_null, nchars_ref); } @@ -3996,11 +4440,14 @@ int String::WriteAscii(char* buffer, FlattenString(str); // Flatten the string for efficiency. } - if (str->IsAsciiRepresentation()) { - // WriteToFlat is faster than using the StringInputBuffer. + if (str->HasOnlyAsciiChars()) { + // WriteToFlat is faster than using the StringCharacterStream. 
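Illustrative use of the PRESERVE_ASCII_NULL option handled by WriteAscii below (helper name invented): by default embedded NUL characters are replaced with spaces; the flag keeps them intact.

#include "v8.h"

// Copy up to |size| ASCII characters, keeping embedded NULs instead of the
// default replacement with spaces.
int CopyAsciiKeepingNulls(v8::Handle<v8::String> str, char* buffer, int size) {
  return str->WriteAscii(buffer, 0, size, v8::String::PRESERVE_ASCII_NULL);
}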
if (length == -1) length = str->length() + 1; int len = i::Min(length, str->length() - start); - i::String::WriteToFlat(*str, buffer, start, start + len); + i::String::WriteToFlat(*str, + reinterpret_cast<uint8_t*>(buffer), + start, + start + len); if (!(options & PRESERVE_ASCII_NULL)) { for (int i = 0; i < len; i++) { if (buffer[i] == '\0') buffer[i] = ' '; @@ -4012,16 +4459,15 @@ int String::WriteAscii(char* buffer, return len; } - i::StringInputBuffer& write_input_buffer = *isolate->write_input_buffer(); int end = length; if ((length == -1) || (length > str->length() - start)) { end = str->length() - start; } if (end < 0) return 0; - write_input_buffer.Reset(start, *str); + i::StringCharacterStream write_stream(*str, isolate->write_iterator(), start); int i; for (i = 0; i < end; i++) { - char c = static_cast<char>(write_input_buffer.GetNext()); + char c = static_cast<char>(write_stream.GetNext()); if (c == '\0' && !(options & PRESERVE_ASCII_NULL)) c = ' '; buffer[i] = c; } @@ -4032,20 +4478,22 @@ int String::WriteAscii(char* buffer, } -int String::Write(uint16_t* buffer, - int start, - int length, - int options) const { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); +template<typename CharType> +static inline int WriteHelper(const String* string, + CharType* buffer, + int start, + int length, + int options) { + i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate(); if (IsDeadCheck(isolate, "v8::String::Write()")) return 0; LOG_API(isolate, "String::Write"); ENTER_V8(isolate); ASSERT(start >= 0 && length >= -1); - i::Handle<i::String> str = Utils::OpenHandle(this); + i::Handle<i::String> str = Utils::OpenHandle(string); isolate->string_tracker()->RecordWrite(str); - if (options & HINT_MANY_WRITES_EXPECTED) { + if (options & String::HINT_MANY_WRITES_EXPECTED) { // Flatten the string for efficiency. This applies whether we are - // using StringInputBuffer or Get(i) to access the characters. + // using StringCharacterStream or Get(i) to access the characters. FlattenString(str); } int end = start + length; @@ -4053,7 +4501,7 @@ int String::Write(uint16_t* buffer, end = str->length(); if (end < 0) return 0; i::String::WriteToFlat(*str, buffer, start, end); - if (!(options & NO_NULL_TERMINATION) && + if (!(options & String::NO_NULL_TERMINATION) && (length == -1 || end - start < length)) { buffer[end - start] = '\0'; } @@ -4061,6 +4509,22 @@ int String::Write(uint16_t* buffer, } +int String::WriteOneByte(uint8_t* buffer, + int start, + int length, + int options) const { + return WriteHelper(this, buffer, start, length, options); +} + + +int String::Write(uint16_t* buffer, + int start, + int length, + int options) const { + return WriteHelper(this, buffer, start, length, options); +} + + bool v8::String::IsExternal() const { i::Handle<i::String> str = Utils::OpenHandle(this); if (IsDeadCheck(str->GetIsolate(), "v8::String::IsExternal()")) { @@ -4111,7 +4575,7 @@ void v8::String::VerifyExternalStringResourceBase( expectedEncoding = TWO_BYTE_ENCODING; } else { expected = NULL; - expectedEncoding = str->IsAsciiRepresentation() ? ASCII_ENCODING + expectedEncoding = str->IsOneByteRepresentation() ? 
ASCII_ENCODING : TWO_BYTE_ENCODING; } CHECK_EQ(expected, value); @@ -4191,75 +4655,57 @@ int v8::Object::InternalFieldCount() { } -Local<Value> v8::Object::CheckedGetInternalField(int index) { +static bool InternalFieldOK(i::Handle<i::JSObject> obj, + int index, + const char* location) { + return !IsDeadCheck(obj->GetIsolate(), location) && + ApiCheck(index < obj->GetInternalFieldCount(), + location, + "Internal field out of bounds"); +} + + +Local<Value> v8::Object::SlowGetInternalField(int index) { i::Handle<i::JSObject> obj = Utils::OpenHandle(this); - if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) { - return Local<Value>(); - } - if (!ApiCheck(index < obj->GetInternalFieldCount(), - "v8::Object::GetInternalField()", - "Reading internal field out of bounds")) { - return Local<Value>(); - } - i::Handle<i::Object> value(obj->GetInternalField(index)); - Local<Value> result = Utils::ToLocal(value); -#ifdef DEBUG - Local<Value> unchecked = UncheckedGetInternalField(index); - ASSERT(unchecked.IsEmpty() || (unchecked == result)); -#endif - return result; + const char* location = "v8::Object::GetInternalField()"; + if (!InternalFieldOK(obj, index, location)) return Local<Value>(); + i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate()); + return Utils::ToLocal(value); } void v8::Object::SetInternalField(int index, v8::Handle<Value> value) { i::Handle<i::JSObject> obj = Utils::OpenHandle(this); - i::Isolate* isolate = obj->GetIsolate(); - if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) { - return; - } - if (!ApiCheck(index < obj->GetInternalFieldCount(), - "v8::Object::SetInternalField()", - "Writing internal field out of bounds")) { - return; - } - ENTER_V8(isolate); + const char* location = "v8::Object::SetInternalField()"; + if (!InternalFieldOK(obj, index, location)) return; i::Handle<i::Object> val = Utils::OpenHandle(*value); obj->SetInternalField(index, *val); + ASSERT_EQ(value, GetInternalField(index)); } -static bool CanBeEncodedAsSmi(void* ptr) { - const uintptr_t address = reinterpret_cast<uintptr_t>(ptr); - return ((address & i::kEncodablePointerMask) == 0); +void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) { + i::Handle<i::JSObject> obj = Utils::OpenHandle(this); + const char* location = "v8::Object::GetAlignedPointerFromInternalField()"; + if (!InternalFieldOK(obj, index, location)) return NULL; + return DecodeSmiToAligned(obj->GetInternalField(index), location); } -static i::Smi* EncodeAsSmi(void* ptr) { - ASSERT(CanBeEncodedAsSmi(ptr)); - const uintptr_t address = reinterpret_cast<uintptr_t>(ptr); - i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift); - ASSERT(i::Internals::HasSmiTag(result)); - ASSERT_EQ(result, i::Smi::FromInt(result->value())); - ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result)); - return result; +void v8::Object::SetAlignedPointerInInternalField(int index, void* value) { + i::Handle<i::JSObject> obj = Utils::OpenHandle(this); + const char* location = "v8::Object::SetAlignedPointerInInternalField()"; + if (!InternalFieldOK(obj, index, location)) return; + obj->SetInternalField(index, EncodeAlignedAsSmi(value, location)); + ASSERT_EQ(value, GetAlignedPointerFromInternalField(index)); } -void v8::Object::SetPointerInInternalField(int index, void* value) { - i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate(); - ENTER_V8(isolate); - if (CanBeEncodedAsSmi(value)) { - Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value)); - } 
else { - HandleScope scope; - i::Handle<i::Foreign> foreign = - isolate->factory()->NewForeign( - reinterpret_cast<i::Address>(value), i::TENURED); - if (!foreign.is_null()) { - Utils::OpenHandle(this)->SetInternalField(index, *foreign); - } - } - ASSERT_EQ(value, GetPointerFromInternalField(index)); +static void* ExternalValue(i::Object* obj) { + // Obscure semantics for undefined, but somehow checked in our unit tests... + if (obj->IsUndefined()) return NULL; + i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0); + return i::Foreign::cast(foreign)->foreign_address(); } @@ -4314,26 +4760,24 @@ bool v8::V8::Dispose() { HeapStatistics::HeapStatistics(): total_heap_size_(0), total_heap_size_executable_(0), + total_physical_size_(0), used_heap_size_(0), heap_size_limit_(0) { } void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) { - if (!i::Isolate::Current()->IsInitialized()) { + i::Isolate* isolate = i::Isolate::UncheckedCurrent(); + if (isolate == NULL || !isolate->IsInitialized()) { // Isolate is unitialized thus heap is not configured yet. - heap_statistics->set_total_heap_size(0); - heap_statistics->set_total_heap_size_executable(0); - heap_statistics->set_used_heap_size(0); - heap_statistics->set_heap_size_limit(0); + heap_statistics->total_heap_size_ = 0; + heap_statistics->total_heap_size_executable_ = 0; + heap_statistics->total_physical_size_ = 0; + heap_statistics->used_heap_size_ = 0; + heap_statistics->heap_size_limit_ = 0; return; } - - i::Heap* heap = i::Isolate::Current()->heap(); - heap_statistics->set_total_heap_size(heap->CommittedMemory()); - heap_statistics->set_total_heap_size_executable( - heap->CommittedMemoryExecutable()); - heap_statistics->set_used_heap_size(heap->SizeOfObjects()); - heap_statistics->set_heap_size_limit(heap->MaxReserved()); + Isolate* ext_isolate = reinterpret_cast<Isolate*>(isolate); + return ext_isolate->GetHeapStatistics(heap_statistics); } @@ -4344,30 +4788,47 @@ void v8::V8::VisitExternalResources(ExternalResourceVisitor* visitor) { } +class VisitorAdapter : public i::ObjectVisitor { + public: + explicit VisitorAdapter(PersistentHandleVisitor* visitor) + : visitor_(visitor) {} + virtual void VisitPointers(i::Object** start, i::Object** end) { + UNREACHABLE(); + } + virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) { + visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)), + class_id); + } + private: + PersistentHandleVisitor* visitor_; +}; + + void v8::V8::VisitHandlesWithClassIds(PersistentHandleVisitor* visitor) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::V8::VisitHandlesWithClassId"); i::AssertNoAllocation no_allocation; - class VisitorAdapter : public i::ObjectVisitor { - public: - explicit VisitorAdapter(PersistentHandleVisitor* visitor) - : visitor_(visitor) {} - virtual void VisitPointers(i::Object** start, i::Object** end) { - UNREACHABLE(); - } - virtual void VisitEmbedderReference(i::Object** p, uint16_t class_id) { - visitor_->VisitPersistentHandle(ToApi<Value>(i::Handle<i::Object>(p)), - class_id); - } - private: - PersistentHandleVisitor* visitor_; - } visitor_adapter(visitor); + VisitorAdapter visitor_adapter(visitor); isolate->global_handles()->IterateAllRootsWithClassIds(&visitor_adapter); } +void v8::V8::VisitHandlesForPartialDependence( + Isolate* exported_isolate, PersistentHandleVisitor* visitor) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate); + ASSERT(isolate == i::Isolate::Current()); + IsDeadCheck(isolate, 
"v8::V8::VisitHandlesForPartialDependence"); + + i::AssertNoAllocation no_allocation; + + VisitorAdapter visitor_adapter(visitor); + isolate->global_handles()->IterateAllRootsInNewSpaceWithClassIds( + &visitor_adapter); +} + + bool v8::V8::IdleNotification(int hint) { // Returning true tells the caller that it need not // continue to call IdleNotification. @@ -4457,7 +4918,6 @@ Persistent<Context> v8::Context::New( // Create the environment. env = isolate->bootstrapper()->CreateEnvironment( - isolate, Utils::OpenHandle(*global_object, true), proxy_template, extensions); @@ -4513,7 +4973,7 @@ Handle<Value> v8::Context::GetSecurityToken() { } i::Handle<i::Context> env = Utils::OpenHandle(this); i::Object* security_token = env->security_token(); - i::Handle<i::Object> token_handle(security_token); + i::Handle<i::Object> token_handle(security_token, isolate); return Utils::ToLocal(token_handle); } @@ -4529,6 +4989,12 @@ bool Context::InContext() { } +v8::Isolate* Context::GetIsolate() { + i::Handle<i::Context> env = Utils::OpenHandle(this); + return reinterpret_cast<Isolate*>(env->GetIsolate()); +} + + v8::Local<v8::Context> Context::GetEntered() { i::Isolate* isolate = i::Isolate::Current(); if (!EnsureInitializedForIsolate(isolate, "v8::Context::GetEntered()")) { @@ -4568,13 +5034,14 @@ v8::Local<v8::Context> Context::GetCalling() { v8::Local<v8::Object> Context::Global() { - if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) { + i::Isolate* isolate = i::Isolate::Current(); + if (IsDeadCheck(isolate, "v8::Context::Global()")) { return Local<v8::Object>(); } i::Object** ctx = reinterpret_cast<i::Object**>(this); i::Handle<i::Context> context = i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx)); - i::Handle<i::Object> global(context->global_proxy()); + i::Handle<i::Object> global(context->global_proxy(), isolate); return Utils::ToLocal(i::Handle<i::JSObject>::cast(global)); } @@ -4642,21 +5109,11 @@ void Context::SetErrorMessageForCodeGenerationFromStrings( i::Object** ctx = reinterpret_cast<i::Object**>(this); i::Handle<i::Context> context = i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx)); - i::Handle<i::Object> error_handle = Utils::OpenHandle(*error); + i::Handle<i::String> error_handle = Utils::OpenHandle(*error); context->set_error_message_for_code_gen_from_strings(*error_handle); } -void V8::SetWrapperClassId(i::Object** global_handle, uint16_t class_id) { - i::GlobalHandles::SetWrapperClassId(global_handle, class_id); -} - - -uint16_t V8::GetWrapperClassId(internal::Object** global_handle) { - return i::GlobalHandles::GetWrapperClassId(global_handle); -} - - Local<v8::Object> ObjectTemplate::NewInstance() { i::Isolate* isolate = i::Isolate::Current(); ON_BAILOUT(isolate, "v8::ObjectTemplate::NewInstance()", @@ -4695,74 +5152,20 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) { } -static Local<External> ExternalNewImpl(void* data) { - return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data))); -} - -static void* ExternalValueImpl(i::Handle<i::Object> obj) { - return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address()); -} - - -Local<Value> v8::External::Wrap(void* data) { - i::Isolate* isolate = i::Isolate::Current(); - STATIC_ASSERT(sizeof(data) == sizeof(i::Address)); - EnsureInitializedForIsolate(isolate, "v8::External::Wrap()"); - LOG_API(isolate, "External::Wrap"); - ENTER_V8(isolate); - - v8::Local<v8::Value> result = CanBeEncodedAsSmi(data) - ? 
Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data))) - : v8::Local<v8::Value>(ExternalNewImpl(data)); - - ASSERT_EQ(data, Unwrap(result)); - return result; -} - - -void* v8::Object::SlowGetPointerFromInternalField(int index) { - i::Handle<i::JSObject> obj = Utils::OpenHandle(this); - i::Object* value = obj->GetInternalField(index); - if (value->IsSmi()) { - return i::Internals::GetExternalPointerFromSmi(value); - } else if (value->IsForeign()) { - return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address()); - } else { - return NULL; - } -} - - -void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) { - if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0; - i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper); - void* result; - if (obj->IsSmi()) { - result = i::Internals::GetExternalPointerFromSmi(*obj); - } else if (obj->IsForeign()) { - result = ExternalValueImpl(obj); - } else { - result = NULL; - } - ASSERT_EQ(result, QuickUnwrap(wrapper)); - return result; -} - - -Local<External> v8::External::New(void* data) { - STATIC_ASSERT(sizeof(data) == sizeof(i::Address)); +Local<External> v8::External::New(void* value) { + STATIC_ASSERT(sizeof(value) == sizeof(i::Address)); i::Isolate* isolate = i::Isolate::Current(); EnsureInitializedForIsolate(isolate, "v8::External::New()"); LOG_API(isolate, "External::New"); ENTER_V8(isolate); - return ExternalNewImpl(data); + i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value); + return Utils::ExternalToLocal(external); } void* External::Value() const { - if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0; - i::Handle<i::Object> obj = Utils::OpenHandle(this); - return ExternalValueImpl(obj); + if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL; + return ExternalValue(*Utils::OpenHandle(this)); } @@ -4772,7 +5175,7 @@ Local<String> v8::String::Empty() { return v8::Local<String>(); } LOG_API(isolate, "String::Empty()"); - return Utils::ToLocal(isolate->factory()->empty_symbol()); + return Utils::ToLocal(isolate->factory()->empty_string()); } @@ -4897,7 +5300,7 @@ bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) { } CHECK(resource && resource->data()); bool result = obj->MakeExternal(resource); - if (result && !obj->IsSymbol()) { + if (result && !obj->IsInternalizedString()) { isolate->heap()->external_string_table()->AddString(*obj); } return result; @@ -4934,7 +5337,7 @@ bool v8::String::MakeExternal( } CHECK(resource && resource->data()); bool result = obj->MakeExternal(resource); - if (result && !obj->IsSymbol()) { + if (result && !obj->IsInternalizedString()) { isolate->heap()->external_string_table()->AddString(*obj); } return result; @@ -4991,8 +5394,10 @@ Local<v8::Value> v8::BooleanObject::New(bool value) { EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()"); LOG_API(isolate, "BooleanObject::New"); ENTER_V8(isolate); - i::Handle<i::Object> boolean(value ? isolate->heap()->true_value() - : isolate->heap()->false_value()); + i::Handle<i::Object> boolean(value + ? isolate->heap()->true_value() + : isolate->heap()->false_value(), + isolate); i::Handle<i::Object> obj = isolate->factory()->ToObject(boolean); return Utils::ToLocal(obj); } @@ -5071,7 +5476,8 @@ void v8::Date::DateTimeConfigurationChangeNotification() { i::HandleScope scope(isolate); // Get the function ResetDateCache (defined in date.js). 
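With External now backed by a regular JS object holding the address, wrapping and unwrapping a native pointer remains a simple round trip. Illustrative sketch (assumes a current isolate and an open handle scope):

#include "v8.h"

void* RoundTripPointer(void* native) {
  v8::Local<v8::External> wrapped = v8::External::New(native);
  return wrapped->Value();  // Returns the original pointer.
}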
i::Handle<i::String> func_name_str = - isolate->factory()->LookupAsciiSymbol("ResetDateCache"); + isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("ResetDateCache")); i::MaybeObject* result = isolate->js_builtins_object()->GetProperty(*func_name_str); i::Object* object_func; @@ -5095,14 +5501,14 @@ void v8::Date::DateTimeConfigurationChangeNotification() { static i::Handle<i::String> RegExpFlagsToString(RegExp::Flags flags) { - char flags_buf[3]; + uint8_t flags_buf[3]; int num_flags = 0; if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g'; if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm'; if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i'; ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf))); - return FACTORY->LookupSymbol( - i::Vector<const char>(flags_buf, num_flags)); + return FACTORY->InternalizeOneByteString( + i::Vector<const uint8_t>(flags_buf, num_flags)); } @@ -5206,8 +5612,8 @@ Local<String> v8::String::NewSymbol(const char* data, int length) { LOG_API(isolate, "String::NewSymbol(char)"); ENTER_V8(isolate); if (length == -1) length = i::StrLength(data); - i::Handle<i::String> result = - isolate->factory()->LookupSymbol(i::Vector<const char>(data, length)); + i::Handle<i::String> result = isolate->factory()->InternalizeUtf8String( + i::Vector<const char>(data, length)); return Utils::ToLocal(result); } @@ -5337,13 +5743,6 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) { SetAddHistogramSampleFunction(callback); } -void V8::EnableSlidingStateWindow() { - i::Isolate* isolate = i::Isolate::Current(); - if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return; - isolate->logger()->EnableSlidingStateWindow(); -} - - void V8::SetFailedAccessCheckCallbackFunction( FailedAccessCheckCallback callback) { i::Isolate* isolate = i::Isolate::Current(); @@ -5353,6 +5752,7 @@ void V8::SetFailedAccessCheckCallbackFunction( isolate->SetFailedAccessCheckCallback(callback); } + void V8::AddObjectGroup(Persistent<Value>* objects, size_t length, RetainedObjectInfo* info) { @@ -5364,6 +5764,19 @@ void V8::AddObjectGroup(Persistent<Value>* objects, } +void V8::AddObjectGroup(Isolate* exported_isolate, + Persistent<Value>* objects, + size_t length, + RetainedObjectInfo* info) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate); + ASSERT(isolate == i::Isolate::Current()); + if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return; + STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**)); + isolate->global_handles()->AddObjectGroup( + reinterpret_cast<i::Object***>(objects), length, info); +} + + void V8::AddImplicitReferences(Persistent<Object> parent, Persistent<Value>* children, size_t length) { @@ -5376,14 +5789,21 @@ void V8::AddImplicitReferences(Persistent<Object> parent, } +intptr_t Isolate::AdjustAmountOfExternalAllocatedMemory( + intptr_t change_in_bytes) { + i::Heap* heap = reinterpret_cast<i::Isolate*>(this)->heap(); + return heap->AdjustAmountOfExternalAllocatedMemory(change_in_bytes); +} + + intptr_t V8::AdjustAmountOfExternalAllocatedMemory(intptr_t change_in_bytes) { i::Isolate* isolate = i::Isolate::UncheckedCurrent(); if (isolate == NULL || !isolate->IsInitialized() || IsDeadCheck(isolate, "v8::V8::AdjustAmountOfExternalAllocatedMemory()")) { return 0; } - return isolate->heap()->AdjustAmountOfExternalAllocatedMemory( - change_in_bytes); + Isolate* isolate_ext = reinterpret_cast<Isolate*>(isolate); + return 
isolate_ext->AdjustAmountOfExternalAllocatedMemory(change_in_bytes); } @@ -5557,6 +5977,18 @@ void Isolate::Exit() { } +void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) { + i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this); + i::Heap* heap = isolate->heap(); + heap_statistics->total_heap_size_ = heap->CommittedMemory(); + heap_statistics->total_heap_size_executable_ = + heap->CommittedMemoryExecutable(); + heap_statistics->total_physical_size_ = heap->CommittedPhysicalMemory(); + heap_statistics->used_heap_size_ = heap->SizeOfObjects(); + heap_statistics->heap_size_limit_ = heap->MaxReserved(); +} + + String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) : str_(NULL), length_(0) { i::Isolate* isolate = i::Isolate::Current(); @@ -5568,7 +6000,7 @@ String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) Handle<String> str = obj->ToString(); if (str.IsEmpty()) return; i::Handle<i::String> i_str = Utils::OpenHandle(*str); - length_ = i::Utf8Length(i_str); + length_ = v8::Utf8Length(*i_str, isolate); str_ = i::NewArray<char>(length_ + 1); str->WriteUtf8(str_); } @@ -5632,7 +6064,7 @@ Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) { i::Handle<i::Object> result = isolate->factory()->NewRangeError(message); error = *result; } - i::Handle<i::Object> result(error); + i::Handle<i::Object> result(error, isolate); return Utils::ToLocal(result); } @@ -5649,7 +6081,7 @@ Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) { isolate->factory()->NewReferenceError(message); error = *result; } - i::Handle<i::Object> result(error); + i::Handle<i::Object> result(error, isolate); return Utils::ToLocal(result); } @@ -5665,7 +6097,7 @@ Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) { i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message); error = *result; } - i::Handle<i::Object> result(error); + i::Handle<i::Object> result(error, isolate); return Utils::ToLocal(result); } @@ -5681,7 +6113,7 @@ Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) { i::Handle<i::Object> result = isolate->factory()->NewTypeError(message); error = *result; } - i::Handle<i::Object> result(error); + i::Handle<i::Object> result(error, isolate); return Utils::ToLocal(result); } @@ -5697,7 +6129,7 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) { i::Handle<i::Object> result = isolate->factory()->NewError(message); error = *result; } - i::Handle<i::Object> result(error); + i::Handle<i::Object> result(error, isolate); return Utils::ToLocal(result); } @@ -5895,13 +6327,13 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) { if (!isolate->IsInitialized()) return Local<Value>(); ON_BAILOUT(isolate, "v8::Debug::GetMirror()", return Local<Value>()); ENTER_V8(isolate); - v8::HandleScope scope; + v8::HandleScope scope(reinterpret_cast<Isolate*>(isolate)); i::Debug* isolate_debug = isolate->debug(); isolate_debug->Load(); i::Handle<i::JSObject> debug(isolate_debug->debug_context()->global_object()); - i::Handle<i::String> name = - isolate->factory()->LookupAsciiSymbol("MakeMirror"); - i::Handle<i::Object> fun_obj = i::GetProperty(debug, name); + i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("MakeMirror")); + i::Handle<i::Object> fun_obj = i::GetProperty(isolate, debug, name); i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj); v8::Handle<v8::Function> v8_fun = Utils::ToLocal(fun); const int kArgc = 1; @@ -5962,11 
+6394,11 @@ Handle<String> CpuProfileNode::GetFunctionName() const { const i::CodeEntry* entry = node->entry(); if (!entry->has_name_prefix()) { return Handle<String>(ToApi<String>( - isolate->factory()->LookupAsciiSymbol(entry->name()))); + isolate->factory()->InternalizeUtf8String(entry->name()))); } else { return Handle<String>(ToApi<String>(isolate->factory()->NewConsString( - isolate->factory()->LookupAsciiSymbol(entry->name_prefix()), - isolate->factory()->LookupAsciiSymbol(entry->name())))); + isolate->factory()->InternalizeUtf8String(entry->name_prefix()), + isolate->factory()->InternalizeUtf8String(entry->name())))); } } @@ -5975,7 +6407,7 @@ Handle<String> CpuProfileNode::GetScriptResourceName() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetScriptResourceName"); const i::ProfileNode* node = reinterpret_cast<const i::ProfileNode*>(this); - return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( + return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String( node->entry()->resource_name()))); } @@ -6022,6 +6454,11 @@ unsigned CpuProfileNode::GetCallUid() const { } +unsigned CpuProfileNode::GetNodeId() const { + return reinterpret_cast<const i::ProfileNode*>(this)->id(); +} + + int CpuProfileNode::GetChildrenCount() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfileNode::GetChildrenCount"); @@ -6041,11 +6478,12 @@ const CpuProfileNode* CpuProfileNode::GetChild(int index) const { void CpuProfile::Delete() { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfile::Delete"); - i::CpuProfiler::DeleteProfile(reinterpret_cast<i::CpuProfile*>(this)); - if (i::CpuProfiler::GetProfilesCount() == 0 && - !i::CpuProfiler::HasDetachedProfiles()) { + i::CpuProfiler* profiler = isolate->cpu_profiler(); + ASSERT(profiler != NULL); + profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this)); + if (profiler->GetProfilesCount() == 0 && !profiler->HasDetachedProfiles()) { // If this was the last profile, clean up all accessory data as well. 
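An illustrative profiling session using the per-isolate profiler and the sample-recording flag added in this change (title and helper name invented; assumes a current isolate and that the JavaScript of interest runs between start and stop):

#include "v8-profiler.h"

void ProfileScriptRun() {
  v8::HandleScope scope;
  v8::Local<v8::String> title = v8::String::New("my-profile");
  // true => also record the flat list of samples, not just the call tree.
  v8::CpuProfiler::StartProfiling(title, true);
  // ... run the JavaScript of interest ...
  const v8::CpuProfile* profile =
      v8::CpuProfiler::StopProfiling(title, v8::Handle<v8::Value>());
  for (int i = 0; i < profile->GetSamplesCount(); i++) {
    const v8::CpuProfileNode* node = profile->GetSample(i);
    // ... inspect node->GetFunctionName(), node->GetNodeId(), ...
  }
  // The API hands out a const pointer; Delete() releases the profile.
  const_cast<v8::CpuProfile*>(profile)->Delete();
}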
- i::CpuProfiler::DeleteAllProfiles(); + profiler->DeleteAllProfiles(); } } @@ -6061,31 +6499,36 @@ Handle<String> CpuProfile::GetTitle() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfile::GetTitle"); const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); - return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( + return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String( profile->title()))); } -const CpuProfileNode* CpuProfile::GetBottomUpRoot() const { +const CpuProfileNode* CpuProfile::GetTopDownRoot() const { i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::CpuProfile::GetBottomUpRoot"); + IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot"); const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); - return reinterpret_cast<const CpuProfileNode*>(profile->bottom_up()->root()); + return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root()); } -const CpuProfileNode* CpuProfile::GetTopDownRoot() const { - i::Isolate* isolate = i::Isolate::Current(); - IsDeadCheck(isolate, "v8::CpuProfile::GetTopDownRoot"); +const CpuProfileNode* CpuProfile::GetSample(int index) const { const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this); - return reinterpret_cast<const CpuProfileNode*>(profile->top_down()->root()); + return reinterpret_cast<const CpuProfileNode*>(profile->sample(index)); +} + + +int CpuProfile::GetSamplesCount() const { + return reinterpret_cast<const i::CpuProfile*>(this)->samples_count(); } int CpuProfiler::GetProfilesCount() { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::GetProfilesCount"); - return i::CpuProfiler::GetProfilesCount(); + i::CpuProfiler* profiler = isolate->cpu_profiler(); + ASSERT(profiler != NULL); + return profiler->GetProfilesCount(); } @@ -6093,8 +6536,10 @@ const CpuProfile* CpuProfiler::GetProfile(int index, Handle<Value> security_token) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::GetProfile"); + i::CpuProfiler* profiler = isolate->cpu_profiler(); + ASSERT(profiler != NULL); return reinterpret_cast<const CpuProfile*>( - i::CpuProfiler::GetProfile( + profiler->GetProfile( security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token), index)); } @@ -6104,17 +6549,21 @@ const CpuProfile* CpuProfiler::FindProfile(unsigned uid, Handle<Value> security_token) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::FindProfile"); + i::CpuProfiler* profiler = isolate->cpu_profiler(); + ASSERT(profiler != NULL); return reinterpret_cast<const CpuProfile*>( - i::CpuProfiler::FindProfile( + profiler->FindProfile( security_token.IsEmpty() ? 
NULL : *Utils::OpenHandle(*security_token), uid)); } -void CpuProfiler::StartProfiling(Handle<String> title) { +void CpuProfiler::StartProfiling(Handle<String> title, bool record_samples) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::StartProfiling"); - i::CpuProfiler::StartProfiling(*Utils::OpenHandle(*title)); + i::CpuProfiler* profiler = isolate->cpu_profiler(); + ASSERT(profiler != NULL); + profiler->StartProfiling(*Utils::OpenHandle(*title), record_samples); } @@ -6122,8 +6571,10 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title, Handle<Value> security_token) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::StopProfiling"); + i::CpuProfiler* profiler = isolate->cpu_profiler(); + ASSERT(profiler != NULL); return reinterpret_cast<const CpuProfile*>( - i::CpuProfiler::StopProfiling( + profiler->StopProfiling( security_token.IsEmpty() ? NULL : *Utils::OpenHandle(*security_token), *Utils::OpenHandle(*title))); } @@ -6132,7 +6583,9 @@ const CpuProfile* CpuProfiler::StopProfiling(Handle<String> title, void CpuProfiler::DeleteAllProfiles() { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::CpuProfiler::DeleteAllProfiles"); - i::CpuProfiler::DeleteAllProfiles(); + i::CpuProfiler* profiler = isolate->cpu_profiler(); + ASSERT(profiler != NULL); + profiler->DeleteAllProfiles(); } @@ -6158,12 +6611,12 @@ Handle<Value> HeapGraphEdge::GetName() const { case i::HeapGraphEdge::kInternal: case i::HeapGraphEdge::kProperty: case i::HeapGraphEdge::kShortcut: - return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( - edge->name()))); + return Handle<String>(ToApi<String>( + isolate->factory()->InternalizeUtf8String(edge->name()))); case i::HeapGraphEdge::kElement: case i::HeapGraphEdge::kHidden: - return Handle<Number>(ToApi<Number>(isolate->factory()->NewNumberFromInt( - edge->index()))); + return Handle<Number>(ToApi<Number>( + isolate->factory()->NewNumberFromInt(edge->index()))); default: UNREACHABLE(); } return v8::Undefined(); @@ -6202,7 +6655,7 @@ HeapGraphNode::Type HeapGraphNode::GetType() const { Handle<String> HeapGraphNode::GetName() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapGraphNode::GetName"); - return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( + return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String( ToInternal(this)->name()))); } @@ -6281,7 +6734,7 @@ unsigned HeapSnapshot::GetUid() const { Handle<String> HeapSnapshot::GetTitle() const { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapSnapshot::GetTitle"); - return Handle<String>(ToApi<String>(isolate->factory()->LookupAsciiSymbol( + return Handle<String>(ToApi<String>(isolate->factory()->InternalizeUtf8String( ToInternal(this)->title()))); } @@ -6374,7 +6827,8 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) { const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title, HeapSnapshot::Type type, - ActivityControl* control) { + ActivityControl* control, + ObjectNameResolver* resolver) { i::Isolate* isolate = i::Isolate::Current(); IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot"); i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull; @@ -6387,7 +6841,7 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title, } return reinterpret_cast<const HeapSnapshot*>( i::HeapProfiler::TakeSnapshot( - *Utils::OpenHandle(*title), internal_type, 
control)); + *Utils::OpenHandle(*title), internal_type, control, resolver)); } @@ -6497,8 +6951,11 @@ void Testing::PrepareStressRun(int run) { } +// TODO(svenpanne) Deprecate this. void Testing::DeoptimizeAll() { - internal::Deoptimizer::DeoptimizeAll(); + i::Isolate* isolate = i::Isolate::Current(); + i::HandleScope scope(isolate); + internal::Deoptimizer::DeoptimizeAll(isolate); } @@ -6543,7 +7000,7 @@ void HandleScopeImplementer::IterateThis(ObjectVisitor* v) { for (int i = blocks()->length() - 2; i >= 0; --i) { Object** block = blocks()->at(i); if (last_handle_before_deferred_block_ != NULL && - (last_handle_before_deferred_block_ < &block[kHandleBlockSize]) && + (last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) && (last_handle_before_deferred_block_ >= block)) { v->VisitPointers(block, last_handle_before_deferred_block_); ASSERT(!found_block_before_deferred); diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h index 7197b6cb54..ac6b8342c5 100644 --- a/deps/v8/src/api.h +++ b/deps/v8/src/api.h @@ -177,7 +177,8 @@ class RegisteredExtension { V(Context, Context) \ V(External, Foreign) \ V(StackTrace, JSArray) \ - V(StackFrame, JSObject) + V(StackFrame, JSObject) \ + V(DeclaredAccessorDescriptor, DeclaredAccessorDescriptor) class Utils { @@ -201,8 +202,6 @@ class Utils { v8::internal::Handle<v8::internal::JSObject> obj); static inline Local<Array> ToLocal( v8::internal::Handle<v8::internal::JSArray> obj); - static inline Local<External> ToLocal( - v8::internal::Handle<v8::internal::Foreign> obj); static inline Local<Message> MessageToLocal( v8::internal::Handle<v8::internal::Object> obj); static inline Local<StackTrace> StackTraceToLocal( @@ -225,6 +224,10 @@ class Utils { v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj); static inline Local<TypeSwitch> ToLocal( v8::internal::Handle<v8::internal::TypeSwitchInfo> obj); + static inline Local<External> ExternalToLocal( + v8::internal::Handle<v8::internal::JSObject> obj); + static inline Local<DeclaredAccessorDescriptor> ToLocal( + v8::internal::Handle<v8::internal::DeclaredAccessorDescriptor> obj); #define DECLARE_OPEN_HANDLE(From, To) \ static inline v8::internal::Handle<v8::internal::To> \ @@ -268,7 +271,6 @@ MAKE_TO_LOCAL(ToLocal, String, String) MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp) MAKE_TO_LOCAL(ToLocal, JSObject, Object) MAKE_TO_LOCAL(ToLocal, JSArray, Array) -MAKE_TO_LOCAL(ToLocal, Foreign, External) MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate) MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate) MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature) @@ -280,6 +282,8 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame) MAKE_TO_LOCAL(NumberToLocal, Object, Number) MAKE_TO_LOCAL(IntegerToLocal, Object, Integer) MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32) +MAKE_TO_LOCAL(ExternalToLocal, JSObject, External) +MAKE_TO_LOCAL(ToLocal, DeclaredAccessorDescriptor, DeclaredAccessorDescriptor) #undef MAKE_TO_LOCAL diff --git a/deps/v8/src/apinatives.js b/deps/v8/src/apinatives.js index 79b41dd88c..ad1d869415 100644 --- a/deps/v8/src/apinatives.js +++ b/deps/v8/src/apinatives.js @@ -90,7 +90,7 @@ function InstantiateFunction(data, name) { // internal ToBoolean doesn't handle that! 
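The resolver hook threaded through TakeSnapshot above lets the embedder supply names for its objects in heap snapshots. A minimal sketch (class name and label invented; assumes a current isolate):

#include "v8-profiler.h"

class EmbedderNameResolver : public v8::HeapProfiler::ObjectNameResolver {
 public:
  virtual const char* GetName(v8::Handle<v8::Object> /* object */) {
    return "EmbedderGlobal";  // e.g. derive a name from the wrapped native object
  }
};

void TakeLabelledSnapshot() {
  v8::HandleScope scope;
  EmbedderNameResolver resolver;
  const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
      v8::String::New("embedder-snapshot"),
      v8::HeapSnapshot::kFull,
      NULL,        // no ActivityControl
      &resolver);
  // ... serialize or walk the snapshot ...
}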
if (!(typeof parent === 'undefined')) { var parent_fun = Instantiate(parent); - fun.prototype.__proto__ = parent_fun.prototype; + %SetPrototype(fun.prototype, parent_fun.prototype); } ConfigureTemplateInstance(fun, data); } catch (e) { diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h index 6268c332c8..123013b0a0 100644 --- a/deps/v8/src/arm/assembler-arm-inl.h +++ b/deps/v8/src/arm/assembler-arm-inl.h @@ -47,13 +47,54 @@ namespace v8 { namespace internal { +int Register::NumAllocatableRegisters() { + if (CpuFeatures::IsSupported(VFP2)) { + return kMaxNumAllocatableRegisters; + } else { + return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double; + } +} + + +int DwVfpRegister::NumRegisters() { + if (CpuFeatures::IsSupported(VFP2)) { + return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16; + } else { + return 1; + } +} + + +int DwVfpRegister::NumAllocatableRegisters() { + if (CpuFeatures::IsSupported(VFP2)) { + return NumRegisters() - kNumReservedRegisters; + } else { + return 1; + } +} + + int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) { ASSERT(!reg.is(kDoubleRegZero)); ASSERT(!reg.is(kScratchDoubleReg)); + if (reg.code() > kDoubleRegZero.code()) { + return reg.code() - kNumReservedRegisters; + } return reg.code(); } +DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) { + ASSERT(index >= 0 && index < NumAllocatableRegisters()); + ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() == + kNumReservedRegisters - 1); + if (index >= kDoubleRegZero.code()) { + return from_code(index + kNumReservedRegisters); + } + return from_code(index); +} + + void RelocInfo::apply(intptr_t delta) { if (RelocInfo::IsInternalReference(rmode_)) { // absolute code pointer inside code object moves with the code object. 
@@ -66,13 +107,13 @@ void RelocInfo::apply(intptr_t delta) { Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_)); @@ -85,9 +126,8 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); - Assembler::set_target_address_at(pc_, reinterpret_cast<Address>( - reinterpret_cast<intptr_t>(target) & ~3)); + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + Assembler::set_target_address_at(pc_, target); if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( @@ -138,6 +178,19 @@ Address* RelocInfo::target_reference_address() { } +Address RelocInfo::target_runtime_entry(Assembler* origin) { + ASSERT(IsRuntimeEntry(rmode_)); + return target_address(); +} + + +void RelocInfo::set_target_runtime_entry(Address target, + WriteBarrierMode mode) { + ASSERT(IsRuntimeEntry(rmode_)); + if (target_address() != target) set_target_address(target, mode); +} + + Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = Memory::Address_at(pc_); @@ -166,6 +219,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell, } +static const int kNoCodeAgeSequenceLength = 3; + +Code* RelocInfo::code_age_stub() { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + return Code::GetCodeFromTargetAddress( + Memory::Address_at(pc_ + Assembler::kInstrSize * + (kNoCodeAgeSequenceLength - 1))); +} + + +void RelocInfo::set_code_age_stub(Code* stub) { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Memory::Address_at(pc_ + Assembler::kInstrSize * + (kNoCodeAgeSequenceLength - 1)) = + stub->instruction_start(); +} + + Address RelocInfo::call_address() { // The 2 instructions offset assumes patched debug break slot or return // sequence. @@ -239,6 +310,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { visitor->VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + visitor->VisitCodeAgeSequence(this); #ifdef ENABLE_DEBUGGER_SUPPORT // TODO(isolates): Get a cached isolate below. 
} else if (((RelocInfo::IsJSReturn(mode) && @@ -248,7 +321,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { Isolate::Current()->debug()->has_break_points()) { visitor->VisitDebugTarget(this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (RelocInfo::IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } } @@ -265,6 +338,8 @@ void RelocInfo::Visit(Heap* heap) { StaticVisitor::VisitGlobalPropertyCell(heap, this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + StaticVisitor::VisitCodeAgeSequence(heap, this); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -273,7 +348,7 @@ void RelocInfo::Visit(Heap* heap) { IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (RelocInfo::IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } } @@ -296,7 +371,7 @@ Operand::Operand(const ExternalReference& f) { Operand::Operand(Smi* value) { rm_ = no_reg; imm32_ = reinterpret_cast<intptr_t>(value); - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } @@ -473,14 +548,12 @@ void Assembler::set_target_pointer_at(Address pc, Address target) { Address Assembler::target_address_at(Address pc) { - return reinterpret_cast<Address>( - reinterpret_cast<intptr_t>(target_pointer_at(pc)) & ~3); + return target_pointer_at(pc); } void Assembler::set_target_address_at(Address pc, Address target) { - set_target_pointer_at(pc, reinterpret_cast<Address>( - reinterpret_cast<intptr_t>(target) & ~3)); + set_target_pointer_at(pc, target); } diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc index 9be62a404b..1574d51bb1 100644 --- a/deps/v8/src/arm/assembler-arm.cc +++ b/deps/v8/src/arm/assembler-arm.cc @@ -48,9 +48,14 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_ = 0; +unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; +ExternalReference ExternalReference::cpu_features() { + ASSERT(CpuFeatures::initialized_); + return ExternalReference(&CpuFeatures::supported_); +} + // Get the CPU features enabled by the build. 
For cross compilation the // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS // can be defined to enable ARMv7 and VFPv3 instructions when building the @@ -66,6 +71,9 @@ static unsigned CpuFeaturesImpliedByCompiler() { #ifdef CAN_USE_VFP2_INSTRUCTIONS answer |= 1u << VFP2; #endif // CAN_USE_VFP2_INSTRUCTIONS +#ifdef CAN_USE_VFP32DREGS + answer |= 1u << VFP32DREGS; +#endif // CAN_USE_VFP32DREGS #ifdef __arm__ // If the compiler is allowed to use VFP then we can use VFP too in our code @@ -85,8 +93,24 @@ static unsigned CpuFeaturesImpliedByCompiler() { } +const char* DwVfpRegister::AllocationIndexToString(int index) { + if (CpuFeatures::IsSupported(VFP2)) { + ASSERT(index >= 0 && index < NumAllocatableRegisters()); + ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() == + kNumReservedRegisters - 1); + if (index >= kDoubleRegZero.code()) + index += kNumReservedRegisters; + + return VFPRegisters::Name(index, true); + } else { + ASSERT(index == 0); + return "sfpd0"; + } +} + + void CpuFeatures::Probe() { - unsigned standard_features = static_cast<unsigned>( + uint64_t standard_features = static_cast<unsigned>( OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler(); ASSERT(supported_ == 0 || supported_ == standard_features); #ifdef DEBUG @@ -107,49 +131,66 @@ void CpuFeatures::Probe() { // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6. if (FLAG_enable_vfp3) { - supported_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; + supported_ |= + static_cast<uint64_t>(1) << VFP3 | + static_cast<uint64_t>(1) << ARMv7 | + static_cast<uint64_t>(1) << VFP2; } // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled if (FLAG_enable_armv7) { - supported_ |= 1u << ARMv7; + supported_ |= static_cast<uint64_t>(1) << ARMv7; } if (FLAG_enable_sudiv) { - supported_ |= 1u << SUDIV; + supported_ |= static_cast<uint64_t>(1) << SUDIV; } if (FLAG_enable_movw_movt) { - supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS; } + + if (FLAG_enable_32dregs) { + supported_ |= static_cast<uint64_t>(1) << VFP32DREGS; + } + #else // __arm__ // Probe for additional features not already known to be available. if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) { // This implementation also sets the VFP flags if runtime // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI // 0406B, page A1-6. 
- found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2; + found_by_runtime_probing_only_ |= + static_cast<uint64_t>(1) << VFP3 | + static_cast<uint64_t>(1) << ARMv7 | + static_cast<uint64_t>(1) << VFP2; } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) { - found_by_runtime_probing_ |= 1u << VFP2; + found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP2; } if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) { - found_by_runtime_probing_ |= 1u << ARMv7; + found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7; } if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) { - found_by_runtime_probing_ |= 1u << SUDIV; + found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV; } if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) { - found_by_runtime_probing_ |= 1u << UNALIGNED_ACCESSES; + found_by_runtime_probing_only_ |= + static_cast<uint64_t>(1) << UNALIGNED_ACCESSES; } if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER && OS::ArmCpuHasFeature(ARMv7)) { - found_by_runtime_probing_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS; + found_by_runtime_probing_only_ |= + static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS; + } + + if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) { + found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS; } - supported_ |= found_by_runtime_probing_; + supported_ |= found_by_runtime_probing_only_; #endif // Assert that VFP3 implies VFP2 and ARMv7. @@ -207,7 +248,7 @@ Operand::Operand(Handle<Object> handle) { } else { // no relocation needed imm32_ = reinterpret_cast<intptr_t>(obj); - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } } @@ -282,8 +323,11 @@ const Instr kPopRegPattern = // mov lr, pc const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12; // ldr rd, [pc, #offset] -const Instr kLdrPCMask = kCondMask | 15 * B24 | 7 * B20 | 15 * B16; -const Instr kLdrPCPattern = al | 5 * B24 | L | kRegister_pc_Code * B16; +const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16; +const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16; +// vldr dd, [pc, #offset] +const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8; +const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8; // blxcc rm const Instr kBlxRegMask = 15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4; @@ -318,47 +362,13 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff; const Instr kLdrStrOffsetMask = 0x00000fff; -// Spare buffer. -static const int kMinimalBufferSize = 4*KB; - - -Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) - : AssemblerBase(arg_isolate), +Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) + : AssemblerBase(isolate, buffer, buffer_size), recorded_ast_id_(TypeFeedbackId::None()), - positions_recorder_(this), - emit_debug_code_(FLAG_debug_code), - predictable_code_size_(false) { - if (buffer == NULL) { - // Do our own buffer management. - if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - - if (isolate()->assembler_spare_buffer() != NULL) { - buffer = isolate()->assembler_spare_buffer(); - isolate()->set_assembler_spare_buffer(NULL); - } - } - if (buffer == NULL) { - buffer_ = NewArray<byte>(buffer_size); - } else { - buffer_ = static_cast<byte*>(buffer); - } - buffer_size_ = buffer_size; - own_buffer_ = true; - - } else { - // Use externally provided buffer instead. 
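CpuFeatures::Probe() above widens the feature masks to 64 bits and ORs in implied features (VFP3 implies ARMv7 and VFP2). A hedged sketch of that bit arithmetic; the enum values below are placeholders for illustration, not V8's real CpuFeature numbering:

#include <cassert>
#include <cstdint>

// Placeholder feature numbering, for illustration only.
enum Feature { VFP2 = 0, VFP3 = 1, ARMv7 = 2, SUDIV = 3, VFP32DREGS = 4 };

int main() {
  uint64_t supported = 0;
  // Enabling VFP3 also records the features it implies, as in Probe().
  supported |= static_cast<uint64_t>(1) << VFP3 |
               static_cast<uint64_t>(1) << ARMv7 |
               static_cast<uint64_t>(1) << VFP2;
  assert((supported & (static_cast<uint64_t>(1) << VFP2)) != 0);
  assert((supported & (static_cast<uint64_t>(1) << ARMv7)) != 0);
  assert((supported & (static_cast<uint64_t>(1) << SUDIV)) == 0);
  return 0;
}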
- ASSERT(buffer_size > 0); - buffer_ = static_cast<byte*>(buffer); - buffer_size_ = buffer_size; - own_buffer_ = false; - } - - // Set up buffer pointers. - ASSERT(buffer_ != NULL); - pc_ = buffer_; - reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); + positions_recorder_(this) { + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); num_pending_reloc_info_ = 0; + num_pending_64_bit_reloc_info_ = 0; next_buffer_check_ = 0; const_pool_blocked_nesting_ = 0; no_const_pool_before_ = 0; @@ -370,14 +380,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) Assembler::~Assembler() { ASSERT(const_pool_blocked_nesting_ == 0); - if (own_buffer_) { - if (isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } - } } @@ -385,6 +387,7 @@ void Assembler::GetCode(CodeDesc* desc) { // Emit constant pool if necessary. CheckConstPool(true, false); ASSERT(num_pending_reloc_info_ == 0); + ASSERT(num_pending_64_bit_reloc_info_ == 0); // Set up code descriptor. desc->buffer = buffer_; @@ -431,6 +434,11 @@ bool Assembler::IsLdrRegisterImmediate(Instr instr) { } +bool Assembler::IsVldrDRegisterImmediate(Instr instr) { + return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8); +} + + int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { ASSERT(IsLdrRegisterImmediate(instr)); bool positive = (instr & B23) == B23; @@ -439,6 +447,15 @@ int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { } +int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) { + ASSERT(IsVldrDRegisterImmediate(instr)); + bool positive = (instr & B23) == B23; + int offset = instr & kOff8Mask; // Zero extended offset. + offset <<= 2; + return positive ? offset : -offset; +} + + Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { ASSERT(IsLdrRegisterImmediate(instr)); bool positive = offset >= 0; @@ -451,6 +468,19 @@ Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { } +Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) { + ASSERT(IsVldrDRegisterImmediate(instr)); + ASSERT((offset & ~3) == offset); // Must be 64-bit aligned. + bool positive = offset >= 0; + if (!positive) offset = -offset; + ASSERT(is_uint10(offset)); + // Set bit indicating whether the offset should be added. + instr = (instr & ~B23) | (positive ? B23 : 0); + // Set the actual offset. Its bottom 2 bits are zero. + return (instr & ~kOff8Mask) | (offset >> 2); +} + + bool Assembler::IsStrRegisterImmediate(Instr instr) { return (instr & (B27 | B26 | B25 | B22 | B20)) == B26; } @@ -536,7 +566,14 @@ bool Assembler::IsLdrRegFpNegOffset(Instr instr) { bool Assembler::IsLdrPcImmediateOffset(Instr instr) { // Check the instruction is indeed a // ldr<cond> <Rd>, [pc +/- offset_12]. - return (instr & (kLdrPCMask & ~kCondMask)) == 0x051f0000; + return (instr & kLdrPCMask) == kLdrPCPattern; +} + + +bool Assembler::IsVldrDPcImmediateOffset(Instr instr) { + // Check the instruction is indeed a + // vldr<cond> <Dd>, [pc +/- offset_10]. 
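Get/SetVldrDRegisterImmediateOffset above pack a word-aligned offset into the vldr encoding: the U bit (bit 23) selects add or subtract and the low eight bits hold offset/4, which gives the +/-1KB reach cited later in assembler-arm.h. A self-contained sketch of that round trip, using assumed bit constants rather than the real Instr type:

#include <cassert>
#include <cstdint>

// Assumed constants mirroring the diff: U bit and 8-bit immediate field.
const uint32_t B23 = 1u << 23;
const uint32_t kOff8Mask = 0xff;

uint32_t SetVldrDOffset(uint32_t instr, int offset) {
  assert((offset & 3) == 0);                       // must be a multiple of 4
  bool positive = offset >= 0;
  if (!positive) offset = -offset;
  assert(offset < (1 << 10));                      // fits in 8 bits after /4
  instr = (instr & ~B23) | (positive ? B23 : 0);   // U bit: add or subtract
  return (instr & ~kOff8Mask) | (offset >> 2);     // imm8 = offset / 4
}

int GetVldrDOffset(uint32_t instr) {
  bool positive = (instr & B23) == B23;
  int offset = (instr & kOff8Mask) << 2;
  return positive ? offset : -offset;
}

int main() {
  const uint32_t instr = 0;  // only the offset-related bits matter here
  for (int off = -1020; off <= 1020; off += 4) {
    assert(GetVldrDOffset(SetVldrDOffset(instr, off)) == off);
  }
  return 0;
}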
+ return (instr & kVldrDPCMask) == kVldrDPCPattern; } @@ -812,7 +849,7 @@ bool Operand::must_output_reloc_info(const Assembler* assembler) const { #endif // def DEBUG if (assembler != NULL && assembler->predictable_code_size()) return true; return Serializer::enabled(); - } else if (rmode_ == RelocInfo::NONE) { + } else if (RelocInfo::IsNone(rmode_)) { return false; } return true; @@ -1512,7 +1549,7 @@ void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) { void Assembler::ldrd(Register dst1, Register dst2, const MemOperand& src, Condition cond) { - ASSERT(CpuFeatures::IsEnabled(ARMv7)); + ASSERT(IsEnabled(ARMv7)); ASSERT(src.rm().is(no_reg)); ASSERT(!dst1.is(lr)); // r14. ASSERT_EQ(0, dst1.code() % 2); @@ -1527,7 +1564,7 @@ void Assembler::strd(Register src1, Register src2, ASSERT(!src1.is(lr)); // r14. ASSERT_EQ(0, src1.code() % 2); ASSERT_EQ(src1.code() + 1, src2.code()); - ASSERT(CpuFeatures::IsEnabled(ARMv7)); + ASSERT(IsEnabled(ARMv7)); addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); } @@ -1723,19 +1760,21 @@ void Assembler::vldr(const DwVfpRegister dst, int offset, const Condition cond) { // Ddst = MEM(Rbase + offset). - // Instruction details available in ARM DDI 0406A, A8-628. - // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | - // Vdst(15-12) | 1011(11-8) | offset - ASSERT(CpuFeatures::IsEnabled(VFP2)); + // Instruction details available in ARM DDI 0406C.b, A8-924. + // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) | + // Vd(15-12) | 1011(11-8) | offset + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; u = 0; } + int vd, d; + dst.split_code(&vd, &d); ASSERT(offset >= 0); if ((offset % 4) == 0 && (offset / 4) < 256) { - emit(cond | u*B23 | 0xD1*B20 | base.code()*B16 | dst.code()*B12 | + emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 | 0xB*B8 | ((offset / 4) & 255)); } else { // Larger offsets must be handled by computing the correct address @@ -1746,7 +1785,7 @@ void Assembler::vldr(const DwVfpRegister dst, } else { sub(ip, base, Operand(offset)); } - emit(cond | 0xD1*B20 | ip.code()*B16 | dst.code()*B12 | 0xB*B8); + emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8); } } @@ -1768,7 +1807,7 @@ void Assembler::vldr(const SwVfpRegister dst, // Instruction details available in ARM DDI 0406A, A8-628. // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | offset - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1809,19 +1848,22 @@ void Assembler::vstr(const DwVfpRegister src, int offset, const Condition cond) { // MEM(Rbase + offset) = Dsrc. - // Instruction details available in ARM DDI 0406A, A8-786. - // cond(31-28) | 1101(27-24)| U000(23-20) | | Rbase(19-16) | - // Vsrc(15-12) | 1011(11-8) | (offset/4) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + // Instruction details available in ARM DDI 0406C.b, A8-1082. 
+ // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) | + // Vd(15-12) | 1011(11-8) | (offset/4) + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; u = 0; } ASSERT(offset >= 0); + int vd, d; + src.split_code(&vd, &d); + if ((offset % 4) == 0 && (offset / 4) < 256) { - emit(cond | u*B23 | 0xD0*B20 | base.code()*B16 | src.code()*B12 | - 0xB*B8 | ((offset / 4) & 255)); + emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 | + ((offset / 4) & 255)); } else { // Larger offsets must be handled by computing the correct address // in the ip register. @@ -1831,7 +1873,7 @@ void Assembler::vstr(const DwVfpRegister src, } else { sub(ip, base, Operand(offset)); } - emit(cond | 0xD0*B20 | ip.code()*B16 | src.code()*B12 | 0xB*B8); + emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8); } } @@ -1853,7 +1895,7 @@ void Assembler::vstr(const SwVfpRegister src, // Instruction details available in ARM DDI 0406A, A8-786. // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) | // Vdst(15-12) | 1010(11-8) | (offset/4) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int u = 1; if (offset < 0) { offset = -offset; @@ -1893,10 +1935,10 @@ void Assembler::vldm(BlockAddrMode am, DwVfpRegister first, DwVfpRegister last, Condition cond) { - // Instruction details available in ARM DDI 0406A, A8-626. + // Instruction details available in ARM DDI 0406C.b, A8-922. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | - // first(15-12) | 1010(11-8) | (count * 2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + // first(15-12) | 1011(11-8) | (count * 2) + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1915,10 +1957,10 @@ void Assembler::vstm(BlockAddrMode am, DwVfpRegister first, DwVfpRegister last, Condition cond) { - // Instruction details available in ARM DDI 0406A, A8-784. + // Instruction details available in ARM DDI 0406C.b, A8-1080. // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count * 2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1939,7 +1981,7 @@ void Assembler::vldm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-626. // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) | // first(15-12) | 1010(11-8) | (count/2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -1960,7 +2002,7 @@ void Assembler::vstm(BlockAddrMode am, // Instruction details available in ARM DDI 0406A, A8-784. // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) | // first(15-12) | 1011(11-8) | (count/2) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT_LE(first.code(), last.code()); ASSERT(am == ia || am == ia_w || am == db_w); ASSERT(!base.is(pc)); @@ -2033,37 +2075,69 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { void Assembler::vmov(const DwVfpRegister dst, double imm, - const Register scratch, - const Condition cond) { - // Dd = immediate - // Instruction details available in ARM DDI 0406B, A8-640. 
- ASSERT(CpuFeatures::IsEnabled(VFP2)); + const Register scratch) { + ASSERT(IsEnabled(VFP2)); uint32_t enc; if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) { // The double can be encoded in the instruction. - emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | 0xB*B8 | enc); + // + // Dd = immediate + // Instruction details available in ARM DDI 0406C.b, A8-936. + // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0) + int vd, d; + dst.split_code(&vd, &d); + emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc); + } else if (FLAG_enable_vldr_imm) { + // TODO(jfb) Temporarily turned off until we have constant blinding or + // some equivalent mitigation: an attacker can otherwise control + // generated data which also happens to be executable, a Very Bad + // Thing indeed. + // Blinding gets tricky because we don't have xor, we probably + // need to add/subtract without losing precision, which requires a + // cookie value that Lithium is probably better positioned to + // choose. + // We could also add a few peepholes here like detecting 0.0 and + // -0.0 and doing a vmov from the sequestered d14, forcing denorms + // to zero (we set flush-to-zero), and normalizing NaN values. + // We could also detect redundant values. + // The code could also randomize the order of values, though + // that's tricky because vldr has a limited reach. Furthermore + // it breaks load locality. + RecordRelocInfo(imm); + vldr(dst, MemOperand(pc, 0)); } else { - // Synthesise the double from ARM immediates. This could be implemented - // using vldr from a constant pool. + // Synthesise the double from ARM immediates. uint32_t lo, hi; DoubleAsTwoUInt32(imm, &lo, &hi); - mov(ip, Operand(lo)); if (scratch.is(no_reg)) { - // Move the low part of the double into the lower of the corresponsing S - // registers of D register dst. - vmov(dst.low(), ip, cond); - - // Move the high part of the double into the higher of the corresponsing S - // registers of D register dst. - mov(ip, Operand(hi)); - vmov(dst.high(), ip, cond); + if (dst.code() < 16) { + // Move the low part of the double into the lower of the corresponsing S + // registers of D register dst. + mov(ip, Operand(lo)); + vmov(dst.low(), ip); + + // Move the high part of the double into the higher of the + // corresponsing S registers of D register dst. + mov(ip, Operand(hi)); + vmov(dst.high(), ip); + } else { + // D16-D31 does not have S registers, so move the low and high parts + // directly to the D register using vmov.32. + // Note: This may be slower, so we only do this when we have to. + mov(ip, Operand(lo)); + vmov(dst, VmovIndexLo, ip); + mov(ip, Operand(hi)); + vmov(dst, VmovIndexHi, ip); + } } else { // Move the low and high parts of the double to a D register in one // instruction. + mov(ip, Operand(lo)); mov(scratch, Operand(hi)); - vmov(dst, ip, scratch, cond); + vmov(dst, ip, scratch); } } } @@ -2074,7 +2148,7 @@ void Assembler::vmov(const SwVfpRegister dst, const Condition cond) { // Sd = Sm // Instruction details available in ARM DDI 0406B, A8-642. - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); int sd, d, sm, m; dst.split_code(&sd, &d); src.split_code(&sm, &m); @@ -2086,10 +2160,33 @@ void Assembler::vmov(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { // Dd = Dm - // Instruction details available in ARM DDI 0406B, A8-642. 
- ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | 0xB*B20 | - dst.code()*B12 | 0x5*B9 | B8 | B6 | src.code()); + // Instruction details available in ARM DDI 0406C.b, A8-938. + // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) | + // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vm, m; + src.split_code(&vm, &m); + emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 | + vm); +} + + +void Assembler::vmov(const DwVfpRegister dst, + const VmovIndex index, + const Register src, + const Condition cond) { + // Dd[index] = Rt + // Instruction details available in ARM DDI 0406C.b, A8-940. + // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) | + // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0) + ASSERT(IsEnabled(VFP2)); + ASSERT(index.index == 0 || index.index == 1); + int vd, d; + dst.split_code(&vd, &d); + emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 | + d*B7 | B4); } @@ -2098,13 +2195,15 @@ void Assembler::vmov(const DwVfpRegister dst, const Register src2, const Condition cond) { // Dm = <Rt,Rt2>. - // Instruction details available in ARM DDI 0406A, A8-646. + // Instruction details available in ARM DDI 0406C.b, A8-948. // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!src1.is(pc) && !src2.is(pc)); + int vm, m; + dst.split_code(&vm, &m); emit(cond | 0xC*B24 | B22 | src2.code()*B16 | - src1.code()*B12 | 0xB*B8 | B4 | dst.code()); + src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm); } @@ -2113,13 +2212,15 @@ void Assembler::vmov(const Register dst1, const DwVfpRegister src, const Condition cond) { // <Rt,Rt2> = Dm. - // Instruction details available in ARM DDI 0406A, A8-646. + // Instruction details available in ARM DDI 0406C.b, A8-948. // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) | // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!dst1.is(pc) && !dst2.is(pc)); + int vm, m; + src.split_code(&vm, &m); emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 | - dst1.code()*B12 | 0xB*B8 | B4 | src.code()); + dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm); } @@ -2130,7 +2231,7 @@ void Assembler::vmov(const SwVfpRegister dst, // Instruction details available in ARM DDI 0406A, A8-642. // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!src.is(pc)); int sn, n; dst.split_code(&sn, &n); @@ -2145,7 +2246,7 @@ void Assembler::vmov(const Register dst, // Instruction details available in ARM DDI 0406A, A8-642. 
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) | // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); ASSERT(!dst.is(pc)); int sn, n; src.split_code(&sn, &n); @@ -2270,7 +2371,7 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond)); } @@ -2279,7 +2380,7 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond)); } @@ -2288,7 +2389,7 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond)); } @@ -2297,7 +2398,7 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond)); } @@ -2306,7 +2407,7 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond)); } @@ -2315,7 +2416,7 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst, const SwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond)); } @@ -2324,7 +2425,7 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst, const DwVfpRegister src, VFPConversionMode mode, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); } @@ -2332,18 +2433,33 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst, void Assembler::vneg(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 | - 0x5*B9 | B8 | B6 | src.code()); + // Instruction details available in ARM DDI 0406C.b, A8-968. + // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) | + // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vm, m; + src.split_code(&vm, &m); + + emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 | + m*B5 | vm); } void Assembler::vabs(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | 0xB*B20 | dst.code()*B12 | - 0x5*B9 | B8 | 0x3*B6 | src.code()); + // Instruction details available in ARM DDI 0406C.b, A8-524. 
+ // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) | + // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vm, m; + src.split_code(&vm, &m); + emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 | + m*B5 | vm); } @@ -2353,12 +2469,18 @@ void Assembler::vadd(const DwVfpRegister dst, const Condition cond) { // Dd = vadd(Dn, Dm) double precision floating point addition. // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-536. - // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | src2.code()); + // Instruction details available in ARM DDI 0406C.b, A8-830. + // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vn, n; + src1.split_code(&vn, &n); + int vm, m; + src2.split_code(&vm, &m); + emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 | + n*B7 | m*B5 | vm); } @@ -2368,12 +2490,18 @@ void Assembler::vsub(const DwVfpRegister dst, const Condition cond) { // Dd = vsub(Dn, Dm) double precision floating point subtraction. // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-784. - // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); + // Instruction details available in ARM DDI 0406C.b, A8-1086. + // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vn, n; + src1.split_code(&vn, &n); + int vm, m; + src2.split_code(&vm, &m); + emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 | + n*B7 | B6 | m*B5 | vm); } @@ -2383,12 +2511,54 @@ void Assembler::vmul(const DwVfpRegister dst, const Condition cond) { // Dd = vmul(Dn, Dm) double precision floating point multiplication. // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-784. - // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | src2.code()); + // Instruction details available in ARM DDI 0406C.b, A8-960. + // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vn, n; + src1.split_code(&vn, &n); + int vm, m; + src2.split_code(&vm, &m); + emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 | + n*B7 | m*B5 | vm); +} + + +void Assembler::vmla(const DwVfpRegister dst, + const DwVfpRegister src1, + const DwVfpRegister src2, + const Condition cond) { + // Instruction details available in ARM DDI 0406C.b, A8-932. 
+ // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0) + int vd, d; + dst.split_code(&vd, &d); + int vn, n; + src1.split_code(&vn, &n); + int vm, m; + src2.split_code(&vm, &m); + emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 | + vm); +} + + +void Assembler::vmls(const DwVfpRegister dst, + const DwVfpRegister src1, + const DwVfpRegister src2, + const Condition cond) { + // Instruction details available in ARM DDI 0406C.b, A8-932. + // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0) + int vd, d; + dst.split_code(&vd, &d); + int vn, n; + src1.split_code(&vn, &n); + int vm, m; + src2.split_code(&vm, &m); + emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 | + m*B5 | vm); } @@ -2398,12 +2568,18 @@ void Assembler::vdiv(const DwVfpRegister dst, const Condition cond) { // Dd = vdiv(Dn, Dm) double precision floating point division. // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm. - // Instruction details available in ARM DDI 0406A, A8-584. - // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | B23 | src1.code()*B16 | - dst.code()*B12 | 0x5*B9 | B8 | src2.code()); + // Instruction details available in ARM DDI 0406C.b, A8-882. + // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vn, n; + src1.split_code(&vn, &n); + int vm, m; + src2.split_code(&vm, &m); + emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 | + vm); } @@ -2411,26 +2587,31 @@ void Assembler::vcmp(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond) { // vcmp(Dd, Dm) double precision floating point comparison. - // Instruction details available in ARM DDI 0406A, A8-570. - // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | - src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code()); + // Instruction details available in ARM DDI 0406C.b, A8-864. + // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + src1.split_code(&vd, &d); + int vm, m; + src2.split_code(&vm, &m); + emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 | + m*B5 | vm); } void Assembler::vcmp(const DwVfpRegister src1, const double src2, const Condition cond) { - // vcmp(Dd, Dm) double precision floating point comparison. - // Instruction details available in ARM DDI 0406A, A8-570. - // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0101 (19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=0 | 1(6) | M(5)=? | 0(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + // vcmp(Dd, #0.0) double precision floating point comparison. + // Instruction details available in ARM DDI 0406C.b, A8-864. 
+ // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0) + ASSERT(IsEnabled(VFP2)); ASSERT(src2 == 0.0); - emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 | B16 | - src1.code()*B12 | 0x5*B9 | B8 | B6); + int vd, d; + src1.split_code(&vd, &d); + emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6); } @@ -2438,7 +2619,7 @@ void Assembler::vmsr(Register dst, Condition cond) { // Instruction details available in ARM DDI 0406A, A8-652. // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(cond | 0xE*B24 | 0xE*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4); } @@ -2448,7 +2629,7 @@ void Assembler::vmrs(Register dst, Condition cond) { // Instruction details available in ARM DDI 0406A, A8-652. // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) | // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(IsEnabled(VFP2)); emit(cond | 0xE*B24 | 0xF*B20 | B16 | dst.code()*B12 | 0xA*B8 | B4); } @@ -2457,11 +2638,16 @@ void Assembler::vmrs(Register dst, Condition cond) { void Assembler::vsqrt(const DwVfpRegister dst, const DwVfpRegister src, const Condition cond) { - // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0001 (19-16) | - // Vd(15-12) | 101(11-9) | sz(8)=1 | 11 (7-6) | M(5)=? | 0(4) | Vm(3-0) - ASSERT(CpuFeatures::IsEnabled(VFP2)); - emit(cond | 0xE*B24 | B23 | 0x3*B20 | B16 | - dst.code()*B12 | 0x5*B9 | B8 | 3*B6 | src.code()); + // Instruction details available in ARM DDI 0406C.b, A8-1058. + // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) | + // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0) + ASSERT(IsEnabled(VFP2)); + int vd, d; + dst.split_code(&vd, &d); + int vm, m; + src.split_code(&vm, &m); + emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 | + m*B5 | vm); } @@ -2594,6 +2780,7 @@ void Assembler::db(uint8_t data) { // to write pure data with no pointers and the constant pool should // be emitted before using db. ASSERT(num_pending_reloc_info_ == 0); + ASSERT(num_pending_64_bit_reloc_info_ == 0); CheckBuffer(); *reinterpret_cast<uint8_t*>(pc_) = data; pc_ += sizeof(uint8_t); @@ -2605,6 +2792,7 @@ void Assembler::dd(uint32_t data) { // to write pure data with no pointers and the constant pool should // be emitted before using dd. ASSERT(num_pending_reloc_info_ == 0); + ASSERT(num_pending_64_bit_reloc_info_ == 0); CheckBuffer(); *reinterpret_cast<uint32_t*>(pc_) = data; pc_ += sizeof(uint32_t); @@ -2628,16 +2816,9 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, || mode == DONT_USE_CONSTANT_POOL); // These modes do not need an entry in the constant pool. } else { - ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); - if (num_pending_reloc_info_ == 0) { - first_const_pool_use_ = pc_offset(); - } - pending_reloc_info_[num_pending_reloc_info_++] = rinfo; - // Make sure the constant pool is not emitted in place of the next - // instruction for which we just recorded relocation info. - BlockConstPoolFor(1); + RecordRelocInfoConstantPoolEntryHelper(rinfo); } - if (rinfo.rmode() != RelocInfo::NONE) { + if (!RelocInfo::IsNone(rinfo.rmode())) { // Don't record external references unless the heap will be serialized. 
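The rewritten VFP encodings above all call split_code() so that d16-d31 can be addressed: a 5-bit register number is split into a 4-bit Vd/Vn/Vm field plus a 1-bit D/N/M bit. A small sketch of that split, written as a free function since DwVfpRegister itself is not reproduced here:

#include <cassert>

// Sketch of DwVfpRegister::split_code(): the high bit of the register number
// goes into the one-bit D/N/M field, the low four bits into Vd/Vn/Vm.
void split_code(int code, int* vx, int* x) {
  *x = (code & 0x10) >> 4;
  *vx = code & 0x0f;
}

int main() {
  int vd, d;
  split_code(12, &vd, &d);              // d12 fits in the old 4-bit field
  assert(vd == 12 && d == 0);
  split_code(17, &vd, &d);              // d17 needs the extra D bit
  assert(vd == 1 && d == 1);
  assert(((d << 4) | vd) == 17);        // the two fields recombine losslessly
  return 0;
}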
if (rmode == RelocInfo::EXTERNAL_REFERENCE) { #ifdef DEBUG @@ -2663,14 +2844,38 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data, } } +void Assembler::RecordRelocInfo(double data) { + // We do not try to reuse pool constants. + RelocInfo rinfo(pc_, data); + RecordRelocInfoConstantPoolEntryHelper(rinfo); +} + + +void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) { + ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo); + if (num_pending_reloc_info_ == 0) { + first_const_pool_use_ = pc_offset(); + } + pending_reloc_info_[num_pending_reloc_info_++] = rinfo; + if (rinfo.rmode() == RelocInfo::NONE64) { + ++num_pending_64_bit_reloc_info_; + } + ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_); + // Make sure the constant pool is not emitted in place of the next + // instruction for which we just recorded relocation info. + BlockConstPoolFor(1); +} + void Assembler::BlockConstPoolFor(int instructions) { int pc_limit = pc_offset() + instructions * kInstrSize; if (no_const_pool_before_ < pc_limit) { // If there are some pending entries, the constant pool cannot be blocked - // further than first_const_pool_use_ + kMaxDistToPool + // further than constant pool instruction's reach. ASSERT((num_pending_reloc_info_ == 0) || - (pc_limit < (first_const_pool_use_ + kMaxDistToPool))); + (pc_limit - first_const_pool_use_ < kMaxDistToIntPool)); + // TODO(jfb) Also check 64-bit entries are in range (requires splitting + // them up from 32-bit entries). no_const_pool_before_ = pc_limit; } @@ -2692,29 +2897,60 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { // There is nothing to do if there are no pending constant pool entries. if (num_pending_reloc_info_ == 0) { + ASSERT(num_pending_64_bit_reloc_info_ == 0); // Calculate the offset of the next check. next_buffer_check_ = pc_offset() + kCheckPoolInterval; return; } - // We emit a constant pool when: - // * requested to do so by parameter force_emit (e.g. after each function). - // * the distance to the first instruction accessing the constant pool is - // kAvgDistToPool or more. - // * no jump is required and the distance to the first instruction accessing - // the constant pool is at least kMaxDistToPool / 2. - ASSERT(first_const_pool_use_ >= 0); - int dist = pc_offset() - first_const_pool_use_; - if (!force_emit && dist < kAvgDistToPool && - (require_jump || (dist < (kMaxDistToPool / 2)))) { - return; - } - // Check that the code buffer is large enough before emitting the constant // pool (include the jump over the pool and the constant pool marker and // the gap to the relocation information). + // Note 64-bit values are wider, and the first one needs to be 64-bit aligned. int jump_instr = require_jump ? kInstrSize : 0; - int size = jump_instr + kInstrSize + num_pending_reloc_info_ * kPointerSize; + int size_up_to_marker = jump_instr + kInstrSize; + int size_after_marker = num_pending_reloc_info_ * kPointerSize; + bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0); + // 64-bit values must be 64-bit aligned. + // We'll start emitting at PC: branch+marker, then 32-bit values, then + // 64-bit values which might need to be aligned. + bool require_64_bit_align = has_fp_values && + (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3); + if (require_64_bit_align) { + size_after_marker += kInstrSize; + } + // num_pending_reloc_info_ also contains 64-bit entries, the above code + // therefore already counted half of the size for 64-bit entries. 
Add the + // remaining size. + STATIC_ASSERT(kPointerSize == kDoubleSize / 2); + size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2); + + int size = size_up_to_marker + size_after_marker; + + // We emit a constant pool when: + // * requested to do so by parameter force_emit (e.g. after each function). + // * the distance from the first instruction accessing the constant pool to + // any of the constant pool entries will exceed its limit the next + // time the pool is checked. This is overly restrictive, but we don't emit + // constant pool entries in-order so it's conservatively correct. + // * the instruction doesn't require a jump after itself to jump over the + // constant pool, and we're getting close to running out of range. + if (!force_emit) { + ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0)); + int dist = pc_offset() + size - first_const_pool_use_; + if (has_fp_values) { + if ((dist < kMaxDistToFPPool - kCheckPoolInterval) && + (require_jump || (dist < kMaxDistToFPPool / 2))) { + return; + } + } else { + if ((dist < kMaxDistToIntPool - kCheckPoolInterval) && + (require_jump || (dist < kMaxDistToIntPool / 2))) { + return; + } + } + } + int needed_space = size + kGap; while (buffer_space() <= needed_space) GrowBuffer(); @@ -2730,11 +2966,44 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { b(&after_pool); } - // Put down constant pool marker "Undefined instruction" as specified by - // A5.6 (ARMv7) Instruction set encoding. - emit(kConstantPoolMarker | num_pending_reloc_info_); + // Put down constant pool marker "Undefined instruction". + // The data size helps disassembly know what to print. + emit(kConstantPoolMarker | EncodeConstantPoolLength(size_after_marker)); + + if (require_64_bit_align) { + emit(kConstantPoolMarker); + } + + // Emit 64-bit constant pool entries first: their range is smaller than + // 32-bit entries. + for (int i = 0; i < num_pending_reloc_info_; i++) { + RelocInfo& rinfo = pending_reloc_info_[i]; + + if (rinfo.rmode() != RelocInfo::NONE64) { + // 32-bit values emitted later. + continue; + } + + ASSERT(!((uintptr_t)pc_ & 0x3)); // Check 64-bit alignment. + + Instr instr = instr_at(rinfo.pc()); + // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0. + ASSERT((IsVldrDPcImmediateOffset(instr) && + GetVldrDRegisterImmediateOffset(instr) == 0)); + + int delta = pc_ - rinfo.pc() - kPcLoadDelta; + ASSERT(is_uint10(delta)); - // Emit constant pool entries. + instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta)); + + const double double_data = rinfo.data64(); + uint64_t uint_data = 0; + memcpy(&uint_data, &double_data, sizeof(double_data)); + emit(uint_data & 0xFFFFFFFF); + emit(uint_data >> 32); + } + + // Emit 32-bit constant pool entries. for (int i = 0; i < num_pending_reloc_info_; i++) { RelocInfo& rinfo = pending_reloc_info_[i]; ASSERT(rinfo.rmode() != RelocInfo::COMMENT && @@ -2742,25 +3011,35 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) { rinfo.rmode() != RelocInfo::STATEMENT_POSITION && rinfo.rmode() != RelocInfo::CONST_POOL); + if (rinfo.rmode() == RelocInfo::NONE64) { + // 64-bit values emitted earlier. + continue; + } + Instr instr = instr_at(rinfo.pc()); - // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. + + // 64-bit loads shouldn't get here. 
+ ASSERT(!IsVldrDPcImmediateOffset(instr)); + + int delta = pc_ - rinfo.pc() - kPcLoadDelta; + // 0 is the smallest delta: + // ldr rd, [pc, #0] + // constant pool marker + // data + if (IsLdrPcImmediateOffset(instr) && GetLdrRegisterImmediateOffset(instr) == 0) { - int delta = pc_ - rinfo.pc() - kPcLoadDelta; - // 0 is the smallest delta: - // ldr rd, [pc, #0] - // constant pool marker - // data ASSERT(is_uint12(delta)); - instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); + emit(rinfo.data()); } else { ASSERT(IsMovW(instr)); + emit(rinfo.data()); } - emit(rinfo.data()); } num_pending_reloc_info_ = 0; + num_pending_64_bit_reloc_info_ = 0; first_const_pool_use_ = -1; RecordComment("]"); diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h index dfcce60114..045638e124 100644 --- a/deps/v8/src/arm/assembler-arm.h +++ b/deps/v8/src/arm/assembler-arm.h @@ -47,6 +47,50 @@ namespace v8 { namespace internal { +// CpuFeatures keeps track of which features are supported by the target CPU. +// Supported features must be enabled by a CpuFeatureScope before use. +class CpuFeatures : public AllStatic { + public: + // Detect features of the target CPU. Set safe defaults if the serializer + // is enabled (snapshots must be portable). + static void Probe(); + + // Check whether a feature is supported by the target CPU. + static bool IsSupported(CpuFeature f) { + ASSERT(initialized_); + if (f == VFP3 && !FLAG_enable_vfp3) return false; + if (f == VFP2 && !FLAG_enable_vfp2) return false; + if (f == SUDIV && !FLAG_enable_sudiv) return false; + if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) { + return false; + } + if (f == VFP32DREGS && !FLAG_enable_32dregs) return false; + return (supported_ & (1u << f)) != 0; + } + + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { + ASSERT(initialized_); + return (found_by_runtime_probing_only_ & + (static_cast<uint64_t>(1) << f)) != 0; + } + + static bool IsSafeForSnapshot(CpuFeature f) { + return (IsSupported(f) && + (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); + } + + private: +#ifdef DEBUG + static bool initialized_; +#endif + static unsigned supported_; + static unsigned found_by_runtime_probing_only_; + + friend class ExternalReference; + DISALLOW_COPY_AND_ASSIGN(CpuFeatures); +}; + + // CPU Registers. // // 1) We would prefer to use an enum, but enum values are assignment- @@ -71,21 +115,24 @@ namespace internal { // Core register struct Register { static const int kNumRegisters = 16; - static const int kNumAllocatableRegisters = 8; + static const int kMaxNumAllocatableRegisters = 8; static const int kSizeInBytes = 4; + static const int kGPRsPerNonVFP2Double = 2; + + inline static int NumAllocatableRegisters(); static int ToAllocationIndex(Register reg) { - ASSERT(reg.code() < kNumAllocatableRegisters); + ASSERT(reg.code() < kMaxNumAllocatableRegisters); return reg.code(); } static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); return from_code(index); } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "r0", "r1", @@ -165,7 +212,6 @@ const Register sp = { kRegister_sp_Code }; const Register lr = { kRegister_lr_Code }; const Register pc = { kRegister_pc_Code }; - // Single word VFP register. 
struct SwVfpRegister { bool is_valid() const { return 0 <= code_ && code_ < 32; } @@ -190,52 +236,36 @@ struct SwVfpRegister { // Double word VFP register. struct DwVfpRegister { - static const int kNumRegisters = 16; + static const int kMaxNumRegisters = 32; // A few double registers are reserved: one as a scratch register and one to // hold 0.0, that does not fit in the immediate field of vmov instructions. // d14: 0.0 // d15: scratch register. static const int kNumReservedRegisters = 2; - static const int kNumAllocatableRegisters = kNumRegisters - + static const int kMaxNumAllocatableRegisters = kMaxNumRegisters - kNumReservedRegisters; - inline static int ToAllocationIndex(DwVfpRegister reg); + // Note: the number of registers can be different at snapshot and run-time. + // Any code included in the snapshot must be able to run both with 16 or 32 + // registers. + inline static int NumRegisters(); + inline static int NumAllocatableRegisters(); - static DwVfpRegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - return from_code(index); - } - - static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - const char* const names[] = { - "d0", - "d1", - "d2", - "d3", - "d4", - "d5", - "d6", - "d7", - "d8", - "d9", - "d10", - "d11", - "d12", - "d13" - }; - return names[index]; - } + inline static int ToAllocationIndex(DwVfpRegister reg); + static const char* AllocationIndexToString(int index); + inline static DwVfpRegister FromAllocationIndex(int index); static DwVfpRegister from_code(int code) { DwVfpRegister r = { code }; return r; } - // Supporting d0 to d15, can be later extended to d31. - bool is_valid() const { return 0 <= code_ && code_ < 16; } + bool is_valid() const { + return 0 <= code_ && code_ < kMaxNumRegisters; + } bool is(DwVfpRegister reg) const { return code_ == reg.code_; } SwVfpRegister low() const { + ASSERT(code_ < 16); SwVfpRegister reg; reg.code_ = code_ * 2; @@ -243,6 +273,7 @@ struct DwVfpRegister { return reg; } SwVfpRegister high() const { + ASSERT(code_ < 16); SwVfpRegister reg; reg.code_ = (code_ * 2) + 1; @@ -322,6 +353,25 @@ const DwVfpRegister d12 = { 12 }; const DwVfpRegister d13 = { 13 }; const DwVfpRegister d14 = { 14 }; const DwVfpRegister d15 = { 15 }; +const DwVfpRegister d16 = { 16 }; +const DwVfpRegister d17 = { 17 }; +const DwVfpRegister d18 = { 18 }; +const DwVfpRegister d19 = { 19 }; +const DwVfpRegister d20 = { 20 }; +const DwVfpRegister d21 = { 21 }; +const DwVfpRegister d22 = { 22 }; +const DwVfpRegister d23 = { 23 }; +const DwVfpRegister d24 = { 24 }; +const DwVfpRegister d25 = { 25 }; +const DwVfpRegister d26 = { 26 }; +const DwVfpRegister d27 = { 27 }; +const DwVfpRegister d28 = { 28 }; +const DwVfpRegister d29 = { 29 }; +const DwVfpRegister d30 = { 30 }; +const DwVfpRegister d31 = { 31 }; + +const Register sfpd_lo = { kRegister_r6_Code }; +const Register sfpd_hi = { kRegister_r7_Code }; // Aliases for double registers. 
Defined using #define instead of // "static const DwVfpRegister&" because Clang complains otherwise when a @@ -399,7 +449,7 @@ class Operand BASE_EMBEDDED { public: // immediate INLINE(explicit Operand(int32_t immediate, - RelocInfo::Mode rmode = RelocInfo::NONE)); + RelocInfo::Mode rmode = RelocInfo::NONE32)); INLINE(static Operand Zero()) { return Operand(static_cast<int32_t>(0)); } @@ -498,114 +548,6 @@ class MemOperand BASE_EMBEDDED { friend class Assembler; }; -// CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. -class CpuFeatures : public AllStatic { - public: - // Detect features of the target CPU. Set safe defaults if the serializer - // is enabled (snapshots must be portable). - static void Probe(); - - // Check whether a feature is supported by the target CPU. - static bool IsSupported(CpuFeature f) { - ASSERT(initialized_); - if (f == VFP3 && !FLAG_enable_vfp3) return false; - if (f == VFP2 && !FLAG_enable_vfp2) return false; - if (f == SUDIV && !FLAG_enable_sudiv) return false; - if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) { - return false; - } - return (supported_ & (1u << f)) != 0; - } - -#ifdef DEBUG - // Check whether a feature is currently enabled. - static bool IsEnabled(CpuFeature f) { - ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); - return (enabled & (1u << f)) != 0; - } -#endif - - // Enable a specified feature within a scope. - class Scope BASE_EMBEDDED { -#ifdef DEBUG - - public: - explicit Scope(CpuFeature f) { - unsigned mask = 1u << f; - // VFP2 and ARMv7 are implied by VFP3. - if (f == VFP3) mask |= 1u << VFP2 | 1u << ARMv7; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - unsigned old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; - - class TryForceFeatureScope BASE_EMBEDDED { - public: - explicit TryForceFeatureScope(CpuFeature f) - : old_supported_(CpuFeatures::supported_) { - if (CanForce()) { - CpuFeatures::supported_ |= (1u << f); - } - } - - ~TryForceFeatureScope() { - if (CanForce()) { - CpuFeatures::supported_ = old_supported_; - } - } - - private: - static bool CanForce() { - // It's only safe to temporarily force support of CPU features - // when there's only a single isolate, which is guaranteed when - // the serializer is enabled. 
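The relocated CpuFeatures class near the top of assembler-arm.h (earlier in this diff) adds IsSafeForSnapshot(), which only trusts a feature during snapshot creation if it was not discovered solely by runtime probing. A rough standalone sketch of that rule, with plain booleans standing in for the real IsSupported(), Serializer::enabled() and IsFoundByRuntimeProbingOnly() queries:

#include <cassert>

// Sketch only; the rule mirrors the header above, the parameters are stand-ins.
bool IsSafeForSnapshot(bool supported, bool serializer_enabled,
                       bool found_by_runtime_probing_only) {
  return supported && (!serializer_enabled || !found_by_runtime_probing_only);
}

int main() {
  // A feature found only by probing the host CPU must not be baked into a
  // portable snapshot.
  assert(!IsSafeForSnapshot(true, /*serializer*/ true, /*probed only*/ true));
  // The same feature is fine when no snapshot is being built.
  assert(IsSafeForSnapshot(true, false, true));
  // Features implied by the build are always safe when supported.
  assert(IsSafeForSnapshot(true, true, false));
  return 0;
}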
- return Serializer::enabled(); - } - - const unsigned old_supported_; - }; - - private: -#ifdef DEBUG - static bool initialized_; -#endif - static unsigned supported_; - static unsigned found_by_runtime_probing_; - - DISALLOW_COPY_AND_ASSIGN(CpuFeatures); -}; - - extern const Instr kMovLrPc; extern const Instr kLdrPCMask; extern const Instr kLdrPCPattern; @@ -629,7 +571,11 @@ extern const Instr kCmpCmnFlip; extern const Instr kAddSubFlip; extern const Instr kAndBicFlip; - +struct VmovIndex { + unsigned char index; +}; +const VmovIndex VmovIndexLo = { 0 }; +const VmovIndex VmovIndexHi = { 1 }; class Assembler : public AssemblerBase { public: @@ -647,15 +593,7 @@ class Assembler : public AssemblerBase { // is too small, a fatal error occurs. No deallocation of the buffer is done // upon destruction of the assembler. Assembler(Isolate* isolate, void* buffer, int buffer_size); - ~Assembler(); - - // Overrides the default provided by FLAG_debug_code. - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - - // Avoids using instructions that vary in size in unpredictable ways between - // the snapshot and the running VM. This is needed by the full compiler so - // that it can recompile code with debug support and fix the PC. - void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + virtual ~Assembler(); // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. GetCode() is idempotent; it returns the same result if no other @@ -1002,10 +940,7 @@ class Assembler : public AssemblerBase { LFlag l = Short); // v5 and above // Support for VFP. - // All these APIs support S0 to S31 and D0 to D15. - // Currently these APIs do not support extended D registers, i.e, D16 to D31. - // However, some simple modifications can allow - // these APIs to support D16 to D31. + // All these APIs support S0 to S31 and D0 to D31. void vldr(const DwVfpRegister dst, const Register base, @@ -1065,8 +1000,7 @@ class Assembler : public AssemblerBase { void vmov(const DwVfpRegister dst, double imm, - const Register scratch = no_reg, - const Condition cond = al); + const Register scratch = no_reg); void vmov(const SwVfpRegister dst, const SwVfpRegister src, const Condition cond = al); @@ -1074,6 +1008,10 @@ class Assembler : public AssemblerBase { const DwVfpRegister src, const Condition cond = al); void vmov(const DwVfpRegister dst, + const VmovIndex index, + const Register src, + const Condition cond = al); + void vmov(const DwVfpRegister dst, const Register src1, const Register src2, const Condition cond = al); @@ -1134,6 +1072,14 @@ class Assembler : public AssemblerBase { const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond = al); + void vmla(const DwVfpRegister dst, + const DwVfpRegister src1, + const DwVfpRegister src2, + const Condition cond = al); + void vmls(const DwVfpRegister dst, + const DwVfpRegister src1, + const DwVfpRegister src2, + const Condition cond = al); void vdiv(const DwVfpRegister dst, const DwVfpRegister src1, const DwVfpRegister src2, @@ -1185,8 +1131,6 @@ class Assembler : public AssemblerBase { // Jump unconditionally to given label. 
void jmp(Label* L) { b(L, al); } - bool predictable_code_size() const { return predictable_code_size_; } - static bool use_immediate_embedded_pointer_loads( const Assembler* assembler) { #ifdef USE_BLX @@ -1282,8 +1226,6 @@ class Assembler : public AssemblerBase { void db(uint8_t data); void dd(uint32_t data); - int pc_offset() const { return pc_ - buffer_; } - PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Read/patch instructions @@ -1299,8 +1241,11 @@ class Assembler : public AssemblerBase { static bool IsBranch(Instr instr); static int GetBranchOffset(Instr instr); static bool IsLdrRegisterImmediate(Instr instr); + static bool IsVldrDRegisterImmediate(Instr instr); static int GetLdrRegisterImmediateOffset(Instr instr); + static int GetVldrDRegisterImmediateOffset(Instr instr); static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset); + static Instr SetVldrDRegisterImmediateOffset(Instr instr, int offset); static bool IsStrRegisterImmediate(Instr instr); static Instr SetStrRegisterImmediateOffset(Instr instr, int offset); static bool IsAddRegisterImmediate(Instr instr); @@ -1315,6 +1260,7 @@ class Assembler : public AssemblerBase { static bool IsStrRegFpNegOffset(Instr instr); static bool IsLdrRegFpNegOffset(Instr instr); static bool IsLdrPcImmediateOffset(Instr instr); + static bool IsVldrDPcImmediateOffset(Instr instr); static bool IsTstImmediate(Instr instr); static bool IsCmpRegister(Instr instr); static bool IsCmpImmediate(Instr instr); @@ -1325,10 +1271,13 @@ class Assembler : public AssemblerBase { static bool IsMovW(Instr instr); // Constants in pools are accessed via pc relative addressing, which can - // reach +/-4KB thereby defining a maximum distance between the instruction - // and the accessed constant. - static const int kMaxDistToPool = 4*KB; - static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize; + // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point + // PC-relative loads, thereby defining a maximum distance between the + // instruction and the accessed constant. + static const int kMaxDistToIntPool = 4*KB; + static const int kMaxDistToFPPool = 1*KB; + // All relocations could be integer, it therefore acts as the limit. + static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool/kInstrSize; // Postpone the generation of the constant pool for the specified number of // instructions. @@ -1343,8 +1292,6 @@ class Assembler : public AssemblerBase { // the relocation info. TypeFeedbackId recorded_ast_id_; - bool emit_debug_code() const { return emit_debug_code_; } - int buffer_space() const { return reloc_info_writer.pos() - pc_; } // Decode branch instruction at pos and return branch target pos @@ -1370,7 +1317,9 @@ class Assembler : public AssemblerBase { if (--const_pool_blocked_nesting_ == 0) { // Check the constant pool hasn't been blocked for too long. ASSERT((num_pending_reloc_info_ == 0) || - (pc_offset() < (first_const_pool_use_ + kMaxDistToPool))); + (pc_offset() < (first_const_pool_use_ + kMaxDistToIntPool))); + ASSERT((num_pending_64_bit_reloc_info_ == 0) || + (pc_offset() < (first_const_pool_use_ + kMaxDistToFPPool))); // Two cases: // * no_const_pool_before_ >= next_buffer_check_ and the emission is // still blocked @@ -1386,13 +1335,6 @@ class Assembler : public AssemblerBase { } private: - // Code buffer: - // The buffer into which code and relocation info are generated. - byte* buffer_; - int buffer_size_; - // True if the assembler owns the buffer, false if buffer is external. 
- bool own_buffer_; - int next_buffer_check_; // pc offset of next buffer check // Code generation @@ -1401,7 +1343,6 @@ class Assembler : public AssemblerBase { // not have to check for overflow. The same is true for writes of large // relocation info entries. static const int kGap = 32; - byte* pc_; // the program counter; moves forward // Constant pool generation // Pools are emitted in the instruction stream, preferably after unconditional @@ -1421,13 +1362,6 @@ class Assembler : public AssemblerBase { static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize; - // Average distance beetween a constant pool and the first instruction - // accessing the constant pool. Longer distance should result in less I-cache - // pollution. - // In practice the distance will be smaller since constant pool emission is - // forced after function return and sometimes after unconditional branches. - static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval; - // Emission of the constant pool may be blocked in some code sequences. int const_pool_blocked_nesting_; // Block emission if this is not zero. int no_const_pool_before_; // Block emission before this pc offset. @@ -1452,6 +1386,9 @@ class Assembler : public AssemblerBase { RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo]; // number of pending reloc info entries in the buffer int num_pending_reloc_info_; + // Number of pending reloc info entries included above which also happen to + // be 64-bit. + int num_pending_64_bit_reloc_info_; // The bound position, before this we cannot do instruction elimination. int last_bound_pos_; @@ -1488,6 +1425,8 @@ class Assembler : public AssemblerBase { // Record reloc info for current pc_ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0, UseConstantPoolMode mode = USE_CONSTANT_POOL); + void RecordRelocInfo(double data); + void RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo); friend class RegExpMacroAssemblerARM; friend class RelocInfo; @@ -1495,10 +1434,6 @@ class Assembler : public AssemblerBase { friend class BlockConstPoolScope; PositionsRecorder positions_recorder_; - - bool emit_debug_code_; - bool predictable_code_size_; - friend class PositionsRecorder; friend class EnsureSpace; }; @@ -1512,26 +1447,6 @@ class EnsureSpace BASE_EMBEDDED { }; -class PredictableCodeSizeScope { - public: - explicit PredictableCodeSizeScope(Assembler* assembler) - : asm_(assembler) { - old_value_ = assembler->predictable_code_size(); - assembler->set_predictable_code_size(true); - } - - ~PredictableCodeSizeScope() { - if (!old_value_) { - asm_->set_predictable_code_size(false); - } - } - - private: - Assembler* asm_; - bool old_value_; -}; - - } } // namespace v8::internal #endif // V8_ARM_ASSEMBLER_ARM_H_ diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc index 2d1d7b1199..2946b355af 100644 --- a/deps/v8/src/arm/builtins-arm.cc +++ b/deps/v8/src/arm/builtins-arm.cc @@ -124,12 +124,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, if (initial_capacity > 0) { size += FixedArray::SizeFor(initial_capacity); } - __ AllocateInNewSpace(size, - result, - scratch2, - scratch3, - gc_required, - TAG_OBJECT); + __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT); // Allocated the JSArray. Now initialize the fields except for the elements // array. 
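An illustrative sketch (not part of the commit) of why assembler-arm.h above now carries two pool-distance limits: the ARM integer ldr instruction encodes a 12-bit byte offset, while the VFP vldr instruction encodes only an 8-bit offset scaled by 4, so PC-relative loads of 64-bit constants must stay within roughly 1KB of the constant pool.

// Constants as added above, with the arithmetic spelled out.
static const int kInstrSize = 4;                    // one ARM instruction is 4 bytes
static const int kMaxDistToIntPool = 4 * 1024;      // ldr:  12-bit byte offset, +/-4095 bytes
static const int kMaxDistToFPPool  = 1 * 1024;      // vldr: 8-bit offset * 4,   +/-1020 bytes
// All pending entries could be integer loads, so the integer limit bounds the count:
static const int kMaxNumPendingRelocInfo = kMaxDistToIntPool / kInstrSize;  // 4096 / 4 = 1024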
@@ -140,7 +135,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, __ LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); __ str(scratch1, FieldMemOperand(result, JSArray::kPropertiesOffset)); // Field JSArray::kElementsOffset is initialized later. - __ mov(scratch3, Operand(0, RelocInfo::NONE)); + __ mov(scratch3, Operand::Zero()); __ str(scratch3, FieldMemOperand(result, JSArray::kLengthOffset)); if (initial_capacity == 0) { @@ -319,7 +314,7 @@ static void ArrayNativeCode(MacroAssembler* masm, has_non_smi_element, finish, cant_transition_map, not_double; // Check for array construction with zero arguments or one. - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); __ b(ne, &argc_one_or_more); // Handle construction of an empty array. @@ -347,7 +342,7 @@ static void ArrayNativeCode(MacroAssembler* masm, __ tst(r2, r2); __ b(ne, ¬_empty_array); __ Drop(1); // Adjust stack. - __ mov(r0, Operand(0)); // Treat this as a call with argc of zero. + __ mov(r0, Operand::Zero()); // Treat this as a call with argc of zero. __ b(&empty_array); __ bind(¬_empty_array); @@ -542,31 +537,65 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : number of arguments // -- r1 : constructor function + // -- r2 : type info cell // -- lr : return address // -- sp[...]: constructor arguments // ----------------------------------- - Label generic_constructor; if (FLAG_debug_code) { // The array construct code is only set for the builtin and internal // Array functions which always have a map. // Initial map for the builtin Array function should be a map. - __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); - __ tst(r2, Operand(kSmiTagMask)); + __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); + __ tst(r3, Operand(kSmiTagMask)); __ Assert(ne, "Unexpected initial map for Array function"); - __ CompareObjectType(r2, r3, r4, MAP_TYPE); + __ CompareObjectType(r3, r3, r4, MAP_TYPE); __ Assert(eq, "Unexpected initial map for Array function"); - } - // Run the native code for the Array function called as a constructor. - ArrayNativeCode(masm, &generic_constructor); + if (FLAG_optimize_constructed_arrays) { + // We should either have undefined in r2 or a valid jsglobalpropertycell + Label okay_here; + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), masm->isolate()); + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ cmp(r2, Operand(undefined_sentinel)); + __ b(eq, &okay_here); + __ ldr(r3, FieldMemOperand(r2, 0)); + __ cmp(r3, Operand(global_property_cell_map)); + __ Assert(eq, "Expected property cell in register ebx"); + __ bind(&okay_here); + } + } - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. 
- __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + if (FLAG_optimize_constructed_arrays) { + Label not_zero_case, not_one_case; + __ tst(r0, r0); + __ b(ne, ¬_zero_case); + ArrayNoArgumentConstructorStub no_argument_stub; + __ TailCallStub(&no_argument_stub); + + __ bind(¬_zero_case); + __ cmp(r0, Operand(1)); + __ b(gt, ¬_one_case); + ArraySingleArgumentConstructorStub single_argument_stub; + __ TailCallStub(&single_argument_stub); + + __ bind(¬_one_case); + ArrayNArgumentsConstructorStub n_argument_stub; + __ TailCallStub(&n_argument_stub); + } else { + Label generic_constructor; + // Run the native code for the Array function called as a constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + } } @@ -590,7 +619,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Load the first arguments in r0 and get rid of the rest. Label no_arguments; - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); __ b(eq, &no_arguments); // First args = sp[(argc - 1) * 4]. __ sub(r0, r0, Operand(1)); @@ -619,12 +648,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // ----------------------------------- Label gc_required; - __ AllocateInNewSpace(JSValue::kSize, - r0, // Result. - r3, // Scratch. - r4, // Scratch. - &gc_required, - TAG_OBJECT); + __ Allocate(JSValue::kSize, + r0, // Result. + r3, // Scratch. + r4, // Scratch. + &gc_required, + TAG_OBJECT); // Initialising the String Object. Register map = r3; @@ -634,7 +663,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { __ cmp(r4, Operand(JSValue::kSize >> kPointerSizeLog2)); __ Assert(eq, "Unexpected string wrapper instance size"); __ ldrb(r4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset)); - __ cmp(r4, Operand(0, RelocInfo::NONE)); + __ cmp(r4, Operand::Zero()); __ Assert(eq, "Unexpected unused properties of string wrapper"); } __ str(map, FieldMemOperand(r0, HeapObject::kMapOffset)); @@ -682,7 +711,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Load the empty string into r2, remove the receiver from the // stack, and jump back to the case where the argument is a string. __ bind(&no_arguments); - __ LoadRoot(argument, Heap::kEmptyStringRootIndex); + __ LoadRoot(argument, Heap::kempty_stringRootIndex); __ Drop(1); __ b(&argument_is_string); @@ -712,6 +741,35 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) { } +void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve the function. + __ push(r1); + // Push call kind information. + __ push(r5); + + // Push the function on the stack as the argument to the runtime function. + __ push(r1); + __ CallRuntime(Runtime::kInstallRecompiledCode, 1); + // Calculate the entry point. + __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Restore call kind information. + __ pop(r5); + // Restore saved function. + __ pop(r1); + + // Tear down internal frame. + } + + // Do a tail-call of the compiled function. 
+ __ Jump(r2); +} + + void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1044,9 +1102,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // If the type of the result (stored in its map) is less than // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); + __ CompareObjectType(r0, r1, r3, FIRST_SPEC_OBJECT_TYPE); __ b(ge, &exit); + // Symbols are "objects". + __ CompareInstanceType(r1, r3, SYMBOL_TYPE); + __ b(eq, &exit); + // Throw away the result of the constructor invocation and use the // on-stack receiver as the result. __ bind(&use_receiver); @@ -1097,7 +1159,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // r5-r7, cp may be clobbered // Clear the context before we push it when entering the internal frame. - __ mov(cp, Operand(0, RelocInfo::NONE)); + __ mov(cp, Operand::Zero()); // Enter an internal frame. { @@ -1141,6 +1203,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Invoke the code and pass argc as r0. __ mov(r0, Operand(r3)); if (is_construct) { + // No type feedback cell is available + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), masm->isolate()); + __ mov(r2, Operand(undefined_sentinel)); CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ CallStub(&stub); } else { @@ -1226,6 +1292,57 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { } +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { + // For now, we are relying on the fact that make_code_young doesn't do any + // garbage collection which allows us to save/restore the registers without + // worrying about which of them contain pointers. We also don't build an + // internal frame to make the code faster, since we shouldn't have to do stack + // crawls in MakeCodeYoung. This seems a bit fragile. + + // The following registers must be saved and restored when calling through to + // the runtime: + // r0 - contains return address (beginning of patch sequence) + // r1 - function object + FrameScope scope(masm, StackFrame::MANUAL); + __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); + __ PrepareCallCFunction(1, 0, r1); + __ CallCFunction( + ExternalReference::get_make_code_young_function(masm->isolate()), 1); + __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit()); + __ mov(pc, r0); +} + +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} \ +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR + + +void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve registers across notification, this is important for compiled + // stubs that tail call the runtime on deopts passing their parameters in + // registers. + __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved); + // Pass the function and deoptimization type to the runtime system. 
+ __ CallRuntime(Runtime::kNotifyStubFailure, 0); + __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved); + } + + __ add(sp, sp, Operand(kPointerSize)); // Ignore state + __ mov(pc, lr); // Jump to miss handler +} + + static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { { @@ -1284,12 +1401,6 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - CpuFeatures::TryForceFeatureScope scope(VFP3); - if (!CPU::SupportsCrankshaft()) { - __ Abort("Unreachable code: Cannot optimize without VFP3 support."); - return; - } - // Lookup the function in the JavaScript frame and push it as an // argument to the on-stack replacement function. __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -1322,7 +1433,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 1. Make sure we have at least one argument. // r0: actual number of arguments { Label done; - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); __ b(ne, &done); __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); __ push(r2); @@ -1343,7 +1454,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // r0: actual number of arguments // r1: function Label shift_arguments; - __ mov(r4, Operand(0, RelocInfo::NONE)); // indicate regular JS_FUNCTION + __ mov(r4, Operand::Zero()); // indicate regular JS_FUNCTION { Label convert_to_object, use_global_receiver, patch_receiver; // Change context eagerly in case we need the global receiver. __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); @@ -1398,7 +1509,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // Restore the function to r1, and the flag to r4. __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2)); - __ mov(r4, Operand(0, RelocInfo::NONE)); + __ mov(r4, Operand::Zero()); __ jmp(&patch_receiver); // Use the global receiver object from the called function as the @@ -1420,11 +1531,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 3b. Check for function proxy. __ bind(&slow); - __ mov(r4, Operand(1, RelocInfo::NONE)); // indicate function proxy + __ mov(r4, Operand(1, RelocInfo::NONE32)); // indicate function proxy __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE)); __ b(eq, &shift_arguments); __ bind(&non_function); - __ mov(r4, Operand(2, RelocInfo::NONE)); // indicate non-function + __ mov(r4, Operand(2, RelocInfo::NONE32)); // indicate non-function // 3c. Patch the first argument when calling a non-function. The // CALL_NON_FUNCTION builtin expects the non-function callee as @@ -1468,7 +1579,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ tst(r4, r4); __ b(eq, &function); // Expected number of arguments is 0 for CALL_NON_FUNCTION. - __ mov(r2, Operand(0, RelocInfo::NONE)); + __ mov(r2, Operand::Zero()); __ SetCallKind(r5, CALL_AS_METHOD); __ cmp(r4, Operand(1)); __ b(ne, &non_proxy); @@ -1546,7 +1657,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { // Push current limit and index. __ bind(&okay); __ push(r0); // limit - __ mov(r1, Operand(0, RelocInfo::NONE)); // initial index + __ mov(r1, Operand::Zero()); // initial index __ push(r1); // Get the receiver. 
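As a reading aid (not from the commit), this is what a single expansion of the DEFINE_CODE_AGE_BUILTIN_GENERATOR macro above looks like; the age name used here is hypothetical and simply stands in for whatever CODE_AGE_LIST enumerates in this revision.

// Hypothetical expansion for an age called "Quadragenarian".
void Builtins::Generate_MakeQuadragenarianCodeYoungAgainEvenMarking(
    MacroAssembler* masm) {
  GenerateMakeCodeYoungAgainCommon(masm);
}
void Builtins::Generate_MakeQuadragenarianCodeYoungAgainOddMarking(
    MacroAssembler* masm) {
  GenerateMakeCodeYoungAgainCommon(masm);
}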
@@ -1658,7 +1769,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ bind(&call_proxy); __ push(r1); // add function proxy as last argument __ add(r0, r0, Operand(1)); - __ mov(r2, Operand(0, RelocInfo::NONE)); + __ mov(r2, Operand::Zero()); __ SetCallKind(r5, CALL_AS_METHOD); __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc index ceb108ffae..b1ffaea14d 100644 --- a/deps/v8/src/arm/code-stubs-arm.cc +++ b/deps/v8/src/arm/code-stubs-arm.cc @@ -32,17 +32,90 @@ #include "bootstrapper.h" #include "code-stubs.h" #include "regexp-macro-assembler.h" +#include "stub-cache.h" namespace v8 { namespace internal { +void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { r3, r2, r1, r0 }; + descriptor->register_param_count_ = 4; + descriptor->register_params_ = registers; + descriptor->stack_parameter_count_ = NULL; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; +} + + +void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { r1, r0 }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); +} + + +void TransitionElementsKindStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { r0, r1 }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + Address entry = + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; + descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); +} + + +static void InitializeArrayConstructorDescriptor(Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + // register state + // r1 -- constructor function + // r2 -- type info cell with elements kind + // r0 -- number of arguments to the constructor function + static Register registers[] = { r1, r2 }; + descriptor->register_param_count_ = 2; + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &r0; + descriptor->register_params_ = registers; + descriptor->extra_expression_stack_count_ = 1; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ArrayConstructor_StubFailure); +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + #define __ ACCESS_MASM(masm) static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond, - bool never_nan_nan); + Condition cond); static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, Register rhs, @@ -93,12 +166,7 @@ void 
FastNewClosureStub::Generate(MacroAssembler* masm) { __ pop(r3); // Attempt to allocate new JSFunction in new space. - __ AllocateInNewSpace(JSFunction::kSize, - r0, - r1, - r2, - &gc, - TAG_OBJECT); + __ Allocate(JSFunction::kSize, r0, r1, r2, &gc, TAG_OBJECT); __ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7); @@ -225,12 +293,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { int length = slots_ + Context::MIN_CONTEXT_SLOTS; // Attempt to allocate the context in new space. - __ AllocateInNewSpace(FixedArray::SizeFor(length), - r0, - r1, - r2, - &gc, - TAG_OBJECT); + __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT); // Load the function from the stack. __ ldr(r3, MemOperand(sp, 0)); @@ -275,8 +338,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace(FixedArray::SizeFor(length), - r0, r1, r2, &gc, TAG_OBJECT); + __ Allocate(FixedArray::SizeFor(length), r0, r1, r2, &gc, TAG_OBJECT); // Load the function from the stack. __ ldr(r3, MemOperand(sp, 0)); @@ -334,6 +396,7 @@ static void GenerateFastCloneShallowArrayCommon( MacroAssembler* masm, int length, FastCloneShallowArrayStub::Mode mode, + AllocationSiteMode allocation_site_mode, Label* fail) { // Registers on entry: // @@ -347,16 +410,28 @@ static void GenerateFastCloneShallowArrayCommon( ? FixedDoubleArray::SizeFor(length) : FixedArray::SizeFor(length); } - int size = JSArray::kSize + elements_size; + + int size = JSArray::kSize; + int allocation_info_start = size; + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + size += AllocationSiteInfo::kSize; + } + size += elements_size; // Allocate both the JS array and the elements array in one big // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, - r0, - r1, - r2, - fail, - TAG_OBJECT); + AllocationFlags flags = TAG_OBJECT; + if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) { + flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags); + } + __ Allocate(size, r0, r1, r2, fail, flags); + + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()-> + allocation_site_info_map()))); + __ str(r2, FieldMemOperand(r0, allocation_info_start)); + __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize)); + } // Copy the JS array part. for (int i = 0; i < JSArray::kSize; i += kPointerSize) { @@ -370,7 +445,11 @@ static void GenerateFastCloneShallowArrayCommon( // Get hold of the elements array of the boilerplate and setup the // elements pointer in the resulting object. __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset)); - __ add(r2, r0, Operand(JSArray::kSize)); + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize)); + } else { + __ add(r2, r0, Operand(JSArray::kSize)); + } __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset)); // Copy the elements array. @@ -403,8 +482,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex); __ b(ne, &check_fast_elements); - GenerateFastCloneShallowArrayCommon(masm, 0, - COPY_ON_WRITE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS, + allocation_site_mode_, + &slow_case); // Return and remove the on-stack parameters. 
__ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -412,8 +492,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ bind(&check_fast_elements); __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); __ b(ne, &double_elements); - GenerateFastCloneShallowArrayCommon(masm, length_, - CLONE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS, + allocation_site_mode_, + &slow_case); // Return and remove the on-stack parameters. __ add(sp, sp, Operand(3 * kPointerSize)); __ Ret(); @@ -445,7 +526,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ pop(r3); } - GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, mode, + allocation_site_mode_, + &slow_case); // Return and remove the on-stack parameters. __ add(sp, sp, Operand(3 * kPointerSize)); @@ -456,55 +539,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { } -void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [sp]: object literal flags. - // [sp + kPointerSize]: constant properties. - // [sp + (2 * kPointerSize)]: literal index. - // [sp + (3 * kPointerSize)]: literals array. - - // Load boilerplate object into r3 and check if we need to create a - // boilerplate. - Label slow_case; - __ ldr(r3, MemOperand(sp, 3 * kPointerSize)); - __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); - __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); - __ b(eq, &slow_case); - - // Check that the boilerplate contains only fast properties and we can - // statically determine the instance size. - int size = JSObject::kHeaderSize + length_ * kPointerSize; - __ ldr(r0, FieldMemOperand(r3, HeapObject::kMapOffset)); - __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceSizeOffset)); - __ cmp(r0, Operand(size >> kPointerSizeLog2)); - __ b(ne, &slow_case); - - // Allocate the JS object and copy header together with all in-object - // properties from the boilerplate. - __ AllocateInNewSpace(size, r0, r1, r2, &slow_case, TAG_OBJECT); - for (int i = 0; i < size; i += kPointerSize) { - __ ldr(r1, FieldMemOperand(r3, i)); - __ str(r1, FieldMemOperand(r0, i)); - } - - // Return and remove the on-stack parameters. - __ add(sp, sp, Operand(4 * kPointerSize)); - __ Ret(); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1); -} - - // Takes a Smi and converts to an IEEE 64 bit floating point value in two // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a // scratch register. Destroys the source register. No GC occurs during this // stub so you don't have to set up the frame. -class ConvertToDoubleStub : public CodeStub { +class ConvertToDoubleStub : public PlatformCodeStub { public: ConvertToDoubleStub(Register result_reg_1, Register result_reg_2, @@ -551,7 +591,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC); // Subtract from 0 if source was negative. - __ rsb(source_, source_, Operand(0, RelocInfo::NONE), LeaveCC, ne); + __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne); // We have -1, 0 or 1, which we treat specially. 
Register source_ contains // absolute value: it is either equal to 1 (special case of -1 and 1), @@ -564,7 +604,7 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) { HeapNumber::kExponentBias << HeapNumber::kExponentShift; __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq); // 1, 0 and -1 all have 0 for the second word. - __ mov(mantissa, Operand(0, RelocInfo::NONE)); + __ mov(mantissa, Operand::Zero()); __ Ret(); __ bind(¬_special); @@ -600,7 +640,7 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); __ vmov(d7.high(), scratch1); __ vcvt_f64_s32(d7, d7.high()); @@ -617,34 +657,16 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, __ mov(scratch1, Operand(r0)); ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); __ push(lr); - __ Call(stub1.GetCode()); + __ Call(stub1.GetCode(masm->isolate())); // Write Smi from r1 to r1 and r0 in double format. __ mov(scratch1, Operand(r1)); ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); - __ Call(stub2.GetCode()); + __ Call(stub2.GetCode(masm->isolate())); __ pop(lr); } } -void FloatingPointHelper::LoadOperands( - MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* slow) { - - // Load right operand (r0) to d6 or r2/r3. - LoadNumber(masm, destination, - r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); - - // Load left operand (r1) to d7 or r0/r1. - LoadNumber(masm, destination, - r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); -} - - void FloatingPointHelper::LoadNumber(MacroAssembler* masm, Destination destination, Register object, @@ -669,7 +691,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a heap number. if (CpuFeatures::IsSupported(VFP2) && destination == kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double from tagged HeapNumber to double register. __ sub(scratch1, object, Operand(kHeapObjectTag)); __ vldr(dst, scratch1, HeapNumber::kValueOffset); @@ -683,7 +705,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a smi. __ bind(&is_smi); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Convert smi to double using VFP instructions. 
__ vmov(dst.high(), scratch1); __ vcvt_f64_s32(dst, dst.high()); @@ -697,7 +719,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, __ mov(scratch1, Operand(object)); ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); __ push(lr); - __ Call(stub.GetCode()); + __ Call(stub.GetCode(masm->isolate())); __ pop(lr); } @@ -712,34 +734,21 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3, - DwVfpRegister double_scratch, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, Label* not_number) { + Label done; __ AssertRootValue(heap_number_map, Heap::kHeapNumberMapRootIndex, "HeapNumberMap register clobbered."); - Label done; - Label not_in_int32_range; __ UntagAndJumpIfSmi(dst, object, &done); __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); __ cmp(scratch1, heap_number_map); __ b(ne, not_number); - __ ConvertToInt32(object, - dst, - scratch1, - scratch2, - double_scratch, - ¬_in_int32_range); - __ jmp(&done); - - __ bind(¬_in_int32_range); - __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); - __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); - - __ EmitOutOfInt32RangeTruncate(dst, - scratch1, - scratch2, - scratch3); + __ ECMAConvertNumberToInt32(object, dst, + scratch1, scratch2, scratch3, + double_scratch1, double_scratch2); __ bind(&done); } @@ -748,71 +757,72 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Register int_scratch, Destination destination, DwVfpRegister double_dst, - Register dst1, - Register dst2, + Register dst_mantissa, + Register dst_exponent, Register scratch2, SwVfpRegister single_scratch) { ASSERT(!int_scratch.is(scratch2)); - ASSERT(!int_scratch.is(dst1)); - ASSERT(!int_scratch.is(dst2)); + ASSERT(!int_scratch.is(dst_mantissa)); + ASSERT(!int_scratch.is(dst_exponent)); Label done; if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(single_scratch, int_scratch); __ vcvt_f64_s32(double_dst, single_scratch); if (destination == kCoreRegisters) { - __ vmov(dst1, dst2, double_dst); + __ vmov(dst_mantissa, dst_exponent, double_dst); } } else { Label fewer_than_20_useful_bits; // Expected output: - // | dst2 | dst1 | + // | dst_exponent | dst_mantissa | // | s | exp | mantissa | // Check for zero. __ cmp(int_scratch, Operand::Zero()); - __ mov(dst2, int_scratch); - __ mov(dst1, int_scratch); + __ mov(dst_exponent, int_scratch); + __ mov(dst_mantissa, int_scratch); __ b(eq, &done); // Preload the sign of the value. - __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC); + __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC); // Get the absolute value of the object (as an unsigned integer). __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); // Get mantissa[51:20]. // Get the position of the first set bit. - __ CountLeadingZeros(dst1, int_scratch, scratch2); - __ rsb(dst1, dst1, Operand(31)); + __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2); + __ rsb(dst_mantissa, dst_mantissa, Operand(31)); // Set the exponent. - __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias)); - __ Bfi(dst2, scratch2, scratch2, + __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); + __ Bfi(dst_exponent, scratch2, scratch2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Clear the first non null bit. 
__ mov(scratch2, Operand(1)); - __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1)); + __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa)); - __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); // Get the number of bits to set in the lower part of the mantissa. - __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); + __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord), + SetCC); __ b(mi, &fewer_than_20_useful_bits); // Set the higher 20 bits of the mantissa. - __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2)); + __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2)); __ rsb(scratch2, scratch2, Operand(32)); - __ mov(dst1, Operand(int_scratch, LSL, scratch2)); + __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2)); __ b(&done); __ bind(&fewer_than_20_useful_bits); - __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); - __ orr(dst2, dst2, scratch2); + __ orr(dst_exponent, dst_exponent, scratch2); // Set dst1 to 0. - __ mov(dst1, Operand::Zero()); + __ mov(dst_mantissa, Operand::Zero()); } __ bind(&done); } @@ -823,8 +833,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, Destination destination, DwVfpRegister double_dst, DwVfpRegister double_scratch, - Register dst1, - Register dst2, + Register dst_mantissa, + Register dst_exponent, Register heap_number_map, Register scratch1, Register scratch2, @@ -840,8 +850,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ JumpIfNotSmi(object, &obj_is_not_smi); __ SmiUntag(scratch1, object); - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, - scratch2, single_scratch); + ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, + dst_exponent, scratch2, single_scratch); __ b(&done); __ bind(&obj_is_not_smi); @@ -852,42 +862,62 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, // Load the number. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double value. __ sub(scratch1, object, Operand(kHeapObjectTag)); __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); - __ EmitVFPTruncate(kRoundToZero, - scratch1, - double_dst, - scratch2, - double_scratch, - kCheckForInexactConversion); - + __ TestDoubleIsInt32(double_dst, double_scratch); // Jump to not_int32 if the operation did not succeed. __ b(ne, not_int32); if (destination == kCoreRegisters) { - __ vmov(dst1, dst2, double_dst); + __ vmov(dst_mantissa, dst_exponent, double_dst); } } else { ASSERT(!scratch1.is(object) && !scratch2.is(object)); - // Load the double value in the destination registers.. - __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + // Load the double value in the destination registers. + bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); + if (save_registers) { + // Save both output registers, because the other one probably holds + // an important value too. + __ Push(dst_exponent, dst_mantissa); + } + __ Ldrd(dst_mantissa, dst_exponent, + FieldMemOperand(object, HeapNumber::kValueOffset)); // Check for 0 and -0. 
- __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask)); - __ orr(scratch1, scratch1, Operand(dst2)); + Label zero; + __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask)); + __ orr(scratch1, scratch1, Operand(dst_mantissa)); __ cmp(scratch1, Operand::Zero()); - __ b(eq, &done); + __ b(eq, &zero); // Check that the value can be exactly represented by a 32-bit integer. // Jump to not_int32 if that's not the case. - DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); + Label restore_input_and_miss; + DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, + &restore_input_and_miss); - // dst1 and dst2 were trashed. Reload the double value. - __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); + // dst_* were trashed. Reload the double value. + if (save_registers) { + __ Pop(dst_exponent, dst_mantissa); + } + __ Ldrd(dst_mantissa, dst_exponent, + FieldMemOperand(object, HeapNumber::kValueOffset)); + __ b(&done); + + __ bind(&restore_input_and_miss); + if (save_registers) { + __ Pop(dst_exponent, dst_mantissa); + } + __ b(not_int32); + + __ bind(&zero); + if (save_registers) { + __ Drop(2); + } } __ bind(&done); @@ -910,31 +940,26 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, !scratch1.is(scratch3) && !scratch2.is(scratch3)); - Label done; + Label done, maybe_undefined; __ UntagAndJumpIfSmi(dst, object, &done); __ AssertRootValue(heap_number_map, Heap::kHeapNumberMapRootIndex, "HeapNumberMap register clobbered."); - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); // Object is a heap number. // Convert the floating point value to a 32-bit integer. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double value. __ sub(scratch1, object, Operand(kHeapObjectTag)); __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); - __ EmitVFPTruncate(kRoundToZero, - dst, - double_scratch0, - scratch1, - double_scratch1, - kCheckForInexactConversion); - + __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); // Jump to not_int32 if the operation did not succeed. __ b(ne, not_int32); } else { @@ -964,20 +989,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, __ tst(scratch1, Operand(HeapNumber::kSignMask)); __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); } + __ b(&done); + + __ bind(&maybe_undefined); + __ CompareRoot(object, Heap::kUndefinedValueRootIndex); + __ b(ne, not_int32); + // |undefined| is truncated to 0. + __ mov(dst, Operand(Smi::FromInt(0))); + // Fall through. __ bind(&done); } void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, - Register src1, - Register src2, + Register src_exponent, + Register src_mantissa, Register dst, Register scratch, Label* not_int32) { // Get exponent alone in scratch. __ Ubfx(scratch, - src1, + src_exponent, HeapNumber::kExponentShift, HeapNumber::kExponentBits); @@ -997,11 +1030,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Another way to put it is that if (exponent - signbit) > 30 then the // number cannot be represented as an int32. Register tmp = dst; - __ sub(tmp, scratch, Operand(src1, LSR, 31)); + __ sub(tmp, scratch, Operand(src_exponent, LSR, 31)); __ cmp(tmp, Operand(30)); __ b(gt, not_int32); // - Bits [21:0] in the mantissa are not null. 
- __ tst(src2, Operand(0x3fffff)); + __ tst(src_mantissa, Operand(0x3fffff)); __ b(ne, not_int32); // Otherwise the exponent needs to be big enough to shift left all the @@ -1012,19 +1045,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Get the 32 higher bits of the mantissa in dst. __ Ubfx(dst, - src2, + src_mantissa, HeapNumber::kMantissaBitsInTopWord, 32 - HeapNumber::kMantissaBitsInTopWord); __ orr(dst, dst, - Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord)); // Create the mask and test the lower bits (of the higher bits). __ rsb(scratch, scratch, Operand(32)); - __ mov(src2, Operand(1)); - __ mov(src1, Operand(src2, LSL, scratch)); - __ sub(src1, src1, Operand(1)); - __ tst(dst, src1); + __ mov(src_mantissa, Operand(1)); + __ mov(src_exponent, Operand(src_mantissa, LSL, scratch)); + __ sub(src_exponent, src_exponent, Operand(1)); + __ tst(dst, src_exponent); __ b(ne, not_int32); } @@ -1049,7 +1082,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ push(lr); __ PrepareCallCFunction(0, 2, scratch); if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } @@ -1061,7 +1094,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( // Store answer in the overwritable heap number. Double returned in // registers r0 and r1 or in d0. if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); } else { @@ -1089,11 +1122,12 @@ bool WriteInt32ToHeapNumberStub::IsPregenerated() { } -void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { +void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { WriteInt32ToHeapNumberStub stub1(r1, r0, r2); WriteInt32ToHeapNumberStub stub2(r2, r0, r3); - stub1.GetCode()->set_is_pregenerated(true); - stub2.GetCode()->set_is_pregenerated(true); + stub1.GetCode(isolate)->set_is_pregenerated(true); + stub2.GetCode(isolate)->set_is_pregenerated(true); } @@ -1114,7 +1148,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { // Set the sign bit in scratch_ if the value was negative. __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs); // Subtract from 0 if the value was negative. - __ rsb(the_int_, the_int_, Operand(0, RelocInfo::NONE), LeaveCC, cs); + __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs); // We should be masking the implict first digit of the mantissa away here, // but it just ends up combining harmlessly with the last digit of the // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get @@ -1137,7 +1171,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { non_smi_exponent += 1 << HeapNumber::kExponentShift; __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent)); __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset)); - __ mov(ip, Operand(0, RelocInfo::NONE)); + __ mov(ip, Operand::Zero()); __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); __ Ret(); } @@ -1148,48 +1182,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { // for "identity and not NaN". 
static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cond, - bool never_nan_nan) { + Condition cond) { Label not_identical; Label heap_number, return_equal; __ cmp(r0, r1); __ b(ne, ¬_identical); - // The two objects are identical. If we know that one of them isn't NaN then - // we now know they test equal. - if (cond != eq || !never_nan_nan) { - // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), - // so we do the second best thing - test it ourselves. - // They are both equal and they are not both Smis so both of them are not - // Smis. If it's not a heap number, then return equal. - if (cond == lt || cond == gt) { - __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); + // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. + if (cond == lt || cond == gt) { + __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE); + __ b(ge, slow); + } else { + __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); + __ b(eq, &heap_number); + // Comparing JS objects with <=, >= is complicated. + if (cond != eq) { + __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); __ b(ge, slow); - } else { - __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); - __ b(eq, &heap_number); - // Comparing JS objects with <=, >= is complicated. - if (cond != eq) { - __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE)); - __ b(ge, slow); - // Normally here we fall through to return_equal, but undefined is - // special: (undefined == undefined) == true, but - // (undefined <= undefined) == false! See ECMAScript 11.8.5. - if (cond == le || cond == ge) { - __ cmp(r4, Operand(ODDBALL_TYPE)); - __ b(ne, &return_equal); - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ cmp(r0, r2); - __ b(ne, &return_equal); - if (cond == le) { - // undefined <= undefined should fail. - __ mov(r0, Operand(GREATER)); - } else { - // undefined >= undefined should fail. - __ mov(r0, Operand(LESS)); - } - __ Ret(); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cond == le || cond == ge) { + __ cmp(r4, Operand(ODDBALL_TYPE)); + __ b(ne, &return_equal); + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ cmp(r0, r2); + __ b(ne, &return_equal); + if (cond == le) { + // undefined <= undefined should fail. + __ mov(r0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ mov(r0, Operand(LESS)); } + __ Ret(); } } } @@ -1204,47 +1233,45 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, } __ Ret(); - if (cond != eq || !never_nan_nan) { - // For less and greater we don't have to check for NaN since the result of - // x < x is false regardless. For the others here is some code to check - // for NaN. - if (cond != lt && cond != gt) { - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if it's - // not NaN. - - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // Read top bits of double representation (second word of value). - __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - // Test that exponent bits are all set. 
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); - // NaNs have all-one exponents so they sign extend to -1. - __ cmp(r3, Operand(-1)); - __ b(ne, &return_equal); - - // Shift out flag and all exponent bits, retaining only mantissa. - __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); - // Or with all low-bits of mantissa. - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ orr(r0, r3, Operand(r2), SetCC); - // For equal we already have the right value in r0: Return zero (equal) - // if all bits in mantissa are zero (it's an Infinity) and non-zero if - // not (it's a NaN). For <= and >= we need to load r0 with the failing - // value if it's a NaN. - if (cond != eq) { - // All-zero means Infinity means equal. - __ Ret(eq); - if (cond == le) { - __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. - } else { - __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. - } + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cond != lt && cond != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // NaNs have all-one exponents so they sign extend to -1. + __ cmp(r3, Operand(-1)); + __ b(ne, &return_equal); + + // Shift out flag and all exponent bits, retaining only mantissa. + __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); + // Or with all low-bits of mantissa. + __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); + __ orr(r0, r3, Operand(r2), SetCC); + // For equal we already have the right value in r0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load r0 with the failing + // value if it's a NaN. + if (cond != eq) { + // All-zero means Infinity means equal. + __ Ret(eq); + if (cond == le) { + __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. } - __ Ret(); } - // No fall through here. + __ Ret(); } + // No fall through here. __ bind(¬_identical); } @@ -1282,7 +1309,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Lhs is a smi, rhs is a number. if (CpuFeatures::IsSupported(VFP2)) { // Convert lhs to a double in d7. - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); // Load the double from rhs, tagged HeapNumber r0, to d6. __ sub(r7, rhs, Operand(kHeapObjectTag)); @@ -1292,7 +1319,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Convert lhs to a double in r2, r3. __ mov(r7, Operand(lhs)); ConvertToDoubleStub stub1(r3, r2, r7, r6); - __ Call(stub1.GetCode()); + __ Call(stub1.GetCode(masm->isolate())); // Load rhs to a double in r0, r1. __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); __ pop(lr); @@ -1321,7 +1348,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Rhs is a smi, lhs is a heap number. 
if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Load the double from lhs, tagged HeapNumber r1, to d7. __ sub(r7, lhs, Operand(kHeapObjectTag)); __ vldr(d7, r7, HeapNumber::kValueOffset); @@ -1334,7 +1361,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Convert rhs to a double in r0, r1. __ mov(r7, Operand(rhs)); ConvertToDoubleStub stub2(r1, r0, r7, r6); - __ Call(stub2.GetCode()); + __ Call(stub2.GetCode(masm->isolate())); __ pop(lr); } // Fall through to both_loaded_as_doubles. @@ -1360,7 +1387,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { Operand(lhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), SetCC); __ b(ne, &one_is_nan); - __ cmp(lhs_mantissa, Operand(0, RelocInfo::NONE)); + __ cmp(lhs_mantissa, Operand::Zero()); __ b(ne, &one_is_nan); __ bind(lhs_not_nan); @@ -1375,7 +1402,7 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), SetCC); __ b(ne, &one_is_nan); - __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); + __ cmp(rhs_mantissa, Operand::Zero()); __ b(eq, &neither_is_nan); __ bind(&one_is_nan); @@ -1433,7 +1460,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, __ push(lr); __ PrepareCallCFunction(0, 2, r5); if (masm->use_eabi_hardfloat()) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(d0, r0, r1); __ vmov(d1, r2, r3); } @@ -1480,12 +1507,13 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, __ cmp(r3, Operand(ODDBALL_TYPE)); __ b(eq, &return_not_equal); - // Now that we have the types we might as well check for symbol-symbol. - // Ensure that no non-strings have the symbol bit set. - STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); - STATIC_ASSERT(kSymbolTag != 0); + // Now that we have the types we might as well check for + // internalized-internalized. + // Ensure that no non-strings have the internalized bit set. + STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask); + STATIC_ASSERT(kInternalizedTag != 0); __ and_(r2, r2, Operand(r3)); - __ tst(r2, Operand(kIsSymbolMask)); + __ tst(r2, Operand(kIsInternalizedMask)); __ b(ne, &return_not_equal); } @@ -1509,7 +1537,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, // Both are heap numbers. Load them up then jump to the code we have // for that. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ sub(r7, rhs, Operand(kHeapObjectTag)); __ vldr(d6, r7, HeapNumber::kValueOffset); __ sub(r7, lhs, Operand(kHeapObjectTag)); @@ -1522,29 +1550,29 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, } -// Fast negative check for symbol-to-symbol equality. -static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* possible_strings, - Label* not_both_strings) { +// Fast negative check for internalized-to-internalized equality. +static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* possible_strings, + Label* not_both_strings) { ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))); // r2 is object type of rhs. - // Ensure that no non-strings have the symbol bit set. + // Ensure that no non-strings have the internalized bit set. 
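The fast negative check that follows relies only on instance-type bits: a value whose string bit is clear in kIsNotStringMask and whose internalized bit is set is an internalized string, and two distinct internalized strings can never hold the same content, so equality may answer "not equal" without touching any characters. A sketch of that predicate, with illustrative mask values standing in for V8's instance-type encoding:

#include <cstdint>

const uint32_t kIsNotStringMaskExample    = 0x80;  // illustrative value, not the real constant
const uint32_t kIsInternalizedMaskExample = 0x40;  // illustrative value, not the real constant

// String bit clear means "is a string"; internalized bit set means the string
// is unique per content, so pointer inequality implies value inequality.
bool IsInternalizedString(uint32_t instance_type) {
  return (instance_type & kIsNotStringMaskExample) == 0 &&
         (instance_type & kIsInternalizedMaskExample) != 0;
}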
Label object_test; - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ tst(r2, Operand(kIsNotStringMask)); __ b(ne, &object_test); - __ tst(r2, Operand(kIsSymbolMask)); + __ tst(r2, Operand(kIsInternalizedMask)); __ b(eq, possible_strings); __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE); __ b(ge, not_both_strings); - __ tst(r3, Operand(kIsSymbolMask)); + __ tst(r3, Operand(kIsInternalizedMask)); __ b(eq, possible_strings); - // Both are symbols. We already checked they weren't the same pointer + // Both are internalized. We already checked they weren't the same pointer // so they are not equal. __ mov(r0, Operand(NOT_EQUAL)); __ Ret(); @@ -1599,7 +1627,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, if (!object_is_smi) { __ JumpIfSmi(object, &is_smi); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, @@ -1678,42 +1706,60 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { } -// On entry lhs_ and rhs_ are the values to be compared. +static void ICCompareStub_CheckInputType(MacroAssembler* masm, + Register input, + Register scratch, + CompareIC::State expected, + Label* fail) { + Label ok; + if (expected == CompareIC::SMI) { + __ JumpIfNotSmi(input, fail); + } else if (expected == CompareIC::NUMBER) { + __ JumpIfSmi(input, &ok); + __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, + DONT_DO_SMI_CHECK); + } + // We could be strict about internalized/non-internalized here, but as long as + // hydrogen doesn't care, the stub doesn't have to care either. + __ bind(&ok); +} + + +// On entry r1 and r2 are the values to be compared. // On exit r0 is 0, positive or negative to indicate the result of // the comparison. -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); +void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { + Register lhs = r1; + Register rhs = r0; + Condition cc = GetCondition(); + + Label miss; + ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss); + ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss); Label slow; // Call builtin. Label not_smis, both_loaded_as_doubles, lhs_not_nan; - if (include_smi_compare_) { - Label not_two_smis, smi_done; - __ orr(r2, r1, r0); - __ JumpIfNotSmi(r2, ¬_two_smis); - __ mov(r1, Operand(r1, ASR, 1)); - __ sub(r0, r1, Operand(r0, ASR, 1)); - __ Ret(); - __ bind(¬_two_smis); - } else if (FLAG_debug_code) { - __ orr(r2, r1, r0); - __ tst(r2, Operand(kSmiTagMask)); - __ Assert(ne, "CompareStub: unexpected smi operands."); - } + Label not_two_smis, smi_done; + __ orr(r2, r1, r0); + __ JumpIfNotSmi(r2, ¬_two_smis); + __ mov(r1, Operand(r1, ASR, 1)); + __ sub(r0, r1, Operand(r0, ASR, 1)); + __ Ret(); + __ bind(¬_two_smis); // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. // Handle the case where the objects are identical. Either returns the answer // or goes to slow. Only falls through if the objects were not identical. - EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); + EmitIdenticalObjectComparison(masm, &slow, cc); // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. 
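The smi fast path at the top of GenerateGeneric compares two tagged small integers by shifting away the one-bit tag (ASR #1) and subtracting, leaving a negative, zero, or positive value in r0. The same idea as standalone C++, assuming 32-bit smis with the payload in the upper 31 bits (helper names are illustrative):

#include <cstdint>

int32_t UntagSmi(int32_t tagged) { return tagged >> 1; }  // mirrors "ASR #1"

// Mirrors "mov r1, r1 ASR 1; sub r0, r1, r0 ASR 1": the 31-bit payloads cannot
// overflow a 32-bit subtraction, so the sign of the result is the comparison.
int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
  return UntagSmi(lhs_tagged) - UntagSmi(rhs_tagged);
}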
STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); - __ and_(r2, lhs_, Operand(rhs_)); + __ and_(r2, lhs, Operand(rhs)); __ JumpIfNotSmi(r2, ¬_smis); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: // 1) Return the answer. @@ -1724,7 +1770,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // comparison. If VFP3 is supported the double values of the numbers have // been loaded into d7 and d6. Otherwise, the double values have been loaded // into r0, r1, r2, and r3. - EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); + EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); __ bind(&both_loaded_as_doubles); // The arguments have been converted to doubles and stored in d6 and d7, if @@ -1732,7 +1778,7 @@ void CompareStub::Generate(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); if (CpuFeatures::IsSupported(VFP2)) { __ bind(&lhs_not_nan); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label no_nan; // ARMv7 VFP3 instructions to implement double precision comparison. __ VFPCompareAndSetFlags(d7, d6); @@ -1747,7 +1793,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // If one of the sides was a NaN then the v flag is set. Load r0 with // whatever it takes to make the comparison fail, since comparisons with NaN // always fail. - if (cc_ == lt || cc_ == le) { + if (cc == lt || cc == le) { __ mov(r0, Operand(GREATER)); } else { __ mov(r0, Operand(LESS)); @@ -1756,62 +1802,64 @@ void CompareStub::Generate(MacroAssembler* masm) { } else { // Checks for NaN in the doubles we have loaded. Can return the answer or // fall through if neither is a NaN. Also binds lhs_not_nan. - EmitNanCheck(masm, &lhs_not_nan, cc_); + EmitNanCheck(masm, &lhs_not_nan, cc); // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the // answer. Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc_); + EmitTwoNonNanDoubleComparison(masm, cc); } __ bind(¬_smis); // At this point we know we are dealing with two different objects, // and neither of them is a Smi. The objects are in rhs_ and lhs_. - if (strict_) { + if (strict()) { // This returns non-equal for some object types, or falls through if it // was not lucky. - EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); + EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); } - Label check_for_symbols; + Label check_for_internalized_strings; Label flat_string_check; // Check for heap-number-heap-number comparison. Can jump to slow case, // or load both doubles into r0, r1, r2, r3 and jump to the code that handles - // that case. If the inputs are not doubles then jumps to check_for_symbols. + // that case. If the inputs are not doubles then jumps to + // check_for_internalized_strings. // In this case r2 will contain the type of rhs_. Never falls through. EmitCheckForTwoHeapNumbers(masm, - lhs_, - rhs_, + lhs, + rhs, &both_loaded_as_doubles, - &check_for_symbols, + &check_for_internalized_strings, &flat_string_check); - __ bind(&check_for_symbols); + __ bind(&check_for_internalized_strings); // In the strict case the EmitStrictTwoHeapObjectCompare already took care of - // symbols. - if (cc_ == eq && !strict_) { - // Returns an answer for two symbols or two detectable objects. + // internalized strings. + if (cc == eq && !strict()) { + // Returns an answer for two internalized strings or two detectable objects. // Otherwise jumps to string case or not both strings case. // Assumes that r2 is the type of rhs_ on entry. 
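When a NaN shows up in the double comparison, the stub loads whichever sentinel makes the requested condition fail: GREATER for < and <=, LESS for > and >=, so every ordered comparison involving NaN comes out false. A tiny restatement of that selection (the enum values are illustrative):

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

// With a NaN operand every ordered comparison must be false, so return the
// value that the follow-up condition check cannot accept.
CompareResult NaNCompareResult(bool cond_is_lt_or_le) {
  return cond_is_lt_or_le ? GREATER : LESS;
}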
- EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); + EmitCheckForInternalizedStringsOrObjects( + masm, lhs, rhs, &flat_string_check, &slow); } // Check for both being sequential ASCII strings, and inline if that is the // case. __ bind(&flat_string_check); - __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow); + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow); __ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3); - if (cc_ == eq) { + if (cc == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, - lhs_, - rhs_, + lhs, + rhs, r2, r3, r4); } else { StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs_, - rhs_, + lhs, + rhs, r2, r3, r4, @@ -1821,18 +1869,18 @@ void CompareStub::Generate(MacroAssembler* masm) { __ bind(&slow); - __ Push(lhs_, rhs_); + __ Push(lhs, rhs); // Figure out which native to call and setup the arguments. Builtins::JavaScript native; - if (cc_ == eq) { - native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + if (cc == eq) { + native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { native = Builtins::COMPARE; int ncr; // NaN compare result - if (cc_ == lt || cc_ == le) { + if (cc == lt || cc == le) { ncr = GREATER; } else { - ASSERT(cc_ == gt || cc_ == ge); // remaining cases + ASSERT(cc == gt || cc == ge); // remaining cases ncr = LESS; } __ mov(r0, Operand(Smi::FromInt(ncr))); @@ -1842,6 +1890,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ InvokeBuiltin(native, JUMP_FUNCTION); + + __ bind(&miss); + GenerateMiss(masm); } @@ -1881,7 +1932,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); __ tst(ip, Operand(1 << Map::kIsUndetectable)); // Undetectable -> false. - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); + __ mov(tos_, Operand::Zero(), LeaveCC, ne); __ Ret(ne); } } @@ -1907,21 +1958,21 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ b(ne, ¬_heap_number); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); __ VFPCompareAndSetFlags(d1, 0.0); // "tos_" is a register, and contains a non zero value by default. // Hence we only need to overwrite "tos_" with zero to return false for // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN + __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO + __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN } else { Label done, not_nan, not_zero; __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); // -0 maps to false: __ bic( - temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE), SetCC); + temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC); __ b(ne, ¬_zero); // If exponent word is zero then the answer depends on the mantissa word. 
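ToBooleanStub maps a heap number to false exactly when it is plus or minus zero or NaN (the eq and vs conditions after the VFP compare above); everything else is true, which the integer-only path below re-derives from the exponent and mantissa words. A hedged C++ equivalent of the predicate:

#include <cmath>

// Mirrors the VFP path: compare to 0.0 (which also matches -0.0) and treat
// NaN, which compares unordered, as false as well.
bool HeapNumberToBoolean(double value) {
  return !(value == 0.0 || std::isnan(value));
}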
__ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); @@ -1934,25 +1985,25 @@ void ToBooleanStub::Generate(MacroAssembler* masm) { __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord)); unsigned int shifted_exponent_mask = HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord; - __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE)); + __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32)); __ b(ne, ¬_nan); // If exponent is not 0x7ff then it can't be a NaN. // Reload exponent word. __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); - __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE)); + __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32)); // If mantissa is not zero then we have a NaN, so return 0. - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); + __ mov(tos_, Operand::Zero(), LeaveCC, ne); __ b(ne, &done); // Load mantissa word. __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); - __ cmp(temp, Operand(0, RelocInfo::NONE)); + __ cmp(temp, Operand::Zero()); // If mantissa is not zero then we have a NaN, so return 0. - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, ne); + __ mov(tos_, Operand::Zero(), LeaveCC, ne); __ b(ne, &done); __ bind(¬_nan); - __ mov(tos_, Operand(1, RelocInfo::NONE)); + __ mov(tos_, Operand(1, RelocInfo::NONE32)); __ bind(&done); } __ Ret(); @@ -1975,7 +2026,7 @@ void ToBooleanStub::CheckOddball(MacroAssembler* masm, // The value of a root is never NULL, so we can avoid loading a non-null // value into tos_ when we want to return 'true'. if (!result) { - __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ mov(tos_, Operand::Zero(), LeaveCC, eq); } __ Ret(eq); } @@ -2003,17 +2054,22 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // store the registers in any particular way, but we do have to store and // restore them. __ stm(db_w, sp, kCallerSaved | lr.bit()); + + const Register scratch = r1; + if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP2); - __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + CpuFeatureScope scope(masm, VFP2); + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(scratch); + + __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); + for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vstr(reg, MemOperand(sp, i * kDoubleSize)); + __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); } } const int argument_count = 1; const int fp_argument_count = 0; - const Register scratch = r1; AllowExternalCallThatCantCauseGC scope(masm); __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); @@ -2022,12 +2078,16 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP2); - for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { + CpuFeatureScope scope(masm, VFP2); + + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(scratch); + + for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { DwVfpRegister reg = DwVfpRegister::from_code(i); - __ vldr(reg, MemOperand(sp, i * kDoubleSize)); + __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? 
al : ne); } - __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters)); + __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); } __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). } @@ -2056,8 +2116,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) { case UnaryOpIC::SMI: GenerateSmiStub(masm); break; - case UnaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); + case UnaryOpIC::NUMBER: + GenerateNumberStub(masm); break; case UnaryOpIC::GENERIC: GenerateGenericStub(masm); @@ -2120,7 +2180,7 @@ void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm, __ b(eq, slow); // Return '0 - value'. - __ rsb(r0, r0, Operand(0, RelocInfo::NONE)); + __ rsb(r0, r0, Operand::Zero()); __ Ret(); } @@ -2137,13 +2197,13 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, // TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) { switch (op_) { case Token::SUB: - GenerateHeapNumberStubSub(masm); + GenerateNumberStubSub(masm); break; case Token::BIT_NOT: - GenerateHeapNumberStubBitNot(masm); + GenerateNumberStubBitNot(masm); break; default: UNREACHABLE(); @@ -2151,7 +2211,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) { Label non_smi, slow, call_builtin; GenerateSmiCodeSub(masm, &non_smi, &call_builtin); __ bind(&non_smi); @@ -2163,7 +2223,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) { Label non_smi, slow; GenerateSmiCodeBitNot(masm, &non_smi); __ bind(&non_smi); @@ -2206,18 +2266,16 @@ void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, } -void UnaryOpStub::GenerateHeapNumberCodeBitNot( - MacroAssembler* masm, Label* slow) { - Label impossible; - +void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, + Label* slow) { EmitCheckForHeapNumber(masm, r0, r1, r6, slow); - // Convert the heap number is r0 to an untagged integer in r1. - __ ConvertToInt32(r0, r1, r2, r3, d0, slow); + // Convert the heap number in r0 to an untagged integer in r1. + __ ECMAConvertNumberToInt32(r0, r1, r2, r3, r4, d0, d1); // Do the bitwise operation and check if the result fits in a smi. Label try_float; __ mvn(r1, Operand(r1)); - __ add(r2, r1, Operand(0x40000000), SetCC); + __ cmn(r1, Operand(0x40000000)); __ b(mi, &try_float); // Tag the result as a smi and we're done. @@ -2228,48 +2286,36 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( __ bind(&try_float); if (mode_ == UNARY_NO_OVERWRITE) { Label slow_allocate_heapnumber, heapnumber_allocated; - // Allocate a new heap number without zapping r0, which we need if it fails. - __ AllocateHeapNumber(r2, r3, r4, r6, &slow_allocate_heapnumber); + __ AllocateHeapNumber(r0, r3, r4, r6, &slow_allocate_heapnumber); __ jmp(&heapnumber_allocated); __ bind(&slow_allocate_heapnumber); { FrameScope scope(masm, StackFrame::INTERNAL); - __ push(r0); // Push the heap number, not the untagged int32. + // Push the lower bit of the result (left shifted to look like a smi). + __ mov(r2, Operand(r1, LSL, 31)); + // Push the 31 high bits (bit 0 cleared to look like a smi). 
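The bit-not path above decides whether the untagged result still fits in a smi with "cmn r1, #0x40000000": the addition goes negative exactly when the value lies outside the 31-bit payload range. A standalone form of that range test:

#include <cstdint>

// Equivalent to "cmn r1, #0x40000000; bmi try_float": true iff the value fits
// in the 31-bit smi payload range [-2^30, 2^30 - 1].
bool FitsInSmi(int32_t value) {
  return value >= -(1 << 30) && value <= (1 << 30) - 1;
}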
+ __ bic(r1, r1, Operand(1)); + __ Push(r2, r1); __ CallRuntime(Runtime::kNumberAlloc, 0); - __ mov(r2, r0); // Move the new heap number into r2. - // Get the heap number into r0, now that the new heap number is in r2. - __ pop(r0); + __ Pop(r2, r1); // Restore the result. + __ orr(r1, r1, Operand(r2, LSR, 31)); } - - // Convert the heap number in r0 to an untagged integer in r1. - // This can't go slow-case because it's the same number we already - // converted once again. - __ ConvertToInt32(r0, r1, r3, r4, d0, &impossible); - __ mvn(r1, Operand(r1)); - __ bind(&heapnumber_allocated); - __ mov(r0, r2); // Move newly allocated heap number to r0. } if (CpuFeatures::IsSupported(VFP2)) { // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, r1); __ vcvt_f64_s32(d0, s0); - __ sub(r2, r0, Operand(kHeapObjectTag)); - __ vstr(d0, r2, HeapNumber::kValueOffset); + __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ Ret(); } else { // WriteInt32ToHeapNumberStub does not trigger GC, so we do not // have to set up a frame. WriteInt32ToHeapNumberStub stub(r1, r0, r2); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); - } - - __ bind(&impossible); - if (FLAG_debug_code) { - __ stop("Incorrect assumption in bit-not stub"); + __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); } } @@ -2325,20 +2371,23 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { } +void BinaryOpStub::Initialize() { + platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); +} + + void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { Label get_result; __ Push(r1, r0); __ mov(r2, Operand(Smi::FromInt(MinorKey()))); - __ mov(r1, Operand(Smi::FromInt(op_))); - __ mov(r0, Operand(Smi::FromInt(operands_type_))); - __ Push(r2, r1, r0); + __ push(r2); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 5, + 3, 1); } @@ -2349,59 +2398,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( } -void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. 
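Around the Runtime::kNumberAlloc call the untagged int32 has to survive a GC, so it is split into two words that both look like smis: one holding only the low bit (parked in bit 31) and one holding the upper 31 bits with bit 0 cleared. A round-trip sketch of that encoding:

#include <cstdint>

// Split an untagged int32 into two smi-looking words (bit 0 clear in both).
void SplitForGC(int32_t value, uint32_t* low_bit_word, uint32_t* high_bits_word) {
  uint32_t bits = static_cast<uint32_t>(value);
  *low_bit_word   = bits << 31;   // mirrors "mov r2, r1 LSL 31"
  *high_bits_word = bits & ~1u;   // mirrors "bic r1, r1, #1"
}

// Reassemble after the runtime call, mirroring "orr r1, r1, r2 LSR 31".
int32_t JoinAfterGC(uint32_t low_bit_word, uint32_t high_bits_word) {
  return static_cast<int32_t>(high_bits_word | (low_bit_word >> 31));
}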
- AllowStubCallsScope allow_stub_calls(masm, true); - - switch (operands_type_) { - case BinaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case BinaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case BinaryOpIC::INT32: - GenerateInt32Stub(masm); - break; - case BinaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); - break; - case BinaryOpIC::ODDBALL: - GenerateOddballStub(masm); - break; - case BinaryOpIC::BOTH_STRING: - GenerateBothStringStub(masm); - break; - case BinaryOpIC::STRING: - GenerateStringStub(masm); - break; - case BinaryOpIC::GENERIC: - GenerateGeneric(masm); - break; - default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::PrintName(StringStream* stream) { - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - stream->Add("BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); -} - - -void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { +void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, + Token::Value op) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2411,7 +2409,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); Label not_smi_result; - switch (op_) { + switch (op) { case Token::ADD: __ add(right, left, Operand(right), SetCC); // Add optimistically. __ Ret(vc); @@ -2436,7 +2434,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { __ cmp(ip, Operand(scratch2)); __ b(ne, ¬_smi_result); // Go slow on zero result to handle -0. - __ cmp(scratch1, Operand(0)); + __ cmp(scratch1, Operand::Zero()); __ mov(right, Operand(scratch1), LeaveCC, ne); __ Ret(ne); // We need -0 if we were multiplying a negative number with 0 to get 0. @@ -2447,33 +2445,112 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { // We fall through here if we multiplied a negative number with 0, because // that would mean we should produce -0. break; - case Token::DIV: + case Token::DIV: { + Label div_with_sdiv; + + // Check for 0 divisor. + __ cmp(right, Operand::Zero()); + __ b(eq, ¬_smi_result); + // Check for power of two on the right hand side. - __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); - // Check for positive and no remainder (scratch1 contains right - 1). - __ orr(scratch2, scratch1, Operand(0x80000000u)); - __ tst(left, scratch2); - __ b(ne, ¬_smi_result); + __ sub(scratch1, right, Operand(1)); + __ tst(scratch1, right); + if (CpuFeatures::IsSupported(SUDIV)) { + __ b(ne, &div_with_sdiv); + // Check for no remainder. + __ tst(left, scratch1); + __ b(ne, ¬_smi_result); + // Check for positive left hand side. + __ cmp(left, Operand::Zero()); + __ b(mi, &div_with_sdiv); + } else { + __ b(ne, ¬_smi_result); + // Check for positive and no remainder. + __ orr(scratch2, scratch1, Operand(0x80000000u)); + __ tst(left, scratch2); + __ b(ne, ¬_smi_result); + } // Perform division by shifting. __ CountLeadingZeros(scratch1, scratch1, scratch2); __ rsb(scratch1, scratch1, Operand(31)); __ mov(right, Operand(left, LSR, scratch1)); __ Ret(); + + if (CpuFeatures::IsSupported(SUDIV)) { + Label result_not_zero; + + __ bind(&div_with_sdiv); + // Do division. 
+ __ sdiv(scratch1, left, right); + // Check that the remainder is zero. + __ mls(scratch2, scratch1, right, left); + __ cmp(scratch2, Operand::Zero()); + __ b(ne, ¬_smi_result); + // Check for negative zero result. + __ cmp(scratch1, Operand::Zero()); + __ b(ne, &result_not_zero); + __ cmp(right, Operand::Zero()); + __ b(lt, ¬_smi_result); + __ bind(&result_not_zero); + // Check for the corner case of dividing the most negative smi by -1. + __ cmp(scratch1, Operand(0x40000000)); + __ b(eq, ¬_smi_result); + // Tag and return the result. + __ SmiTag(right, scratch1); + __ Ret(); + } break; - case Token::MOD: - // Check for two positive smis. - __ orr(scratch1, left, Operand(right)); - __ tst(scratch1, Operand(0x80000000u | kSmiTagMask)); - __ b(ne, ¬_smi_result); + } + case Token::MOD: { + Label modulo_with_sdiv; + + if (CpuFeatures::IsSupported(SUDIV)) { + // Check for x % 0. + __ cmp(right, Operand::Zero()); + __ b(eq, ¬_smi_result); + + // Check for two positive smis. + __ orr(scratch1, left, Operand(right)); + __ tst(scratch1, Operand(0x80000000u)); + __ b(ne, &modulo_with_sdiv); + + // Check for power of two on the right hand side. + __ sub(scratch1, right, Operand(1)); + __ tst(scratch1, right); + __ b(ne, &modulo_with_sdiv); + } else { + // Check for two positive smis. + __ orr(scratch1, left, Operand(right)); + __ tst(scratch1, Operand(0x80000000u)); + __ b(ne, ¬_smi_result); - // Check for power of two on the right hand side. - __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); + // Check for power of two on the right hand side. + __ JumpIfNotPowerOfTwoOrZero(right, scratch1, ¬_smi_result); + } - // Perform modulus by masking. + // Perform modulus by masking (scratch1 contains right - 1). __ and_(right, left, Operand(scratch1)); __ Ret(); + + if (CpuFeatures::IsSupported(SUDIV)) { + __ bind(&modulo_with_sdiv); + __ mov(scratch2, right); + // Perform modulus with sdiv and mls. + __ sdiv(scratch1, left, right); + __ mls(right, scratch1, right, left); + // Return if the result is not 0. + __ cmp(right, Operand::Zero()); + __ Ret(ne); + // The result is 0, check for -0 case. + __ cmp(left, Operand::Zero()); + __ Ret(pl); + // This is a -0 case, restore the value of right. + __ mov(right, scratch2); + // We fall through here to not_smi_result to produce -0. 
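Both the DIV and MOD fast paths first test for a power-of-two divisor with the (right - 1) & right trick, then divide by shifting or take the remainder by masking; only when SUDIV is available do they fall back to a real sdiv/mls sequence. The underlying identities as plain C++ on untagged non-negative operands:

#include <cstdint>

bool IsPowerOfTwo(uint32_t x) { return x != 0 && ((x - 1) & x) == 0; }

// For a divisor d == 1u << shift:
//   value / d == value >> shift   (the CountLeadingZeros + LSR sequence)
//   value % d == value & (d - 1)  (the masking MOD path)
uint32_t DivByPowerOfTwo(uint32_t value, unsigned shift) { return value >> shift; }
uint32_t ModByPowerOfTwo(uint32_t value, uint32_t d)     { return value & (d - 1); }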
+ } break; + } case Token::BIT_OR: __ orr(right, left, Operand(right)); __ Ret(); @@ -2526,10 +2603,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { } -void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, - bool smi_operands, - Label* not_numbers, - Label* gc_required) { +void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required, + OverwriteMode mode); + + +void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, + BinaryOpIC::TypeInfo left_type, + BinaryOpIC::TypeInfo right_type, + bool smi_operands, + Label* not_numbers, + Label* gc_required, + Label* miss, + Token::Value op, + OverwriteMode mode) { Register left = r1; Register right = r0; Register scratch1 = r7; @@ -2541,11 +2632,17 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ AssertSmi(left); __ AssertSmi(right); } + if (left_type == BinaryOpIC::SMI) { + __ JumpIfNotSmi(left, miss); + } + if (right_type == BinaryOpIC::SMI) { + __ JumpIfNotSmi(right, miss); + } Register heap_number_map = r6; __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - switch (op_) { + switch (op) { case Token::ADD: case Token::SUB: case Token::MUL: @@ -2555,25 +2652,42 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // depending on whether VFP3 is available or not. FloatingPointHelper::Destination destination = CpuFeatures::IsSupported(VFP2) && - op_ != Token::MOD ? + op != Token::MOD ? FloatingPointHelper::kVFPRegisters : FloatingPointHelper::kCoreRegisters; // Allocate new heap number for result. Register result = r5; - GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required); + BinaryOpStub_GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); // Load the operands. if (smi_operands) { FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); } else { - FloatingPointHelper::LoadOperands(masm, - destination, - heap_number_map, - scratch1, - scratch2, - not_numbers); + // Load right operand to d7 or r2/r3. + if (right_type == BinaryOpIC::INT32) { + FloatingPointHelper::LoadNumberAsInt32Double( + masm, right, destination, d7, d8, r2, r3, heap_number_map, + scratch1, scratch2, s0, miss); + } else { + Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; + FloatingPointHelper::LoadNumber( + masm, destination, right, d7, r2, r3, heap_number_map, + scratch1, scratch2, fail); + } + // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it + // jumps to |miss|. + if (left_type == BinaryOpIC::INT32) { + FloatingPointHelper::LoadNumberAsInt32Double( + masm, left, destination, d6, d8, r0, r1, heap_number_map, + scratch1, scratch2, s0, miss); + } else { + Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; + FloatingPointHelper::LoadNumber( + masm, destination, left, d6, r0, r1, heap_number_map, + scratch1, scratch2, fail); + } } // Calculate the result. @@ -2581,8 +2695,8 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Using VFP registers: // d6: Left value // d7: Right value - CpuFeatures::Scope scope(VFP2); - switch (op_) { + CpuFeatureScope scope(masm, VFP2); + switch (op) { case Token::ADD: __ vadd(d5, d6, d7); break; @@ -2606,7 +2720,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, } else { // Call the C function to handle the double operation. 
FloatingPointHelper::CallCCodeForDoubleOperation(masm, - op_, + op, result, scratch1); if (FLAG_debug_code) { @@ -2634,6 +2748,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, scratch2, scratch3, d0, + d1, not_numbers); FloatingPointHelper::ConvertNumberToInt32(masm, right, @@ -2643,11 +2758,12 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, scratch2, scratch3, d0, + d1, not_numbers); } Label result_not_a_smi; - switch (op_) { + switch (op) { case Token::BIT_OR: __ orr(r2, r3, Operand(r2)); break; @@ -2698,8 +2814,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } else { - GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required); + BinaryOpStub_GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required, + mode); } // r2: Answer as signed int32. @@ -2712,9 +2829,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, if (CpuFeatures::IsSupported(VFP2)) { // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As // mentioned above SHR needs to always produce a positive result. - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, r2); - if (op_ == Token::SHR) { + if (op == Token::SHR) { __ vcvt_f64_u32(d0, s0); } else { __ vcvt_f64_s32(d0, s0); @@ -2739,12 +2856,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Generate the smi code. If the operation on smis are successful this return is // generated. If the result is not a smi and heap number allocation is not // requested the code falls through. If number allocation is requested but a -// heap number cannot be allocated the code jumps to the lable gc_required. -void BinaryOpStub::GenerateSmiCode( +// heap number cannot be allocated the code jumps to the label gc_required. +void BinaryOpStub_GenerateSmiCode( MacroAssembler* masm, Label* use_runtime, Label* gc_required, - SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { + Token::Value op, + BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, + OverwriteMode mode) { Label not_smis; Register left = r1; @@ -2757,12 +2876,14 @@ void BinaryOpStub::GenerateSmiCode( __ JumpIfNotSmi(scratch1, ¬_smis); // If the smi-smi operation results in a smi return is generated. - GenerateSmiSmiOperation(masm); + BinaryOpStub_GenerateSmiSmiOperation(masm, op); // If heap number results are possible generate the result in an allocated // heap number. - if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { - GenerateFPOperation(masm, true, use_runtime, gc_required); + if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { + BinaryOpStub_GenerateFPOperation( + masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, + use_runtime, gc_required, ¬_smis, op, mode); } __ bind(¬_smis); } @@ -2774,14 +2895,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { // Only allow smi results. - GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); } else { // Allow heap number result and don't make a transition if a heap number // cannot be allocated. 
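When the bitwise result is boxed back into a number, SHR (JavaScript's >>>) must treat the 32 bits as unsigned, which is why the code above selects vcvt_f64_u32 instead of vcvt_f64_s32. A two-line illustration of the difference for the bit pattern 0xFFFFFFFF:

#include <cstdint>

double BoxAsSigned(int32_t bits)    { return static_cast<double>(bits); }  // -1.0 for all-ones bits
double BoxAsUnsigned(uint32_t bits) { return static_cast<double>(bits); }  // 4294967295.0 for all-ones bits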
- GenerateSmiCode(masm, - &call_runtime, - &call_runtime, - ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, + mode_); } // Code falls through if the result is not returned as either a smi or heap @@ -2789,23 +2910,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { GenerateTypeTransition(masm); __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } -void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - // Try to add arguments as strings, otherwise, transition to the generic - // BinaryOpIC type. - GenerateAddStrings(masm); - GenerateTypeTransition(masm); -} - - void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { Label call_runtime; - ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); + ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); ASSERT(op_ == Token::ADD); // If both arguments are strings, call the string add stub. // Otherwise, do a transition. @@ -2834,7 +2946,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::INT32); + ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); Register left = r1; Register right = r0; @@ -2856,7 +2968,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label skip; __ orr(scratch1, left, right); __ JumpIfNotSmi(scratch1, &skip); - GenerateSmiSmiOperation(masm); + BinaryOpStub_GenerateSmiSmiOperation(masm, op_); // Fall through if the result is not a smi. __ bind(&skip); @@ -2866,6 +2978,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::MUL: case Token::DIV: case Token::MOD: { + // It could be that only SMIs have been seen at either the left + // or the right operand. For precise type feedback, patch the IC + // again if this changes. + if (left_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(left, &transition); + } + if (right_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(right, &transition); + } // Load both operands and check that they are 32-bit integer. // Jump to type transition if they are not. The registers r0 and r1 (right // and left) are preserved for the runtime call. @@ -2900,7 +3021,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { &transition); if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label return_heap_number; switch (op_) { case Token::ADD: @@ -2925,16 +3046,14 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Otherwise return a heap number if allowed, or jump to type // transition. - __ EmitVFPTruncate(kRoundToZero, - scratch1, - d5, - scratch2, - d8); - if (result_type_ <= BinaryOpIC::INT32) { + __ TryDoubleToInt32Exact(scratch1, d5, d8); // If the ne condition is set, result does // not fit in a 32-bit integer. __ b(ne, &transition); + } else { + __ vcvt_s32_f64(s8, d5); + __ vmov(scratch1, s8); } // Check if the result fits in a smi. @@ -2960,16 +3079,17 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); // Return a heap number, or fall through to type transition or runtime // call if we can't. - if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER + if (result_type_ >= ((op_ == Token::DIV) ? 
BinaryOpIC::NUMBER : BinaryOpIC::INT32)) { // We are using vfp registers so r5 is available. heap_number_result = r5; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); __ vstr(d5, r0, HeapNumber::kValueOffset); __ mov(r0, heap_number_result); @@ -2988,12 +3108,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Allocate a heap number to store the result. heap_number_result = r5; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &pop_and_call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &pop_and_call_runtime, + mode_); // Load the left value from the value saved on the stack. __ Pop(r1, r0); @@ -3098,15 +3219,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); heap_number_result = r5; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); if (op_ != Token::SHR) { // Convert the result to a floating point value. __ vmov(double_scratch.low(), r2); @@ -3147,6 +3269,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3180,25 +3303,37 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { } __ bind(&done); - GenerateHeapNumberStub(masm); + GenerateNumberStub(masm); } -void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { - Label call_runtime; - GenerateFPOperation(masm, false, &call_runtime, &call_runtime); +void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { + Label call_runtime, transition; + BinaryOpStub_GenerateFPOperation( + masm, left_type_, right_type_, false, + &transition, &call_runtime, &transition, op_, mode_); + + __ bind(&transition); + GenerateTypeTransition(masm); __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime; + Label call_runtime, call_string_add_or_runtime, transition; - GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); - GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); + BinaryOpStub_GenerateFPOperation( + masm, left_type_, right_type_, false, + &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); + + __ bind(&transition); + GenerateTypeTransition(masm); __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { @@ -3206,6 +3341,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3241,61 +3377,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { } -void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { - 
GenerateRegisterArgsPush(masm); - switch (op_) { - case Token::ADD: - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required) { +void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required, + OverwriteMode mode) { // Code below will scratch result if allocation fails. To keep both arguments // intact for the runtime call result cannot be one of these. ASSERT(!result.is(r0) && !result.is(r1)); - if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { + if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { Label skip_allocation, allocated; - Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; + Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0; // If the overwritable operand is already an object, we skip the // allocation of a heap number. __ JumpIfNotSmi(overwritable_operand, &skip_allocation); @@ -3308,7 +3403,7 @@ void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, __ mov(result, Operand(overwritable_operand)); __ bind(&allocated); } else { - ASSERT(mode_ == NO_OVERWRITE); + ASSERT(mode == NO_OVERWRITE); __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } @@ -3336,7 +3431,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { const bool tagged = (argument_type_ == TAGGED); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); if (tagged) { // Argument is a number and is on stack and in r0. // Load argument and check if it is a smi. @@ -3386,7 +3481,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); // r0 points to the cache for the type type_. // If NULL, the cache hasn't been initialized yet, so go through runtime. 
- __ cmp(cache_entry, Operand(0, RelocInfo::NONE)); + __ cmp(cache_entry, Operand::Zero()); __ b(eq, &invalid_cache); #ifdef DEBUG @@ -3438,7 +3533,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ TailCallExternalReference(runtime_function, 1, 1); } else { ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label no_update; Label skip_cache; @@ -3499,7 +3594,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, Register scratch) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + ASSERT(masm->IsEnabled(VFP2)); Isolate* isolate = masm->isolate(); __ push(lr); @@ -3560,15 +3655,15 @@ void InterruptStub::Generate(MacroAssembler* masm) { void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatures::Scope vfp2_scope(VFP2); + CpuFeatureScope vfp2_scope(masm, VFP2); const Register base = r1; const Register exponent = r2; const Register heapnumbermap = r5; const Register heapnumber = r0; - const DoubleRegister double_base = d1; - const DoubleRegister double_exponent = d2; - const DoubleRegister double_result = d3; - const DoubleRegister double_scratch = d0; + const DwVfpRegister double_base = d1; + const DwVfpRegister double_exponent = d2; + const DwVfpRegister double_result = d3; + const DwVfpRegister double_scratch = d0; const SwVfpRegister single_scratch = s0; const Register scratch = r9; const Register scratch2 = r7; @@ -3697,8 +3792,8 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ vmov(double_result, 1.0, scratch2); // Get absolute value of exponent. - __ cmp(scratch, Operand(0)); - __ mov(scratch2, Operand(0), LeaveCC, mi); + __ cmp(scratch, Operand::Zero()); + __ mov(scratch2, Operand::Zero(), LeaveCC, mi); __ sub(scratch, scratch2, scratch, LeaveCC, mi); Label while_true; @@ -3708,7 +3803,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ vmul(double_scratch, double_scratch, double_scratch, ne); __ b(ne, &while_true); - __ cmp(exponent, Operand(0)); + __ cmp(exponent, Operand::Zero()); __ b(ge, &done); __ vmov(double_scratch, 1.0, scratch); __ vdiv(double_result, double_scratch, double_result); @@ -3769,31 +3864,54 @@ bool CEntryStub::IsPregenerated() { } -void CodeStub::GenerateStubsAheadOfTime() { - CEntryStub::GenerateAheadOfTime(); - WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); - StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); - RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { + CEntryStub::GenerateAheadOfTime(isolate); + WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); } -void CodeStub::GenerateFPStubs() { - CEntryStub save_doubles(1, kSaveFPRegs); - Handle<Code> code = save_doubles.GetCode(); - code->set_is_pregenerated(true); - StoreBufferOverflowStub stub(kSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - code->GetIsolate()->set_fp_stubs_generated(true); +void CodeStub::GenerateFPStubs(Isolate* isolate) { + SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) + ? kSaveFPRegs + : kDontSaveFPRegs; + CEntryStub save_doubles(1, mode); + StoreBufferOverflowStub stub(mode); + // These stubs might already be in the snapshot, detect that and don't + // regenerate, which would lead to code stub initialization state being messed + // up. 
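MathPowStub's integer-exponent loop above is binary exponentiation: each iteration shifts the exponent right, multiplies the accumulated result in when the shifted-out bit was set, squares the base, and a negative exponent is folded in afterwards by taking the reciprocal (the vdiv). A scalar sketch of the same loop:

// Square-and-multiply over the exponent bits, mirroring the while_true loop.
double PowInt(double base, int exponent) {
  double result = 1.0;
  double scratch = base;
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  while (bits != 0) {
    if (bits & 1u) result *= scratch;
    scratch *= scratch;
    bits >>= 1;
  }
  return exponent < 0 ? 1.0 / result : result;
}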
+ Code* save_doubles_code; + if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { + save_doubles_code = *save_doubles.GetCode(isolate); + save_doubles_code->set_is_pregenerated(true); + + Code* store_buffer_overflow_code = *stub.GetCode(isolate); + store_buffer_overflow_code->set_is_pregenerated(true); + } + isolate->set_fp_stubs_generated(true); } -void CEntryStub::GenerateAheadOfTime() { +void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { CEntryStub stub(1, kDontSaveFPRegs); - Handle<Code> code = stub.GetCode(); + Handle<Code> code = stub.GetCode(isolate); code->set_is_pregenerated(true); } +static void JumpIfOOM(MacroAssembler* masm, + Register value, + Register scratch, + Label* oom_label) { + STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); + STATIC_ASSERT(kFailureTag == 3); + __ and_(scratch, value, Operand(0xf)); + __ cmp(scratch, Operand(0xf)); + __ b(eq, oom_label); +} + + void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -3893,9 +4011,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ b(eq, &retry); // Special handling of out of memory exceptions. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ cmp(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); - __ b(eq, throw_out_of_memory_exception); + JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. __ mov(r3, Operand(isolate->factory()->the_hole_value())); @@ -3982,13 +4098,16 @@ void CEntryStub::Generate(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, isolate); - __ mov(r0, Operand(false, RelocInfo::NONE)); + __ mov(r0, Operand(false, RelocInfo::NONE32)); __ mov(r2, Operand(external_caught)); __ str(r0, MemOperand(r2)); // Set pending exception and r0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); + Label already_have_failure; + JumpIfOOM(masm, r0, ip, &already_have_failure); + Failure* out_of_memory = Failure::OutOfMemoryException(0x1); __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); + __ bind(&already_have_failure); __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ str(r0, MemOperand(r2)); @@ -4017,7 +4136,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ stm(db_w, sp, kCalleeSaved | lr.bit()); if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Save callee-saved vfp registers. __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); // Set up the reserved register for 0.0. @@ -4171,7 +4290,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { #endif if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); // Restore callee-saved vfp registers. 
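JumpIfOOM above identifies an out-of-memory Failure purely from its low four bits: the failure tag sits in the lowest bits (kFailureTag == 3) and OUT_OF_MEMORY_EXCEPTION == 3 sits just above it, so an OOM failure word always ends in 0b1111. The same mask-and-compare as a one-liner:

#include <cstdint>

// Mirrors "and scratch, value, #0xf; cmp scratch, #0xf" in JumpIfOOM.
bool IsOutOfMemoryFailure(uint32_t failure_word) {
  return (failure_word & 0xFu) == 0xFu;
}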
__ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); } @@ -4358,12 +4477,177 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } +void ArrayLengthStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + __ cmp(r0, Operand(masm->isolate()->factory()->length_string())); + __ b(ne, &miss); + receiver = r1; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- r2 : name + // -- lr : return address + // -- r0 : receiver + // -- sp[0] : receiver + // ----------------------------------- + receiver = r0; + } + + StubCompiler::GenerateLoadArrayLength(masm, receiver, r3, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void FunctionPrototypeStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + __ cmp(r0, Operand(masm->isolate()->factory()->prototype_string())); + __ b(ne, &miss); + receiver = r1; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- r2 : name + // -- lr : return address + // -- r0 : receiver + // -- sp[0] : receiver + // ----------------------------------- + receiver = r0; + } + + StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StringLengthStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- lr : return address + // -- r0 : key + // -- r1 : receiver + // ----------------------------------- + __ cmp(r0, Operand(masm->isolate()->factory()->length_string())); + __ b(ne, &miss); + receiver = r1; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- r2 : name + // -- lr : return address + // -- r0 : receiver + // -- sp[0] : receiver + // ----------------------------------- + receiver = r0; + } + + StubCompiler::GenerateLoadStringLength(masm, receiver, r3, r4, &miss, + support_wrapper_); + + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StoreArrayLengthStub::Generate(MacroAssembler* masm) { + // This accepts as a receiver anything JSArray::SetElementsLength accepts + // (currently anything except for external arrays which means anything with + // elements of FixedArray type). Value must be a number, but only smis are + // accepted as the most common case. 
+ Label miss; + + Register receiver; + Register value; + if (kind() == Code::KEYED_STORE_IC) { + // ----------- S t a t e ------------- + // -- lr : return address + // -- r0 : value + // -- r1 : key + // -- r2 : receiver + // ----------------------------------- + __ cmp(r1, Operand(masm->isolate()->factory()->length_string())); + __ b(ne, &miss); + receiver = r2; + value = r0; + } else { + ASSERT(kind() == Code::STORE_IC); + // ----------- S t a t e ------------- + // -- lr : return address + // -- r0 : value + // -- r1 : receiver + // -- r2 : key + // ----------------------------------- + receiver = r1; + value = r0; + } + Register scratch = r3; + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Check that the object is a JS array. + __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); + __ b(ne, &miss); + + // Check that elements are FixedArray. + // We rely on StoreIC_ArrayLength below to deal with all types of + // fast elements (including COW). + __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); + __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); + __ b(ne, &miss); + + // Check that the array has fast properties, otherwise the length + // property might have been redefined. + __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); + __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset)); + __ CompareRoot(scratch, Heap::kHashTableMapRootIndex); + __ b(eq, &miss); + + // Check that value is a smi. + __ JumpIfNotSmi(value, &miss); + + // Prepare tail call to StoreIC_ArrayLength. + __ Push(receiver, value); + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); + __ TailCallExternalReference(ref, 2, 1); + + __ bind(&miss); + + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + Register InstanceofStub::left() { return r0; } Register InstanceofStub::right() { return r1; } +void LoadFieldStub::Generate(MacroAssembler* masm) { + StubCompiler::DoGenerateFastPropertyLoad(masm, r0, reg_, inobject_, index_); + __ Ret(); +} + + void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The displacement is the offset of the last parameter (if any) // relative to the frame pointer. @@ -4664,7 +4948,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // of the arguments object and the elements array in words. Label add_arguments_object; __ bind(&try_allocate); - __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ cmp(r1, Operand::Zero()); __ b(eq, &add_arguments_object); __ mov(r1, Operand(r1, LSR, kSmiTagSize)); __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); @@ -4697,7 +4981,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // If there are no actual arguments, we're done. Label done; - __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ cmp(r1, Operand::Zero()); __ b(eq, &done); // Get the parameters pointer from the stack. @@ -4724,7 +5008,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { // Post-increment r4 with kPointerSize on each iteration. __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); __ sub(r1, r1, Operand(1)); - __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ cmp(r1, Operand::Zero()); __ b(ne, &loop); // Return and remove the on-stack parameters. 
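StoreArrayLengthStub only takes the fast route when the receiver really is a JSArray with FixedArray elements, still has fast (non-dictionary) properties so "length" cannot have been redefined, and the incoming value is a smi; anything else goes through the miss handler. A compact restatement of those guards with illustrative stand-in types (not the V8 API):

// Illustrative summary of the checks above, not real V8 types.
struct ReceiverFacts {
  bool is_smi;
  bool is_js_array;
  bool elements_are_fixed_array;   // rules out external arrays
  bool properties_are_dictionary;  // "length" may have been redefined
};

bool CanTailCallStoreArrayLength(const ReceiverFacts& r, bool value_is_smi) {
  return !r.is_smi && r.is_js_array && r.elements_are_fixed_array &&
         !r.properties_are_dictionary && value_is_smi;
}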
@@ -4757,8 +5041,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { const int kSubjectOffset = 2 * kPointerSize; const int kJSRegExpOffset = 3 * kPointerSize; - Label runtime, invoke_regexp; - + Label runtime; // Allocation of registers for this function. These are in callee save // registers and will be preserved by the call to the native RegExp code, as // this code is called using the normal C calling convention. When calling @@ -4776,7 +5059,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { ExternalReference::address_of_regexp_stack_memory_size(isolate); __ mov(r0, Operand(address_of_regexp_stack_memory_size)); __ ldr(r0, MemOperand(r0, 0)); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); __ b(eq, &runtime); // Check that the first argument is a JSRegExp object. @@ -4805,68 +5088,48 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check that the number of captures fit in the static offsets vector buffer. __ ldr(r2, FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. This - // uses the asumption that smis are 2 * their untagged value. + // Check (number_of_captures + 1) * 2 <= offsets vector size + // Or number_of_captures * 2 <= offsets vector size - 2 + // Multiplying by 2 comes for free since r2 is smi-tagged. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(r2, r2, Operand(2)); // r2 was a smi. - // Check that the static offsets vector buffer is large enough. - __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize)); + STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); + __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); __ b(hi, &runtime); - // r2: Number of capture registers - // regexp_data: RegExp data (FixedArray) - // Check that the second argument is a string. + // Reset offset for possibly sliced string. + __ mov(r9, Operand::Zero()); __ ldr(subject, MemOperand(sp, kSubjectOffset)); __ JumpIfSmi(subject, &runtime); - Condition is_string = masm->IsObjectStringType(subject, r0); - __ b(NegateCondition(is_string), &runtime); - // Get the length of the string to r3. - __ ldr(r3, FieldMemOperand(subject, String::kLengthOffset)); - - // r2: Number of capture registers - // r3: Length of subject string as a smi - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the third argument is a positive smi less than the subject - // string length. A negative value will be greater (unsigned comparison). - __ ldr(r0, MemOperand(sp, kPreviousIndexOffset)); - __ JumpIfNotSmi(r0, &runtime); - __ cmp(r3, Operand(r0)); - __ b(ls, &runtime); - - // r2: Number of capture registers - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the fourth object is a JSArray object. - __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); - __ JumpIfSmi(r0, &runtime); - __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); - __ b(ne, &runtime); - // Check that the JSArray is in fast case. - __ ldr(last_match_info_elements, - FieldMemOperand(r0, JSArray::kElementsOffset)); - __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); - __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); - __ b(ne, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. 
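The rewritten capture-count check above exploits the smi representation: a 31-bit smi n is stored as n << 1, so the tagged value already equals n * 2 and the bound (n + 1) * 2 <= vector size can be tested directly against vector size - 2 without untagging. A standalone illustration (not V8 code; kOffsetsVectorSize is a stand-in constant):

#include <cassert>
#include <cstdint>

constexpr int32_t kOffsetsVectorSize = 32;           // stand-in constant

inline int32_t SmiTag(int32_t n) { return n << 1; }  // a smi n is stored as n*2

bool CapturesFitUntagged(int32_t n) {                // the naive form
  return (n + 1) * 2 <= kOffsetsVectorSize;
}

bool CapturesFitTagged(int32_t tagged_n) {           // the form used above
  return tagged_n <= kOffsetsVectorSize - 2;
}

int main() {
  for (int32_t n = 0; n < 64; ++n) {
    assert(CapturesFitUntagged(n) == CapturesFitTagged(SmiTag(n)));
  }
}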
- __ ldr(r0, - FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); - __ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead)); - __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); - __ b(gt, &runtime); - - // Reset offset for possibly sliced string. - __ mov(r9, Operand(0)); - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_string; + __ mov(r3, subject); // Make a copy of the original subject string. __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); - // First check for flat string. None of the following string type tests will - // succeed if subject is not a string or a short external string. + // subject: subject string + // r3: subject string + // r0: subject string instance type + // regexp_data: RegExp data (FixedArray) + // Handle subject string according to its encoding and representation: + // (1) Sequential string? If yes, go to (5). + // (2) Anything but sequential or cons? If yes, go to (6). + // (3) Cons string. If the string is flat, replace subject with first string. + // Otherwise bailout. + // (4) Is subject external? If yes, go to (7). + // (5) Sequential string. Load regexp code according to encoding. + // (E) Carry on. + /// [...] + + // Deferred code at the end of the stub: + // (6) Not a long external string? If yes, go to (8). + // (7) External string. Make it, offset-wise, look like a sequential string. + // Go to (5). + // (8) Short external string or not a string? If yes, bail out to runtime. + // (9) Sliced string. Replace subject with parent. Go to (4). + + Label seq_string /* 5 */, external_string /* 7 */, + check_underlying /* 4 */, not_seq_nor_cons /* 6 */, + not_long_external /* 8 */; + + // (1) Sequential string? If yes, go to (5). __ and_(r1, r0, Operand(kIsNotStringMask | @@ -4874,77 +5137,62 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { kShortExternalStringMask), SetCC); STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); - __ b(eq, &seq_string); + __ b(eq, &seq_string); // Go to (5). - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // r1: whether subject is a string and if yes, its string representation - // Check for flat cons string or sliced string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - // In the case of a sliced string its offset has to be taken into account. - Label cons_string, external_string, check_encoding; + // (2) Anything but sequential or cons? If yes, go to (6). STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); __ cmp(r1, Operand(kExternalStringTag)); - __ b(lt, &cons_string); - __ b(eq, &external_string); - - // Catch non-string subject or short external string. - STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); - __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); - __ b(ne, &runtime); + __ b(ge, ¬_seq_nor_cons); // Go to (6). - // String is sliced. 
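The numbered plan above replaces the old ad-hoc representation checks with a single dispatch that reduces any subject to something that can be scanned like a sequential string. A simplified standalone sketch of that reduction (not V8 code; the real code additionally loops back through step (4) and adjusts an external string's data pointer in place):

#include <cstddef>

enum class Shape { kSequential, kCons, kSliced, kExternal, kShortExternal };

struct Str {
  Shape shape;
  Str* first = nullptr;     // cons: first part
  Str* second = nullptr;    // cons: second part (nullptr stands in for empty)
  Str* parent = nullptr;    // sliced: underlying string
  size_t slice_offset = 0;  // sliced: start offset into the parent
};

// Returns the string whose payload should be scanned, or nullptr to signal
// "bail out to the runtime".  *offset receives the slice offset, if any.
const Str* ResolveSubject(const Str* s, size_t* offset) {
  *offset = 0;
  if (s->shape == Shape::kCons) {              // (3) cons: must be flat
    if (s->second != nullptr) return nullptr;
    s = s->first;
  } else if (s->shape == Shape::kSliced) {     // (9) sliced: use the parent
    *offset = s->slice_offset;
    s = s->parent;
  } else if (s->shape == Shape::kShortExternal) {
    return nullptr;                            // (8) runtime only
  }
  // (4)-(7): sequential strings are used as-is; external strings are made to
  // look sequential by offsetting their data pointer.
  return s;
}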
- __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); - __ mov(r9, Operand(r9, ASR, kSmiTagSize)); - __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); - // r9: offset of sliced string, smi-tagged. - __ jmp(&check_encoding); - // String is a cons string, check whether it is flat. - __ bind(&cons_string); + // (3) Cons string. Check that it's flat. + // Replace subject with first string and reload instance type. __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); - __ CompareRoot(r0, Heap::kEmptyStringRootIndex); + __ CompareRoot(r0, Heap::kempty_stringRootIndex); __ b(ne, &runtime); __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); - // Is first part of cons or parent of slice a flat string? - __ bind(&check_encoding); + + // (4) Is subject external? If yes, go to (7). + __ bind(&check_underlying); __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSeqStringTag == 0); __ tst(r0, Operand(kStringRepresentationMask)); - __ b(ne, &external_string); + // The underlying external string is never a short external string. + STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + __ b(ne, &external_string); // Go to (7). + // (5) Sequential string. Load regexp code according to encoding. __ bind(&seq_string); - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // r0: Instance type of subject string - STATIC_ASSERT(4 == kAsciiStringTag); + // subject: sequential subject string (or look-alike, external string) + // r3: original subject string + // Load previous index and check range before r3 is overwritten. We have to + // use r3 instead of subject here because subject might have been only made + // to look like a sequential string when it actually is an external string. + __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); + __ JumpIfNotSmi(r1, &runtime); + __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); + __ cmp(r3, Operand(r1)); + __ b(ls, &runtime); + __ mov(r1, Operand(r1, ASR, kSmiTagSize)); + + STATIC_ASSERT(4 == kOneByteStringTag); STATIC_ASSERT(kTwoByteStringTag == 0); - // Find the code object based on the assumptions above. __ and_(r0, r0, Operand(kStringEncodingMask)); __ mov(r3, Operand(r0, ASR, 2), SetCC); __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); __ ldr(r7, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); + // (E) Carry on. String handling is done. + // r7: irregexp code // Check that the irregexp code has been generated for the actual string // encoding. If it has, the field contains a code object otherwise it contains // a smi (code flushing support). __ JumpIfSmi(r7, &runtime); - // r3: encoding of subject string (1 if ASCII, 0 if two_byte); - // r7: code - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); - __ mov(r1, Operand(r1, ASR, kSmiTagSize)); - // r1: previous index // r3: encoding of subject string (1 if ASCII, 0 if two_byte); // r7: code @@ -4979,7 +5227,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Argument 6: Set the number of capture registers to zero to force global // regexps to behave as non-global. 
This does not affect non-global regexps. - __ mov(r0, Operand(0)); + __ mov(r0, Operand::Zero()); __ str(r0, MemOperand(sp, 2 * kPointerSize)); // Argument 5 (sp[4]): static offsets vector buffer. @@ -5024,10 +5272,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // subject: subject string (callee saved) // regexp_data: RegExp data (callee saved) // last_match_info_elements: Last match info elements (callee saved) - // Check the result. Label success; - __ cmp(r0, Operand(1)); // We expect exactly one result since we force the called regexp to behave // as non-global. @@ -5073,10 +5319,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(r1, FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); // Calculate number of capture registers (number_of_captures + 1) * 2. + // Multiplying by 2 comes for free since r1 is smi-tagged. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); __ add(r1, r1, Operand(2)); // r1 was a smi. + __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); + __ JumpIfSmi(r0, &runtime); + __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE); + __ b(ne, &runtime); + // Check that the JSArray is in fast case. + __ ldr(last_match_info_elements, + FieldMemOperand(r0, JSArray::kElementsOffset)); + __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); + __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); + __ b(ne, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. + __ ldr(r0, + FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); + __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead)); + __ cmp(r2, Operand(r0, ASR, kSmiTagSize)); + __ b(gt, &runtime); + // r1: number of capture registers // r4: subject string // Store the capture count. @@ -5090,10 +5355,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ mov(r2, subject); __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset, - r2, + subject, r7, kLRHasNotBeenSaved, kDontSaveFPRegs); + __ mov(subject, r2); __ str(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); @@ -5133,8 +5399,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ add(sp, sp, Operand(4 * kPointerSize)); __ Ret(); - // External string. Short external strings have already been ruled out. - // r0: scratch + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + + // Deferred code for string handling. + // (6) Not a long external string? If yes, go to (8). + __ bind(¬_seq_nor_cons); + // Compare flags are still set. + __ b(gt, ¬_long_external); // Go to (8). + + // (7) External string. Make it, offset-wise, look like a sequential string. __ bind(&external_string); __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); @@ -5147,15 +5422,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ ldr(subject, FieldMemOperand(subject, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ sub(subject, subject, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); - __ jmp(&seq_string); + __ jmp(&seq_string); // Go to (5). - // Do the runtime call to execute the regexp. 
- __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + // (8) Short external string or not a string? If yes, bail out to runtime. + __ bind(¬_long_external); + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); + __ b(ne, &runtime); + + // (9) Sliced string. Replace subject with parent. Go to (4). + // Load offset into r9 and replace subject string with parent. + __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); + __ mov(r9, Operand(r9, ASR, kSmiTagSize)); + __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); + __ jmp(&check_underlying); // Go to (4). #endif // V8_INTERPRETED_REGEXP } @@ -5234,7 +5518,7 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // r3: Start of elements in FixedArray. // r5: Number of elements to fill. Label loop; - __ cmp(r5, Operand(0)); + __ cmp(r5, Operand::Zero()); __ bind(&loop); __ b(le, &done); // Jump if r5 is negative or zero. __ sub(r5, r5, Operand(1), SetCC); @@ -5250,12 +5534,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } -static void GenerateRecordCallTarget(MacroAssembler* masm) { +static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. // r1 : the function to call // r2 : cache cell for call target + ASSERT(!FLAG_optimize_constructed_arrays); Label done; ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), @@ -5289,6 +5574,82 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { } +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // r1 : the function to call + // r2 : cache cell for call target + ASSERT(FLAG_optimize_constructed_arrays); + Label initialize, done, miss, megamorphic, not_array_function; + + ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->undefined_value()); + ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), + masm->isolate()->heap()->the_hole_value()); + + // Load the cache state into r3. + __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ cmp(r3, r1); + __ b(eq, &done); + __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); + __ b(eq, &done); + + // Special handling of the Array() function, which caches not only the + // monomorphic Array function but the initial ElementsKind with special + // sentinels + Handle<Object> terminal_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), + LAST_FAST_ELEMENTS_KIND); + __ cmp(r3, Operand(terminal_kind_sentinel)); + __ b(ne, &miss); + // Make sure the function is the Array() function + __ LoadArrayFunction(r3); + __ cmp(r1, r3); + __ b(ne, &megamorphic); + __ jmp(&done); + + __ bind(&miss); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); + __ b(eq, &initialize); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. 
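GenerateRecordCallTarget above (continued below) maintains a small state machine in the call site's type-feedback cell: uninitialized (the hole), monomorphic (the cached function, or an ElementsKind sentinel when the callee is the Array function), and megamorphic (undefined). A standalone sketch of the transitions (not V8 code; the sentinel here is a plain enum value rather than a real ElementsKind sentinel object):

enum class CellState {
  kUninitialized,        // the hole
  kMonomorphicFunction,  // a cached JSFunction
  kArraySentinel,        // Array() plus a tracked ElementsKind
  kMegamorphic           // undefined
};

struct FeedbackCell { CellState state; const void* target; };

void RecordCallTarget(FeedbackCell* cell, const void* callee,
                      bool callee_is_array_function) {
  // Hits (or an already-megamorphic cell) leave the state unchanged.
  if (cell->state == CellState::kMegamorphic) return;
  if (cell->state == CellState::kMonomorphicFunction &&
      cell->target == callee) return;
  if (cell->state == CellState::kArraySentinel && callee_is_array_function)
    return;

  if (cell->state == CellState::kUninitialized) {
    // First call: cache the callee, or an ElementsKind sentinel for Array().
    cell->state = callee_is_array_function ? CellState::kArraySentinel
                                           : CellState::kMonomorphicFunction;
    cell->target = callee;
  } else {
    // Monomorphic miss: go megamorphic and stay there.
    cell->state = CellState::kMegamorphic;
    cell->target = nullptr;
  }
}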
+ __ bind(&megamorphic); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + + // An uninitialized cache is patched with the function or sentinel to + // indicate the ElementsKind if function is the Array constructor. + __ bind(&initialize); + // Make sure the function is the Array() function + __ LoadArrayFunction(r3); + __ cmp(r1, r3); + __ b(ne, ¬_array_function); + + // The target function is the Array constructor, install a sentinel value in + // the constructor's type info cell that will track the initial ElementsKind + // that should be used for the array when its constructed. + Handle<Object> initial_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), + GetInitialFastElementsKind()); + __ mov(r3, Operand(initial_kind_sentinel)); + __ str(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + __ b(&done); + + __ bind(¬_array_function); + __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset)); + // No need for a write barrier here - cells are rescanned. + + __ bind(&done); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { // r1 : the function to call // r2 : cache cell for call target @@ -5321,7 +5682,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ b(ne, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Fast-case: Invoke the function now. @@ -5360,8 +5725,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE)); __ b(ne, &non_function); __ push(r1); // put proxy as additional argument - __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE)); - __ mov(r2, Operand(0, RelocInfo::NONE)); + __ mov(r0, Operand(argc_ + 1, RelocInfo::NONE32)); + __ mov(r2, Operand::Zero()); __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY); __ SetCallKind(r5, CALL_AS_METHOD); { @@ -5375,7 +5740,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ bind(&non_function); __ str(r1, MemOperand(sp, argc_ * kPointerSize)); __ mov(r0, Operand(argc_)); // Set up the number of arguments. - __ mov(r2, Operand(0, RelocInfo::NONE)); + __ mov(r2, Operand::Zero()); __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION); __ SetCallKind(r5, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), @@ -5396,13 +5761,19 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ b(ne, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Jump to the function-specific construct stub. - __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); - __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset)); - __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag)); + Register jmp_reg = FLAG_optimize_constructed_arrays ? 
r3 : r2; + __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); + __ ldr(jmp_reg, FieldMemOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); // r0: number of arguments // r1: called object @@ -5418,55 +5789,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ bind(&do_call); // Set expected number of arguments to zero (not changing r0). - __ mov(r2, Operand(0, RelocInfo::NONE)); + __ mov(r2, Operand::Zero()); __ SetCallKind(r5, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); } -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. -void CompareStub::PrintName(StringStream* stream) { - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - const char* cc_name; - switch (cc_) { - case lt: cc_name = "LT"; break; - case gt: cc_name = "GT"; break; - case le: cc_name = "LE"; break; - case ge: cc_name = "GE"; break; - case eq: cc_name = "EQ"; break; - case ne: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - bool is_equality = cc_ == eq || cc_ == ne; - stream->Add("CompareStub_%s", cc_name); - stream->Add(lhs_.is(r0) ? "_r0" : "_r1"); - stream->Add(rhs_.is(r0) ? "_r0" : "_r1"); - if (strict_ && is_equality) stream->Add("_STRICT"); - if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); - if (!include_number_compare_) stream->Add("_NO_NUMBER"); - if (!include_smi_compare_) stream->Add("_NO_SMI"); -} - - -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. - ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12)); - ASSERT((lhs_.is(r0) && rhs_.is(r1)) || - (lhs_.is(r1) && rhs_.is(r0))); - return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) - | RegisterField::encode(lhs_.is(r0)) - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_) - | IncludeSmiCompareField::encode(include_smi_compare_); -} - - // StringCharCodeAtGenerator void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; @@ -5565,10 +5894,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { // Fast case of Heap::LookupSingleCharacterStringFromCode. 
STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); __ tst(code_, Operand(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + ((~String::kMaxOneByteCharCode) << kSmiTagSize))); __ b(ne, &slow_case_); __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); @@ -5599,23 +5928,6 @@ void StringCharFromCodeGenerator::GenerateSlow( } -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, - const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest, Register src, @@ -5629,7 +5941,7 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, if (!ascii) { __ add(count, count, Operand(count), SetCC); } else { - __ cmp(count, Operand(0, RelocInfo::NONE)); + __ cmp(count, Operand::Zero()); } __ b(eq, &done); @@ -5684,7 +5996,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, if (!ascii) { __ add(count, count, Operand(count), SetCC); } else { - __ cmp(count, Operand(0, RelocInfo::NONE)); + __ cmp(count, Operand::Zero()); } __ b(eq, &done); @@ -5795,7 +6107,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, } -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, +void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -5808,7 +6120,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register scratch = scratch3; // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. + // different hash algorithm. Don't try to look for these in the string table. Label not_array_index; __ sub(scratch, c1, Operand(static_cast<int>('0'))); __ cmp(scratch, Operand(static_cast<int>('9' - '0'))); @@ -5836,43 +6148,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string. - // Load symbol table - // Load address of first element of the symbol table. - Register symbol_table = c2; - __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); + // Load string table + // Load address of first element of the string table. + Register string_table = c2; + __ LoadRoot(string_table, Heap::kStringTableRootIndex); Register undefined = scratch4; __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); - // Calculate capacity mask from the symbol table capacity. + // Calculate capacity mask from the string table capacity. Register mask = scratch2; - __ ldr(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); + __ ldr(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset)); __ mov(mask, Operand(mask, ASR, 1)); __ sub(mask, mask, Operand(1)); - // Calculate untagged address of the first element of the symbol table. 
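The StringCharFromCodeGenerator fast path above folds two conditions into one tst: because kMaxOneByteCharCode + 1 is a power of two, "the value is a smi" and "the untagged value fits the single-character cache" can be tested with a single mask. A standalone illustration (not V8 code; the constant values are assumptions):

#include <cassert>
#include <cstdint>

constexpr uint32_t kSmiTagSize = 1;
constexpr uint32_t kSmiTagMask = 1;
constexpr uint32_t kMaxOneByteCharCode = 0xFF;   // assumed value

inline bool FitsSingleCharacterCache(uint32_t tagged_code) {
  uint32_t mask = kSmiTagMask | (~kMaxOneByteCharCode << kSmiTagSize);
  return (tagged_code & mask) == 0;  // smi-tagged and <= kMaxOneByteCharCode
}

int main() {
  assert(FitsSingleCharacterCache(65u << 1));          // smi-tagged 'A'
  assert(!FitsSingleCharacterCache((65u << 1) | 1u));  // not a smi
  assert(!FitsSingleCharacterCache(0x100u << 1));      // code too large
}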
- Register first_symbol_table_element = symbol_table; - __ add(first_symbol_table_element, symbol_table, - Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); + // Calculate untagged address of the first element of the string table. + Register first_string_table_element = string_table; + __ add(first_string_table_element, string_table, + Operand(StringTable::kElementsStartOffset - kHeapObjectTag)); // Registers // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string // mask: capacity mask - // first_symbol_table_element: address of the first element of - // the symbol table + // first_string_table_element: address of the first element of + // the string table // undefined: the undefined object // scratch: - - // Perform a number of probes in the symbol table. + // Perform a number of probes in the string table. const int kProbes = 4; - Label found_in_symbol_table; + Label found_in_string_table; Label next_probe[kProbes]; Register candidate = scratch5; // Scratch register contains candidate. for (int i = 0; i < kProbes; i++) { - // Calculate entry in symbol table. + // Calculate entry in string table. if (i > 0) { - __ add(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); + __ add(candidate, hash, Operand(StringTable::GetProbeOffset(i))); } else { __ mov(candidate, hash); } @@ -5880,9 +6192,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, __ and_(candidate, candidate, Operand(mask)); // Load the entry from the symble table. - STATIC_ASSERT(SymbolTable::kEntrySize == 1); + STATIC_ASSERT(StringTable::kEntrySize == 1); __ ldr(candidate, - MemOperand(first_symbol_table_element, + MemOperand(first_string_table_element, candidate, LSL, kPointerSizeLog2)); @@ -5898,7 +6210,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, if (FLAG_debug_code) { __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); __ cmp(ip, candidate); - __ Assert(eq, "oddball in symbol table is not undefined or the hole"); + __ Assert(eq, "oddball in string table is not undefined or the hole"); } __ jmp(&next_probe[i]); @@ -5916,9 +6228,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. // Assumes that word load is little endian. - __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); + __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize)); __ cmp(chars, scratch); - __ b(eq, &found_in_symbol_table); + __ b(eq, &found_in_string_table); __ bind(&next_probe[i]); } @@ -5927,7 +6239,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Scratch register contains result when we fall through to here. Register result = candidate; - __ bind(&found_in_symbol_table); + __ bind(&found_in_string_table); __ Move(r0, result); } @@ -5999,25 +6311,34 @@ void SubStringStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - // I.e., arithmetic shift right by one un-smi-tags. - __ mov(r2, Operand(r2, ASR, 1), SetCC); - __ mov(r3, Operand(r3, ASR, 1), SetCC, cc); - // If either to or from had the smi tag bit set, then carry is set now. - __ b(cs, &runtime); // Either "from" or "to" is not a smi. + // Arithmetic shift right by one un-smi-tags. 
In this case we rotate right + // instead because we bail out on non-smi values: ROR and ASR are equivalent + // for smis but they set the flags in a way that's easier to optimize. + __ mov(r2, Operand(r2, ROR, 1), SetCC); + __ mov(r3, Operand(r3, ROR, 1), SetCC, cc); + // If either to or from had the smi tag bit set, then C is set now, and N + // has the same value: we rotated by 1, so the bottom bit is now the top bit. // We want to bailout to runtime here if From is negative. In that case, the // next instruction is not executed and we fall through to bailing out to - // runtime. pl is the opposite of mi. - // Both r2 and r3 are untagged integers. - __ sub(r2, r2, Operand(r3), SetCC, pl); - __ b(mi, &runtime); // Fail if from > to. + // runtime. + // Executed if both r2 and r3 are untagged integers. + __ sub(r2, r2, Operand(r3), SetCC, cc); + // One of the above un-smis or the above SUB could have set N==1. + __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to. // Make sure first argument is a string. __ ldr(r0, MemOperand(sp, kStringOffset)); STATIC_ASSERT(kSmiTag == 0); - __ JumpIfSmi(r0, &runtime); - Condition is_string = masm->IsObjectStringType(r0, r1); + // Do a JumpIfSmi, but fold its jump into the subsequent string test. + __ tst(r0, Operand(kSmiTagMask)); + Condition is_string = masm->IsObjectStringType(r0, r1, ne); + ASSERT(is_string == eq); __ b(NegateCondition(is_string), &runtime); + Label single_char; + __ cmp(r2, Operand(1)); + __ b(eq, &single_char); + // Short-cut for the case of trivial substring. Label return_r0; // r0: original string @@ -6047,7 +6368,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ b(ne, &sliced_string); // Cons string. Check whether it is flat, then fetch first part. __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset)); - __ CompareRoot(r5, Heap::kEmptyStringRootIndex); + __ CompareRoot(r5, Heap::kempty_stringRootIndex); __ b(ne, &runtime); __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); // Update instance type. @@ -6086,7 +6407,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // string's encoding is wrong because we always have to recheck encoding of // the newly created string's parent anyways due to externalized strings. Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_slice); @@ -6124,12 +6445,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&sequential_string); // Locate first character of underlying subject string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); - __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ bind(&allocate_result); // Sequential acii string. Allocate the result. - STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); + STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); __ tst(r1, Operand(kStringEncodingMask)); __ b(eq, &two_byte_sequential); @@ -6139,13 +6460,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Locate first character of substring to copy. __ add(r5, r5, r3); // Locate first character of result. 
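The ROR-by-one un-smi-tagging near the top of this SubStringStub hunk is worth spelling out: for a non-negative smi the tag bit is 0, so rotating right by one produces the untagged value exactly as ASR #1 would, while for a non-smi the tag bit lands in bit 31 and the result looks negative, so a single "minus" branch catches both "not a smi" and "from > to". A standalone illustration (not V8 code):

#include <cassert>
#include <cstdint>

inline uint32_t RotateRight1(uint32_t x) { return (x >> 1) | (x << 31); }

int main() {
  uint32_t smi = 42u << 1;                   // tagged smi 42, tag bit clear
  assert(RotateRight1(smi) == 42u);          // same result as ASR #1 here
  uint32_t not_smi = (7u << 1) | 1u;         // tag bit set: not a smi
  assert(static_cast<int32_t>(RotateRight1(not_smi)) < 0);  // looks negative
}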
- __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // r0: result string // r1: first character of result string // r2: result string length // r5: first character of substring to copy - STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9, COPY_ASCII | DEST_ALWAYS_ALIGNED); __ jmp(&return_r0); @@ -6171,12 +6492,25 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&return_r0); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); - __ add(sp, sp, Operand(3 * kPointerSize)); + __ Drop(3); __ Ret(); // Just jump to runtime to create the sub string. __ bind(&runtime); __ TailCallRuntime(Runtime::kSubString, 3, 1); + + __ bind(&single_char); + // r0: original string + // r1: instance type + // r2: length + // r3: from index (untagged) + __ SmiTag(r3, r3); + StringCharAtGenerator generator( + r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ Drop(3); + __ Ret(); + generator.SkipSlow(masm, &runtime); } @@ -6202,7 +6536,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, Label compare_chars; __ bind(&check_zero_length); STATIC_ASSERT(kSmiTag == 0); - __ cmp(length, Operand(0)); + __ cmp(length, Operand::Zero()); __ b(ne, &compare_chars); __ mov(r0, Operand(Smi::FromInt(EQUAL))); __ Ret(); @@ -6235,7 +6569,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, __ mov(scratch1, scratch2, LeaveCC, gt); Register min_length = scratch1; STATIC_ASSERT(kSmiTag == 0); - __ cmp(min_length, Operand(0)); + __ cmp(min_length, Operand::Zero()); __ b(eq, &compare_lengths); // Compare loop. @@ -6270,7 +6604,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // doesn't need an additional compare. __ SmiUntag(length); __ add(scratch1, length, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ add(left, left, Operand(scratch1)); __ add(right, right, Operand(scratch1)); __ rsb(length, length, Operand::Zero()); @@ -6407,8 +6741,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Adding two lengths can't overflow. STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); __ add(r6, r2, Operand(r3)); - // Use the symbol table when adding two one character strings, as it - // helps later optimizations to return a symbol here. + // Use the string table when adding two one character strings, as it + // helps later optimizations to return a string here. __ cmp(r6, Operand(2)); __ b(ne, &longer_than_two); @@ -6423,13 +6757,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { &call_runtime); // Get the two characters forming the sub string. - __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); - __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize)); + __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); + __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize)); - // Try to lookup two character string in symbol table. If it is not found + // Try to lookup two character string in string table. If it is not found // just allocate a new one. 
Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( + StringHelper::GenerateTwoCharacterStringTableProbe( masm, r2, r3, r6, r7, r4, r5, r9, &make_two_character_string); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); @@ -6443,7 +6777,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // in a little endian mode) __ mov(r6, Operand(2)); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize)); + __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize)); __ IncrementCounter(counters->string_add_native(), 1, r2, r3); __ add(sp, sp, Operand(2 * kPointerSize)); __ Ret(); @@ -6494,9 +6828,9 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ tst(r5, Operand(kAsciiDataHintMask), ne); __ b(ne, &ascii_data); __ eor(r4, r4, Operand(r5)); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag)); + STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(r4, r4, Operand(kOneByteStringTag | kAsciiDataHintTag)); + __ cmp(r4, Operand(kOneByteStringTag | kAsciiDataHintTag)); __ b(eq, &ascii_data); // Allocate a two byte cons string. @@ -6530,10 +6864,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r4, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r7, r0, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &first_prepared); @@ -6546,10 +6880,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ tst(r5, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ add(r1, r1, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag), + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag), LeaveCC, eq); __ b(eq, &second_prepared); @@ -6572,7 +6906,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ b(eq, &non_ascii_string_add_flat_result); __ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime); - __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // r0: result string. // r7: first character of first string. // r1: first character of second string. 
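The two-character fast path in StringAddStub above probes the string table before allocating, so that concatenating two one-character strings can return an already-internalized string and later comparisons stay cheap. The real code probes a hash table keyed by the combined character codes; the std::map below is only meant to show the intent (not V8 code):

#include <map>
#include <string>
#include <utility>

using TwoCharTable = std::map<std::pair<char, char>, std::string>;

std::string AddOneCharStrings(char c1, char c2, const TwoCharTable& table) {
  auto it = table.find({c1, c2});
  if (it != table.end()) return it->second;  // found: reuse the interned one
  return std::string{c1, c2};                // miss: allocate a fresh string
}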
@@ -6663,7 +6997,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMIS); + ASSERT(state_ == CompareIC::SMI); Label miss; __ orr(r2, r1, r0); __ JumpIfNotSmi(r2, &miss); @@ -6683,32 +7017,54 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { } -void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBERS); +void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; Label miss; - __ and_(r2, r1, Operand(r0)); - __ JumpIfSmi(r2, &generic_stub); - __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE); - __ b(ne, &maybe_undefined1); - __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); - __ b(ne, &maybe_undefined2); + if (left_ == CompareIC::SMI) { + __ JumpIfNotSmi(r1, &miss); + } + if (right_ == CompareIC::SMI) { + __ JumpIfNotSmi(r0, &miss); + } // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or VFP3 is unsupported. + // stub if NaN is involved or VFP2 is unsupported. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); - // Load left and right operand - __ sub(r2, r1, Operand(kHeapObjectTag)); - __ vldr(d0, r2, HeapNumber::kValueOffset); + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(r0, &right_smi); + __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, + DONT_DO_SMI_CHECK); __ sub(r2, r0, Operand(kHeapObjectTag)); __ vldr(d1, r2, HeapNumber::kValueOffset); + __ b(&left); + __ bind(&right_smi); + __ SmiUntag(r2, r0); // Can't clobber r0 yet. + SwVfpRegister single_scratch = d2.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d1, single_scratch); + + __ bind(&left); + __ JumpIfSmi(r1, &left_smi); + __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, + DONT_DO_SMI_CHECK); + __ sub(r2, r1, Operand(kHeapObjectTag)); + __ vldr(d0, r2, HeapNumber::kValueOffset); + __ b(&done); + __ bind(&left_smi); + __ SmiUntag(r2, r1); // Can't clobber r1 yet. + single_scratch = d3.low(); + __ vmov(single_scratch, r2); + __ vcvt_f64_s32(d0, single_scratch); - // Compare operands + __ bind(&done); + // Compare operands. __ VFPCompareAndSetFlags(d0, d1); // Don't base result on status bits when a NaN is involved. 
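The NUMBER compare IC above now accepts any mix of smis and heap numbers: each smi operand is converted to a double (vmov plus vcvt_f64_s32) so one double comparison covers every combination, and a NaN result falls back to the generic stub. A standalone sketch of that behaviour (not V8 code; the Operand struct is illustrative):

#include <cmath>
#include <cstdint>
#include <optional>

struct Operand {
  bool is_smi;
  int32_t smi_value;   // valid when is_smi
  double heap_value;   // valid when !is_smi
};

inline double ToDouble(const Operand& op) {
  return op.is_smi ? static_cast<double>(op.smi_value) : op.heap_value;
}

// Returns <0, 0 or >0, or nullopt when a NaN is involved (generic stub case).
std::optional<int> CompareNumbers(const Operand& lhs, const Operand& rhs) {
  double a = ToDouble(lhs), b = ToDouble(rhs);
  if (std::isnan(a) || std::isnan(b)) return std::nullopt;  // unordered
  return (a < b) ? -1 : (a > b) ? 1 : 0;
}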
@@ -6722,14 +7078,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } __ bind(&unordered); - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); __ bind(&generic_stub); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + CompareIC::GENERIC); + __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(ne, &miss); + __ JumpIfSmi(r1, &unordered); __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); __ b(ne, &maybe_undefined2); __ jmp(&unordered); @@ -6746,8 +7104,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } -void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOLS); +void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::INTERNALIZED_STRING); Label miss; // Registers containing left and right operands respectively. @@ -6759,17 +7117,68 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { // Check that both operands are heap objects. __ JumpIfEitherSmi(left, right, &miss); - // Check that both operands are symbols. + // Check that both operands are internalized strings. __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ and_(tmp1, tmp1, Operand(tmp2)); - __ tst(tmp1, Operand(kIsSymbolMask)); + __ tst(tmp1, Operand(kIsInternalizedMask)); __ b(eq, &miss); - // Symbols are compared by identity. + // Internalized strings are compared by identity. + __ cmp(left, right); + // Make sure r0 is non-zero. At this point input operands are + // guaranteed to be non-zero. + ASSERT(right.is(r0)); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::UNIQUE_NAME); + ASSERT(GetCondition() == eq); + Label miss; + + // Registers containing left and right operands respectively. + Register left = r1; + Register right = r0; + Register tmp1 = r2; + Register tmp2 = r3; + + // Check that both operands are heap objects. + __ JumpIfEitherSmi(left, right, &miss); + + // Check that both operands are unique names. This leaves the instance + // types loaded in tmp1 and tmp2. + STATIC_ASSERT(kInternalizedTag != 0); + __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); + __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); + __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); + + Label succeed1; + __ tst(tmp1, Operand(kIsInternalizedMask)); + __ b(ne, &succeed1); + __ cmp(tmp1, Operand(SYMBOL_TYPE)); + __ b(ne, &miss); + __ bind(&succeed1); + + Label succeed2; + __ tst(tmp2, Operand(kIsInternalizedMask)); + __ b(ne, &succeed2); + __ cmp(tmp2, Operand(SYMBOL_TYPE)); + __ b(ne, &miss); + __ bind(&succeed2); + + // Unique names are compared by identity. __ cmp(left, right); // Make sure r0 is non-zero. At this point input operands are // guaranteed to be non-zero. 
@@ -6785,7 +7194,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRINGS); + ASSERT(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -6821,13 +7230,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // Handle not identical strings. - // Check that both strings are symbols. If they are, we're done + // Check that both strings are internalized strings. If they are, we're done // because we already know they are not identical. if (equality) { ASSERT(GetCondition() == eq); - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ and_(tmp3, tmp1, Operand(tmp2)); - __ tst(tmp3, Operand(kIsSymbolMask)); + __ tst(tmp3, Operand(kIsInternalizedMask)); // Make sure r0 is non-zero. At this point input operands are // guaranteed to be non-zero. ASSERT(right.is(r0)); @@ -6863,7 +7272,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECTS); + ASSERT(state_ == CompareIC::OBJECT); Label miss; __ and_(r2, r1, Operand(r0)); __ JumpIfSmi(r2, &miss); @@ -6941,8 +7350,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { - __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), - RelocInfo::CODE_TARGET)); + intptr_t code = + reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); + __ mov(lr, Operand(code, RelocInfo::CODE_TARGET)); // Prevent literal pool emission during calculation of return address. Assembler::BlockConstPoolScope block_const_pool(masm); @@ -6959,13 +7369,14 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, } -void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register receiver, - Register properties, - Handle<String> name, - Register scratch0) { +void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register receiver, + Register properties, + Handle<Name> name, + Register scratch0) { + ASSERT(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -6979,10 +7390,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); __ sub(index, index, Operand(1)); __ and_(index, index, Operand( - Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); + Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ add(index, index, Operand(index, LSL, 1)); // index *= 3. Register entity_name = scratch0; @@ -7002,21 +7413,23 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); // Stop if found the property. - __ cmp(entity_name, Operand(Handle<String>(name))); + __ cmp(entity_name, Operand(Handle<Name>(name))); __ b(eq, miss); - Label the_hole; + Label good; __ cmp(entity_name, tmp); - __ b(eq, &the_hole); + __ b(eq, &good); - // Check if the entry name is not a symbol. 
+ // Check if the entry name is not a unique name. __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); __ ldrb(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); - __ tst(entity_name, Operand(kIsSymbolMask)); - __ b(eq, miss); + __ tst(entity_name, Operand(kIsInternalizedMask)); + __ b(ne, &good); + __ cmp(entity_name, Operand(SYMBOL_TYPE)); + __ b(ne, miss); - __ bind(&the_hole); + __ bind(&good); // Restore the properties. __ ldr(properties, @@ -7030,10 +7443,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ stm(db_w, sp, spill_mask); __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - __ mov(r1, Operand(Handle<String>(name))); - StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); + __ mov(r1, Operand(Handle<Name>(name))); + NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); __ CallStub(&stub); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); __ ldm(ia_w, sp, spill_mask); __ b(eq, done); @@ -7041,23 +7454,23 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, } -// Probe the string dictionary in the |elements| register. Jump to the +// Probe the name dictionary in the |elements| register. Jump to the // |done| label if a property with the given name is found. Jump to // the |miss| label otherwise. // If lookup was successful |scratch2| will be equal to elements + 4 * index. -void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register scratch1, - Register scratch2) { +void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register scratch1, + Register scratch2) { ASSERT(!elements.is(scratch1)); ASSERT(!elements.is(scratch2)); ASSERT(!name.is(scratch1)); ASSERT(!name.is(scratch2)); - __ AssertString(name); + __ AssertName(name); // Compute the capacity mask. __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); @@ -7069,20 +7482,20 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, // cover ~93% of loads from dictionaries. for (int i = 0; i < kInlinedProbes; i++) { // Compute the masked index: (hash + i + i * i) & mask. - __ ldr(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); + __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); if (i > 0) { // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(StringDictionary::GetProbeOffset(i) < - 1 << (32 - String::kHashFieldOffset)); + ASSERT(NameDictionary::GetProbeOffset(i) < + 1 << (32 - Name::kHashFieldOffset)); __ add(scratch2, scratch2, Operand( - StringDictionary::GetProbeOffset(i) << String::kHashShift)); + NameDictionary::GetProbeOffset(i) << Name::kHashShift)); } - __ and_(scratch2, scratch1, Operand(scratch2, LSR, String::kHashShift)); + __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); // Scale the index by multiplying by the element size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); // scratch2 = scratch2 * 3. 
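The renamed NameDictionaryLookupStub above keeps the same lookup strategy: open addressing over a power-of-two table, a probe sequence derived from the name hash (hash + i + i*i per the comments above; the exact offset formula is V8-internal), and three array slots per logical entry, which is why the index is multiplied by three before addressing. A standalone sketch (not V8 code):

#include <cstdint>
#include <vector>

constexpr uint32_t kEntrySize = 3;  // key, value, details per entry

// Returns the entry index holding |key|, or -1 when a free (undefined) slot
// is reached first, meaning the name is not present.
int Lookup(const std::vector<const void*>& slots, uint32_t capacity,
           const void* key, uint32_t hash, const void* undefined) {
  uint32_t mask = capacity - 1;                  // capacity is a power of two
  for (uint32_t i = 0; i < capacity; ++i) {
    uint32_t index = (hash + i + i * i) & mask;  // quadratic-style probing
    const void* candidate = slots[index * kEntrySize];
    if (candidate == key) return static_cast<int>(index);
    if (candidate == undefined) return -1;
  }
  return -1;  // table exhausted (does not happen with a load factor below 1)
}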
__ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); @@ -7107,9 +7520,9 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ Move(r0, elements); __ Move(r1, name); } - StringDictionaryLookupStub stub(POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(POSITIVE_LOOKUP); __ CallStub(&stub); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); __ mov(scratch2, Operand(r2)); __ ldm(ia_w, sp, spill_mask); @@ -7118,15 +7531,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, } -void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { +void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // This stub overrides SometimesSetsUpAFrame() to return false. That means // we cannot call anything that could cause a GC from this stub. // Registers: - // result: StringDictionary to probe + // result: NameDictionary to probe // r1: key - // : StringDictionary to probe. - // index_: will hold an index of entry if lookup is successful. - // might alias with result_. + // dictionary: NameDictionary to probe. + // index: will hold an index of entry if lookup is successful. + // might alias with result_. // Returns: // result_ is zero if lookup failed, non zero otherwise. @@ -7145,7 +7558,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { __ mov(mask, Operand(mask, ASR, kSmiTagSize)); __ sub(mask, mask, Operand(1)); - __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset)); + __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); @@ -7156,17 +7569,17 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(StringDictionary::GetProbeOffset(i) < - 1 << (32 - String::kHashFieldOffset)); + ASSERT(NameDictionary::GetProbeOffset(i) < + 1 << (32 - Name::kHashFieldOffset)); __ add(index, hash, Operand( - StringDictionary::GetProbeOffset(i) << String::kHashShift)); + NameDictionary::GetProbeOffset(i) << Name::kHashShift)); } else { __ mov(index, Operand(hash)); } - __ and_(index, mask, Operand(index, LSR, String::kHashShift)); + __ and_(index, mask, Operand(index, LSR, Name::kHashShift)); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ add(index, index, Operand(index, LSL, 1)); // index *= 3. ASSERT_EQ(kSmiTagSize, 1); @@ -7182,12 +7595,16 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { __ b(eq, &in_dictionary); if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { - // Check if the entry name is not a symbol. + // Check if the entry name is not a unique name. + Label cont; __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); __ ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); - __ tst(entry_key, Operand(kIsSymbolMask)); - __ b(eq, &maybe_in_dictionary); + __ tst(entry_key, Operand(kIsInternalizedMask)); + __ b(ne, &cont); + __ cmp(entry_key, Operand(SYMBOL_TYPE)); + __ b(ne, &maybe_in_dictionary); + __ bind(&cont); } } @@ -7220,7 +7637,6 @@ struct AheadOfTimeWriteBarrierStubList { static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // Used in RegExpExecStub. 
{ REG(r6), REG(r4), REG(r7), EMIT_REMEMBERED_SET }, - { REG(r6), REG(r2), REG(r7), EMIT_REMEMBERED_SET }, // Used in CompileArrayPushCall. // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. // Also used in KeyedStoreIC::GenerateGeneric. @@ -7276,13 +7692,14 @@ bool StoreBufferOverflowStub::IsPregenerated() { } -void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode()->set_is_pregenerated(true); + stub1.GetCode(isolate)->set_is_pregenerated(true); } -void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; !entry->object.is(no_reg); entry++) { @@ -7291,7 +7708,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { entry->address, entry->action, kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); + stub.GetCode(isolate)->set_is_pregenerated(true); } } @@ -7396,12 +7813,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { ASSERT(!address.is(r0)); __ Move(address, regs_.address()); __ Move(r0, regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - __ Move(r1, address); - } else { - ASSERT(mode == INCREMENTAL); - __ ldr(r1, MemOperand(address, 0)); - } + __ Move(r1, address); __ mov(r2, Operand(ExternalReference::isolate_address())); AllowExternalCallThatCantCauseGC scope(masm); @@ -7559,7 +7971,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. __ bind(&double_elements); __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(r0, r3, r1, + __ StoreNumberToDoubleElements(r0, r3, // Overwrites all regs after this. r5, r6, r7, r9, r2, &slow_elements); @@ -7567,9 +7979,24 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { } +void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { + ASSERT(!Serializer::enabled()); + bool save_fp_regs = CpuFeatures::IsSupported(VFP2); + CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs); + __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + int parameter_count_offset = + StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; + __ ldr(r1, MemOperand(fp, parameter_count_offset)); + masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); + __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); + __ add(sp, sp, r1); + __ Ret(); +} + + void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (entry_hook_ != NULL) { - PredictableCodeSizeScope predictable(masm); + PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize); ProfileEntryHookStub stub; __ push(lr); __ CallStub(&stub); diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h index 3e796249c8..61ecc975fc 100644 --- a/deps/v8/src/arm/code-stubs-arm.h +++ b/deps/v8/src/arm/code-stubs-arm.h @@ -36,7 +36,7 @@ namespace internal { // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. 
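The new StubFailureTrampolineStub::Generate above ends by dropping the caller's stack arguments: the argument count is loaded from a fixed frame slot and turned into a byte offset by shifting left by log2(pointer size) before being added to sp. A tiny illustration of that arithmetic (not V8 code; 32-bit ARM pointers assumed):

#include <cstdint>

constexpr int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit ARM

inline uintptr_t DropArguments(uintptr_t sp, uint32_t argument_count) {
  return sp + (static_cast<uintptr_t>(argument_count) << kPointerSizeLog2);
}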
-class TranscendentalCacheStub: public CodeStub { +class TranscendentalCacheStub: public PlatformCodeStub { public: enum ArgumentType { TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, @@ -58,7 +58,7 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { +class StoreBufferOverflowStub: public PlatformCodeStub { public: explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) : save_doubles_(save_fp) { } @@ -66,7 +66,7 @@ class StoreBufferOverflowStub: public CodeStub { void Generate(MacroAssembler* masm); virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } private: @@ -77,7 +77,7 @@ class StoreBufferOverflowStub: public CodeStub { }; -class UnaryOpStub: public CodeStub { +class UnaryOpStub: public PlatformCodeStub { public: UnaryOpStub(Token::Value op, UnaryOverwriteMode mode, @@ -119,9 +119,9 @@ class UnaryOpStub: public CodeStub { void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow); void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateHeapNumberStubSub(MacroAssembler* masm); - void GenerateHeapNumberStubBitNot(MacroAssembler* masm); + void GenerateNumberStub(MacroAssembler* masm); + void GenerateNumberStubSub(MacroAssembler* masm); + void GenerateNumberStubBitNot(MacroAssembler* masm); void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); @@ -142,108 +142,6 @@ class UnaryOpStub: public CodeStub { }; -class BinaryOpStub: public CodeStub { - public: - BinaryOpStub(Token::Value op, OverwriteMode mode) - : op_(op), - mode_(mode), - operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED) { - use_vfp2_ = CpuFeatures::IsSupported(VFP2); - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - BinaryOpStub( - int key, - BinaryOpIC::TypeInfo operands_type, - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - use_vfp2_(VFP2Bits::decode(key)), - operands_type_(operands_type), - result_type_(result_type) { } - - private: - enum SmiCodeGenerateHeapNumberResults { - ALLOW_HEAPNUMBER_RESULTS, - NO_HEAPNUMBER_RESULTS - }; - - Token::Value op_; - OverwriteMode mode_; - bool use_vfp2_; - - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo operands_type_; - BinaryOpIC::TypeInfo result_type_; - - virtual void PrintName(StringStream* stream); - - // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. 
- class ModeBits: public BitField<OverwriteMode, 0, 2> {}; - class OpBits: public BitField<Token::Value, 2, 7> {}; - class VFP2Bits: public BitField<bool, 9, 1> {}; - class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; - class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; - - Major MajorKey() { return BinaryOp; } - int MinorKey() { - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | VFP2Bits::encode(use_vfp2_) - | OperandTypeInfoBits::encode(operands_type_) - | ResultTypeInfoBits::encode(result_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateGeneric(MacroAssembler* masm); - void GenerateSmiSmiOperation(MacroAssembler* masm); - void GenerateFPOperation(MacroAssembler* masm, - bool smi_operands, - Label* not_numbers, - Label* gc_required); - void GenerateSmiCode(MacroAssembler* masm, - Label* use_runtime, - Label* gc_required, - SmiCodeGenerateHeapNumberResults heapnumber_results); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateUninitializedStub(MacroAssembler* masm); - void GenerateSmiStub(MacroAssembler* masm); - void GenerateInt32Stub(MacroAssembler* masm); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateOddballStub(MacroAssembler* masm); - void GenerateStringStub(MacroAssembler* masm); - void GenerateBothStringStub(MacroAssembler* masm); - void GenerateGenericStub(MacroAssembler* masm); - void GenerateAddStrings(MacroAssembler* masm); - void GenerateCallRuntime(MacroAssembler* masm); - - void GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(operands_type_); - } - - virtual void FinishCode(Handle<Code> code) { - code->set_binary_op_type(operands_type_); - code->set_binary_op_result_type(result_type_); - } - - friend class CodeGenerator; -}; - - class StringHelper : public AllStatic { public: // Generate code for copying characters using a simple loop. This should only @@ -274,14 +172,14 @@ class StringHelper : public AllStatic { int flags); - // Probe the symbol table for a two character string. If the string is + // Probe the string table for a two character string. If the string is // not found by probing a jump to the label not_found is performed. This jump - // does not guarantee that the string is not in the symbol table. If the + // does not guarantee that the string is not in the string table. If the // string is found the code falls through with the string in register r0. // Contents of both c1 and c2 registers are modified. At the exit c1 is // guaranteed to contain halfword with low and high bytes equal to // initial contents of c1 and c2 respectively. 
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -321,7 +219,7 @@ enum StringAddFlags { }; -class StringAddStub: public CodeStub { +class StringAddStub: public PlatformCodeStub { public: explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} @@ -344,7 +242,7 @@ class StringAddStub: public CodeStub { }; -class SubStringStub: public CodeStub { +class SubStringStub: public PlatformCodeStub { public: SubStringStub() {} @@ -357,7 +255,7 @@ class SubStringStub: public CodeStub { -class StringCompareStub: public CodeStub { +class StringCompareStub: public PlatformCodeStub { public: StringCompareStub() { } @@ -397,7 +295,7 @@ class StringCompareStub: public CodeStub { // This stub can convert a signed int32 to a heap number (double). It does // not work for int32s that are in Smi range! No GC occurs during this stub // so you don't have to set up the frame. -class WriteInt32ToHeapNumberStub : public CodeStub { +class WriteInt32ToHeapNumberStub : public PlatformCodeStub { public: WriteInt32ToHeapNumberStub(Register the_int, Register the_heap_number, @@ -407,7 +305,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub { scratch_(scratch) { } bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); private: Register the_int_; @@ -431,7 +329,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub { }; -class NumberToStringStub: public CodeStub { +class NumberToStringStub: public PlatformCodeStub { public: NumberToStringStub() { } @@ -457,7 +355,7 @@ class NumberToStringStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { +class RecordWriteStub: public PlatformCodeStub { public: RecordWriteStub(Register object, Register value, @@ -481,7 +379,7 @@ class RecordWriteStub: public CodeStub { }; virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { @@ -571,12 +469,15 @@ class RecordWriteStub: public CodeStub { void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP2); + // Number of d-regs not known at snapshot time. + ASSERT(!Serializer::enabled()); + CpuFeatureScope scope(masm, VFP2); masm->sub(sp, sp, - Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); + Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); // Save all VFP registers except d0. - for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { + // TODO(hans): We should probably save d0 too. And maybe use vstm. + for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { DwVfpRegister reg = DwVfpRegister::from_code(i); masm->vstr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); } @@ -586,15 +487,18 @@ class RecordWriteStub: public CodeStub { inline void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) { if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(VFP2); + // Number of d-regs not known at snapshot time. + ASSERT(!Serializer::enabled()); + CpuFeatureScope scope(masm, VFP2); // Restore all VFP registers except d0. 
- for (int i = DwVfpRegister::kNumRegisters - 1; i > 0; i--) { + // TODO(hans): We should probably restore d0 too. And maybe use vldm. + for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) { DwVfpRegister reg = DwVfpRegister::from_code(i); masm->vldr(reg, MemOperand(sp, (i - 1) * kDoubleSize)); } masm->add(sp, sp, - Operand(kDoubleSize * (DwVfpRegister::kNumRegisters - 1))); + Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1))); } masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit()); } @@ -613,7 +517,7 @@ class RecordWriteStub: public CodeStub { Register GetRegThatIsNotOneOf(Register r1, Register r2, Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { Register candidate = Register::FromAllocationIndex(i); if (candidate.is(r1)) continue; if (candidate.is(r2)) continue; @@ -672,7 +576,7 @@ class RecordWriteStub: public CodeStub { // Enter C code from generated RegExp code in a way that allows // the C code to fix the return address in case of a GC. // Currently only needed on ARM. -class RegExpCEntryStub: public CodeStub { +class RegExpCEntryStub: public PlatformCodeStub { public: RegExpCEntryStub() {} virtual ~RegExpCEntryStub() {} @@ -691,7 +595,7 @@ class RegExpCEntryStub: public CodeStub { // keep the code which called into native pinned in the memory. Currently the // simplest approach is to generate such stub early enough so it can never be // moved by GC -class DirectCEntryStub: public CodeStub { +class DirectCEntryStub: public PlatformCodeStub { public: DirectCEntryStub() {} void Generate(MacroAssembler* masm); @@ -724,20 +628,6 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2); - // Loads objects from r0 and r1 (right and left in binary operations) into - // floating point registers. Depending on the destination the values ends up - // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is - // floating point registers VFP3 must be supported. If core registers are - // requested when VFP3 is supported d6 and d7 will still be scratched. If - // either r0 or r1 is not a number (not smi and not heap number object) the - // not_number label is jumped to with r0 and r1 intact. - static void LoadOperands(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number); - // Convert the smi or heap number in object to an int32 using the rules // for ToInt32 as described in ECMAScript 9.5.: the value is truncated // and brought into the range -2^31 .. +2^31 - 1. @@ -748,7 +638,8 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2, Register scratch3, - DwVfpRegister double_scratch, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, Label* not_int32); // Converts the integer (untagged smi) in |int_scratch| to a double, storing @@ -836,7 +727,12 @@ class FloatingPointHelper : public AllStatic { Register heap_number_result, Register scratch); - private: + // Loads the objects from |object| into floating point registers. + // Depending on |destination| the value ends up either in |dst| or + // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3 + // must be supported. If kCoreRegisters are requested and VFP3 is + // supported, |dst| will be scratched. If |object| is neither smi nor + // heap number, |not_number| is jumped to with |object| still intact. 
static void LoadNumber(MacroAssembler* masm, FloatingPointHelper::Destination destination, Register object, @@ -850,11 +746,11 @@ class FloatingPointHelper : public AllStatic { }; -class StringDictionaryLookupStub: public CodeStub { +class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { } + explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { } void Generate(MacroAssembler* masm); @@ -863,7 +759,7 @@ class StringDictionaryLookupStub: public CodeStub { Label* done, Register receiver, Register properties, - Handle<String> name, + Handle<Name> name, Register scratch0); static void GeneratePositiveLookup(MacroAssembler* masm, @@ -881,14 +777,14 @@ class StringDictionaryLookupStub: public CodeStub { static const int kTotalProbes = 20; static const int kCapacityOffset = - StringDictionary::kHeaderSize + - StringDictionary::kCapacityIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kCapacityIndex * kPointerSize; static const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryLookup; } + Major MajorKey() { return NameDictionaryLookup; } int MinorKey() { return LookupModeBits::encode(mode_); diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc index 09166c3c01..ff97ab5094 100644 --- a/deps/v8/src/arm/codegen-arm.cc +++ b/deps/v8/src/arm/codegen-arm.cc @@ -31,11 +31,11 @@ #include "codegen.h" #include "macro-assembler.h" +#include "simulator-arm.h" namespace v8 { namespace internal { -#define __ ACCESS_MASM(masm) UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { switch (type) { @@ -49,6 +49,75 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { } +#define __ masm. + + +#if defined(USE_SIMULATOR) +byte* fast_exp_arm_machine_code = NULL; +double fast_exp_simulator(double x) { + return Simulator::current(Isolate::Current())->CallFP( + fast_exp_arm_machine_code, x, 0); +} +#endif + + +UnaryMathFunction CreateExpFunction() { + if (!CpuFeatures::IsSupported(VFP2)) return &exp; + if (!FLAG_fast_math) return &exp; + size_t actual_size; + byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &exp; + ExternalReference::InitializeMathExpData(); + + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + + { + CpuFeatureScope use_vfp(&masm, VFP2); + DwVfpRegister input = d0; + DwVfpRegister result = d1; + DwVfpRegister double_scratch1 = d2; + DwVfpRegister double_scratch2 = d3; + Register temp1 = r4; + Register temp2 = r5; + Register temp3 = r6; + + if (masm.use_eabi_hardfloat()) { + // Input value is in d0 anyway, nothing to do. 
+ } else { + __ vmov(input, r0, r1); + } + __ Push(temp3, temp2, temp1); + MathExpGenerator::EmitMathExp( + &masm, input, result, double_scratch1, double_scratch2, + temp1, temp2, temp3); + __ Pop(temp3, temp2, temp1); + if (masm.use_eabi_hardfloat()) { + __ vmov(d0, result); + } else { + __ vmov(r0, r1, result); + } + __ Ret(); + } + + CodeDesc desc; + masm.GetCode(&desc); + ASSERT(!RelocInfo::RequiresRelocation(desc)); + + CPU::FlushICache(buffer, actual_size); + OS::ProtectCode(buffer, actual_size); + +#if !defined(USE_SIMULATOR) + return FUNCTION_CAST<UnaryMathFunction>(buffer); +#else + fast_exp_arm_machine_code = buffer; + return &fast_exp_simulator; +#endif +} + + +#undef __ + + UnaryMathFunction CreateSqrtFunction() { return &sqrt; } @@ -73,8 +142,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { // ------------------------------------------------------------------------- // Code generators +#define __ ACCESS_MASM(masm) + void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm) { + MacroAssembler* masm, AllocationSiteMode mode, + Label* allocation_site_info_found) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -83,6 +155,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // -- r3 : target map, scratch for subsequent call // -- r4 : scratch (elements) // ----------------------------------- + if (mode == TRACK_ALLOCATION_SITE) { + ASSERT(allocation_site_info_found != NULL); + __ TestJSArrayForAllocationSiteInfo(r2, r4); + __ b(eq, allocation_site_info_found); + } + // Set transitioned map. __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); __ RecordWriteField(r2, @@ -97,7 +175,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -109,6 +187,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( Label loop, entry, convert_hole, gc_required, only_change_map, done; bool vfp2_supported = CpuFeatures::IsSupported(VFP2); + if (mode == TRACK_ALLOCATION_SITE) { + __ TestJSArrayForAllocationSiteInfo(r2, r4); + __ b(eq, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); @@ -123,27 +206,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Allocate new FixedDoubleArray. // Use lr as a temporary register. __ mov(lr, Operand(r5, LSL, 2)); - __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize + kPointerSize)); - __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS); + __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize)); + __ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, DOUBLE_ALIGNMENT); // r6: destination FixedDoubleArray, not tagged as heap object. - // Align the array conveniently for doubles. - // Store a filler value in the unused memory. - Label aligned, aligned_done; - __ tst(r6, Operand(kDoubleAlignmentMask)); - __ mov(ip, Operand(masm->isolate()->factory()->one_pointer_filler_map())); - __ b(eq, &aligned); - // Store at the beginning of the allocated memory and update the base pointer. - __ str(ip, MemOperand(r6, kPointerSize, PostIndex)); - __ b(&aligned_done); - - __ bind(&aligned); - // Store the filler at the end of the allocated memory. 
- __ sub(lr, lr, Operand(kPointerSize)); - __ str(ip, MemOperand(r6, lr)); - - __ bind(&aligned_done); - // Set destination FixedDoubleArray's length and map. __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex); __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset)); @@ -192,7 +258,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( HeapObject::kMapOffset, r3, r9, - kLRHasBeenSaved, + kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); @@ -211,7 +277,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Normal smi, convert to double and store. if (vfp2_supported) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, r9); __ vcvt_f64_s32(d0, s0); __ vstr(d0, r7, 0); @@ -251,7 +317,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -262,6 +328,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // ----------------------------------- Label entry, loop, convert_hole, gc_required, only_change_map; + if (mode == TRACK_ALLOCATION_SITE) { + __ TestJSArrayForAllocationSiteInfo(r2, r4); + __ b(eq, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); @@ -397,7 +468,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, // the string. __ bind(&cons_string); __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset)); - __ CompareRoot(result, Heap::kEmptyStringRootIndex); + __ CompareRoot(result, Heap::kempty_stringRootIndex); __ b(ne, call_runtime); // Get the first of the two strings and load its instance type. __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset)); @@ -416,7 +487,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ b(ne, &external_string); // Prepare sequential strings - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ add(string, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); @@ -450,8 +521,188 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ bind(&done); } + +void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, + String::Encoding encoding, + Register string, + Register index, + Register value) { + if (FLAG_debug_code) { + __ tst(index, Operand(kSmiTagMask)); + __ Check(eq, "Non-smi index"); + __ tst(value, Operand(kSmiTagMask)); + __ Check(eq, "Non-smi value"); + + __ ldr(ip, FieldMemOperand(string, String::kLengthOffset)); + __ cmp(index, ip); + __ Check(lt, "Index is too large"); + + __ cmp(index, Operand(Smi::FromInt(0))); + __ Check(ge, "Index is negative"); + + __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset)); + __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset)); + + __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING + ? 
one_byte_seq_type : two_byte_seq_type)); + __ Check(eq, "Unexpected string type"); + } + + __ add(ip, + string, + Operand(SeqString::kHeaderSize - kHeapObjectTag)); + __ SmiUntag(value, value); + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + if (encoding == String::ONE_BYTE_ENCODING) { + // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline. + __ strb(value, MemOperand(ip, index, LSR, 1)); + } else { + // No need to untag a smi for two-byte addressing. + __ strh(value, MemOperand(ip, index)); + } +} + + +static MemOperand ExpConstant(int index, Register base) { + return MemOperand(base, index * kDoubleSize); +} + + +void MathExpGenerator::EmitMathExp(MacroAssembler* masm, + DwVfpRegister input, + DwVfpRegister result, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3) { + ASSERT(!input.is(result)); + ASSERT(!input.is(double_scratch1)); + ASSERT(!input.is(double_scratch2)); + ASSERT(!result.is(double_scratch1)); + ASSERT(!result.is(double_scratch2)); + ASSERT(!double_scratch1.is(double_scratch2)); + ASSERT(!temp1.is(temp2)); + ASSERT(!temp1.is(temp3)); + ASSERT(!temp2.is(temp3)); + ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + + Label done; + + __ mov(temp3, Operand(ExternalReference::math_exp_constants(0))); + + __ vldr(double_scratch1, ExpConstant(0, temp3)); + __ vmov(result, kDoubleRegZero); + __ VFPCompareAndSetFlags(double_scratch1, input); + __ b(ge, &done); + __ vldr(double_scratch2, ExpConstant(1, temp3)); + __ VFPCompareAndSetFlags(input, double_scratch2); + __ vldr(result, ExpConstant(2, temp3)); + __ b(ge, &done); + __ vldr(double_scratch1, ExpConstant(3, temp3)); + __ vldr(result, ExpConstant(4, temp3)); + __ vmul(double_scratch1, double_scratch1, input); + __ vadd(double_scratch1, double_scratch1, result); + __ vmov(temp2, temp1, double_scratch1); + __ vsub(double_scratch1, double_scratch1, result); + __ vldr(result, ExpConstant(6, temp3)); + __ vldr(double_scratch2, ExpConstant(5, temp3)); + __ vmul(double_scratch1, double_scratch1, double_scratch2); + __ vsub(double_scratch1, double_scratch1, input); + __ vsub(result, result, double_scratch1); + __ vmul(input, double_scratch1, double_scratch1); + __ vmul(result, result, input); + __ mov(temp1, Operand(temp2, LSR, 11)); + __ vldr(double_scratch2, ExpConstant(7, temp3)); + __ vmul(result, result, double_scratch2); + __ vsub(result, result, double_scratch1); + __ vldr(double_scratch2, ExpConstant(8, temp3)); + __ vadd(result, result, double_scratch2); + __ movw(ip, 0x7ff); + __ and_(temp2, temp2, Operand(ip)); + __ add(temp1, temp1, Operand(0x3ff)); + __ mov(temp1, Operand(temp1, LSL, 20)); + + // Must not call ExpConstant() after overwriting temp3! 
+ __ mov(temp3, Operand(ExternalReference::math_exp_log_table())); + __ ldr(ip, MemOperand(temp3, temp2, LSL, 3)); + __ add(temp3, temp3, Operand(kPointerSize)); + __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3)); + __ orr(temp1, temp1, temp2); + __ vmov(input, ip, temp1); + __ vmul(result, result, input); + __ bind(&done); +} + #undef __ +// add(r0, pc, Operand(-8)) +static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008; + +static byte* GetNoCodeAgeSequence(uint32_t* length) { + // The sequence of instructions that is patched out for aging code is the + // following boilerplate stack-building prologue that is found in FUNCTIONS + static bool initialized = false; + static uint32_t sequence[kNoCodeAgeSequenceLength]; + byte* byte_sequence = reinterpret_cast<byte*>(sequence); + *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize; + if (!initialized) { + CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength); + PredictableCodeSizeScope scope(patcher.masm(), *length); + patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex); + patcher.masm()->add(fp, sp, Operand(2 * kPointerSize)); + initialized = true; + } + return byte_sequence; +} + + +bool Code::IsYoungSequence(byte* sequence) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + bool result = !memcmp(sequence, young_sequence, young_length); + ASSERT(result || + Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction); + return result; +} + + +void Code::GetCodeAgeAndParity(byte* sequence, Age* age, + MarkingParity* parity) { + if (IsYoungSequence(sequence)) { + *age = kNoAge; + *parity = NO_MARKING_PARITY; + } else { + Address target_address = Memory::Address_at( + sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1)); + Code* stub = GetCodeFromTargetAddress(target_address); + GetCodeAgeAndParity(stub, age, parity); + } +} + + +void Code::PatchPlatformCodeAge(byte* sequence, + Code::Age age, + MarkingParity parity) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + if (age == kNoAge) { + memcpy(sequence, young_sequence, young_length); + CPU::FlushICache(sequence, young_length); + } else { + Code* stub = GetCodeAgeStub(age, parity); + CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); + patcher.masm()->add(r0, pc, Operand(-8)); + patcher.masm()->ldr(pc, MemOperand(pc, -4)); + patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start())); + } +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h index c340e6b108..75899a948e 100644 --- a/deps/v8/src/arm/codegen-arm.h +++ b/deps/v8/src/arm/codegen-arm.h @@ -44,6 +44,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; class CodeGenerator: public AstVisitor { public: + CodeGenerator() { + InitializeAstVisitor(); + } + static bool MakeCode(CompilationInfo* info); // Printing of AST, etc. as requested by flags. 
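Aside: EmitMathExp above scales its polynomial result by 2^k by writing the biased exponent straight into the high word of a double ((k + 0x3ff) << 20 into the upper 32 bits) before OR-ing in the table-derived mantissa bits. A minimal standalone C++ sketch of that bit trick, using illustrative names that are not part of V8:

    #include <cstdint>
    #include <cstring>

    // Build the double 2^n by placing the biased exponent (n + 1023) into the
    // IEEE-754 exponent field; valid for normal exponents (-1022 <= n <= 1023).
    double PowerOfTwo(int n) {
      uint64_t bits = static_cast<uint64_t>(n + 0x3ff) << 52;
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }

For n = 0 this yields the bit pattern 0x3ff0000000000000, i.e. 1.0, which is the same exponent-assembly step the generated ARM code performs with add/lsl on the high word.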
@@ -68,6 +72,8 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); + private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; @@ -88,6 +94,22 @@ class StringCharLoadGenerator : public AllStatic { DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator); }; + +class MathExpGenerator : public AllStatic { + public: + static void EmitMathExp(MacroAssembler* masm, + DwVfpRegister input, + DwVfpRegister result, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3); + + private: + DISALLOW_COPY_AND_ASSIGN(MathExpGenerator); +}; + } } // namespace v8::internal #endif // V8_ARM_CODEGEN_ARM_H_ diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc index bf9da232cc..cdca1f5310 100644 --- a/deps/v8/src/arm/constants-arm.cc +++ b/deps/v8/src/arm/constants-arm.cc @@ -87,8 +87,8 @@ const char* Registers::Name(int reg) { } -// Support for VFP registers s0 to s31 (d0 to d15). -// Note that "sN:sM" is the same as "dN/2" +// Support for VFP registers s0 to s31 (d0 to d15) and d16-d31. +// Note that "sN:sM" is the same as "dN/2" up to d15. // These register names are defined in a way to match the native disassembler // formatting. See for example the command "objdump -d <binary file>". const char* VFPRegisters::names_[kNumVFPRegisters] = { @@ -97,7 +97,9 @@ const char* VFPRegisters::names_[kNumVFPRegisters] = { "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23", "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", - "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15" + "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15", + "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23", + "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31" }; diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h index 4fa49e3d3c..004165ac32 100644 --- a/deps/v8/src/arm/constants-arm.h +++ b/deps/v8/src/arm/constants-arm.h @@ -84,16 +84,25 @@ namespace v8 { namespace internal { // Constant pool marker. -const int kConstantPoolMarkerMask = 0xffe00000; -const int kConstantPoolMarker = 0x0c000000; -const int kConstantPoolLengthMask = 0x001ffff; +// Use UDF, the permanently undefined instruction. +const int kConstantPoolMarkerMask = 0xfff000f0; +const int kConstantPoolMarker = 0xe7f000f0; +const int kConstantPoolLengthMaxMask = 0xffff; +inline int EncodeConstantPoolLength(int length) { + ASSERT((length & kConstantPoolLengthMaxMask) == length); + return ((length & 0xfff0) << 4) | (length & 0xf); +} +inline int DecodeConstantPoolLength(int instr) { + ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker); + return ((instr >> 4) & 0xfff0) | (instr & 0xf); +} // Number of registers in normal ARM mode. const int kNumRegisters = 16; // VFP support. const int kNumVFPSingleRegisters = 32; -const int kNumVFPDoubleRegisters = 16; +const int kNumVFPDoubleRegisters = 32; const int kNumVFPRegisters = kNumVFPSingleRegisters + kNumVFPDoubleRegisters; // PC is register 15. @@ -258,7 +267,8 @@ enum { kCoprocessorMask = 15 << 8, kOpCodeMask = 15 << 21, // In data-processing instructions. 
kImm24Mask = (1 << 24) - 1, - kOff12Mask = (1 << 12) - 1 + kOff12Mask = (1 << 12) - 1, + kOff8Mask = (1 << 8) - 1 }; @@ -455,6 +465,9 @@ extern const Instr kMovLrPc; // ldr rd, [pc, #offset] extern const Instr kLdrPCMask; extern const Instr kLdrPCPattern; +// vldr dd, [pc, #offset] +extern const Instr kVldrDPCMask; +extern const Instr kVldrDPCPattern; // blxcc rm extern const Instr kBlxRegMask; diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc index c2941be06d..e9a65b2b0f 100644 --- a/deps/v8/src/arm/debug-arm.cc +++ b/deps/v8/src/arm/debug-arm.cc @@ -161,7 +161,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm, #ifdef DEBUG __ RecordComment("// Calling from debug break to runtime - come in - over"); #endif - __ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments + __ mov(r0, Operand::Zero()); // no arguments __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate()))); CEntryStub ceb(1); diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc index 19667b9d5f..9bcc1ac14c 100644 --- a/deps/v8/src/arm/deoptimizer-arm.cc +++ b/deps/v8/src/arm/deoptimizer-arm.cc @@ -44,11 +44,14 @@ int Deoptimizer::patch_size() { } -void Deoptimizer::DeoptimizeFunction(JSFunction* function) { - HandleScope scope; +void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList( + JSFunction* function) { + Isolate* isolate = function->GetIsolate(); + HandleScope scope(isolate); AssertNoAllocation no_allocation; - if (!function->IsOptimized()) return; + ASSERT(function->IsOptimized()); + ASSERT(function->FunctionsInFunctionListShareSameCode()); // The optimized code is going to be patched, so we cannot use it // any more. Play safe and reset the whole cache. @@ -72,17 +75,17 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { for (int i = 0; i < deopt_data->DeoptCount(); i++) { if (deopt_data->Pc(i)->value() == -1) continue; Address call_address = code_start_address + deopt_data->Pc(i)->value(); - Address deopt_entry = GetDeoptimizationEntry(i, LAZY); + Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); // We need calls to have a predictable size in the unoptimized code, but // this is optimized code, so we don't have to have a predictable size. int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(deopt_entry, - RelocInfo::NONE); + RelocInfo::NONE32); int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); ASSERT(call_size_in_bytes <= patch_size()); CodePatcher patcher(call_address, call_size_in_words); - patcher.masm()->Call(deopt_entry, RelocInfo::NONE); + patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); ASSERT(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); ASSERT(call_address + patch_size() <= code->instruction_end()); @@ -91,8 +94,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { #endif } - Isolate* isolate = code->GetIsolate(); - // Add the deoptimizing code to the list. 
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); DeoptimizerData* data = isolate->deoptimizer_data(); @@ -114,7 +115,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { } -static const int32_t kBranchBeforeStackCheck = 0x2a000001; static const int32_t kBranchBeforeInterrupt = 0x5a000004; @@ -123,24 +123,21 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, Code* check_code, Code* replacement_code) { const int kInstrSize = Assembler::kInstrSize; - // The call of the stack guard check has the following form: - // e1 5d 00 0c cmp sp, <limit> - // 2a 00 00 01 bcs ok + // The back edge bookkeeping code matches the pattern: + // + // <decrement profiling counter> + // 2a 00 00 01 bpl ok // e5 9f c? ?? ldr ip, [pc, <stack guard address>] // e1 2f ff 3c blx ip ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp); ASSERT(Assembler::IsLdrPcImmediateOffset( Assembler::instr_at(pc_after - 2 * kInstrSize))); - if (FLAG_count_based_interrupts) { - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } else { - ASSERT_EQ(kBranchBeforeStackCheck, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } + ASSERT_EQ(kBranchBeforeInterrupt, + Memory::int32_at(pc_after - 3 * kInstrSize)); // We patch the code to the following form: - // e1 5d 00 0c cmp sp, <limit> + // + // <decrement profiling counter> // e1 a0 00 00 mov r0, r0 (NOP) // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>] // e1 2f ff 3c blx ip @@ -177,15 +174,9 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code, // Replace NOP with conditional jump. CodePatcher patcher(pc_after - 3 * kInstrSize, 1); - if (FLAG_count_based_interrupts) { - patcher.masm()->b(+16, pl); - ASSERT_EQ(kBranchBeforeInterrupt, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } else { - patcher.masm()->b(+4, cs); - ASSERT_EQ(kBranchBeforeStackCheck, - Memory::int32_at(pc_after - 3 * kInstrSize)); - } + patcher.masm()->b(+16, pl); + ASSERT_EQ(kBranchBeforeInterrupt, + Memory::int32_at(pc_after - 3 * kInstrSize)); // Replace the stack check address in the constant pool // with the entry address of the replacement code. 
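Aside: PatchStackCheckCodeAt/RevertStackCheckCodeAt above toggle a single ARM instruction at the back edge between the "bpl ok" guard (kBranchBeforeInterrupt, 0x5a000004) and a NOP ("e1 a0 00 00 mov r0, r0"). A hedged, standalone C++ sketch of that idea; the function and constant names here are illustrative only, not the V8 interface:

    #include <cstdint>

    const uint32_t kBplOverInterrupt = 0x5a000004;  // "bpl +16", as asserted above
    const uint32_t kArmNop           = 0xe1a00000;  // "mov r0, r0"

    // Flip the back-edge guard instruction on or off. A real patcher must also
    // flush the instruction cache for the patched word afterwards.
    void PatchBackEdge(uint32_t* guard_instr, bool replace_with_nop) {
      *guard_instr = replace_with_nop ? kArmNop : kBplOverInterrupt;
    }

The surrounding constant-pool slot (the "ldr ip, [pc, ...]" target) is then rewritten to point at either the interrupt check or the on-stack replacement entry, as the hunks above show.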
@@ -222,7 +213,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) { void Deoptimizer::DoComputeOsrOutputFrame() { DeoptimizationInputData* data = DeoptimizationInputData::cast( - optimized_code_->deoptimization_data()); + compiled_code_->deoptimization_data()); unsigned ast_id = data->OsrAstId()->value(); int bailout_id = LookupBailoutId(data, BailoutId(ast_id)); @@ -256,7 +247,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned input_frame_size = input_->GetFrameSize(); ASSERT(fixed_size + height_in_bytes == input_frame_size); - unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize; + unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize; unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value(); unsigned outgoing_size = outgoing_height * kPointerSize; unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size; @@ -348,7 +339,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned pc_offset = data->OsrPcOffset()->value(); uint32_t pc = reinterpret_cast<uint32_t>( - optimized_code_->entry() + pc_offset); + compiled_code_->entry() + pc_offset); output_[0]->SetPc(pc); } Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR); @@ -365,342 +356,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() { } -void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, - int frame_index) { - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); - - // Arguments adaptor can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - uint32_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. 
- output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // A marker value is used in place of the context. - output_offset -= kPointerSize; - intptr_t context = reinterpret_cast<intptr_t>( - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - output_frame->SetFrameSlot(output_offset, context); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n", - top_address + output_offset, output_offset, context); - } - - // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. - output_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - ASSERT(0 == output_offset); - - Builtins* builtins = isolate_->builtins(); - Code* adaptor_trampoline = - builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); - uint32_t pc = reinterpret_cast<uint32_t>( - adaptor_trampoline->instruction_start() + - isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); - output_frame->SetPc(pc); -} - - -void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, - int frame_index) { - Builtins* builtins = isolate_->builtins(); - Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric); - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating construct stub => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = 8 * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::CONSTRUCT); - - // Construct stub can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - uint32_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. 
- output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n", - top_address + output_offset, output_offset, value); - } - - // The output frame reflects a JSConstructStubGeneric frame. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(construct_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. - output_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - // Constructor function being invoked by the stub. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n", - top_address + output_offset, output_offset, value); - } - - // The newly allocated object was passed as receiver in the artificial - // constructor stub environment created by HEnvironment::CopyForInlining(). - output_offset -= kPointerSize; - value = output_frame->GetFrameSlot(output_frame_size - kPointerSize); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n", - top_address + output_offset, output_offset, value); - } - - ASSERT(0 == output_offset); - - uint32_t pc = reinterpret_cast<uint32_t>( - construct_stub->instruction_start() + - isolate_->heap()->construct_stub_deopt_pc_offset()->value()); - output_frame->SetPc(pc); -} - - -void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, - int frame_index, - bool is_setter_stub_frame) { - JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next())); - // The receiver (and the implicit return value, if any) are expected in - // registers by the LoadIC/StoreIC, so they don't belong to the output stack - // frame. This means that we have to use a height of 0. 
- unsigned height = 0; - unsigned height_in_bytes = height * kPointerSize; - const char* kind = is_setter_stub_frame ? "setter" : "getter"; - if (FLAG_trace_deopt) { - PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes); - } - - // We need 5 stack entries from StackFrame::INTERNAL (lr, fp, cp, frame type, - // code object, see MacroAssembler::EnterFrame). For a setter stub frames we - // need one additional entry for the implicit return value, see - // StoreStubCompiler::CompileStoreViaSetter. - unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0); - unsigned fixed_frame_size = fixed_frame_entries * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, accessor); - output_frame->SetFrameType(StackFrame::INTERNAL); - - // A frame for an accessor stub can not be the topmost or bottommost one. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous frame's top and - // this frame's size. - uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - unsigned output_offset = output_frame_size; - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; function (%s sentinel)\n", - top_address + output_offset, output_offset, value, kind); - } - - // Get Code object from accessor stub. - output_offset -= kPointerSize; - Builtins::Name name = is_setter_stub_frame ? - Builtins::kStoreIC_Setter_ForDeopt : - Builtins::kLoadIC_Getter_ForDeopt; - Code* accessor_stub = isolate_->builtins()->builtin(name); - value = reinterpret_cast<intptr_t>(accessor_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Skip receiver. 
- Translation::Opcode opcode = - static_cast<Translation::Opcode>(iterator->Next()); - iterator->Skip(Translation::NumberOfOperandsFor(opcode)); - - if (is_setter_stub_frame) { - // The implicit return value was part of the artificial setter stub - // environment. - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - ASSERT(0 == output_offset); - - Smi* offset = is_setter_stub_frame ? - isolate_->heap()->setter_stub_deopt_pc_offset() : - isolate_->heap()->getter_stub_deopt_pc_offset(); - intptr_t pc = reinterpret_cast<intptr_t>( - accessor_stub->instruction_start() + offset->value()); - output_frame->SetPc(pc); -} - - // This code is very similar to ia32 code, but relies on register names (fp, sp) // and how the frame is laid out. void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, @@ -718,7 +373,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, } unsigned height = iterator->Next(); unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" translating "); function->PrintName(); PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes); @@ -782,7 +437,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, value = output_[frame_index - 1]->GetPc(); } output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", top_address + output_offset, output_offset, value); } @@ -805,7 +460,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, if (is_topmost) { output_frame->SetRegister(fp.code(), fp_value); } - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", fp_value, output_offset, value); } @@ -823,7 +478,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, output_frame->SetFrameSlot(output_offset, value); output_frame->SetContext(value); if (is_topmost) output_frame->SetRegister(cp.code(), value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", top_address + output_offset, output_offset, value); } @@ -836,7 +491,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, // input frame. 
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", top_address + output_offset, output_offset, value); } @@ -888,7 +543,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { input_->SetDoubleRegister(i, 0.0); } @@ -899,6 +554,28 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } +void Deoptimizer::SetPlatformCompiledStubRegisters( + FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { + ApiFunction function(descriptor->deoptimization_handler_); + ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); + intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); + int params = descriptor->register_param_count_; + if (descriptor->stack_parameter_count_ != NULL) { + params++; + } + output_frame->SetRegister(r0.code(), params); + output_frame->SetRegister(r1.code(), handler); +} + + +void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { + for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { + double double_value = input_->GetDoubleRegister(i); + output_frame->SetDoubleRegister(i, double_value); + } +} + + #define __ masm()-> // This code tries to be close to ia32 code so that any changes can be @@ -908,7 +585,6 @@ void Deoptimizer::EntryGenerator::Generate() { Isolate* isolate = masm()->isolate(); - CpuFeatures::Scope scope(VFP3); // Save all general purpose registers before messing with them. const int kNumberOfRegisters = Register::kNumRegisters; @@ -916,23 +592,25 @@ void Deoptimizer::EntryGenerator::Generate() { RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit(); const int kDoubleRegsSize = - kDoubleSize * DwVfpRegister::kNumAllocatableRegisters; - - // Save all VFP registers before messing with them. - DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0); - DwVfpRegister last = - DwVfpRegister::FromAllocationIndex( - DwVfpRegister::kNumAllocatableRegisters - 1); - ASSERT(last.code() > first.code()); - ASSERT((last.code() - first.code()) == - (DwVfpRegister::kNumAllocatableRegisters - 1)); -#ifdef DEBUG - for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) { - ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) && - (DwVfpRegister::FromAllocationIndex(i).code() >= first.code())); + kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; + + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + // Save all allocatable VFP registers before messing with them. + ASSERT(kDoubleRegZero.code() == 14); + ASSERT(kScratchDoubleReg.code() == 15); + + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(ip); + + // Push registers d0-d13, and possibly d16-d31, on the stack. + // If d16-d31 are not pushed, decrease the stack pointer instead. 
+ __ vstm(db_w, sp, d16, d31, ne); + __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); + __ vstm(db_w, sp, d0, d13); + } else { + __ sub(sp, sp, Operand(kDoubleRegsSize)); } -#endif - __ vstm(db_w, sp, first, last); // Push all 16 registers (needed to populate FrameDescription::registers_). // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps @@ -949,7 +627,7 @@ void Deoptimizer::EntryGenerator::Generate() { // address for lazy deoptimization) and compute the fp-to-sp delta in // register r4. if (type() == EAGER) { - __ mov(r3, Operand(0)); + __ mov(r3, Operand::Zero()); // Correct one word for bailout id. __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); } else if (type() == OSR) { @@ -991,14 +669,17 @@ void Deoptimizer::EntryGenerator::Generate() { __ str(r2, MemOperand(r1, offset)); } - // Copy VFP registers to - // double_registers_[DoubleRegister::kNumAllocatableRegisters] - int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; - __ vldr(d0, sp, src_offset); - __ vstr(d0, r1, dst_offset); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + // Copy VFP registers to + // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] + int double_regs_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; + __ vldr(d0, sp, src_offset); + __ vstr(d0, r1, dst_offset); + } } // Remove the bailout id, eventually return address, and the saved registers @@ -1019,10 +700,13 @@ void Deoptimizer::EntryGenerator::Generate() { // frame description. __ add(r3, r1, Operand(FrameDescription::frame_content_offset())); Label pop_loop; + Label pop_loop_header; + __ b(&pop_loop_header); __ bind(&pop_loop); __ pop(r4); __ str(r4, MemOperand(r3, 0)); __ add(r3, r3, Operand(sizeof(uint32_t))); + __ bind(&pop_loop_header); __ cmp(r2, sp); __ b(ne, &pop_loop); @@ -1039,27 +723,49 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(r0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. - Label outer_push_loop, inner_push_loop; - // Outer loop state: r0 = current "FrameDescription** output_", + Label outer_push_loop, inner_push_loop, + outer_loop_header, inner_loop_header; + // Outer loop state: r4 = current "FrameDescription** output_", // r1 = one past the last FrameDescription**. __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset())); - __ ldr(r0, MemOperand(r0, Deoptimizer::output_offset())); // r0 is output_. - __ add(r1, r0, Operand(r1, LSL, 2)); + __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_. + __ add(r1, r4, Operand(r1, LSL, 2)); + __ jmp(&outer_loop_header); __ bind(&outer_push_loop); // Inner loop state: r2 = current FrameDescription*, r3 = loop index. 
- __ ldr(r2, MemOperand(r0, 0)); // output_[ix] + __ ldr(r2, MemOperand(r4, 0)); // output_[ix] __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); __ bind(&inner_push_loop); __ sub(r3, r3, Operand(sizeof(uint32_t))); __ add(r6, r2, Operand(r3)); __ ldr(r7, MemOperand(r6, FrameDescription::frame_content_offset())); __ push(r7); - __ cmp(r3, Operand(0)); + __ bind(&inner_loop_header); + __ cmp(r3, Operand::Zero()); __ b(ne, &inner_push_loop); // test for gt? - __ add(r0, r0, Operand(kPointerSize)); - __ cmp(r0, r1); + __ add(r4, r4, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ cmp(r4, r1); __ b(lt, &outer_push_loop); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + // Check CPU flags for number of registers, setting the Z condition flag. + __ CheckFor32DRegs(ip); + + __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); + int src_offset = FrameDescription::double_registers_offset(); + for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { + if (i == kDoubleRegZero.code()) continue; + if (i == kScratchDoubleReg.code()) continue; + + const DwVfpRegister reg = DwVfpRegister::from_code(i); + __ vldr(reg, r1, src_offset, i < 16 ? al : ne); + src_offset += kDoubleSize; + } + } + // Push state, pc, and continuation from the last output frame. if (type() != OSR) { __ ldr(r6, MemOperand(r2, FrameDescription::state_offset())); diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc index 3c94a46e62..dec62b341a 100644 --- a/deps/v8/src/arm/disasm-arm.cc +++ b/deps/v8/src/arm/disasm-arm.cc @@ -192,7 +192,7 @@ void Decoder::PrintSRegister(int reg) { Print(VFPRegisters::Name(reg, false)); } -// Print the VFP D register name according to the active name converter. +// Print the VFP D register name according to the active name converter. void Decoder::PrintDRegister(int reg) { Print(VFPRegisters::Name(reg, true)); } @@ -381,7 +381,16 @@ int Decoder::FormatVFPRegister(Instruction* instr, const char* format) { } else if (format[1] == 'm') { reg = instr->VFPMRegValue(precision); } else if (format[1] == 'd') { - reg = instr->VFPDRegValue(precision); + if ((instr->TypeValue() == 7) && + (instr->Bit(24) == 0x0) && + (instr->Bits(11, 9) == 0x5) && + (instr->Bit(4) == 0x1)) { + // vmov.32 has Vd in a different place. + reg = instr->Bits(19, 16) | (instr->Bit(7) << 4); + } else { + reg = instr->VFPDRegValue(precision); + } + if (format[2] == '+') { int immed8 = instr->Immed8Value(); if (format[0] == 'S') reg += immed8 - 1; @@ -1098,6 +1107,8 @@ int Decoder::DecodeType7(Instruction* instr) { // Dd = vadd(Dn, Dm) // Dd = vsub(Dn, Dm) // Dd = vmul(Dn, Dm) +// Dd = vmla(Dn, Dm) +// Dd = vmls(Dn, Dm) // Dd = vdiv(Dn, Dm) // vcmp(Dd, Dm) // vmrs @@ -1113,16 +1124,16 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x1)) { // vmov register to register. 
if (instr->SzValue() == 0x1) { - Format(instr, "vmov.f64'cond 'Dd, 'Dm"); + Format(instr, "vmov'cond.f64 'Dd, 'Dm"); } else { - Format(instr, "vmov.f32'cond 'Sd, 'Sm"); + Format(instr, "vmov'cond.f32 'Sd, 'Sm"); } } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) { // vabs - Format(instr, "vabs.f64'cond 'Dd, 'Dm"); + Format(instr, "vabs'cond.f64 'Dd, 'Dm"); } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) { // vneg - Format(instr, "vneg.f64'cond 'Dd, 'Dm"); + Format(instr, "vneg'cond.f64 'Dd, 'Dm"); } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) { DecodeVCVTBetweenDoubleAndSingle(instr); } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) { @@ -1134,10 +1145,10 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { (instr->Opc3Value() & 0x1)) { DecodeVCMP(instr); } else if (((instr->Opc2Value() == 0x1)) && (instr->Opc3Value() == 0x3)) { - Format(instr, "vsqrt.f64'cond 'Dd, 'Dm"); + Format(instr, "vsqrt'cond.f64 'Dd, 'Dm"); } else if (instr->Opc3Value() == 0x0) { if (instr->SzValue() == 0x1) { - Format(instr, "vmov.f64'cond 'Dd, 'd"); + Format(instr, "vmov'cond.f64 'Dd, 'd"); } else { Unknown(instr); // Not used by V8. } @@ -1147,22 +1158,34 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { } else if (instr->Opc1Value() == 0x3) { if (instr->SzValue() == 0x1) { if (instr->Opc3Value() & 0x1) { - Format(instr, "vsub.f64'cond 'Dd, 'Dn, 'Dm"); + Format(instr, "vsub'cond.f64 'Dd, 'Dn, 'Dm"); } else { - Format(instr, "vadd.f64'cond 'Dd, 'Dn, 'Dm"); + Format(instr, "vadd'cond.f64 'Dd, 'Dn, 'Dm"); } } else { Unknown(instr); // Not used by V8. } } else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) { if (instr->SzValue() == 0x1) { - Format(instr, "vmul.f64'cond 'Dd, 'Dn, 'Dm"); + Format(instr, "vmul'cond.f64 'Dd, 'Dn, 'Dm"); + } else { + Unknown(instr); // Not used by V8. + } + } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) { + if (instr->SzValue() == 0x1) { + Format(instr, "vmla'cond.f64 'Dd, 'Dn, 'Dm"); + } else { + Unknown(instr); // Not used by V8. + } + } else if ((instr->Opc1Value() == 0x0) && (instr->Opc3Value() & 0x1)) { + if (instr->SzValue() == 0x1) { + Format(instr, "vmls'cond.f64 'Dd, 'Dn, 'Dm"); } else { Unknown(instr); // Not used by V8. } } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) { if (instr->SzValue() == 0x1) { - Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm"); + Format(instr, "vdiv'cond.f64 'Dd, 'Dn, 'Dm"); } else { Unknown(instr); // Not used by V8. 
} @@ -1173,6 +1196,14 @@ void Decoder::DecodeTypeVFP(Instruction* instr) { if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x0)) { DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr); + } else if ((instr->VLValue() == 0x0) && + (instr->VCValue() == 0x1) && + (instr->Bit(23) == 0x0)) { + if (instr->Bit(21) == 0x0) { + Format(instr, "vmov'cond.32 'Dd[0], 'rt"); + } else { + Format(instr, "vmov'cond.32 'Dd[1], 'rt"); + } } else if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x7) && (instr->Bits(19, 16) == 0x1)) { @@ -1220,9 +1251,9 @@ void Decoder::DecodeVCMP(Instruction* instr) { if (dp_operation && !raise_exception_for_qnan) { if (instr->Opc2Value() == 0x4) { - Format(instr, "vcmp.f64'cond 'Dd, 'Dm"); + Format(instr, "vcmp'cond.f64 'Dd, 'Dm"); } else if (instr->Opc2Value() == 0x5) { - Format(instr, "vcmp.f64'cond 'Dd, #0.0"); + Format(instr, "vcmp'cond.f64 'Dd, #0.0"); } else { Unknown(instr); // invalid } @@ -1239,9 +1270,9 @@ void Decoder::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) { bool double_to_single = (instr->SzValue() == 1); if (double_to_single) { - Format(instr, "vcvt.f32.f64'cond 'Sd, 'Dm"); + Format(instr, "vcvt'cond.f32.f64 'Sd, 'Dm"); } else { - Format(instr, "vcvt.f64.f32'cond 'Dd, 'Sm"); + Format(instr, "vcvt'cond.f64.f32 'Dd, 'Sm"); } } @@ -1258,15 +1289,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { if (dp_operation) { if (unsigned_integer) { - Format(instr, "vcvt.u32.f64'cond 'Sd, 'Dm"); + Format(instr, "vcvt'cond.u32.f64 'Sd, 'Dm"); } else { - Format(instr, "vcvt.s32.f64'cond 'Sd, 'Dm"); + Format(instr, "vcvt'cond.s32.f64 'Sd, 'Dm"); } } else { if (unsigned_integer) { - Format(instr, "vcvt.u32.f32'cond 'Sd, 'Sm"); + Format(instr, "vcvt'cond.u32.f32 'Sd, 'Sm"); } else { - Format(instr, "vcvt.s32.f32'cond 'Sd, 'Sm"); + Format(instr, "vcvt'cond.s32.f32 'Sd, 'Sm"); } } } else { @@ -1274,15 +1305,15 @@ void Decoder::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { if (dp_operation) { if (unsigned_integer) { - Format(instr, "vcvt.f64.u32'cond 'Dd, 'Sm"); + Format(instr, "vcvt'cond.f64.u32 'Dd, 'Sm"); } else { - Format(instr, "vcvt.f64.s32'cond 'Dd, 'Sm"); + Format(instr, "vcvt'cond.f64.s32 'Dd, 'Sm"); } } else { if (unsigned_integer) { - Format(instr, "vcvt.f32.u32'cond 'Sd, 'Sm"); + Format(instr, "vcvt'cond.f32.u32 'Sd, 'Sm"); } else { - Format(instr, "vcvt.f32.s32'cond 'Sd, 'Sm"); + Format(instr, "vcvt'cond.f32.s32 'Sd, 'Sm"); } } } @@ -1336,7 +1367,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { switch (instr->OpcodeValue()) { case 0x2: // Load and store double to two GP registers - if (instr->Bits(7, 4) != 0x1) { + if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) { Unknown(instr); // Not used by V8. 
} else if (instr->HasL()) { Format(instr, "vmov'cond 'rt, 'rn, 'Dm"); @@ -1345,6 +1376,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { } break; case 0x8: + case 0xA: if (instr->HasL()) { Format(instr, "vldr'cond 'Dd, ['rn - 4*'imm08@00]"); } else { @@ -1352,6 +1384,7 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { } break; case 0xC: + case 0xE: if (instr->HasL()) { Format(instr, "vldr'cond 'Dd, ['rn + 4*'imm08@00]"); } else { @@ -1360,7 +1393,10 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) { break; case 0x4: case 0x5: - case 0x9: { + case 0x6: + case 0x7: + case 0x9: + case 0xB: { bool to_vfp_register = (instr->VLValue() == 0x1); if (to_vfp_register) { Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}"); @@ -1388,7 +1424,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) { int Decoder::ConstantPoolSizeAt(byte* instr_ptr) { if (IsConstantPoolAt(instr_ptr)) { int instruction_bits = *(reinterpret_cast<int*>(instr_ptr)); - return instruction_bits & kConstantPoolLengthMask; + return DecodeConstantPoolLength(instruction_bits); } else { return -1; } @@ -1410,8 +1446,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) { if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) { out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_, "constant pool begin (length %d)", - instruction_bits & - kConstantPoolLengthMask); + DecodeConstantPoolLength(instruction_bits)); return Instruction::kInstrSize; } switch (instr->TypeValue()) { diff --git a/deps/v8/src/arm/frames-arm.cc b/deps/v8/src/arm/frames-arm.cc index a805d280ca..5cbe77afc2 100644 --- a/deps/v8/src/arm/frames-arm.cc +++ b/deps/v8/src/arm/frames-arm.cc @@ -29,7 +29,12 @@ #if defined(V8_TARGET_ARCH_ARM) +#include "assembler.h" +#include "assembler-arm.h" +#include "assembler-arm-inl.h" #include "frames-inl.h" +#include "macro-assembler.h" +#include "macro-assembler-arm.h" namespace v8 { namespace internal { @@ -40,6 +45,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) { } +Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } +Register StubFailureTrampolineFrame::context_register() { return cp; } + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_ARM diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h index a10acd0687..30f4057fae 100644 --- a/deps/v8/src/arm/frames-arm.h +++ b/deps/v8/src/arm/frames-arm.h @@ -134,20 +134,6 @@ class ExitFrameConstants : public AllStatic { }; -class StandardFrameConstants : public AllStatic { - public: - // Fixed part of the frame consists of return address, caller fp, - // context and function. - static const int kFixedFrameSize = 4 * kPointerSize; - static const int kExpressionsOffset = -3 * kPointerSize; - static const int kMarkerOffset = -2 * kPointerSize; - static const int kContextOffset = -1 * kPointerSize; - static const int kCallerFPOffset = 0 * kPointerSize; - static const int kCallerPCOffset = 1 * kPointerSize; - static const int kCallerSPOffset = 2 * kPointerSize; -}; - - class JavaScriptFrameConstants : public AllStatic { public: // FP-relative. @@ -163,14 +149,30 @@ class JavaScriptFrameConstants : public AllStatic { class ArgumentsAdaptorFrameConstants : public AllStatic { public: + // FP-relative. static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + static const int kFrameSize = StandardFrameConstants::kFixedFrameSize + kPointerSize; }; +class ConstructFrameConstants : public AllStatic { + public: + // FP-relative. 
+ static const int kImplicitReceiverOffset = -6 * kPointerSize; + static const int kConstructorOffset = -5 * kPointerSize; + static const int kLengthOffset = -4 * kPointerSize; + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize; +}; + + class InternalFrameConstants : public AllStatic { public: + // FP-relative. static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; }; diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc index be8228377a..1df1649d3d 100644 --- a/deps/v8/src/arm/full-codegen-arm.cc +++ b/deps/v8/src/arm/full-codegen-arm.cc @@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() { handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell( - Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget))); + Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); Comment cmnt(masm_, "[ function compiled by full code generator"); @@ -138,7 +138,7 @@ void FullCodeGenerator::Generate() { #ifdef DEBUG if (strlen(FLAG_stop_at) > 0 && - info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { + info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { __ stop("stop-at"); } #endif @@ -149,7 +149,7 @@ void FullCodeGenerator::Generate() { // function calls. if (!info->is_classic_mode() || info->is_native()) { Label ok; - __ cmp(r5, Operand(0)); + __ cmp(r5, Operand::Zero()); __ b(eq, &ok); int receiver_offset = info->scope()->num_parameters() * kPointerSize; __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); @@ -164,14 +164,19 @@ void FullCodeGenerator::Generate() { int locals_count = info->scope()->num_stack_slots(); - __ Push(lr, fp, cp, r1); - if (locals_count > 0) { + info->set_prologue_offset(masm_->pc_offset()); + { + PredictableCodeSizeScope predictible_code_size_scope( + masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); // Load undefined value here, so the value is ready for the loop // below. __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + // Adjust FP to point to saved FP. + __ add(fp, sp, Operand(2 * kPointerSize)); } - // Adjust fp to point to caller's fp. - __ add(fp, sp, Operand(2 * kPointerSize)); { Comment cmnt(masm_, "[ Allocate locals"); for (int i = 0; i < locals_count; i++) { @@ -287,7 +292,7 @@ void FullCodeGenerator::Generate() { __ LoadRoot(ip, Heap::kStackLimitRootIndex); __ cmp(sp, Operand(ip)); __ b(hs, &ok); - PredictableCodeSizeScope predictable(masm_); + PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); StackCheckStub stub; __ CallStub(&stub); __ bind(&ok); @@ -342,42 +347,31 @@ void FullCodeGenerator::EmitProfilingCounterReset() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, - Label* back_edge_target) { - Comment cmnt(masm_, "[ Stack check"); +void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target) { + Comment cmnt(masm_, "[ Back edge bookkeeping"); // Block literal pools whilst emitting stack check code. 
Assembler::BlockConstPoolScope block_const_pool(masm_); Label ok; - if (FLAG_count_based_interrupts) { - int weight = 1; - if (FLAG_weighted_back_edges) { - ASSERT(back_edge_target->is_bound()); - int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); - weight = Min(kMaxBackEdgeWeight, - Max(1, distance / kBackEdgeDistanceUnit)); - } - EmitProfilingCounterDecrement(weight); - __ b(pl, &ok); - InterruptStub stub; - __ CallStub(&stub); - } else { - __ LoadRoot(ip, Heap::kStackLimitRootIndex); - __ cmp(sp, Operand(ip)); - __ b(hs, &ok); - PredictableCodeSizeScope predictable(masm_); - StackCheckStub stub; - __ CallStub(&stub); + int weight = 1; + if (FLAG_weighted_back_edges) { + ASSERT(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kBackEdgeDistanceUnit)); } + EmitProfilingCounterDecrement(weight); + __ b(pl, &ok); + InterruptStub stub; + __ CallStub(&stub); // Record a mapping of this PC offset to the OSR id. This is used to find // the AST id from the unoptimized code in order to use it as a key into // the deoptimization input data found in the optimized code. - RecordStackCheck(stmt->OsrEntryId()); + RecordBackEdge(stmt->OsrEntryId()); - if (FLAG_count_based_interrupts) { - EmitProfilingCounterReset(); - } + EmitProfilingCounterReset(); __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); @@ -439,7 +433,8 @@ void FullCodeGenerator::EmitReturnSequence() { // tool from instrumenting as we rely on the code size here. int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize; CodeGenerator::RecordPositions(masm_, function()->end_position() - 1); - PredictableCodeSizeScope predictable(masm_); + // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5! + PredictableCodeSizeScope predictable(masm_, -1); __ RecordJSReturn(); masm_->mov(sp, fp); masm_->ldm(ia_w, sp, fp.bit() | lr.bit()); @@ -680,7 +675,7 @@ void FullCodeGenerator::DoTest(Expression* condition, Label* if_false, Label* fall_through) { ToBooleanStub stub(result_register()); - __ CallStub(&stub); + __ CallStub(&stub, condition->test_id()); __ tst(result_register(), result_register()); Split(ne, if_true, if_false, fall_through); } @@ -914,34 +909,33 @@ void FullCodeGenerator::VisitFunctionDeclaration( void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { - VariableProxy* proxy = declaration->proxy(); - Variable* variable = proxy->var(); - Handle<JSModule> instance = declaration->module()->interface()->Instance(); - ASSERT(!instance.is_null()); + Variable* variable = declaration->proxy()->var(); + ASSERT(variable->location() == Variable::CONTEXT); + ASSERT(variable->interface()->IsFrozen()); - switch (variable->location()) { - case Variable::UNALLOCATED: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - globals_->Add(variable->name(), zone()); - globals_->Add(instance, zone()); - Visit(declaration->module()); - break; - } + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); - case Variable::CONTEXT: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - EmitDebugCheckDeclarationContext(variable); - __ mov(r1, Operand(instance)); - __ str(r1, ContextOperand(cp, variable->index())); - Visit(declaration->module()); - break; - } + // Load instance object. 
+ __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope())); + __ ldr(r1, ContextOperand(r1, variable->interface()->Index())); + __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX)); - case Variable::PARAMETER: - case Variable::LOCAL: - case Variable::LOOKUP: - UNREACHABLE(); - } + // Assign it. + __ str(r1, ContextOperand(cp, variable->index())); + // We know that we have written a module, which is not a smi. + __ RecordWriteContextSlot(cp, + Context::SlotOffset(variable->index()), + r1, + r3, + kLRHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); + + // Traverse into body. + Visit(declaration->module()); } @@ -984,6 +978,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { } +void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { + // Call the runtime to declare the modules. + __ Push(descriptions); + __ CallRuntime(Runtime::kDeclareModules, 1); + // Return value is ignored. +} + + void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Comment cmnt(masm_, "[ SwitchStatement"); Breakable nested_statement(this, stmt); @@ -1033,11 +1035,11 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Record position before stub call for type feedback. SetSourcePosition(clause->position()); - Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); __ b(ne, &next_test); __ Drop(1); // Switch value is no longer needed. __ b(clause->body_target()); @@ -1162,7 +1164,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell( Handle<Object>( - Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker))); + Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker), + isolate())); RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell); __ LoadHeapObject(r1, cell); __ mov(r2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker))); @@ -1238,7 +1241,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ add(r0, r0, Operand(Smi::FromInt(1))); __ push(r0); - EmitStackCheck(stmt, &loop); + EmitBackEdgeBookkeeping(stmt, &loop); __ b(&loop); // Remove the pointers stored on the stack. 
@@ -1391,9 +1394,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == CONST || - local->mode() == CONST_HARMONY || - local->mode() == LET) { + if (local->mode() == LET || + local->mode() == CONST || + local->mode() == CONST_HARMONY) { __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); if (local->mode() == CONST) { __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); @@ -1544,7 +1547,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -1577,7 +1580,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) { void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Comment cmnt(masm_, "[ ObjectLiteral"); Handle<FixedArray> constant_properties = expr->constant_properties(); - __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset)); __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); __ mov(r1, Operand(constant_properties)); @@ -1588,12 +1591,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; __ mov(r0, Operand(Smi::FromInt(flags))); - __ Push(r3, r2, r1, r0); int properties_count = constant_properties->length() / 2; if (expr->depth() > 1) { + __ Push(r3, r2, r1, r0); __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (flags != ObjectLiteral::kFastElements || + } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ Push(r3, r2, r1, r0); __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); } else { FastCloneShallowObjectStub stub(properties_count); @@ -1627,7 +1631,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value())); // Fall through. case ObjectLiteral::Property::COMPUTED: - if (key->handle()->IsSymbol()) { + if (key->handle()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); __ mov(r2, Operand(key->handle())); @@ -1642,8 +1646,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { } break; } - // Fall through. - case ObjectLiteral::Property::PROTOTYPE: // Duplicate receiver on stack. __ ldr(r0, MemOperand(sp)); __ push(r0); @@ -1657,6 +1659,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { __ Drop(3); } break; + case ObjectLiteral::Property::PROTOTYPE: + // Duplicate receiver on stack. 
+ __ ldr(r0, MemOperand(sp)); + __ push(r0); + VisitForStackValue(value); + if (property->emit_store()) { + __ CallRuntime(Runtime::kSetPrototype, 2); + } else { + __ Drop(2); + } + break; + case ObjectLiteral::Property::GETTER: accessor_table.lookup(key)->second->getter = value; break; @@ -1717,7 +1731,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { if (has_fast_elements && constant_elements_values->map() == isolate()->heap()->fixed_cow_array_map()) { FastCloneShallowArrayStub stub( - FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, + DONT_TRACK_ALLOCATION_SITE, + length); __ CallStub(&stub); __ IncrementCounter( isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2); @@ -1728,10 +1744,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else { ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); - FastCloneShallowArrayStub::Mode mode = has_fast_elements - ? FastCloneShallowArrayStub::CLONE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); + FastCloneShallowArrayStub::Mode mode = + FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; + AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites + ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE; + + if (has_fast_elements) { + mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; + allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; + } + + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); __ CallStub(&stub); } @@ -1937,7 +1960,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ bind(&stub_call); BinaryOpStub stub(op, mode); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done); @@ -1989,7 +2012,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ mov(ip, Operand(scratch1, ASR, 31)); __ cmp(ip, Operand(scratch2)); __ b(ne, &stub_call); - __ cmp(scratch1, Operand(0)); + __ cmp(scratch1, Operand::Zero()); __ mov(right, Operand(scratch1), LeaveCC, ne); __ b(ne, &done); __ add(scratch2, right, Operand(left), SetCC); @@ -2021,7 +2044,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, __ pop(r1); BinaryOpStub stub(op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(r0); @@ -2029,7 +2052,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, void FullCodeGenerator::EmitAssignment(Expression* expr) { - // Invalid left-hand sides are rewritten to have a 'throw + // Invalid left-hand sides are rewritten by the parser to have a 'throw // ReferenceError' on the left-hand side. if (!expr->IsValidLeftHandSide()) { VisitForEffect(expr); @@ -2328,7 +2351,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { CallFunctionStub stub(arg_count, flags); __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); + __ CallStub(&stub, expr->CallFeedbackId()); RecordJSReturnSite(expr); // Restore context register. 
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2374,7 +2397,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { VariableProxy* proxy = callee->AsVariableProxy(); Property* property = callee->AsProperty(); - if (proxy != NULL && proxy->var()->is_possibly_eval()) { + if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) { // In a call to eval, we first call %ResolvePossiblyDirectEval to // resolve the function we need to call and the receiver of the // call. Then we call the resolved function using the given @@ -2523,7 +2546,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { __ mov(r2, Operand(cell)); CallConstructStub stub(RECORD_CALL_TARGET); - __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(r0); } @@ -2678,14 +2701,14 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ cmp(r2, ip); __ b(eq, if_false); - // Look for valueOf symbol in the descriptor array, and indicate false if + // Look for valueOf name in the descriptor array, and indicate false if // found. Since we omit an enumeration index check, if it is added via a // transition that shares its descriptor array, this is a false positive. Label entry, loop, done; // Skip loop if no descriptors are valid. __ NumberOfOwnDescriptors(r3, r1); - __ cmp(r3, Operand(0)); + __ cmp(r3, Operand::Zero()); __ b(eq, &done); __ LoadInstanceDescriptors(r1, r4); @@ -2703,10 +2726,10 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); // Loop through all the keys in the descriptor array. If one of these is the - // symbol valueOf the result is false. - // The use of ip to store the valueOf symbol asumes that it is not otherwise + // string "valueOf" the result is false. + // The use of ip to store the valueOf string assumes that it is not otherwise // used in the loop below. - __ mov(ip, Operand(FACTORY->value_of_symbol())); + __ mov(ip, Operand(FACTORY->value_of_string())); __ jmp(&entry); __ bind(&loop); __ ldr(r3, MemOperand(r4, 0)); @@ -2741,6 +2764,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( } +void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(r0, if_false); + __ CompareObjectType(r0, r1, r2, SYMBOL_TYPE); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT(args->length() == 1); @@ -2941,12 +2986,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { // Functions have class 'Function'. __ bind(&function); - __ LoadRoot(r0, Heap::kfunction_class_symbolRootIndex); + __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex); __ jmp(&done); // Objects with a non-function constructor have class 'Object'. 
__ bind(&non_function_constructor); - __ LoadRoot(r0, Heap::kObject_symbolRootIndex); + __ LoadRoot(r0, Heap::kObject_stringRootIndex); __ jmp(&done); // Non-JS objects have class null. @@ -3008,7 +3053,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset)); __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm(), VFP2); // 0x41300000 is the top half of 1.0 x 2^20 as a double. // Create this constant using mov/orr to avoid PC relative load. __ mov(r1, Operand(0x41000000)); @@ -3016,7 +3061,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { // Move 0x41300000xxxxxxxx (x = random bits) to VFP. __ vmov(d7, r0, r1); // Move 0x4130000000000000 to VFP. - __ mov(r0, Operand(0, RelocInfo::NONE)); + __ mov(r0, Operand::Zero()); __ vmov(d8, r0, r1); // Subtract and store the result in the heap number. __ vsub(d7, d7, d8); @@ -3129,6 +3174,39 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } +void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(r2); + __ pop(r1); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::ONE_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2); + context()->Plug(r0); +} + + +void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(r2); + __ pop(r1); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::TWO_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2); + context()->Plug(r0); +} + + + void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList<Expression*>* args = expr->arguments(); @@ -3278,7 +3356,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { __ bind(&index_out_of_range); // When the index is out of range, the spec requires us to return // the empty string. - __ LoadRoot(result, Heap::kEmptyStringRootIndex); + __ LoadRoot(result, Heap::kempty_stringRootIndex); __ jmp(&done); __ bind(&need_conversion); @@ -3587,7 +3665,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); __ SmiUntag(array_length, SetCC); __ b(ne, &non_trivial_array); - __ LoadRoot(r0, Heap::kEmptyStringRootIndex); + __ LoadRoot(r0, Heap::kempty_stringRootIndex); __ b(&done); __ bind(&non_trivial_array); @@ -3599,7 +3677,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Check that all array elements are sequential ASCII strings, and // accumulate the sum of their lengths, as a smi-encoded value. - __ mov(string_length, Operand(0)); + __ mov(string_length, Operand::Zero()); __ add(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); @@ -3612,7 +3690,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // element: Current array element. 
// elements_end: Array end. if (generate_debug_code_) { - __ cmp(array_length, Operand(0)); + __ cmp(array_length, Operand::Zero()); __ Assert(gt, "No empty arrays here in EmitFastAsciiArrayJoin"); } __ bind(&loop); @@ -3621,7 +3699,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); - __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); __ add(string_length, string_length, Operand(scratch1), SetCC); __ b(vs, &bailout); __ cmp(element, elements_end); @@ -3650,12 +3728,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Add (separator length times array_length) - separator length to the // string_length to get the length of the result string. array_length is not // smi but the other values are, so the result is a smi - __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ sub(string_length, string_length, Operand(scratch1)); __ smull(scratch2, ip, array_length, scratch1); // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are // zero. - __ cmp(ip, Operand(0)); + __ cmp(ip, Operand::Zero()); __ b(ne, &bailout); __ tst(scratch2, Operand(0x80000000)); __ b(ne, &bailout); @@ -3688,10 +3766,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { array_length = no_reg; __ add(result_pos, result, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // Check the length of the separator. - __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ cmp(scratch1, Operand(Smi::FromInt(1))); __ b(eq, &one_char_separator); __ b(gt, &long_separator); @@ -3707,7 +3785,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(string, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &empty_separator_loop); // End while (element < elements_end). @@ -3717,7 +3797,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // One-character separator case __ bind(&one_char_separator); // Replace separator with its ASCII character value. 
- __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize)); + __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); // Jump into the loop after the code that copies the separator, so the first // element is not preceded by a separator __ jmp(&one_char_separator_loop_entry); @@ -3737,7 +3817,9 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(string, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &one_char_separator_loop); // End while (element < elements_end). @@ -3758,14 +3840,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiUntag(string_length); __ add(string, separator, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ bind(&long_separator); __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(string, + string, + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ cmp(element, elements_end); __ b(lt, &long_separator_loop); // End while (element < elements_end). @@ -3964,7 +4048,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, // accumulator register r0. VisitForAccumulatorValue(expr->expression()); SetSourcePosition(expr->position()); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->UnaryOperationFeedbackId()); context()->Plug(r0); } @@ -4070,13 +4154,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { // Call stub. Undo operation first. __ sub(r0, r0, Operand(Smi::FromInt(count_value))); } - __ mov(r1, Operand(Smi::FromInt(count_value))); + __ mov(r1, r0); + __ mov(r0, Operand(Smi::FromInt(count_value))); // Record position before stub call. SetSourcePosition(expr->position()); BinaryOpStub stub(Token::ADD, NO_OVERWRITE); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId()); + CallIC(stub.GetCode(isolate()), + RelocInfo::CODE_TARGET, + expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4191,13 +4278,13 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_symbol())) { + if (check->Equals(isolate()->heap()->number_string())) { __ JumpIfSmi(r0, if_true); __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(r0, ip); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_symbol())) { + } else if (check->Equals(isolate()->heap()->string_string())) { __ JumpIfSmi(r0, if_false); // Check for undetectable objects => false. 
__ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE); @@ -4205,16 +4292,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); __ tst(r1, Operand(1 << Map::kIsUndetectable)); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_symbol())) { + } else if (check->Equals(isolate()->heap()->boolean_string())) { __ CompareRoot(r0, Heap::kTrueValueRootIndex); __ b(eq, if_true); __ CompareRoot(r0, Heap::kFalseValueRootIndex); Split(eq, if_true, if_false, fall_through); } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_symbol())) { + check->Equals(isolate()->heap()->null_string())) { __ CompareRoot(r0, Heap::kNullValueRootIndex); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_symbol())) { + } else if (check->Equals(isolate()->heap()->undefined_string())) { __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); __ b(eq, if_true); __ JumpIfSmi(r0, if_false); @@ -4224,19 +4311,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ tst(r1, Operand(1 << Map::kIsUndetectable)); Split(ne, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_symbol())) { + } else if (check->Equals(isolate()->heap()->function_string())) { __ JumpIfSmi(r0, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); __ b(eq, if_true); __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE)); Split(eq, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_symbol())) { + } else if (check->Equals(isolate()->heap()->object_string())) { __ JumpIfSmi(r0, if_false); if (!FLAG_harmony_typeof) { __ CompareRoot(r0, Heap::kNullValueRootIndex); __ b(eq, if_true); } + if (FLAG_harmony_symbols) { + __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE); + __ b(eq, if_true); + } // Check for JS objects => true. __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); __ b(lt, if_false); @@ -4295,29 +4386,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); - Condition cond = eq; - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - cond = eq; - break; - case Token::LT: - cond = lt; - break; - case Token::GT: - cond = gt; - break; - case Token::LTE: - cond = le; - break; - case Token::GTE: - cond = ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } + Condition cond = CompareIC::ComputeCondition(op); __ pop(r1); bool inline_smi_code = ShouldInlineSmiCase(op); @@ -4333,11 +4402,11 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { // Record position and call the compare IC. 
SetSourcePosition(expr->position()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); patch_site.EmitPatchInfo(); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); Split(cond, if_true, if_false, fall_through); } } diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc index 48395897da..84a11b6144 100644 --- a/deps/v8/src/arm/ic-arm.cc +++ b/deps/v8/src/arm/ic-arm.cc @@ -64,12 +64,12 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // Generated code falls through if the receiver is a regular non-global // JS object with slow properties and no interceptors. -static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register elements, - Register t0, - Register t1, - Label* miss) { +static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, + Register receiver, + Register elements, + Register t0, + Register t1, + Label* miss) { // Register usage: // receiver: holds the receiver on entry and is unchanged. // elements: holds the property dictionary on fall through. @@ -131,19 +131,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); // If probing finds an entry check that the value is a normal // property. __ bind(&done); // scratch2 == elements + 4 * index - const int kElementsStartOffset = StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize)); @@ -180,19 +180,19 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label done; // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); // If probing finds an entry in the dictionary check that the value // is a normal property that is not read only. 
__ bind(&done); // scratch2 == elements + 4 * index - const int kElementsStartOffset = StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kTypeAndReadOnlyMask = (PropertyDetails::TypeField::kMask | @@ -213,53 +213,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -void LoadIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadArrayLength(masm, r0, r3, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) { - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadStringLength(masm, r0, r1, r3, &miss, - support_wrappers); - // Cache miss: Jump to runtime. - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r2 : name - // -- lr : return address - // -- r0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadFunctionPrototype(masm, r0, r1, r3, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - // Checks the receiver for special cases (value type, slow case bits). // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, @@ -350,30 +303,35 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, } -// Checks whether a key is an array index string or a symbol string. -// Falls through if a key is a symbol. -static void GenerateKeyStringCheck(MacroAssembler* masm, - Register key, - Register map, - Register hash, - Label* index_string, - Label* not_symbol) { +// Checks whether a key is an array index string or a unique name. +// Falls through if a key is a unique name. +static void GenerateKeyNameCheck(MacroAssembler* masm, + Register key, + Register map, + Register hash, + Label* index_string, + Label* not_unique) { // The key is not a smi. - // Is it a string? - __ CompareObjectType(key, map, hash, FIRST_NONSTRING_TYPE); - __ b(ge, not_symbol); + Label unique; + // Is it a name? + __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); + __ b(hi, not_unique); + STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); + __ b(eq, &unique); // Is the string an array index, with cached numeric value? - __ ldr(hash, FieldMemOperand(key, String::kHashFieldOffset)); - __ tst(hash, Operand(String::kContainsCachedArrayIndexMask)); + __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); + __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask)); __ b(eq, index_string); - // Is the string a symbol? + // Is the string internalized? 
// map: key map __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kSymbolTag != 0); - __ tst(hash, Operand(kIsSymbolMask)); - __ b(eq, not_symbol); + STATIC_ASSERT(kInternalizedTag != 0); + __ tst(hash, Operand(kIsInternalizedMask)); + __ b(eq, not_unique); + + __ bind(&unique); } @@ -474,7 +432,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) { // Get the receiver of the function from the stack into r1. __ ldr(r1, MemOperand(sp, argc * kPointerSize)); - GenerateStringDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss); + GenerateNameDictionaryReceiverCheck(masm, r1, r0, r3, r4, &miss); // r0: elements // Search the dictionary - put result in register r1. @@ -578,11 +536,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ ldr(r1, MemOperand(sp, argc * kPointerSize)); Label do_call, slow_call, slow_load, slow_reload_receiver; - Label check_number_dictionary, check_string, lookup_monomorphic_cache; - Label index_smi, index_string; + Label check_number_dictionary, check_name, lookup_monomorphic_cache; + Label index_smi, index_name; // Check that the key is a smi. - __ JumpIfNotSmi(r2, &check_string); + __ JumpIfNotSmi(r2, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. @@ -629,10 +587,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ mov(r1, r0); __ jmp(&do_call); - __ bind(&check_string); - GenerateKeyStringCheck(masm, r2, r0, r3, &index_string, &slow_call); + __ bind(&check_name); + GenerateKeyNameCheck(masm, r2, r0, r3, &index_name, &slow_call); - // The key is known to be a symbol. + // The key is known to be a unique name. // If the receiver is a regular JS object with slow properties then do // a quick inline probe of the receiver's dictionary. // Otherwise do the monomorphic cache probe. @@ -660,14 +618,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ bind(&slow_call); // This branch is taken if: // - the receiver requires boxing or access check, - // - the key is neither smi nor symbol, + // - the key is neither smi nor a unique name, // - the value loaded is not a function, // - there is hope that the runtime will create a monomorphic call stub // that will get fetched next time. __ IncrementCounter(counters->keyed_call_generic_slow(), 1, r0, r3); GenerateMiss(masm, argc); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(r3, r2); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); @@ -680,10 +638,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // -- lr : return address // ----------------------------------- - // Check if the name is a string. + // Check if the name is really a name. Label miss; __ JumpIfSmi(r2, &miss); - __ IsObjectJSStringType(r2, r0, &miss); + __ IsObjectNameType(r2, r0, &miss); CallICBase::GenerateNormal(masm, argc); __ bind(&miss); @@ -703,8 +661,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // ----------------------------------- // Probe the stub cache. 
- Code::Flags flags = - Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC); + Code::Flags flags = Code::ComputeFlags( + Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::NORMAL, Code::LOAD_IC); Isolate::Current()->stub_cache()->GenerateProbe( masm, flags, r0, r2, r3, r4, r5, r6); @@ -722,7 +681,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { // ----------------------------------- Label miss; - GenerateStringDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss); + GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss); // r1: elements GenerateDictionaryLoad(masm, &miss, r1, r2, r0, r3, r4); @@ -862,7 +821,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ mov(r0, r2); __ Ret(); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -891,7 +850,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); __ Ret(); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -925,7 +884,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm, Object* KeyedLoadIC_Miss(Arguments args); -void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ---------- S t a t e -------------- // -- lr : return address // -- r0 : key @@ -938,7 +897,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { __ Push(r1, r0); // Perform tail call to the entry. - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate) : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); @@ -965,7 +924,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // -- r0 : key // -- r1 : receiver // ----------------------------------- - Label slow, check_string, index_smi, index_string, property_array_property; + Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; Register key = r0; @@ -974,7 +933,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); // Check that the key is a smi. - __ JumpIfNotSmi(key, &check_string); + __ JumpIfNotSmi(key, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. @@ -1011,8 +970,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { 1, r2, r3); GenerateRuntimeGetProperty(masm); - __ bind(&check_string); - GenerateKeyStringCheck(masm, key, r2, r3, &index_string, &slow); + __ bind(&check_name); + GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow); GenerateKeyedLoadReceiverCheck( masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow); @@ -1026,15 +985,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ b(eq, &probe_dictionary); // Load the map of the receiver, compute the keyed lookup cache hash - // based on 32 bits of the map pointer and the string hash. + // based on 32 bits of the map pointer and the name hash. 
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift)); - __ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset)); - __ eor(r3, r3, Operand(r4, ASR, String::kHashShift)); + __ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset)); + __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift)); int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; __ And(r3, r3, Operand(mask)); - // Load the key (consisting of map and symbol) from the cache and + // Load the key (consisting of map and unique name) from the cache and // check for match. Label load_in_object_property; static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; @@ -1051,13 +1010,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); __ cmp(r2, r5); __ b(ne, &try_next_entry); - __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol + __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name __ cmp(r0, r5); __ b(eq, &hit_on_nth_entry[i]); __ bind(&try_next_entry); } - // Last entry: Load map and move r4 to symbol. + // Last entry: Load map and move r4 to name. __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); __ cmp(r2, r5); __ b(ne, &slow); @@ -1119,11 +1078,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { GenerateGlobalInstanceTypeCheck(masm, r2, &slow); // Load the property to r0. GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4); - __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), - 1, r2, r3); + __ IncrementCounter( + isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3); __ Ret(); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(r3, key); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); @@ -1158,7 +1117,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { char_at_generator.GenerateSlow(masm, call_helper); __ bind(&miss); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -1198,11 +1157,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { 1); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } -void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ---------- S t a t e -------------- // -- r0 : value // -- r1 : key @@ -1213,7 +1172,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { // Push receiver, key and value for runtime call. __ Push(r2, r1, r0); - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric), masm->isolate()) : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); @@ -1249,7 +1208,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in r0. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail); __ mov(r0, r2); __ Ret(); __ bind(&fail); @@ -1270,7 +1231,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject( // Must return the modified receiver in r0. 
if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail); __ mov(r0, r2); __ Ret(); __ bind(&fail); @@ -1379,7 +1342,6 @@ static void KeyedStoreGenerateGenericHelper( __ bind(&fast_double_without_map_check); __ StoreNumberToDoubleElements(value, key, - receiver, elements, // Overwritten. r3, // Scratch regs... r4, @@ -1407,7 +1369,9 @@ static void KeyedStoreGenerateGenericHelper( r4, slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); @@ -1419,7 +1383,9 @@ static void KeyedStoreGenerateGenericHelper( r4, slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); + mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, + slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1433,7 +1399,8 @@ static void KeyedStoreGenerateGenericHelper( r4, slow); ASSERT(receiver_map.is(r3)); // Transition code expects map in r3 - ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow); + mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -1577,62 +1544,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { } -void StoreIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - // - // This accepts as a receiver anything JSArray::SetElementsLength accepts - // (currently anything except for external arrays which means anything with - // elements of FixedArray type). Value must be a number, but only smis are - // accepted as the most common case. - - Label miss; - - Register receiver = r1; - Register value = r0; - Register scratch = r3; - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); - - // Check that the object is a JS array. - __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); - __ b(ne, &miss); - - // Check that elements are FixedArray. - // We rely on StoreIC_ArrayLength below to deal with all types of - // fast elements (including COW). - __ ldr(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); - __ CompareObjectType(scratch, scratch, scratch, FIXED_ARRAY_TYPE); - __ b(ne, &miss); - - // Check that the array has fast properties, otherwise the length - // property might have been redefined. - __ ldr(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); - __ ldr(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset)); - __ CompareRoot(scratch, Heap::kHashTableMapRootIndex); - __ b(eq, &miss); - - // Check that value is a smi. 
- __ JumpIfNotSmi(value, &miss); - - // Prepare tail call to StoreIC_ArrayLength. - __ Push(receiver, value); - - ExternalReference ref = - ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate()); - __ TailCallExternalReference(ref, 2, 1); - - __ bind(&miss); - - GenerateMiss(masm); -} - - void StoreIC::GenerateNormal(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- r0 : value @@ -1642,7 +1553,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { // ----------------------------------- Label miss; - GenerateStringDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss); + GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss); GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5); Counters* counters = masm->isolate()->counters(); @@ -1699,36 +1610,15 @@ Condition CompareIC::ComputeCondition(Token::Value op) { } -void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { - HandleScope scope; - Handle<Code> rewritten; - State previous_state = GetState(); - State state = TargetState(previous_state, false, x, y); - if (state == GENERIC) { - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0); - rewritten = stub.GetCode(); - } else { - ICCompareStub stub(op_, state); - if (state == KNOWN_OBJECTS) { - stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map())); - } - rewritten = stub.GetCode(); - } - set_target(*rewritten); - -#ifdef DEBUG - if (FLAG_trace_ic) { - PrintF("[CompareIC (%s->%s)#%s]\n", - GetStateName(previous_state), - GetStateName(state), - Token::Name(op_)); - } -#endif +bool CompareIC::HasInlinedSmiCode(Address address) { + // The address of the instruction following the call. + Address cmp_instruction_address = + Assembler::return_address_from_call_start(address); - // Activate inlined smi code. - if (previous_state == UNINITIALIZED) { - PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); - } + // If the instruction following the call is not a cmp rx, #yyy, nothing + // was inlined. 
+ Instr instr = Assembler::instr_at(cmp_instruction_address); + return Assembler::IsCmpImmediate(instr); } diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc index 21c549f175..38884ce0af 100644 --- a/deps/v8/src/arm/lithium-arm.cc +++ b/deps/v8/src/arm/lithium-arm.cc @@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) #undef DEFINE_COMPILE LOsrEntry::LOsrEntry() { - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { register_spills_[i] = NULL; } - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { double_register_spills_[i] = NULL; } } @@ -112,7 +112,11 @@ void LInstruction::PrintDataTo(StringStream* stream) { stream->Add("= "); for (int i = 0; i < InputCount(); i++) { if (i > 0) stream->Add(" "); - InputAt(i)->PrintTo(stream); + if (InputAt(i) == NULL) { + stream->Add("NULL"); + } else { + InputAt(i)->PrintTo(stream); + } } } @@ -177,6 +181,7 @@ const char* LArithmeticT::Mnemonic() const { case Token::BIT_AND: return "bit-and-t"; case Token::BIT_OR: return "bit-or-t"; case Token::BIT_XOR: return "bit-xor-t"; + case Token::ROR: return "ror-t"; case Token::SHL: return "shl-t"; case Token::SAR: return "sar-t"; case Token::SHR: return "shr-t"; @@ -285,6 +290,13 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { } +void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { + stream->Add(" = "); + base_object()->PrintTo(stream); + stream->Add(" + %d", offset()); +} + + void LCallConstantFunction::PrintDataTo(StringStream* stream) { stream->Add("#%d / ", arity()); } @@ -296,6 +308,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) { } +void LMathExp::PrintDataTo(StringStream* stream) { + value()->PrintTo(stream); +} + + void LLoadContextSlot::PrintDataTo(StringStream* stream) { context()->PrintTo(stream); stream->Add("[%d]", slot_index()); @@ -345,6 +362,17 @@ void LCallNew::PrintDataTo(StringStream* stream) { } +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ASSERT(hydrogen()->property_cell()->value()->IsSmi()); + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(hydrogen()->property_cell()->value())->value()); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); stream->Add(" length "); @@ -372,20 +400,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { } -void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); +void LLoadKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); - value()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d]", additional_index()); + } else { + stream->Add("]"); + } } -void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { +void LStoreKeyed::PrintDataTo(StringStream* stream) { elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d] <-", additional_index()); + } else { + stream->Add("] <- "); + } value()->PrintTo(stream); } @@ -599,6 +634,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { LInstruction* 
LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { + info()->MarkAsNonDeferredCalling(); #ifdef DEBUG instr->VerifyCall(); #endif @@ -639,8 +675,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { LUnallocated* LChunkBuilder::TempRegister() { LUnallocated* operand = new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - operand->set_virtual_register(allocator_->GetVirtualRegister()); - if (!allocator_->AllocationOk()) Abort("Not enough virtual registers."); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort("Out of virtual registers while trying to allocate temp register."); + return NULL; + } + operand->set_virtual_register(vreg); return operand; } @@ -664,6 +704,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { } +LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { + return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); +} + + LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) { return AssignEnvironment(new(zone()) LDeoptimize); } @@ -894,7 +939,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { HValue* value = instr->value(); if (value->EmitAtUses()) { - HBasicBlock* successor = HConstant::cast(value)->ToBoolean() + HBasicBlock* successor = HConstant::cast(value)->BooleanValue() ? instr->FirstSuccessor() : instr->SecondSuccessor(); return new(zone()) LGoto(successor->block_id()); @@ -949,6 +994,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( } +LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) { + LOperand* object = UseRegisterAtStart(instr->object()); + return DefineAsRegister(new(zone()) LInstanceSize(object)); +} + + LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { LOperand* receiver = UseRegisterAtStart(instr->receiver()); LOperand* function = UseRegisterAtStart(instr->function()); @@ -977,6 +1028,15 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { } +LInstruction* LChunkBuilder::DoInnerAllocatedObject( + HInnerAllocatedObject* inner_object) { + LOperand* base_object = UseRegisterAtStart(inner_object->base_object()); + LInnerAllocatedObject* result = + new(zone()) LInnerAllocatedObject(base_object); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { return instr->HasNoUses() ? NULL @@ -985,7 +1045,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { LInstruction* LChunkBuilder::DoContext(HContext* instr) { - return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext); + // If there is a non-return use, the context must be allocated in a register. 
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) { + if (!it.value()->IsReturn()) { + return DefineAsRegister(new(zone()) LContext); + } + } + + return NULL; } @@ -1033,6 +1100,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LOperand* input = UseFixedDouble(instr->value(), d2); LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL); return MarkAsCall(DefineFixedDouble(result, d2), instr); + } else if (op == kMathExp) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->value()->representation().IsDouble()); + LOperand* input = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll. + LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); + return DefineAsRegister(result); } else if (op == kMathPowHalf) { LOperand* input = UseFixedDouble(instr->value(), d2); LOperand* temp = FixedTemp(d3); @@ -1094,6 +1170,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { } +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + LOperand* constructor = UseFixed(instr->constructor(), r1); + argument_count_ -= instr->argument_count(); + LCallNewArray* result = new(zone()) LCallNewArray(constructor); + return MarkAsCall(DefineFixed(result, r0), instr); +} + + LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* function = UseFixed(instr->function(), r1); argument_count_ -= instr->argument_count(); @@ -1108,6 +1192,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { } +LInstruction* LChunkBuilder::DoRor(HRor* instr) { + return DoShift(Token::ROR, instr); +} + + LInstruction* LChunkBuilder::DoShr(HShr* instr) { return DoShift(Token::SHR, instr); } @@ -1157,6 +1246,13 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { if (instr->representation().IsDouble()) { return DoArithmeticD(Token::DIV, instr); } else if (instr->representation().IsInteger32()) { + if (instr->HasPowerOf2Divisor()) { + ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); + LOperand* value = UseRegisterAtStart(instr->left()); + LDivI* div = + new(zone()) LDivI(value, UseOrConstant(instr->right())); + return AssignEnvironment(DefineSameAsFirst(div)); + } // TODO(1042) The fixed register allocation // is needed because we call TypeRecordingBinaryOpStub from // the generated code, which requires registers r0 @@ -1213,31 +1309,43 @@ HValue* LChunkBuilder::SimplifiedDividendForMathFloorOfDiv(HValue* dividend) { HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) { - // Only optimize when we have magic numbers for the divisor. - // The standard integer division routine is usually slower than transitionning - // to VFP. - if (divisor->IsConstant() && - HConstant::cast(divisor)->HasInteger32Value()) { + if (CpuFeatures::IsSupported(SUDIV)) { + // A value with an integer representation does not need to be transformed. + if (divisor->representation().IsInteger32()) { + return divisor; + // A change from an integer32 can be replaced by the integer32 value. 
+ } else if (divisor->IsChange() && + HChange::cast(divisor)->from().IsInteger32()) { + return HChange::cast(divisor)->value(); + } + } + + if (divisor->IsConstant() && HConstant::cast(divisor)->HasInteger32Value()) { HConstant* constant_val = HConstant::cast(divisor); int32_t int32_val = constant_val->Integer32Value(); - if (LChunkBuilder::HasMagicNumberForDivisor(int32_val)) { + if (LChunkBuilder::HasMagicNumberForDivisor(int32_val) || + CpuFeatures::IsSupported(SUDIV)) { return constant_val->CopyToRepresentation(Representation::Integer32(), divisor->block()->zone()); } } + return NULL; } LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { - HValue* right = instr->right(); - LOperand* dividend = UseRegister(instr->left()); - LOperand* divisor = UseRegisterOrConstant(right); - LOperand* remainder = TempRegister(); - ASSERT(right->IsConstant() && - HConstant::cast(right)->HasInteger32Value() && - HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value())); - return AssignEnvironment(DefineAsRegister( + HValue* right = instr->right(); + LOperand* dividend = UseRegister(instr->left()); + LOperand* divisor = CpuFeatures::IsSupported(SUDIV) + ? UseRegister(right) + : UseOrConstant(right); + LOperand* remainder = TempRegister(); + ASSERT(CpuFeatures::IsSupported(SUDIV) || + (right->IsConstant() && + HConstant::cast(right)->HasInteger32Value() && + HasMagicNumberForDivisor(HConstant::cast(right)->Integer32Value()))); + return AssignEnvironment(DefineAsRegister( new(zone()) LMathFloorOfDiv(dividend, divisor, remainder))); } @@ -1306,8 +1414,28 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { return DefineAsRegister(mul); } else if (instr->representation().IsDouble()) { - return DoArithmeticD(Token::MUL, instr); + if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() || + instr->uses().value()->IsSub())) { + HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value()); + + if (use->IsAdd() && instr == use->left()) { + // This mul is the lhs of an add. The add and mul will be folded into a + // multiply-add in DoAdd. + return NULL; + } + if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) { + // This mul is the rhs of an add, where the lhs is not another mul. + // The add and mul will be folded into a multiply-add in DoAdd. + return NULL; + } + if (instr == use->right() && use->IsSub()) { + // This mul is the rhs of a sub. The sub and mul will be folded into a + // multiply-sub in DoSub. + return NULL; + } + } + return DoArithmeticD(Token::MUL, instr); } else { return DoArithmeticT(Token::MUL, instr); } @@ -1318,6 +1446,12 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); + + if (instr->left()->IsConstant()) { + // If lhs is constant, do reverse subtraction instead. 
+ return DoRSub(instr); + } + LOperand* left = UseRegisterAtStart(instr->left()); LOperand* right = UseOrConstantAtStart(instr->right()); LSubI* sub = new(zone()) LSubI(left, right); @@ -1327,6 +1461,10 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { } return result; } else if (instr->representation().IsDouble()) { + if (instr->right()->IsMul()) { + return DoMultiplySub(instr->left(), HMul::cast(instr->right())); + } + return DoArithmeticD(Token::SUB, instr); } else { return DoArithmeticT(Token::SUB, instr); @@ -1334,6 +1472,44 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { } +LInstruction* LChunkBuilder::DoRSub(HSub* instr) { + ASSERT(instr->representation().IsInteger32()); + ASSERT(instr->left()->representation().IsInteger32()); + ASSERT(instr->right()->representation().IsInteger32()); + + // Note: The lhs of the subtraction becomes the rhs of the + // reverse-subtraction. + LOperand* left = UseRegisterAtStart(instr->right()); + LOperand* right = UseOrConstantAtStart(instr->left()); + LRSubI* rsb = new(zone()) LRSubI(left, right); + LInstruction* result = DefineAsRegister(rsb); + if (instr->CheckFlag(HValue::kCanOverflow)) { + result = AssignEnvironment(result); + } + return result; +} + + +LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { + LOperand* multiplier_op = UseRegisterAtStart(mul->left()); + LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); + LOperand* addend_op = UseRegisterAtStart(addend); + return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, + multiplicand_op)); +} + + +LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) { + LOperand* minuend_op = UseRegisterAtStart(minuend); + LOperand* multiplier_op = UseRegisterAtStart(mul->left()); + LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); + + return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op, + multiplier_op, + multiplicand_op)); +} + + LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); @@ -1347,6 +1523,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { } return result; } else if (instr->representation().IsDouble()) { + if (instr->left()->IsMul()) { + return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); + } + + if (instr->right()->IsMul()) { + ASSERT(!instr->left()->IsMul()); + return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); + } + return DoArithmeticD(Token::ADD, instr); } else { ASSERT(instr->representation().IsTagged()); @@ -1412,7 +1597,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { LInstruction* LChunkBuilder::DoCompareIDAndBranch( HCompareIDAndBranch* instr) { - Representation r = instr->GetInputRepresentation(); + Representation r = instr->representation(); if (r.IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); @@ -1566,6 +1751,27 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { } +LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegister(instr->index()); + LOperand* value = UseRegister(instr->value()); + LSeqStringSetChar* result = + new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) { + return NULL; +} + + 
+LInstruction* LChunkBuilder::DoInductionVariableAnnotation( + HInductionVariableAnnotation* instr) { + return NULL; +} + + LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { LOperand* value = UseRegisterOrConstantAtStart(instr->index()); LOperand* length = UseRegister(instr->length()); @@ -1573,6 +1779,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { } +LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( + HBoundsCheckBaseIndexInformation* instr) { + UNREACHABLE(); + return NULL; +} + + LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { // The control instruction marking the end of a block that completed // abruptly (e.g., threw an exception). There is nothing specific to do. @@ -1604,6 +1817,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation to = instr->to(); if (from.IsTagged()) { if (to.IsDouble()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LNumberUntagD* res = new(zone()) LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); @@ -1628,6 +1842,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } } else if (from.IsDouble()) { if (to.IsTagged()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); @@ -1647,6 +1862,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { return AssignEnvironment(DefineAsRegister(res)); } } else if (from.IsInteger32()) { + info()->MarkAsDeferredCalling(); if (to.IsTagged()) { HValue* val = instr->value(); LOperand* value = UseRegisterAtStart(val); @@ -1689,10 +1905,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { - LOperand* temp1 = TempRegister(); + LUnallocated* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); - return AssignEnvironment(result); + LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); + return AssignEnvironment(Define(result, temp1)); } @@ -1702,6 +1918,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { } +LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckSmi(value)); +} + + LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { LOperand* value = UseRegisterAtStart(instr->value()); return AssignEnvironment(new(zone()) LCheckFunction(value)); @@ -1734,7 +1956,9 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - return new(zone()) LReturn(UseFixed(instr->value(), r0)); + LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); + return new(zone()) LReturn(UseFixed(instr->value(), r0), + parameter_count); } @@ -1860,53 +2084,49 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( } -LInstruction* LChunkBuilder::DoLoadKeyedFastElement( - HLoadKeyedFastElement* instr) { - ASSERT(instr->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* obj = UseRegisterAtStart(instr->object()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key); - if 
(instr->RequiresHoleCheck()) AssignEnvironment(result); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( - HLoadKeyedFastDoubleElement* instr) { - ASSERT(instr->representation().IsDouble()); +LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { ASSERT(instr->key()->representation().IsInteger32() || instr->key()->representation().IsTagged()); - LOperand* elements = UseTempRegister(instr->elements()); + ElementsKind elements_kind = instr->elements_kind(); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastDoubleElement* result = - new(zone()) LLoadKeyedFastDoubleElement(elements, key); - return AssignEnvironment(DefineAsRegister(result)); -} + LLoadKeyed* result = NULL; + if (!instr->is_external()) { + LOperand* obj = NULL; + if (instr->representation().IsDouble()) { + obj = UseTempRegister(instr->elements()); + } else { + ASSERT(instr->representation().IsTagged()); + obj = UseRegisterAtStart(instr->elements()); + } + result = new(zone()) LLoadKeyed(obj, key); + } else { + ASSERT( + (instr->representation().IsInteger32() && + (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && + (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || + (instr->representation().IsDouble() && + ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + // float->double conversion on non-VFP2 requires an extra scratch + // register. For convenience, just mark the elements register as "UseTemp" + // so that it can be used as a temp during the float->double conversion + // after it's no longer needed after the float load. + bool needs_temp = + !CpuFeatures::IsSupported(VFP2) && + (elements_kind == EXTERNAL_FLOAT_ELEMENTS); + LOperand* external_pointer = needs_temp + ? UseTempRegister(instr->elements()) + : UseRegister(instr->elements()); + result = new(zone()) LLoadKeyed(external_pointer, key); + } -LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( - HLoadKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); - ASSERT( - (instr->representation().IsInteger32() && - (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (instr->representation().IsDouble() && - ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* external_pointer = UseRegister(instr->external_pointer()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LLoadKeyedSpecializedArrayElement* result = - new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key); - LInstruction* load_instr = DefineAsRegister(result); + DefineAsRegister(result); // An unsigned int array load might overflow and cause a deopt, make sure it // has an environment. - return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ? - AssignEnvironment(load_instr) : load_instr; + bool can_deoptimize = instr->RequiresHoleCheck() || + (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS); + return can_deoptimize ? 
AssignEnvironment(result) : result; } @@ -1920,66 +2140,48 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { } -LInstruction* LChunkBuilder::DoStoreKeyedFastElement( - HStoreKeyedFastElement* instr) { - bool needs_write_barrier = instr->NeedsWriteBarrier(); - ASSERT(instr->value()->representation().IsTagged()); - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* obj = UseTempRegister(instr->object()); - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - LOperand* key = needs_write_barrier - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyedFastElement(obj, key, val); -} - - -LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( - HStoreKeyedFastDoubleElement* instr) { - ASSERT(instr->value()->representation().IsDouble()); - ASSERT(instr->elements()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* elements = UseRegisterAtStart(instr->elements()); - LOperand* val = UseTempRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); +LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); - return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val); -} + if (!instr->is_external()) { + ASSERT(instr->elements()->representation().IsTagged()); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + LOperand* object = NULL; + LOperand* key = NULL; + LOperand* val = NULL; + + if (instr->value()->representation().IsDouble()) { + object = UseRegisterAtStart(instr->elements()); + val = UseTempRegister(instr->value()); + key = UseRegisterOrConstantAtStart(instr->key()); + } else { + ASSERT(instr->value()->representation().IsTagged()); + object = UseTempRegister(instr->elements()); + val = needs_write_barrier ? UseTempRegister(instr->value()) + : UseRegisterAtStart(instr->value()); + key = needs_write_barrier ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + } + return new(zone()) LStoreKeyed(object, key, val); + } -LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( - HStoreKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); ASSERT( (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->external_pointer()->representation().IsExternal()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* external_pointer = UseRegister(instr->external_pointer()); + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ASSERT(instr->elements()->representation().IsExternal()); bool val_is_temp_register = elements_kind == EXTERNAL_PIXEL_ELEMENTS || elements_kind == EXTERNAL_FLOAT_ELEMENTS; - LOperand* val = val_is_temp_register - ? UseTempRegister(instr->value()) + LOperand* val = val_is_temp_register ? 
UseTempRegister(instr->value()) : UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstant(instr->key()); - - return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer, - key, - val); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LOperand* external_pointer = UseRegister(instr->elements()); + return new(zone()) LStoreKeyed(external_pointer, key, val); } @@ -1998,14 +2200,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - ElementsKind from_kind = instr->original_map()->elements_kind(); - ElementsKind to_kind = instr->transitioned_map()->elements_kind(); - if (IsSimpleMapChangeTransition(from_kind, to_kind)) { - LOperand* object = UseRegister(instr->object()); + LOperand* object = UseRegister(instr->object()); + if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, new_map_reg, NULL); return DefineSameAsFirst(result); + } else if (FLAG_compiled_transitions) { + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, NULL, NULL); + return AssignPointerMap(result); } else { LOperand* object = UseFixed(instr->object(), r0); LOperand* fixed_object_reg = FixedTemp(r2); @@ -2014,11 +2218,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( new(zone()) LTransitionElementsKind(object, new_map_reg, fixed_object_reg); - return MarkAsCall(DefineFixed(result, r0), instr); + return MarkAsCall(result, instr); } } +LInstruction* LChunkBuilder::DoTrapAllocationMemento( + HTrapAllocationMemento* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* temp = TempRegister(); + LTrapAllocationMemento* result = + new(zone()) LTrapAllocationMemento(object, temp); + return AssignEnvironment(result); +} + + LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); bool needs_write_barrier_for_map = !instr->transition().is_null() && @@ -2085,12 +2299,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { + info()->MarkAsDeferredCalling(); LAllocateObject* result = new(zone()) LAllocateObject(TempRegister(), TempRegister()); return AssignPointerMap(DefineAsRegister(result)); } +LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { + info()->MarkAsDeferredCalling(); + LOperand* size = UseTempRegister(instr->size()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LAllocate* result = new(zone()) LAllocate(size, temp1, temp2); + return AssignPointerMap(DefineAsRegister(result)); +} + + LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr); } @@ -2133,8 +2358,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(new(zone()) LParameter, spill_index); + LParameter* result = new(zone()) LParameter; + if (instr->kind() == HParameter::STACK_PARAMETER) { + int spill_index = chunk()->GetParameterStackSlot(instr->index()); + return DefineAsSpilled(result, spill_index); + } else { + ASSERT(info()->IsStub()); + CodeStubInterfaceDescriptor* descriptor = + 
info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + Register reg = descriptor->register_params_[instr->index()]; + return DefineFixed(result, reg); + } } @@ -2202,7 +2436,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { env->set_ast_id(instr->ast_id()); env->Drop(instr->pop_count()); - for (int i = 0; i < instr->values()->length(); ++i) { + for (int i = instr->values()->length() - 1; i >= 0; --i) { HValue* value = instr->values()->at(i); if (instr->HasAssignedIndexAt(i)) { env->Bind(instr->GetAssignedIndexAt(i), value); @@ -2246,8 +2480,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { instr->arguments_count(), instr->function(), undefined, - instr->call_kind(), - instr->inlining_kind()); + instr->inlining_kind(), + instr->undefined_receiver()); if (instr->arguments_var() != NULL) { inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h index fb36fe9c0d..f49e8ce4f8 100644 --- a/deps/v8/src/arm/lithium-arm.h +++ b/deps/v8/src/arm/lithium-arm.h @@ -50,6 +50,7 @@ class LCodeGen; V(AccessArgumentsAt) \ V(AddI) \ V(AllocateObject) \ + V(Allocate) \ V(ApplyArguments) \ V(ArgumentsElements) \ V(ArgumentsLength) \ @@ -67,6 +68,7 @@ class LCodeGen; V(CallKnownGlobal) \ V(CallNamed) \ V(CallNew) \ + V(CallNewArray) \ V(CallRuntime) \ V(CallStub) \ V(CheckFunction) \ @@ -93,6 +95,7 @@ class LCodeGen; V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ + V(DummyUse) \ V(ElementsKind) \ V(FastLiteral) \ V(FixedArrayBaseLength) \ @@ -106,6 +109,7 @@ class LCodeGen; V(In) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ + V(InstanceSize) \ V(InstructionGap) \ V(Integer32ToDouble) \ V(Uint32ToDouble) \ @@ -125,18 +129,19 @@ class LCodeGen; V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ - V(LoadKeyedFastDoubleElement) \ - V(LoadKeyedFastElement) \ + V(LoadKeyed) \ V(LoadKeyedGeneric) \ - V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ V(MapEnumLength) \ + V(MathExp) \ V(MathFloorOfDiv) \ V(MathMinMax) \ V(ModI) \ V(MulI) \ + V(MultiplyAddD) \ + V(MultiplySubD) \ V(NumberTagD) \ V(NumberTagI) \ V(NumberTagU) \ @@ -150,6 +155,7 @@ class LCodeGen; V(Random) \ V(RegExpLiteral) \ V(Return) \ + V(SeqStringSetChar) \ V(ShiftI) \ V(SmiTag) \ V(SmiUntag) \ @@ -157,10 +163,8 @@ class LCodeGen; V(StoreContextSlot) \ V(StoreGlobalCell) \ V(StoreGlobalGeneric) \ - V(StoreKeyedFastDoubleElement) \ - V(StoreKeyedFastElement) \ + V(StoreKeyed) \ V(StoreKeyedGeneric) \ - V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ V(StringAdd) \ @@ -169,11 +173,13 @@ class LCodeGen; V(StringCompareAndBranch) \ V(StringLength) \ V(SubI) \ + V(RSubI) \ V(TaggedToI) \ V(ThisFunction) \ V(Throw) \ V(ToFastProperties) \ V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ V(Typeof) \ V(TypeofIsAndBranch) \ V(UnaryMathOperation) \ @@ -185,7 +191,8 @@ class LCodeGen; V(LoadFieldByIndex) \ V(DateField) \ V(WrapReceiver) \ - V(Drop) + V(Drop) \ + V(InnerAllocatedObject) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -256,6 +263,11 @@ class LInstruction: public ZoneObject { void MarkAsCall() { is_call_ = true; } // Interface to the register allocator and iterators. + bool ClobbersTemps() const { return is_call_; } + bool ClobbersRegisters() const { return is_call_; } + bool ClobbersDoubleRegisters() const { return is_call_; } + + // Interface to the register allocator and iterators. 
bool IsMarkedAsCall() const { return is_call_; } virtual bool HasResult() const = 0; @@ -398,6 +410,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> { }; +class LDummyUse: public LTemplateInstruction<1, 1, 0> { + public: + explicit LDummyUse(LOperand* value) { + inputs_[0] = value; + } + DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") +}; + + class LDeoptimize: public LTemplateInstruction<0, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") @@ -625,6 +646,42 @@ class LMulI: public LTemplateInstruction<1, 2, 1> { }; +// Instruction for computing multiplier * multiplicand + addend. +class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> { + public: + LMultiplyAddD(LOperand* addend, LOperand* multiplier, + LOperand* multiplicand) { + inputs_[0] = addend; + inputs_[1] = multiplier; + inputs_[2] = multiplicand; + } + + LOperand* addend() { return inputs_[0]; } + LOperand* multiplier() { return inputs_[1]; } + LOperand* multiplicand() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") +}; + + +// Instruction for computing minuend - multiplier * multiplicand. +class LMultiplySubD: public LTemplateInstruction<1, 3, 0> { + public: + LMultiplySubD(LOperand* minuend, LOperand* multiplier, + LOperand* multiplicand) { + inputs_[0] = minuend; + inputs_[1] = multiplier; + inputs_[2] = multiplicand; + } + + LOperand* minuend() { return inputs_[0]; } + LOperand* multiplier() { return inputs_[1]; } + LOperand* multiplicand() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d") +}; + + class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { @@ -640,7 +697,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { Token::Value op() const { return hydrogen()->token(); } bool is_double() const { - return hydrogen()->GetInputRepresentation().IsDouble(); + return hydrogen()->representation().IsDouble(); } virtual void PrintDataTo(StringStream* stream); @@ -665,6 +722,30 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { }; +class LMathExp: public LTemplateInstruction<1, 1, 3> { + public: + LMathExp(LOperand* value, + LOperand* double_temp, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + temps_[2] = double_temp; + ExternalReference::InitializeMathExpData(); + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + LOperand* double_temp() { return temps_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") + + virtual void PrintDataTo(StringStream* stream); +}; + + class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { @@ -921,6 +1002,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { }; +class LInstanceSize: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInstanceSize(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") + DECLARE_HYDROGEN_ACCESSOR(InstanceSize) +}; + + class LBoundsCheck: public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -989,6 +1083,21 @@ class LSubI: public LTemplateInstruction<1, 2, 0> { }; +class LRSubI: public LTemplateInstruction<1, 2, 0> { + public: + LRSubI(LOperand* left, LOperand* right) { + 
inputs_[0] = left; + inputs_[1] = right; + } + + LOperand* left() { return inputs_[0]; } + LOperand* right() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i") + DECLARE_HYDROGEN_ACCESSOR(Sub) +}; + + class LConstantI: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i") @@ -1142,6 +1251,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> { }; +class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> { + public: + LSeqStringSetChar(String::Encoding encoding, + LOperand* string, + LOperand* index, + LOperand* value) : encoding_(encoding) { + inputs_[0] = string; + inputs_[1] = index; + inputs_[2] = value; + } + + String::Encoding encoding() { return encoding_; } + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) + + private: + String::Encoding encoding_; +}; + + class LThrow: public LTemplateInstruction<0, 1, 0> { public: explicit LThrow(LOperand* value) { @@ -1266,14 +1399,24 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> { }; -class LReturn: public LTemplateInstruction<0, 1, 0> { +class LReturn: public LTemplateInstruction<0, 2, 0> { public: - explicit LReturn(LOperand* value) { + explicit LReturn(LOperand* value, LOperand* parameter_count) { inputs_[0] = value; + inputs_[1] = parameter_count; } LOperand* value() { return inputs_[0]; } + bool has_constant_parameter_count() { + return parameter_count()->IsConstantOperand(); + } + LConstantOperand* constant_parameter_count() { + ASSERT(has_constant_parameter_count()); + return LConstantOperand::cast(parameter_count()); + } + LOperand* parameter_count() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -1357,58 +1500,26 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> { }; -class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { +class LLoadKeyed: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedFastElement(LOperand* elements, LOperand* key) { + LLoadKeyed(LOperand* elements, LOperand* key) { inputs_[0] = elements; inputs_[1] = key; } LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) { - inputs_[0] = elements; - inputs_[1] = key; + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); } - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement, - "load-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement) - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { - inputs_[0] = external_pointer; - inputs_[1] = key; + bool is_external() const { + return hydrogen()->is_external(); } - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { 
return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement, - "load-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement) + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } + virtual void PrintDataTo(StringStream* stream); uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1545,6 +1656,22 @@ class LDrop: public LTemplateInstruction<0, 0, 0> { }; +class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInnerAllocatedObject(LOperand* base_object) { + inputs_[0] = base_object; + } + + LOperand* base_object() { return inputs_[0]; } + int offset() { return hydrogen()->offset(); } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object") + DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject) +}; + + class LThisFunction: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") @@ -1555,6 +1682,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> { class LContext: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Context, "context") + DECLARE_HYDROGEN_ACCESSOR(Context) }; @@ -1717,6 +1845,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> { }; +class LCallNewArray: public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallNewArray(LOperand* constructor) { + inputs_[0] = constructor; + } + + LOperand* constructor() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream); + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + class LCallRuntime: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") @@ -1788,6 +1933,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> { LOperand* temp2() { return temps_[1]; } DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") + DECLARE_HYDROGEN_ACCESSOR(Change) }; @@ -1922,51 +2068,28 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { }; -class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) { + LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) { inputs_[0] = object; inputs_[1] = key; inputs_[2] = value; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) - - virtual void PrintDataTo(StringStream* stream); - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedFastDoubleElement(LOperand* elements, - LOperand* key, - LOperand* value) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = value; - } - + bool is_external() const { return hydrogen()->is_external(); } LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + ElementsKind elements_kind() 
const { + return hydrogen()->elements_kind(); + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement, - "store-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) virtual void PrintDataTo(StringStream* stream); - - uint32_t additional_index() const { return hydrogen()->index_offset(); } - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1990,37 +2113,15 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); } }; -class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key, - LOperand* value) { - inputs_[0] = external_pointer; - inputs_[1] = key; - inputs_[2] = value; - } - - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement, - "store-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement) - - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { public: LTransitionElementsKind(LOperand* object, LOperand* new_map_temp, - LOperand* temp) { + LOperand* fixed_object_temp) { inputs_[0] = object; temps_[0] = new_map_temp; - temps_[1] = temp; + temps_[1] = fixed_object_temp; } LOperand* object() { return inputs_[0]; } @@ -2035,6 +2136,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { Handle<Map> original_map() { return hydrogen()->original_map(); } Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + ElementsKind from_kind() { return hydrogen()->from_kind(); } + ElementsKind to_kind() { return hydrogen()->to_kind(); } +}; + + +class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> { + public: + LTrapAllocationMemento(LOperand* object, + LOperand* temp) { + inputs_[0] = object; + temps_[0] = temp; + } + + LOperand* object() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, + "trap-allocation-memento") }; @@ -2134,7 +2253,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> { }; -class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { +class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> { public: LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) { temps_[0] = temp; @@ -2147,8 +2266,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps") DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps) - Handle<JSObject> prototype() const { return hydrogen()->prototype(); } - Handle<JSObject> holder() const { return hydrogen()->holder(); } + ZoneList<Handle<JSObject> >* prototypes() const { + return hydrogen()->prototypes(); + } + ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); } }; @@ -2216,7 +2337,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { }; -class LAllocateObject: public LTemplateInstruction<1, 0, 2> { +class LAllocateObject: 
public LTemplateInstruction<1, 1, 2> { public: LAllocateObject(LOperand* temp, LOperand* temp2) { temps_[0] = temp; @@ -2231,6 +2352,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> { }; +class LAllocate: public LTemplateInstruction<1, 2, 2> { + public: + LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { + inputs_[1] = size; + temps_[0] = temp1; + temps_[1] = temp2; + } + + LOperand* size() { return inputs_[1]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") + DECLARE_HYDROGEN_ACCESSOR(Allocate) +}; + + class LFastLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal") @@ -2355,8 +2493,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { // slot, i.e., that must also be restored to the spill slot on OSR entry. // NULL if the register has no assigned spill slot. Indexed by allocation // index. - LOperand* register_spills_[Register::kNumAllocatableRegisters]; - LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters]; + LOperand* register_spills_[Register::kMaxNumAllocatableRegisters]; + LOperand* double_register_spills_[ + DoubleRegister::kMaxNumAllocatableRegisters]; }; @@ -2478,6 +2617,10 @@ class LChunkBuilder BASE_EMBEDDED { HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO + LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); + LInstruction* DoMultiplySub(HValue* minuend, HMul* mul); + LInstruction* DoRSub(HSub* instr); + static bool HasMagicNumberForDivisor(int32_t divisor); static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val); static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val); diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc index 6f5aa436a8..3ad86cfed3 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.cc +++ b/deps/v8/src/arm/lithium-codegen-arm.cc @@ -65,10 +65,6 @@ bool LCodeGen::GenerateCode() { HPhase phase("Z_Code generation", chunk()); ASSERT(is_unused()); status_ = GENERATING; - CpuFeatures::Scope scope1(VFP3); - CpuFeatures::Scope scope2(ARMv7); - - CodeStub::GenerateFPStubs(); // Open a frame scope to indicate that there is a frame on the stack. The // NONE indicates that the scope shouldn't actually generate code to set up @@ -87,7 +83,14 @@ void LCodeGen::FinishCode(Handle<Code> code) { ASSERT(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); + if (FLAG_weak_embedded_maps_in_optimized_code) { + RegisterDependentCodeForEmbeddedMaps(code); + } PopulateDeoptimizationData(code); + for (int i = 0 ; i < prototype_maps_.length(); i++) { + prototype_maps_.at(i)->AddDependentCode( + DependentCode::kPrototypeCheckGroup, code); + } } @@ -118,55 +121,96 @@ void LCodeGen::Comment(const char* format, ...) { bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); - ProfileEntryHookStub::MaybeCallEntryHook(masm_); + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); #ifdef DEBUG - if (strlen(FLAG_stop_at) > 0 && - info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { - __ stop("stop_at"); - } + if (strlen(FLAG_stop_at) > 0 && + info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ stop("stop_at"); + } #endif - // r1: Callee's JS function. - // cp: Callee's context. - // fp: Caller's frame pointer. - // lr: Caller's pc. + // r1: Callee's JS function. 
+ // cp: Callee's context. + // fp: Caller's frame pointer. + // lr: Caller's pc. - // Strict mode functions and builtins need to replace the receiver - // with undefined when called as functions (without an explicit - // receiver object). r5 is zero for method calls and non-zero for - // function calls. - if (!info_->is_classic_mode() || info_->is_native()) { - Label ok; - __ cmp(r5, Operand(0)); - __ b(eq, &ok); - int receiver_offset = scope()->num_parameters() * kPointerSize; - __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); - __ str(r2, MemOperand(sp, receiver_offset)); - __ bind(&ok); + // Strict mode functions and builtins need to replace the receiver + // with undefined when called as functions (without an explicit + // receiver object). r5 is zero for method calls and non-zero for + // function calls. + if (!info_->is_classic_mode() || info_->is_native()) { + Label ok; + __ cmp(r5, Operand::Zero()); + __ b(eq, &ok); + int receiver_offset = scope()->num_parameters() * kPointerSize; + __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); + __ str(r2, MemOperand(sp, receiver_offset)); + __ bind(&ok); + } } - __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); - __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP. + info()->set_prologue_offset(masm_->pc_offset()); + if (NeedsEagerFrame()) { + if (info()->IsStub()) { + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + __ Push(Smi::FromInt(StackFrame::STUB)); + // Adjust FP to point to saved FP. + __ add(fp, sp, Operand(2 * kPointerSize)); + } else { + PredictableCodeSizeScope predictible_code_size_scope( + masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize); + // The following three instructions must remain together and unmodified + // for code aging to work properly. + __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit()); + // Load undefined value here, so the value is ready for the loop + // below. + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + // Adjust FP to point to saved FP. + __ add(fp, sp, Operand(2 * kPointerSize)); + } + frame_is_built_ = true; + } // Reserve space for the stack slots needed by the code. int slots = GetStackSlotCount(); if (slots > 0) { if (FLAG_debug_code) { - __ mov(r0, Operand(slots)); - __ mov(r2, Operand(kSlotsZapValue)); + __ sub(sp, sp, Operand(slots * kPointerSize)); + __ push(r0); + __ push(r1); + __ add(r0, sp, Operand(slots * kPointerSize)); + __ mov(r1, Operand(kSlotsZapValue)); Label loop; __ bind(&loop); - __ push(r2); - __ sub(r0, r0, Operand(1), SetCC); + __ sub(r0, r0, Operand(kPointerSize)); + __ str(r1, MemOperand(r0, 2 * kPointerSize)); + __ cmp(r0, sp); __ b(ne, &loop); + __ pop(r1); + __ pop(r0); } else { __ sub(sp, sp, Operand(slots * kPointerSize)); } } + if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + Comment(";;; Save clobbered callee double registers"); + int count = 0; + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + while (!save_iterator.Done()) { + __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(sp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } + } + // Possibly allocate a local context. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); // Argument to NewContext is the function, which is in r1. 
@@ -202,7 +246,7 @@ bool LCodeGen::GeneratePrologue() { } // Trace the call. - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { __ CallRuntime(Runtime::kTraceEnter, 0); } return !is_aborted(); @@ -222,7 +266,30 @@ bool LCodeGen::GenerateBody() { } if (emit_instructions) { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + if (FLAG_code_comments) { + HValue* hydrogen = instr->hydrogen_value(); + if (hydrogen != NULL) { + if (hydrogen->IsChange()) { + HValue* changed_value = HChange::cast(hydrogen)->value(); + int use_id = 0; + const char* use_mnemo = "dead"; + if (hydrogen->UseCount() >= 1) { + HValue* use_value = hydrogen->uses().value(); + use_id = use_value->id(); + use_mnemo = use_value->Mnemonic(); + } + Comment(";;; @%d: %s. <of #%d %s for #%d %s>", + current_instruction_, instr->Mnemonic(), + changed_value->id(), changed_value->Mnemonic(), + use_id, use_mnemo); + } else { + Comment(";;; @%d: %s. <#%d>", current_instruction_, + instr->Mnemonic(), hydrogen->id()); + } + } else { + Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + } + } instr->CompileToNative(this); } } @@ -237,10 +304,31 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred build frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(!frame_is_built_); + ASSERT(info()->IsStub()); + frame_is_built_ = true; + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ add(fp, sp, Operand(2 * kPointerSize)); + } Comment(";;; Deferred code @%d: %s.", code->instruction_index(), code->instr()->Mnemonic()); code->Generate(); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred destroy frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(frame_is_built_); + __ pop(ip); + __ ldm(ia_w, sp, cp.bit() | fp.bit() | lr.bit()); + frame_is_built_ = false; + } __ jmp(code->exit()); } } @@ -262,24 +350,77 @@ bool LCodeGen::GenerateDeoptJumpTable() { // Each entry in the jump table generates one instruction and inlines one // 32bit data after it. if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + - deopt_jump_table_.length() * 2)) { + deopt_jump_table_.length() * 7)) { Abort("Generated code is too large"); } - // Block the constant pool emission during the jump table emission. - __ BlockConstPoolFor(deopt_jump_table_.length()); __ RecordComment("[ Deoptimisation jump table"); Label table_start; __ bind(&table_start); + Label needs_frame_not_call; + Label needs_frame_is_call; for (int i = 0; i < deopt_jump_table_.length(); i++) { __ bind(&deopt_jump_table_[i].label); - __ ldr(pc, MemOperand(pc, Assembler::kInstrSize - Assembler::kPcLoadDelta)); - __ dd(reinterpret_cast<uint32_t>(deopt_jump_table_[i].address)); + Address entry = deopt_jump_table_[i].address; + bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt; + Deoptimizer::BailoutType type = + is_lazy_deopt ? 
Deoptimizer::LAZY : Deoptimizer::EAGER; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + Comment(";;; jump table entry %d.", i); + } else { + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + } + if (deopt_jump_table_[i].needs_frame) { + __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry))); + if (is_lazy_deopt) { + if (needs_frame_is_call.is_bound()) { + __ b(&needs_frame_is_call); + } else { + __ bind(&needs_frame_is_call); + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ add(fp, sp, Operand(2 * kPointerSize)); + __ mov(lr, Operand(pc), LeaveCC, al); + __ mov(pc, ip); + } + } else { + if (needs_frame_not_call.is_bound()) { + __ b(&needs_frame_not_call); + } else { + __ bind(&needs_frame_not_call); + __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ add(fp, sp, Operand(2 * kPointerSize)); + __ mov(pc, ip); + } + } + } else { + if (is_lazy_deopt) { + __ mov(lr, Operand(pc), LeaveCC, al); + __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); + } else { + __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry))); + } + } + masm()->CheckConstPool(false, false); } - ASSERT(masm()->InstructionsGeneratedSince(&table_start) == - deopt_jump_table_.length() * 2); __ RecordComment("]"); + // Force constant pool emission at the end of the deopt jump table to make + // sure that no constant pools are emitted after. + masm()->CheckConstPool(true, false); + // The deoptimization jump table is the last part of the instruction // sequence. Mark the generated code as done unless we bailed out. 
if (!is_aborted()) status_ = DONE; @@ -299,8 +440,8 @@ Register LCodeGen::ToRegister(int index) const { } -DoubleRegister LCodeGen::ToDoubleRegister(int index) const { - return DoubleRegister::FromAllocationIndex(index); +DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { + return DwVfpRegister::FromAllocationIndex(index); } @@ -341,15 +482,15 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { } -DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { +DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { ASSERT(op->IsDoubleRegister()); return ToDoubleRegister(op->index()); } -DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch) { +DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DwVfpRegister dbl_scratch) { if (op->IsDoubleRegister()) { return ToDoubleRegister(op->index()); } else if (op->IsConstantOperand()) { @@ -394,8 +535,6 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const { int LCodeGen::ToInteger32(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); - ASSERT(constant->HasInteger32Value()); return constant->Integer32Value(); } @@ -424,11 +563,11 @@ Operand LCodeGen::ToOperand(LOperand* op) { return Operand(ToRegister(op)); } else if (op->IsDoubleRegister()) { Abort("ToOperand IsDoubleRegister unimplemented"); - return Operand(0); + return Operand::Zero(); } // Stack slots not implemented, use ToMemOperand instead. UNREACHABLE(); - return Operand(0); + return Operand::Zero(); } @@ -436,37 +575,20 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const { ASSERT(!op->IsRegister()); ASSERT(!op->IsDoubleRegister()); ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); - int index = op->index(); - if (index >= 0) { - // Local or spill slot. Skip the frame pointer, function, and - // context in the fixed part of the frame. - return MemOperand(fp, -(index + 3) * kPointerSize); - } else { - // Incoming parameter. Skip the return address. - return MemOperand(fp, -(index - 1) * kPointerSize); - } + return MemOperand(fp, StackSlotOffset(op->index())); } MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { ASSERT(op->IsDoubleStackSlot()); - int index = op->index(); - if (index >= 0) { - // Local or spill slot. Skip the frame pointer, function, context, - // and the first word of the double in the fixed part of the frame. - return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); - } else { - // Incoming parameter. Skip the return address and the first word of - // the double. - return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); - } + return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); } void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation, - int* arguments_index, - int* arguments_count) { + int* pushed_arguments_index, + int* pushed_arguments_count) { if (environment == NULL) return; // The translation includes one command per value in the environment. @@ -478,14 +600,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, // arguments index points to the first element of a sequence of tagged // values on the stack that represent the arguments. This needs to be // kept in sync with the LArgumentsElements implementation. 
- *arguments_index = -environment->parameter_count(); - *arguments_count = environment->parameter_count(); + *pushed_arguments_index = -environment->parameter_count(); + *pushed_arguments_count = environment->parameter_count(); WriteTranslation(environment->outer(), translation, - arguments_index, - arguments_count); - int closure_id = *info()->closure() != *environment->closure() + pushed_arguments_index, + pushed_arguments_count); + bool has_closure_id = !info()->closure().is_null() && + *info()->closure() != *environment->closure(); + int closure_id = has_closure_id ? DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -506,19 +630,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, ASSERT(height == 0); translation->BeginSetterStubFrame(closure_id); break; + case STUB: + translation->BeginCompiledStubFrame(); + break; case ARGUMENTS_ADAPTOR: translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); break; } // Inlined frames which push their arguments cause the index to be - // bumped and a new stack area to be used for materialization. - if (environment->entry() != NULL && - environment->entry()->arguments_pushed()) { - *arguments_index = *arguments_index < 0 - ? GetStackSlotCount() - : *arguments_index + *arguments_count; - *arguments_count = environment->entry()->arguments_count() + 1; + // bumped and another stack area to be used for materialization, + // otherwise actual argument values are unknown for inlined frames. + bool arguments_known = true; + int arguments_index = *pushed_arguments_index; + int arguments_count = *pushed_arguments_count; + if (environment->entry() != NULL) { + arguments_known = environment->entry()->arguments_pushed(); + arguments_index = arguments_index < 0 + ? GetStackSlotCount() : arguments_index + arguments_count; + arguments_count = environment->entry()->arguments_count() + 1; + if (environment->entry()->arguments_pushed()) { + *pushed_arguments_index = arguments_index; + *pushed_arguments_count = arguments_count; + } } for (int i = 0; i < translation_size; ++i) { @@ -533,8 +667,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_registers()[value->index()], environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } else if ( value->IsDoubleRegister() && environment->spilled_double_registers()[value->index()] != NULL) { @@ -544,8 +679,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_double_registers()[value->index()], false, false, - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -553,8 +689,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, value, environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -563,13 +700,15 @@ void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count) { if (op == NULL) { // TODO(twuerthinger): Introduce marker operands to indicate that this value // is not present and must be reconstructed from the deoptimizer. Currently // this is only used for the arguments object. 
- translation->StoreArgumentsObject(arguments_index, arguments_count); + translation->StoreArgumentsObject( + arguments_known, arguments_index, arguments_count); } else if (op->IsStackSlot()) { if (is_tagged) { translation->StoreStackSlot(op->index()); @@ -701,43 +840,83 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); ASSERT(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); + ASSERT(info()->IsOptimizing() || info()->IsStub()); + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? Deoptimizer::LAZY + : Deoptimizer::EAGER; + Address entry = + Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { Abort("bailout was not prepared"); return; } ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. - - if (FLAG_deopt_every_n_times == 1 && - info_->shared_info()->opt_count() == id) { + if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) { __ Jump(entry, RelocInfo::RUNTIME_ENTRY); return; } - if (FLAG_trap_on_deopt) __ stop("trap_on_deopt", cc); + if (FLAG_trap_on_deopt) { + __ stop("trap_on_deopt", cc); + } - if (cc == al) { - __ Jump(entry, RelocInfo::RUNTIME_ENTRY); + ASSERT(info()->IsStub() || frame_is_built_); + bool needs_lazy_deopt = info()->IsStub(); + if (cc == al && frame_is_built_) { + if (needs_lazy_deopt) { + __ Call(entry, RelocInfo::RUNTIME_ENTRY); + } else { + __ Jump(entry, RelocInfo::RUNTIME_ENTRY); + } } else { // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. if (deopt_jump_table_.is_empty() || - (deopt_jump_table_.last().address != entry)) { - deopt_jump_table_.Add(JumpTableEntry(entry), zone()); + (deopt_jump_table_.last().address != entry) || + (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) || + (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { + JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); + deopt_jump_table_.Add(table_entry, zone()); } __ b(cc, &deopt_jump_table_.last().label); } } +void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { + ZoneList<Handle<Map> > maps(1, zone()); + int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { + RelocInfo::Mode mode = it.rinfo()->rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT && + it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); + if (map->CanTransition()) { + maps.Add(map, zone()); + } + } + } +#ifdef VERIFY_HEAP + // This disables verification of weak embedded maps after full GC. + // AddDependentCode can cause a GC, which would observe the state where + // this code is not yet in the depended code lists of the embedded maps. 
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; +#endif + for (int i = 0; i < maps.length(); i++) { + maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); + } +} + + void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { int length = deoptimizations_.length(); if (length == 0) return; Handle<DeoptimizationInputData> data = factory()->NewDeoptimizationInputData(length, TENURED); - Handle<ByteArray> translations = translations_.CreateByteArray(); + Handle<ByteArray> translations = + translations_.CreateByteArray(isolate()->factory()); data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); @@ -905,39 +1084,39 @@ void LCodeGen::DoCallStub(LCallStub* instr) { switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpConstructResult: { RegExpConstructResultStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::RegExpExec: { RegExpExecStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { SubStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::NumberToString: { NumberToStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringAdd: { StringAddStub stub(NO_STRING_ADD_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { StringCompareStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::TranscendentalCache: { __ ldr(r0, MemOperand(sp, 0)); TranscendentalCacheStub stub(instr->transcendental_type(), TranscendentalCacheStub::TAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } default: @@ -962,14 +1141,14 @@ void LCodeGen::DoModI(LModI* instr) { if (divisor < 0) divisor = -divisor; Label positive_dividend, done; - __ cmp(dividend, Operand(0)); + __ cmp(dividend, Operand::Zero()); __ b(pl, &positive_dividend); - __ rsb(result, dividend, Operand(0)); + __ rsb(result, dividend, Operand::Zero()); __ and_(result, result, Operand(divisor - 1), SetCC); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { DeoptimizeIf(eq, instr->environment()); } - __ rsb(result, result, Operand(0)); + __ rsb(result, result, Operand::Zero()); __ b(&done); __ bind(&positive_dividend); __ and_(result, dividend, Operand(divisor - 1)); @@ -984,11 +1163,21 @@ void LCodeGen::DoModI(LModI* instr) { Label done; if (CpuFeatures::IsSupported(SUDIV)) { - CpuFeatures::Scope scope(SUDIV); + CpuFeatureScope scope(masm(), SUDIV); // Check for x % 0. if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand(0)); + __ cmp(right, Operand::Zero()); + DeoptimizeIf(eq, instr->environment()); + } + + // Check for (kMinInt % -1). 
+ if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + Label left_not_min_int; + __ cmp(left, Operand(kMinInt)); + __ b(ne, &left_not_min_int); + __ cmp(right, Operand(-1)); DeoptimizeIf(eq, instr->environment()); + __ bind(&left_not_min_int); } // For r3 = r1 % r2; we can have the following ARM code @@ -997,11 +1186,11 @@ void LCodeGen::DoModI(LModI* instr) { __ sdiv(result, left, right); __ mls(result, result, right, left); - __ cmp(result, Operand(0)); + __ cmp(result, Operand::Zero()); __ b(ne, &done); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ cmp(left, Operand(0)); + __ cmp(left, Operand::Zero()); DeoptimizeIf(lt, instr->environment()); } } else { @@ -1020,16 +1209,18 @@ void LCodeGen::DoModI(LModI* instr) { Label vfp_modulo, both_positive, right_negative; + CpuFeatureScope scope(masm(), VFP2); + // Check for x % 0. if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand(0)); + __ cmp(right, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); } __ Move(result, left); // (0 % x) must yield 0 (if x is finite, which is the case here). - __ cmp(left, Operand(0)); + __ cmp(left, Operand::Zero()); __ b(eq, &done); // Preload right in a vfp register. __ vmov(divisor.low(), right); @@ -1049,7 +1240,7 @@ void LCodeGen::DoModI(LModI* instr) { __ bind(&right_negative); // Negate right. The sign of the divisor does not matter. - __ rsb(right, right, Operand(0)); + __ rsb(right, right, Operand::Zero()); __ bind(&both_positive); const int kUnfolds = 3; @@ -1100,7 +1291,7 @@ void LCodeGen::DoModI(LModI* instr) { // Check for -0. __ sub(scratch2, left, scratch, SetCC); __ b(ne, &ok); - __ cmp(left, Operand(0)); + __ cmp(left, Operand::Zero()); DeoptimizeIf(mi, instr->environment()); __ bind(&ok); // Load the result and we are done. @@ -1135,11 +1326,11 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant( if (divisor > 0) { __ Move(result, dividend); } else { - __ rsb(result, dividend, Operand(0), SetCC); + __ rsb(result, dividend, Operand::Zero(), SetCC); DeoptimizeIf(vs, environment); } // Compute the remainder. - __ mov(remainder, Operand(0)); + __ mov(remainder, Operand::Zero()); return; default: @@ -1157,7 +1348,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant( // handled separately. if (divisor < 0) { ASSERT(divisor != -1); - __ rsb(result, result, Operand(0)); + __ rsb(result, result, Operand::Zero()); } // Compute the remainder. if (divisor > 0) { @@ -1193,7 +1384,7 @@ void LCodeGen::EmitSignedIntegerDivisionByConstant( __ mov(scratch, Operand(scratch, ASR, s)); } __ add(result, scratch, Operand(dividend, LSR, 31)); - if (divisor < 0) __ rsb(result, result, Operand(0)); + if (divisor < 0) __ rsb(result, result, Operand::Zero()); // Compute the remainder. __ mov(ip, Operand(divisor)); // This sequence could be replaced with 'mls' when @@ -1221,6 +1412,42 @@ void LCodeGen::DoDivI(LDivI* instr) { LDivI* instr_; }; + if (instr->hydrogen()->HasPowerOf2Divisor()) { + Register dividend = ToRegister(instr->left()); + int32_t divisor = + HConstant::cast(instr->hydrogen()->right())->Integer32Value(); + int32_t test_value = 0; + int32_t power = 0; + + if (divisor > 0) { + test_value = divisor - 1; + power = WhichPowerOf2(divisor); + } else { + // Check for (0 / -x) that will produce negative zero. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ tst(dividend, Operand(dividend)); + DeoptimizeIf(eq, instr->environment()); + } + // Check for (kMinInt / -1). 
+ if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + __ cmp(dividend, Operand(kMinInt)); + DeoptimizeIf(eq, instr->environment()); + } + test_value = - divisor - 1; + power = WhichPowerOf2(-divisor); + } + + if (test_value != 0) { + // Deoptimize if remainder is not 0. + __ tst(dividend, Operand(test_value)); + DeoptimizeIf(ne, instr->environment()); + __ mov(dividend, Operand(dividend, ASR, power)); + } + if (divisor < 0) __ rsb(dividend, dividend, Operand(0)); + + return; + } + const Register left = ToRegister(instr->left()); const Register right = ToRegister(instr->right()); const Register scratch = scratch0(); @@ -1228,21 +1455,21 @@ void LCodeGen::DoDivI(LDivI* instr) { // Check for x / 0. if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { - __ cmp(right, Operand(0)); + __ cmp(right, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); } // Check for (0 / -x) that will produce negative zero. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { Label left_not_zero; - __ cmp(left, Operand(0)); + __ cmp(left, Operand::Zero()); __ b(ne, &left_not_zero); - __ cmp(right, Operand(0)); + __ cmp(right, Operand::Zero()); DeoptimizeIf(mi, instr->environment()); __ bind(&left_not_zero); } - // Check for (-kMinInt / -1). + // Check for (kMinInt / -1). if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { Label left_not_min_int; __ cmp(left, Operand(kMinInt)); @@ -1289,31 +1516,98 @@ void LCodeGen::DoDivI(LDivI* instr) { } +void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { + DwVfpRegister addend = ToDoubleRegister(instr->addend()); + DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); + DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); + + // This is computed in-place. + ASSERT(addend.is(ToDoubleRegister(instr->result()))); + + __ vmla(addend, multiplier, multiplicand); +} + + +void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { + DwVfpRegister minuend = ToDoubleRegister(instr->minuend()); + DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); + DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); + + // This is computed in-place. + ASSERT(minuend.is(ToDoubleRegister(instr->result()))); + + __ vmls(minuend, multiplier, multiplicand); +} + + void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { const Register result = ToRegister(instr->result()); const Register left = ToRegister(instr->left()); const Register remainder = ToRegister(instr->temp()); const Register scratch = scratch0(); - // We only optimize this for division by constants, because the standard - // integer division routine is usually slower than transitionning to VFP. - // This could be optimized on processors with SDIV available. - ASSERT(instr->right()->IsConstantOperand()); - int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); - if (divisor < 0) { - __ cmp(left, Operand(0)); + if (!CpuFeatures::IsSupported(SUDIV)) { + // If the CPU doesn't support sdiv instruction, we only optimize when we + // have magic numbers for the divisor. The standard integer division routine + // is usually slower than transitionning to VFP. 
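Both branches that follow compute a flooring division: the hardware sdiv (and the magic-number sequence alike) produce a truncating quotient, which is then decremented whenever the remainder is non-zero and has the opposite sign from the divisor. A small standalone C++ sketch of that correction, using plain C++ integer division as the truncating primitive (function name is illustrative only):

    #include <cassert>

    // Truncating division, corrected to floor division as in the code below.
    static int FloorDiv(int dividend, int divisor) {
      int quotient = dividend / divisor;              // truncates toward zero
      int remainder = dividend - quotient * divisor;
      // If there is a remainder and it disagrees in sign with the divisor,
      // the truncated quotient is one too large for floor semantics.
      if (remainder != 0 && ((remainder < 0) != (divisor < 0))) --quotient;
      return quotient;
    }

    int main() {
      assert(FloorDiv(7, 2) == 3);
      assert(FloorDiv(-7, 2) == -4);   // plain truncation would give -3
      assert(FloorDiv(7, -2) == -4);
      assert(FloorDiv(-7, -2) == 3);
      return 0;
    }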
+ ASSERT(instr->right()->IsConstantOperand()); + int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right())); + ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor)); + if (divisor < 0) { + __ cmp(left, Operand::Zero()); + DeoptimizeIf(eq, instr->environment()); + } + EmitSignedIntegerDivisionByConstant(result, + left, + divisor, + remainder, + scratch, + instr->environment()); + // We performed a truncating division. Correct the result if necessary. + __ cmp(remainder, Operand::Zero()); + __ teq(remainder, Operand(divisor), ne); + __ sub(result, result, Operand(1), LeaveCC, mi); + } else { + CpuFeatureScope scope(masm(), SUDIV); + const Register right = ToRegister(instr->right()); + + // Check for x / 0. + __ cmp(right, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); + + // Check for (kMinInt / -1). + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + Label left_not_min_int; + __ cmp(left, Operand(kMinInt)); + __ b(ne, &left_not_min_int); + __ cmp(right, Operand(-1)); + DeoptimizeIf(eq, instr->environment()); + __ bind(&left_not_min_int); + } + + // Check for (0 / -x) that will produce negative zero. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ cmp(right, Operand::Zero()); + __ cmp(left, Operand::Zero(), mi); + // "right" can't be null because the code would have already been + // deoptimized. The Z flag is set only if (right < 0) and (left == 0). + // In this case we need to deoptimize to produce a -0. + DeoptimizeIf(eq, instr->environment()); + } + + Label done; + __ sdiv(result, left, right); + // If both operands have the same sign then we are done. + __ eor(remainder, left, Operand(right), SetCC); + __ b(pl, &done); + + // Check if the result needs to be corrected. + __ mls(remainder, result, right, left); + __ cmp(remainder, Operand::Zero()); + __ sub(result, result, Operand(1), LeaveCC, ne); + + __ bind(&done); } - EmitSignedIntegerDivisionByConstant(result, - left, - divisor, - remainder, - scratch, - instr->environment()); - // We operated a truncating division. Correct the result if necessary. - __ cmp(remainder, Operand(0)); - __ teq(remainder, Operand(divisor), ne); - __ sub(result, result, Operand(1), LeaveCC, mi); } @@ -1321,6 +1615,7 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map, LOperand* left_argument, LOperand* right_argument, Token::Value op) { + CpuFeatureScope vfp_scope(masm(), VFP2); Register left = ToRegister(left_argument); Register right = ToRegister(right_argument); @@ -1367,22 +1662,22 @@ void LCodeGen::DoMulI(LMulI* instr) { if (bailout_on_minus_zero && (constant < 0)) { // The case of a null constant will be handled separately. // If constant is negative and left is null, the result should be -0. - __ cmp(left, Operand(0)); + __ cmp(left, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); } switch (constant) { case -1: - __ rsb(result, left, Operand(0)); + __ rsb(result, left, Operand::Zero()); break; case 0: if (bailout_on_minus_zero) { // If left is strictly negative and the constant is null, the // result is -0. Deoptimize if required, otherwise return 0. - __ cmp(left, Operand(0)); + __ cmp(left, Operand::Zero()); DeoptimizeIf(mi, instr->environment()); } - __ mov(result, Operand(0)); + __ mov(result, Operand::Zero()); break; case 1: __ Move(result, left); @@ -1409,7 +1704,7 @@ void LCodeGen::DoMulI(LMulI* instr) { } // Correct the sign of the result is the constant is negative. 
- if (constant < 0) __ rsb(result, result, Operand(0)); + if (constant < 0) __ rsb(result, result, Operand::Zero()); } else { // Generate standard code. @@ -1436,9 +1731,9 @@ void LCodeGen::DoMulI(LMulI* instr) { if (bailout_on_minus_zero) { // Bail out if the result is supposed to be negative zero. Label done; - __ cmp(result, Operand(0)); + __ cmp(result, Operand::Zero()); __ b(ne, &done); - __ cmp(ToRegister(instr->temp()), Operand(0)); + __ cmp(ToRegister(instr->temp()), Operand::Zero()); DeoptimizeIf(mi, instr->environment()); __ bind(&done); } @@ -1489,6 +1784,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) { // Mask the right_op operand. __ and_(scratch, ToRegister(right_op), Operand(0x1F)); switch (instr->op()) { + case Token::ROR: + __ mov(result, Operand(left, ROR, scratch)); + break; case Token::SAR: __ mov(result, Operand(left, ASR, scratch)); break; @@ -1512,6 +1810,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) { int value = ToInteger32(LConstantOperand::cast(right_op)); uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); switch (instr->op()) { + case Token::ROR: + if (shift_count != 0) { + __ mov(result, Operand(left, ROR, shift_count)); + } else { + __ Move(result, left); + } + break; case Token::SAR: if (shift_count != 0) { __ mov(result, Operand(left, ASR, shift_count)); @@ -1566,6 +1871,27 @@ void LCodeGen::DoSubI(LSubI* instr) { } +void LCodeGen::DoRSubI(LRSubI* instr) { + LOperand* left = instr->left(); + LOperand* right = instr->right(); + LOperand* result = instr->result(); + bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); + SBit set_cond = can_overflow ? SetCC : LeaveCC; + + if (right->IsStackSlot() || right->IsArgument()) { + Register right_reg = EmitLoadRegister(right, ip); + __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); + } else { + ASSERT(right->IsRegister() || right->IsConstantOperand()); + __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); + } + + if (can_overflow) { + DeoptimizeIf(vs, instr->environment()); + } +} + + void LCodeGen::DoConstantI(LConstantI* instr) { ASSERT(instr->result()->IsRegister()); __ mov(ToRegister(instr->result()), Operand(instr->value())); @@ -1575,6 +1901,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) { void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DwVfpRegister result = ToDoubleRegister(instr->result()); + CpuFeatureScope scope(masm(), VFP2); double v = instr->value(); __ Vmov(result, v, scratch0()); } @@ -1686,6 +2013,15 @@ void LCodeGen::DoDateField(LDateField* instr) { } +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + SeqStringSetCharGenerator::Generate(masm(), + instr->encoding(), + ToRegister(instr->string()), + ToRegister(instr->index()), + ToRegister(instr->value())); +} + + void LCodeGen::DoBitNotI(LBitNotI* instr) { Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); @@ -1743,9 +2079,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); - DoubleRegister left_reg = ToDoubleRegister(left); - DoubleRegister right_reg = ToDoubleRegister(right); - DoubleRegister result_reg = ToDoubleRegister(instr->result()); + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister left_reg = ToDoubleRegister(left); + DwVfpRegister right_reg = ToDoubleRegister(right); + DwVfpRegister result_reg = ToDoubleRegister(instr->result()); 
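The double-register path that follows cannot simply return the smaller or larger operand: for JavaScript semantics Math.min(+0, -0) must be -0, Math.max(+0, -0) must be +0, and a NaN operand must propagate, none of which falls out of an ordinary compare. A minimal C++ sketch of a minimum that respects those corner cases (the function name and structure are illustrative, not the ARM sequence used here):

    #include <cmath>
    #include <cstdio>
    #include <limits>

    // Double minimum with JavaScript-style handling of NaN and signed zero.
    static double MinWithJsCorners(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) {
        return std::numeric_limits<double>::quiet_NaN();  // NaN propagates
      }
      if (left == 0.0 && right == 0.0) {
        // +0 and -0 compare equal; prefer -0 if either operand carries it.
        return std::signbit(left) ? left : right;
      }
      return left < right ? left : right;
    }

    int main() {
      std::printf("%d\n", std::signbit(MinWithJsCorners(0.0, -0.0)));  // 1
      std::printf("%f\n", MinWithJsCorners(1.5, NAN));                 // nan
      return 0;
    }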
Label check_nan_left, check_zero, return_left, return_right, done; __ VFPCompareAndSetFlags(left_reg, right_reg); __ b(vs, &check_nan_left); @@ -1788,9 +2125,10 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { - DoubleRegister left = ToDoubleRegister(instr->left()); - DoubleRegister right = ToDoubleRegister(instr->right()); - DoubleRegister result = ToDoubleRegister(instr->result()); + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister left = ToDoubleRegister(instr->left()); + DwVfpRegister right = ToDoubleRegister(instr->right()); + DwVfpRegister result = ToDoubleRegister(instr->result()); switch (instr->op()) { case Token::ADD: __ vadd(result, left, right); @@ -1836,7 +2174,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { // Block literal pool emission to ensure nop indicating no inlined smi code // is in the correct position. Assembler::BlockConstPoolScope block_const_pool(masm()); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ nop(); // Signals no inlined code. } @@ -1875,10 +2213,11 @@ void LCodeGen::DoBranch(LBranch* instr) { Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32()) { Register reg = ToRegister(instr->value()); - __ cmp(reg, Operand(0)); + __ cmp(reg, Operand::Zero()); EmitBranch(true_block, false_block, ne); } else if (r.IsDouble()) { - DoubleRegister reg = ToDoubleRegister(instr->value()); + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); // Test the double value. Zero and NaN are false. @@ -1893,7 +2232,7 @@ void LCodeGen::DoBranch(LBranch* instr) { __ CompareRoot(reg, Heap::kTrueValueRootIndex); EmitBranch(true_block, false_block, eq); } else if (type.IsSmi()) { - __ cmp(reg, Operand(0)); + __ cmp(reg, Operand::Zero()); EmitBranch(true_block, false_block, ne); } else { Label* true_label = chunk_->GetAssemblyLabel(true_block); @@ -1923,7 +2262,7 @@ void LCodeGen::DoBranch(LBranch* instr) { if (expected.Contains(ToBooleanStub::SMI)) { // Smis: 0 -> false, all other -> true. - __ cmp(reg, Operand(0)); + __ cmp(reg, Operand::Zero()); __ b(eq, false_label); __ JumpIfSmi(reg, true_label); } else if (expected.NeedsMap()) { @@ -1956,15 +2295,16 @@ void LCodeGen::DoBranch(LBranch* instr) { __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); __ b(ge, ¬_string); __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); - __ cmp(ip, Operand(0)); + __ cmp(ip, Operand::Zero()); __ b(ne, true_label); __ b(false_label); __ bind(¬_string); } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { + CpuFeatureScope scope(masm(), VFP2); // heap number -> false iff +0, -0, or NaN. - DoubleRegister dbl_scratch = double_scratch0(); + DwVfpRegister dbl_scratch = double_scratch0(); Label not_heap_number; __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); __ b(ne, ¬_heap_number); @@ -2042,6 +2382,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { + CpuFeatureScope scope(masm(), VFP2); // Compare left and right operands as doubles and load the // resulting flags into the normal status register. 
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); @@ -2247,9 +2588,10 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); - __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined. + // This instruction also signals no smi code inlined. + __ cmp(r0, Operand::Zero()); Condition condition = ComputeCompareCondition(op); @@ -2333,7 +2675,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ JumpIfSmi(input, is_false); - if (class_name->IsEqualTo(CStrVector("Function"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { // Assuming the following assertions, we can use the same compares to test // for both being a function type and being in the object type range. STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); @@ -2364,7 +2706,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, // Objects with a non-function constructor have class 'Object'. __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); - if (class_name->IsEqualTo(CStrVector("Object"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { __ b(ne, is_true); } else { __ b(ne, is_false); @@ -2375,12 +2717,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); __ ldr(temp, FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is a symbol because it's a literal. - // The name in the constructor is a symbol because of the way the context is - // booted. This routine isn't expected to work for random API-created + // The class name we are testing against is internalized since it's a literal. + // The name in the constructor is internalized because of the way the context + // is booted. This routine isn't expected to work for random API-created // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are symbols it is sufficient to use an identity - // comparison. + // syntax. Since both sides are internalized it is sufficient to use an + // identity comparison. __ cmp(temp, Operand(class_name)); // End with the answer in flags. } @@ -2421,9 +2763,9 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1. InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); } @@ -2473,7 +2815,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { // We use Factory::the_hole_value() on purpose instead of loading from the // root array to force relocation to be able to later patch with // the cached map. 
- PredictableCodeSizeScope predictable(masm_); + PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize); Handle<JSGlobalPropertyCell> cell = factory()->NewJSGlobalPropertyCell(factory()->the_hole_value()); __ mov(ip, Operand(Handle<Object>(cell))); @@ -2537,7 +2879,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, static const int kAdditionalDelta = 5; // Make sure that code size is predicable, since we use specific constants // offsets in the code to find embedded values.. - PredictableCodeSizeScope predictable(masm_); + PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize); int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; Label before_push_delta; __ bind(&before_push_delta); @@ -2550,7 +2892,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, __ nop(); } __ StoreToSafepointRegisterSlot(temp, temp); - CallCodeGeneric(stub.GetCode(), + CallCodeGeneric(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -2562,12 +2904,21 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, } +void LCodeGen::DoInstanceSize(LInstanceSize* instr) { + Register object = ToRegister(instr->object()); + Register result = ToRegister(instr->result()); + __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset)); + __ ldrb(result, FieldMemOperand(result, Map::kInstanceSizeOffset)); +} + + void LCodeGen::DoCmpT(LCmpT* instr) { Token::Value op = instr->op(); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); - __ cmp(r0, Operand(0)); // This instruction also signals no smi code inlined. + // This instruction also signals no smi code inlined. + __ cmp(r0, Operand::Zero()); Condition condition = ComputeCompareCondition(op); __ LoadRoot(ToRegister(instr->result()), @@ -2580,16 +2931,41 @@ void LCodeGen::DoCmpT(LCmpT* instr) { void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { // Push the return value on the stack as the parameter. // Runtime::TraceExit returns its parameter in r0. 
__ push(r0); __ CallRuntime(Runtime::kTraceExit, 1); } - int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; - __ mov(sp, fp); - __ ldm(ia_w, sp, fp.bit() | lr.bit()); - __ add(sp, sp, Operand(sp_delta)); + if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + ASSERT(NeedsEagerFrame()); + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + int count = 0; + while (!save_iterator.Done()) { + __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(sp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } + } + if (NeedsEagerFrame()) { + __ mov(sp, fp); + __ ldm(ia_w, sp, fp.bit() | lr.bit()); + + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + int32_t sp_delta = (parameter_count + 1) * kPointerSize; + if (sp_delta != 0) { + __ add(sp, sp, Operand(sp_delta)); + } + } else { + Register reg = ToRegister(instr->parameter_count()); + __ add(reg, reg, Operand(1)); + __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); + } + } __ Jump(lr); } @@ -2918,7 +3294,179 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { } -void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { + Register external_pointer = ToRegister(instr->elements()); + Register key = no_reg; + ElementsKind elements_kind = instr->elements_kind(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(elements_kind); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? (element_size_shift - kSmiTagSize) : element_size_shift; + int additional_offset = instr->additional_index() << element_size_shift; + + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || + elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + DwVfpRegister result = ToDoubleRegister(instr->result()); + Operand operand = key_is_constant + ? Operand(constant_key << element_size_shift) + : Operand(key, LSL, shift_size); + __ add(scratch0(), external_pointer, operand); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset); + __ vcvt_f64_f32(result, kScratchDoubleReg.low()); + } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ vldr(result, scratch0(), additional_offset); + } + } else { + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + Register value = external_pointer; + __ ldr(value, MemOperand(scratch0(), additional_offset)); + __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask)); + + __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits)); + __ and_(scratch0(), scratch0(), + Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); + + Label exponent_rebiased; + __ teq(scratch0(), Operand(0x00)); + __ b(eq, &exponent_rebiased); + + __ teq(scratch0(), Operand(0xff)); + __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq); + __ b(eq, &exponent_rebiased); + + // Rebias exponent. 
+ __ add(scratch0(), + scratch0(), + Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); + + __ bind(&exponent_rebiased); + __ and_(sfpd_hi, value, Operand(kBinary32SignMask)); + __ orr(sfpd_hi, sfpd_hi, + Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord)); + + // Shift mantissa. + static const int kMantissaShiftForHiWord = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; + + static const int kMantissaShiftForLoWord = + kBitsPerInt - kMantissaShiftForHiWord; + + __ orr(sfpd_hi, sfpd_hi, + Operand(sfpd_lo, LSR, kMantissaShiftForHiWord)); + __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord)); + + } else { + __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset)); + __ ldr(sfpd_hi, MemOperand(scratch0(), + additional_offset + kPointerSize)); + } + } + } else { + Register result = ToRegister(instr->result()); + MemOperand mem_operand = PrepareKeyedOperand( + key, external_pointer, key_is_constant, constant_key, + element_size_shift, shift_size, + instr->additional_index(), additional_offset); + switch (elements_kind) { + case EXTERNAL_BYTE_ELEMENTS: + __ ldrsb(result, mem_operand); + break; + case EXTERNAL_PIXEL_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + __ ldrb(result, mem_operand); + break; + case EXTERNAL_SHORT_ELEMENTS: + __ ldrsh(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + __ ldrh(result, mem_operand); + break; + case EXTERNAL_INT_ELEMENTS: + __ ldr(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + __ ldr(result, mem_operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + __ cmp(result, Operand(0x80000000)); + DeoptimizeIf(cs, instr->environment()); + } + break; + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case DICTIONARY_ELEMENTS: + case NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { + Register elements = ToRegister(instr->elements()); + bool key_is_constant = instr->key()->IsConstantOperand(); + Register key = no_reg; + DwVfpRegister result = ToDoubleRegister(instr->result()); + Register scratch = scratch0(); + + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? 
(element_size_shift - kSmiTagSize) : element_size_shift; + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + + int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + + ((constant_key + instr->additional_index()) << element_size_shift); + if (!key_is_constant) { + __ add(elements, elements, Operand(key, LSL, shift_size)); + } + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + __ add(elements, elements, Operand(base_offset)); + __ vldr(result, elements, 0); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + __ cmp(scratch, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } + } else { + __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); + __ ldr(sfpd_lo, MemOperand(elements, base_offset)); + if (instr->hydrogen()->RequiresHoleCheck()) { + ASSERT(kPointerSize == sizeof(kHoleNanLower32)); + __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); + DeoptimizeIf(eq, instr->environment()); + } + } +} + + +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { Register elements = ToRegister(instr->elements()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -2932,7 +3480,7 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { store_base = elements; } else { Register key = EmitLoadRegister(instr->key(), scratch0()); - // Even though the HLoadKeyedFastElement instruction forces the input + // Even though the HLoadKeyed instruction forces the input // representation for the key to be an integer, the input gets replaced // during bound check elimination with the index argument to the bounds // check, which can be tagged, so that case must be handled here, too. @@ -2960,46 +3508,14 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { } -void LCodeGen::DoLoadKeyedFastDoubleElement( - LLoadKeyedFastDoubleElement* instr) { - Register elements = ToRegister(instr->elements()); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - DwVfpRegister result = ToDoubleRegister(instr->result()); - Register scratch = scratch0(); - - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_external()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); } else { - key = ToRegister(instr->key()); + DoLoadKeyedFixedArray(instr); } - - Operand operand = key_is_constant - ? 
Operand(((constant_key + instr->additional_index()) << - element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag) - : Operand(key, LSL, shift_size); - __ add(elements, elements, operand); - if (!key_is_constant) { - __ add(elements, elements, - Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + - (instr->additional_index() << element_size_shift))); - } - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - __ cmp(scratch, Operand(kHoleNanUpper32)); - DeoptimizeIf(eq, instr->environment()); - } - - __ vldr(result, elements, 0); } @@ -3039,87 +3555,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, } -void LCodeGen::DoLoadKeyedSpecializedArrayElement( - LLoadKeyedSpecializedArrayElement* instr) { - Register external_pointer = ToRegister(instr->external_pointer()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = instr->additional_index() << element_size_shift; - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || - elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); - DwVfpRegister result = ToDoubleRegister(instr->result()); - Operand operand = key_is_constant - ? Operand(constant_key << element_size_shift) - : Operand(key, LSL, shift_size); - __ add(scratch0(), external_pointer, operand); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ vldr(result.low(), scratch0(), additional_offset); - __ vcvt_f64_f32(result, result.low()); - } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ vldr(result, scratch0(), additional_offset); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - instr->additional_index(), additional_offset); - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ ldrsb(result, mem_operand); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ ldrb(result, mem_operand); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ ldrsh(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ ldrh(result, mem_operand); - break; - case EXTERNAL_INT_ELEMENTS: - __ ldr(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ ldr(result, mem_operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ cmp(result, Operand(0x80000000)); - DeoptimizeIf(cs, instr->environment()); - } - break; - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r1)); ASSERT(ToRegister(instr->key()).is(r0)); @@ -3246,7 +3681,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { // stack. Label invoke, loop; // length is a small non-negative integer, due to the test above. - __ cmp(length, Operand(0)); + __ cmp(length, Operand::Zero()); __ b(eq, &invoke); __ bind(&loop); __ ldr(scratch, MemOperand(elements, length, LSL, 2)); @@ -3292,8 +3727,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) { void LCodeGen::DoContext(LContext* instr) { + // If there is a non-return use, the context must be moved to a register. Register result = ToRegister(instr->result()); - __ mov(result, cp); + for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) { + if (!it.value()->IsReturn()) { + __ mov(result, cp); + return; + } + } } @@ -3450,18 +3891,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) { void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); - __ cmp(input, Operand(0)); + __ cmp(input, Operand::Zero()); __ Move(result, input, pl); // We can make rsb conditional because the previous cmp instruction // will clear the V (overflow) flag and rsb won't set this flag // if input is positive. - __ rsb(result, input, Operand(0), SetCC, mi); + __ rsb(result, input, Operand::Zero(), SetCC, mi); // Deoptimize on overflow. DeoptimizeIf(vs, instr->environment()); } void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), VFP2); // Class for deferred case. 
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { public: @@ -3498,106 +3940,81 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); - Register scratch = scratch0(); + Register input_high = scratch0(); + Label done, exact; - __ EmitVFPTruncate(kRoundToMinusInf, - result, - input, - scratch, - double_scratch0()); - DeoptimizeIf(ne, instr->environment()); + __ vmov(input_high, input.high()); + __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); + DeoptimizeIf(al, instr->environment()); + __ bind(&exact); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Test for -0. - Label done; - __ cmp(result, Operand(0)); + __ cmp(result, Operand::Zero()); __ b(ne, &done); - __ vmov(scratch, input.high()); - __ tst(scratch, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr->environment()); - __ bind(&done); + __ cmp(input_high, Operand::Zero()); + DeoptimizeIf(mi, instr->environment()); } + __ bind(&done); } void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); - Register scratch = scratch0(); - Label done, check_sign_on_zero; - - // Extract exponent bits. - __ vmov(result, input.high()); - __ ubfx(scratch, - result, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - - // If the number is in ]-0.5, +0.5[, the result is +/- 0. - __ cmp(scratch, Operand(HeapNumber::kExponentBias - 2)); - __ mov(result, Operand(0), LeaveCC, le); + DwVfpRegister input_plus_dot_five = double_scratch1; + Register input_high = scratch0(); + DwVfpRegister dot_five = double_scratch0(); + Label convert, done; + + __ Vmov(dot_five, 0.5, scratch0()); + __ vabs(double_scratch1, input); + __ VFPCompareAndSetFlags(double_scratch1, dot_five); + // If input is in [-0.5, -0], the result is -0. + // If input is in [+0, +0.5[, the result is +0. + // If the input is +0.5, the result is 1. + __ b(hi, &convert); // Out of [-0.5, +0.5]. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ b(le, &check_sign_on_zero); - } else { - __ b(le, &done); - } - - // The following conversion will not work with numbers - // outside of ]-2^32, 2^32[. - __ cmp(scratch, Operand(HeapNumber::kExponentBias + 32)); - DeoptimizeIf(ge, instr->environment()); - - __ Vmov(double_scratch0(), 0.5, scratch); - __ vadd(double_scratch0(), input, double_scratch0()); - - // Save the original sign for later comparison. - __ and_(scratch, result, Operand(HeapNumber::kSignMask)); - - // Check sign of the result: if the sign changed, the input - // value was in ]0.5, 0[ and the result should be -0. 
- __ vmov(result, double_scratch0().high()); - __ eor(result, result, Operand(scratch), SetCC); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - DeoptimizeIf(mi, instr->environment()); - } else { - __ mov(result, Operand(0), LeaveCC, mi); - __ b(mi, &done); - } - - __ EmitVFPTruncate(kRoundToMinusInf, - result, - double_scratch0(), - scratch, - double_scratch1); - DeoptimizeIf(ne, instr->environment()); + __ vmov(input_high, input.high()); + __ cmp(input_high, Operand::Zero()); + DeoptimizeIf(mi, instr->environment()); // [-0.5, -0]. + } + __ VFPCompareAndSetFlags(input, dot_five); + __ mov(result, Operand(1), LeaveCC, eq); // +0.5. + // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on + // flag kBailoutOnMinusZero. + __ mov(result, Operand::Zero(), LeaveCC, ne); + __ b(&done); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Test for -0. - __ cmp(result, Operand(0)); - __ b(ne, &done); - __ bind(&check_sign_on_zero); - __ vmov(scratch, input.high()); - __ tst(scratch, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(ne, instr->environment()); - } + __ bind(&convert); + __ vadd(input_plus_dot_five, input, dot_five); + __ vmov(input_high, input_plus_dot_five.high()); + // Reuse dot_five (double_scratch0) as we no longer need this value. + __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), + &done, &done); + DeoptimizeIf(al, instr->environment()); __ bind(&done); } void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); + DwVfpRegister result = ToDoubleRegister(instr->result()); __ vsqrt(result, input); } void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { - DoubleRegister input = ToDoubleRegister(instr->value()); - DoubleRegister result = ToDoubleRegister(instr->result()); - DoubleRegister temp = ToDoubleRegister(instr->temp()); + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); + DwVfpRegister result = ToDoubleRegister(instr->result()); + DwVfpRegister temp = ToDoubleRegister(instr->temp()); // Note that according to ECMA-262 15.8.2.13: // Math.pow(-Infinity, 0.5) == Infinity @@ -3616,6 +4033,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { void LCodeGen::DoPower(LPower* instr) { + CpuFeatureScope scope(masm(), VFP2); Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. @@ -3648,6 +4066,7 @@ void LCodeGen::DoPower(LPower* instr) { void LCodeGen::DoRandom(LRandom* instr) { + CpuFeatureScope scope(masm(), VFP2); class DeferredDoRandom: public LDeferredCode { public: DeferredDoRandom(LCodeGen* codegen, LRandom* instr) @@ -3676,7 +4095,7 @@ void LCodeGen::DoRandom(LRandom* instr) { // Load state[0]. __ ldr(r1, FieldMemOperand(r2, ByteArray::kHeaderSize)); - __ cmp(r1, Operand(0)); + __ cmp(r1, Operand::Zero()); __ b(eq, deferred->entry()); // Load state[1]. __ ldr(r0, FieldMemOperand(r2, ByteArray::kHeaderSize + kSeedSize)); @@ -3711,7 +4130,7 @@ void LCodeGen::DoRandom(LRandom* instr) { // Move 0x41300000xxxxxxxx (x = random bits) to VFP. __ vmov(d7, r0, r1); // Move 0x4130000000000000 to VFP. 
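// --- Illustrative aside, not part of the patch ---
// Why the vsub below yields a uniform double in [0, 1):
//   the high word 0x41300000 encodes sign 0, biased exponent 0x413 (i.e. 2^20)
//   and zero top-of-mantissa bits, while the low word holds the 32 random
//   bits r, so
//     value(d7) = 2^20 * (1 + r / 2^52) = 2^20 + r / 2^32.
// Subtracting d8 = 0x4130000000000000 (exactly 2^20) leaves r / 2^32, which
// lies in [0, 1); for example r = 0x80000000 gives exactly 0.5.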
- __ mov(r0, Operand(0, RelocInfo::NONE)); + __ mov(r0, Operand::Zero()); __ vmov(d8, r0, r1); // Subtract and store the result in the heap number. __ vsub(d7, d7, d8); @@ -3725,11 +4144,26 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { } +void LCodeGen::DoMathExp(LMathExp* instr) { + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister input = ToDoubleRegister(instr->value()); + DwVfpRegister result = ToDoubleRegister(instr->result()); + DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); + DwVfpRegister double_scratch2 = double_scratch0(); + Register temp1 = ToRegister(instr->temp1()); + Register temp2 = ToRegister(instr->temp2()); + + MathExpGenerator::EmitMathExp( + masm(), input, result, double_scratch1, double_scratch2, + temp1, temp2, scratch0()); +} + + void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::LOG, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3737,7 +4171,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::TAN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3745,7 +4179,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::COS, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3753,7 +4187,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(d2)); TranscendentalCacheStub stub(TranscendentalCache::SIN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3845,7 +4279,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { int arity = instr->arity(); CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3877,9 +4311,29 @@ void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->constructor()).is(r1)); ASSERT(ToRegister(instr->result()).is(r0)); + __ mov(r0, Operand(instr->arity())); + if (FLAG_optimize_constructed_arrays) { + // No cell in r2 for construct type feedback in optimized code + Handle<Object> undefined_value(isolate()->heap()->undefined_value(), + isolate()); + __ mov(r2, Operand(undefined_value)); + } CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); +} + + +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + ASSERT(ToRegister(instr->constructor()).is(r1)); + ASSERT(ToRegister(instr->result()).is(r0)); + ASSERT(FLAG_optimize_constructed_arrays); + __ mov(r0, Operand(instr->arity())); - CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + __ mov(r2, Operand(instr->hydrogen()->property_cell())); + Handle<Code> array_construct_code = + isolate()->builtins()->ArrayConstructCode(); + + 
CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); } @@ -3888,6 +4342,13 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) { } +void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { + Register result = ToRegister(instr->result()); + Register base = ToRegister(instr->base_object()); + __ add(result, base, Operand(instr->offset())); +} + + void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register object = ToRegister(instr->object()); Register value = ToRegister(instr->value()); @@ -3962,28 +4423,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { } -void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand) { - if (value->representation().IsTagged() && !value->type().IsSmi()) { - if (operand->IsRegister()) { - __ tst(ToRegister(operand), Operand(kSmiTagMask)); - } else { - __ mov(ip, ToOperand(operand)); - __ tst(ip, Operand(kSmiTagMask)); - } - DeoptimizeIf(ne, environment); - } -} - - void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->length(), - instr->length()); - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->index(), - instr->index()); + if (instr->hydrogen()->skip_check()) return; + if (instr->index()->IsConstantOperand()) { int constant_index = ToInteger32(LConstantOperand::cast(instr->index())); @@ -4000,102 +4442,9 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { } -void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->object()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = 0; - - // Do the store. - if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); - store_base = elements; - } else { - // Even though the HLoadKeyedFastElement instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsTagged()) { - __ add(scratch, elements, - Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); - } else { - __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); - } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); - } - __ str(value, FieldMemOperand(store_base, offset)); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - HType type = instr->hydrogen()->value()->type(); - SmiCheck check_needed = - type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
- __ add(key, store_base, Operand(offset - kHeapObjectTag)); - __ RecordWrite(elements, - key, - value, - kLRHasBeenSaved, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } -} - - -void LCodeGen::DoStoreKeyedFastDoubleElement( - LStoreKeyedFastDoubleElement* instr) { - DwVfpRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = no_reg; - Register scratch = scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - - // Calculate the effective address of the slot in the array to store the - // double value. - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - Operand operand = key_is_constant - ? Operand((constant_key << element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag) - : Operand(key, LSL, shift_size); - __ add(scratch, elements, operand); - if (!key_is_constant) { - __ add(scratch, scratch, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - } - - if (instr->NeedsCanonicalization()) { - // Check for NaN. All NaNs must be canonicalized. - __ VFPCompareAndSetFlags(value, value); - // Only load canonical NaN if the comparison above set the overflow. - __ Vmov(value, - FixedDoubleArray::canonical_not_the_hole_nan_as_double(), - no_reg, vs); - } - - __ vstr(value, scratch, instr->additional_index() << element_size_shift); -} - - -void LCodeGen::DoStoreKeyedSpecializedArrayElement( - LStoreKeyedSpecializedArrayElement* instr) { - - Register external_pointer = ToRegister(instr->external_pointer()); +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { + CpuFeatureScope scope(masm(), VFP2); + Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); bool key_is_constant = instr->key()->IsConstantOperand(); @@ -4115,7 +4464,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(masm(), VFP3); DwVfpRegister value(ToDoubleRegister(instr->value())); Operand operand(key_is_constant ? Operand(constant_key << element_size_shift) @@ -4164,6 +4513,115 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( } +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister value = ToDoubleRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = no_reg; + Register scratch = scratch0(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + + // Calculate the effective address of the slot in the array to store the + // double value. + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? 
(element_size_shift - kSmiTagSize) : element_size_shift; + Operand operand = key_is_constant + ? Operand((constant_key << element_size_shift) + + FixedDoubleArray::kHeaderSize - kHeapObjectTag) + : Operand(key, LSL, shift_size); + __ add(scratch, elements, operand); + if (!key_is_constant) { + __ add(scratch, scratch, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + } + + if (instr->NeedsCanonicalization()) { + // Check for NaN. All NaNs must be canonicalized. + __ VFPCompareAndSetFlags(value, value); + Label after_canonicalization; + + // Only load canonical NaN if the comparison above set the overflow. + __ b(vc, &after_canonicalization); + __ Vmov(value, + FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + + __ bind(&after_canonicalization); + } + + __ vstr(value, scratch, instr->additional_index() << element_size_shift); +} + + +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { + Register value = ToRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) + : no_reg; + Register scratch = scratch0(); + Register store_base = scratch; + int offset = 0; + + // Do the store. + if (instr->key()->IsConstantOperand()) { + ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + + instr->additional_index()); + store_base = elements; + } else { + // Even though the HLoadKeyed instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. + if (instr->hydrogen()->key()->representation().IsTagged()) { + __ add(scratch, elements, + Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize)); + } else { + __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); + } + offset = FixedArray::OffsetOfElementAt(instr->additional_index()); + } + __ str(value, FieldMemOperand(store_base, offset)); + + if (instr->hydrogen()->NeedsWriteBarrier()) { + HType type = instr->hydrogen()->value()->type(); + SmiCheck check_needed = + type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + // Compute address of modified element and store it into key register. 
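// Aside (editor's sketch, not part of the patch): RecordWrite below is the
// generational/incremental write barrier. Storing a heap object into a
// FixedArray slot can create an old->new pointer, so the address of the
// modified slot (left in 'key') is recorded in the remembered set. The
// SmiCheck argument controls whether the barrier still tests the value for a
// Smi at run time (INLINE_SMI_CHECK) or may skip that test because the value
// is statically known to be a heap object (OMIT_SMI_CHECK).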
+ __ add(key, store_base, Operand(offset - kHeapObjectTag)); + __ RecordWrite(elements, + key, + value, + kLRHasBeenSaved, + kSaveFPRegs, + EMIT_REMEMBERED_SET, + check_needed); + } +} + + +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + // By cases: external, fast double + if (instr->is_external()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); + } +} + + void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(r2)); ASSERT(ToRegister(instr->key()).is(r1)); @@ -4178,30 +4636,40 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { Register object_reg = ToRegister(instr->object()); - Register new_map_reg = ToRegister(instr->new_map_temp()); Register scratch = scratch0(); Handle<Map> from_map = instr->original_map(); Handle<Map> to_map = instr->transitioned_map(); - ElementsKind from_kind = from_map->elements_kind(); - ElementsKind to_kind = to_map->elements_kind(); + ElementsKind from_kind = instr->from_kind(); + ElementsKind to_kind = instr->to_kind(); Label not_applicable; __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); __ cmp(scratch, Operand(from_map)); __ b(ne, ¬_applicable); - __ mov(new_map_reg, Operand(to_map)); if (IsSimpleMapChangeTransition(from_kind, to_kind)) { + Register new_map_reg = ToRegister(instr->new_map_temp()); + __ mov(new_map_reg, Operand(to_map)); __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, kLRHasBeenSaved, kDontSaveFPRegs); + } else if (FLAG_compiled_transitions) { + PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + __ Move(r0, object_reg); + __ Move(r1, to_map); + TransitionElementsKindStub stub(from_kind, to_kind); + __ CallStub(&stub); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); } else if (IsFastSmiElementsKind(from_kind) && IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(r2)); + Register new_map_reg = ToRegister(instr->new_map_temp()); ASSERT(new_map_reg.is(r3)); + __ mov(new_map_reg, Operand(to_map)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); @@ -4209,7 +4677,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(r2)); + Register new_map_reg = ToRegister(instr->new_map_temp()); ASSERT(new_map_reg.is(r3)); + __ mov(new_map_reg, Operand(to_map)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(), RelocInfo::CODE_TARGET, instr); @@ -4220,11 +4690,19 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { } +void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { + Register object = ToRegister(instr->object()); + Register temp = ToRegister(instr->temp()); + __ TestJSArrayForAllocationSiteInfo(object, temp); + DeoptimizeIf(eq, instr->environment()); +} + + void LCodeGen::DoStringAdd(LStringAdd* instr) { __ push(ToRegister(instr->left())); __ push(ToRegister(instr->right())); StringAddStub 
stub(NO_STRING_CHECK_IN_STUB); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -4259,7 +4737,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already // contained in the register pointer map. - __ mov(result, Operand(0)); + __ mov(result, Operand::Zero()); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); __ push(string); @@ -4300,7 +4778,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { Register result = ToRegister(instr->result()); ASSERT(!char_code.is(result)); - __ cmp(char_code, Operand(String::kMaxAsciiCharCode)); + __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); __ b(hi, deferred->entry()); __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); @@ -4319,7 +4797,7 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already // contained in the register pointer map. - __ mov(result, Operand(0)); + __ mov(result, Operand::Zero()); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); __ SmiTag(char_code); @@ -4337,6 +4815,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { + CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4354,6 +4833,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { + CpuFeatureScope scope(masm(), VFP2); LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4415,13 +4895,49 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { } +// Convert unsigned integer with specified number of leading zeroes in binary +// representation to IEEE 754 double. +// Integer to convert is passed in register hiword. +// Resulting double is returned in registers hiword:loword. +// This functions does not work correctly for 0. +static void GenerateUInt2Double(MacroAssembler* masm, + Register hiword, + Register loword, + Register scratch, + int leading_zeroes) { + const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; + const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; + + const int mantissa_shift_for_hi_word = + meaningful_bits - HeapNumber::kMantissaBitsInTopWord; + const int mantissa_shift_for_lo_word = + kBitsPerInt - mantissa_shift_for_hi_word; + masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); + if (mantissa_shift_for_hi_word > 0) { + masm->mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); + masm->orr(hiword, scratch, + Operand(hiword, LSR, mantissa_shift_for_hi_word)); + } else { + masm->mov(loword, Operand::Zero()); + masm->orr(hiword, scratch, + Operand(hiword, LSL, -mantissa_shift_for_hi_word)); + } + + // If least significant bit of biased exponent was not 1 it was corrupted + // by most significant bit of mantissa so we should fix that. 
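// Worked example (illustrative only, assuming the usual IEEE 754 layout with
// kExponentBias == 1023 and kExponentShift == kMantissaBitsInTopWord == 20):
// for hiword = 0xC0000000 and leading_zeroes = 0,
//   meaningful_bits            = 32 - 0 - 1 = 31
//   biased_exponent            = 1023 + 31 = 1054 = 0x41E   (LSB is 0)
//   mantissa_shift_for_hi_word = 31 - 20 = 11
//   loword = 0xC0000000 << 21  = 0
//   hiword = 0x41E00000 | (0xC0000000 >> 11) = 0x41F80000
// The integer's leading 1 landed on bit 20, the exponent LSB, and corrupted
// the exponent. The bic below clears that bit, giving 0x41E80000, the correct
// encoding of 3221225472.0 = 1.5 * 2^31. When biased_exponent is odd, the
// leading 1 merely re-sets a bit that is already 1, so no correction is
// needed.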
+ if (!(biased_exponent & 1)) { + masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); + } +} + + void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, LOperand* value, IntegerSignedness signedness) { Label slow; Register src = ToRegister(value); Register dst = ToRegister(instr->result()); - DoubleRegister dbl_scratch = double_scratch0(); + DwVfpRegister dbl_scratch = double_scratch0(); SwVfpRegister flt_scratch = dbl_scratch.low(); // Preserve the value of all registers. @@ -4436,16 +4952,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ SmiUntag(src, dst); __ eor(src, src, Operand(0x80000000)); } - __ vmov(flt_scratch, src); - __ vcvt_f64_s32(dbl_scratch, flt_scratch); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + __ vmov(flt_scratch, src); + __ vcvt_f64_s32(dbl_scratch, flt_scratch); + } else { + FloatingPointHelper::Destination dest = + FloatingPointHelper::kCoreRegisters; + FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0, + sfpd_lo, sfpd_hi, + scratch0(), s0); + } } else { - __ vmov(flt_scratch, src); - __ vcvt_f64_u32(dbl_scratch, flt_scratch); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + __ vmov(flt_scratch, src); + __ vcvt_f64_u32(dbl_scratch, flt_scratch); + } else { + Label no_leading_zero, done; + __ tst(src, Operand(0x80000000)); + __ b(ne, &no_leading_zero); + + // Integer has one leading zeros. + GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 1); + __ b(&done); + + __ bind(&no_leading_zero); + GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, r9, 0); + __ b(&done); + } } if (FLAG_inline_new) { - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); + __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(r5, r3, r4, scratch0(), &slow, DONT_TAG_RESULT); __ Move(dst, r5); __ b(&done); } @@ -4456,7 +4996,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // TODO(3095996): Put a valid pointer value in the stack slot where the result // register is stored, as this register is in the pointer map, but contains an // integer value. - __ mov(ip, Operand(0)); + __ mov(ip, Operand::Zero()); __ StoreToSafepointRegisterSlot(ip, dst); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); __ Move(dst, r0); @@ -4465,7 +5005,13 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // Done. Put the value in dbl_scratch into the value of the allocated heap // number. 
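// Aside (an assumption, not stated in the patch): on little-endian ARM the
// two soft-float words are laid out as
//   HeapNumber::kMantissaOffset == kValueOffset       (low 32 bits,  sfpd_lo)
//   HeapNumber::kExponentOffset == kValueOffset + 4   (sign, exponent and top
//                                                      mantissa bits, sfpd_hi)
// so the pair of str instructions in the non-VFP2 path below is intended to
// be equivalent to the single vstr used when VFP2 is available.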
__ bind(&done); - __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); + } else { + __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); + __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset)); + } __ add(dst, dst, Operand(kHeapObjectTag)); __ StoreToSafepointRegisterSlot(dst, dst); } @@ -4482,12 +5028,64 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { LNumberTagD* instr_; }; - DoubleRegister input_reg = ToDoubleRegister(instr->value()); + DwVfpRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = scratch0(); Register reg = ToRegister(instr->result()); Register temp1 = ToRegister(instr->temp()); Register temp2 = ToRegister(instr->temp2()); + bool convert_hole = false; + HValue* change_input = instr->hydrogen()->value(); + if (change_input->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(change_input); + convert_hole = load->UsesMustHandleHole(); + } + + Label no_special_nan_handling; + Label done; + if (convert_hole) { + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + DwVfpRegister input_reg = ToDoubleRegister(instr->value()); + __ VFPCompareAndSetFlags(input_reg, input_reg); + __ b(vc, &no_special_nan_handling); + __ vmov(reg, scratch0(), input_reg); + __ cmp(scratch0(), Operand(kHoleNanUpper32)); + Label canonicalize; + __ b(ne, &canonicalize); + __ Move(reg, factory()->the_hole_value()); + __ b(&done); + __ bind(&canonicalize); + __ Vmov(input_reg, + FixedDoubleArray::canonical_not_the_hole_nan_as_double(), + no_reg); + } else { + Label not_hole; + __ cmp(sfpd_hi, Operand(kHoleNanUpper32)); + __ b(ne, ¬_hole); + __ Move(reg, factory()->the_hole_value()); + __ b(&done); + __ bind(¬_hole); + __ and_(scratch, sfpd_hi, Operand(0x7ff00000)); + __ cmp(scratch, Operand(0x7ff00000)); + __ b(ne, &no_special_nan_handling); + Label special_nan_handling; + __ tst(sfpd_hi, Operand(0x000FFFFF)); + __ b(ne, &special_nan_handling); + __ cmp(sfpd_lo, Operand(0)); + __ b(eq, &no_special_nan_handling); + __ bind(&special_nan_handling); + double canonical_nan = + FixedDoubleArray::canonical_not_the_hole_nan_as_double(); + uint64_t casted_nan = BitCast<uint64_t>(canonical_nan); + __ mov(sfpd_lo, + Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF))); + __ mov(sfpd_hi, + Operand(static_cast<uint32_t>(casted_nan >> 32))); + } + } + + __ bind(&no_special_nan_handling); DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); @@ -4498,9 +5096,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { __ jmp(deferred->entry()); } __ bind(deferred->exit()); - __ vstr(input_reg, reg, HeapNumber::kValueOffset); + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(masm(), VFP2); + __ vstr(input_reg, reg, HeapNumber::kValueOffset); + } else { + __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); + __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); + } // Now that we have finished with the object's real address tag it __ add(reg, reg, Operand(kHeapObjectTag)); + __ bind(&done); } @@ -4509,7 +5114,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { // result register contain a valid pointer because it is already // contained in the register pointer map. 
Register reg = ToRegister(instr->result()); - __ mov(reg, Operand(0)); + __ mov(reg, Operand::Zero()); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); @@ -4539,53 +5144,69 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) { void LCodeGen::EmitNumberUntagD(Register input_reg, - DoubleRegister result_reg, + DwVfpRegister result_reg, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, - LEnvironment* env) { + LEnvironment* env, + NumberUntagDMode mode) { Register scratch = scratch0(); SwVfpRegister flt_scratch = double_scratch0().low(); ASSERT(!result_reg.is(double_scratch0())); + CpuFeatureScope scope(masm(), VFP2); Label load_smi, heap_number, done; - // Smi check. - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + // Smi check. + __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - // Heap number map check. - __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); - __ cmp(scratch, Operand(ip)); - if (deoptimize_on_undefined) { - DeoptimizeIf(ne, env); - } else { - Label heap_number; - __ b(eq, &heap_number); + // Heap number map check. + __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(scratch, Operand(ip)); + if (deoptimize_on_undefined) { + DeoptimizeIf(ne, env); + } else { + Label heap_number; + __ b(eq, &heap_number); - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); - __ cmp(input_reg, Operand(ip)); - DeoptimizeIf(ne, env); + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); + __ cmp(input_reg, Operand(ip)); + DeoptimizeIf(ne, env); + + // Convert undefined to NaN. + __ LoadRoot(ip, Heap::kNanValueRootIndex); + __ sub(ip, ip, Operand(kHeapObjectTag)); + __ vldr(result_reg, ip, HeapNumber::kValueOffset); + __ jmp(&done); - // Convert undefined to NaN. - __ LoadRoot(ip, Heap::kNanValueRootIndex); - __ sub(ip, ip, Operand(kHeapObjectTag)); + __ bind(&heap_number); + } + // Heap number to double register conversion. + __ sub(ip, input_reg, Operand(kHeapObjectTag)); __ vldr(result_reg, ip, HeapNumber::kValueOffset); + if (deoptimize_on_minus_zero) { + __ vmov(ip, result_reg.low()); + __ cmp(ip, Operand::Zero()); + __ b(ne, &done); + __ vmov(ip, result_reg.high()); + __ cmp(ip, Operand(HeapNumber::kSignMask)); + DeoptimizeIf(eq, env); + } __ jmp(&done); - - __ bind(&heap_number); - } - // Heap number to double register conversion. 
- __ sub(ip, input_reg, Operand(kHeapObjectTag)); - __ vldr(result_reg, ip, HeapNumber::kValueOffset); - if (deoptimize_on_minus_zero) { - __ vmov(ip, result_reg.low()); - __ cmp(ip, Operand(0)); - __ b(ne, &done); - __ vmov(ip, result_reg.high()); - __ cmp(ip, Operand(HeapNumber::kSignMask)); - DeoptimizeIf(eq, env); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { + __ SmiUntag(scratch, input_reg, SetCC); + DeoptimizeIf(cs, env); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { + __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); + __ Vmov(result_reg, + FixedDoubleArray::hole_nan_as_double(), + no_reg); + __ b(&done); + } else { + __ SmiUntag(scratch, input_reg); + ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } - __ jmp(&done); // Smi to double register conversion __ bind(&load_smi); @@ -4620,8 +5241,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ cmp(scratch1, Operand(ip)); if (instr->truncating()) { + CpuFeatureScope scope(masm(), VFP2); Register scratch3 = ToRegister(instr->temp2()); - SwVfpRegister single_scratch = double_scratch.low(); ASSERT(!scratch3.is(input_reg) && !scratch3.is(scratch1) && !scratch3.is(scratch2)); @@ -4634,37 +5255,28 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); __ cmp(input_reg, Operand(ip)); DeoptimizeIf(ne, instr->environment()); - __ mov(input_reg, Operand(0)); + __ mov(input_reg, Operand::Zero()); __ b(&done); __ bind(&heap_number); __ sub(scratch1, input_reg, Operand(kHeapObjectTag)); __ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset); - __ EmitECMATruncate(input_reg, - double_scratch2, - single_scratch, - scratch1, - scratch2, - scratch3); + __ ECMAToInt32VFP(input_reg, double_scratch2, double_scratch, + scratch1, scratch2, scratch3); } else { - CpuFeatures::Scope scope(VFP3); + CpuFeatureScope scope(masm(), VFP3); // Deoptimize if we don't have a heap number. 
DeoptimizeIf(ne, instr->environment()); __ sub(ip, input_reg, Operand(kHeapObjectTag)); __ vldr(double_scratch, ip, HeapNumber::kValueOffset); - __ EmitVFPTruncate(kRoundToZero, - input_reg, - double_scratch, - scratch1, - double_scratch2, - kCheckForInexactConversion); + __ TryDoubleToInt32Exact(input_reg, double_scratch, double_scratch2); DeoptimizeIf(ne, instr->environment()); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - __ cmp(input_reg, Operand(0)); + __ cmp(input_reg, Operand::Zero()); __ b(ne, &done); __ vmov(scratch1, double_scratch.high()); __ tst(scratch1, Operand(HeapNumber::kSignMask)); @@ -4711,12 +5323,30 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { ASSERT(result->IsDoubleRegister()); Register input_reg = ToRegister(input); - DoubleRegister result_reg = ToDoubleRegister(result); + DwVfpRegister result_reg = ToDoubleRegister(result); + + NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; + HValue* value = instr->hydrogen()->value(); + if (value->type().IsSmi()) { + if (value->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(value); + if (load->UsesMustHandleHole()) { + if (load->hole_mode() == ALLOW_RETURN_HOLE) { + mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; + } else { + mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; + } + } else { + mode = NUMBER_CANDIDATE_IS_SMI; + } + } + } EmitNumberUntagD(input_reg, result_reg, instr->hydrogen()->deoptimize_on_undefined(), instr->hydrogen()->deoptimize_on_minus_zero(), - instr->environment()); + instr->environment(), + mode); } @@ -4725,29 +5355,17 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->temp()); DwVfpRegister double_input = ToDoubleRegister(instr->value()); + DwVfpRegister double_scratch = double_scratch0(); Label done; if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); - SwVfpRegister single_scratch = double_scratch0().low(); - __ EmitECMATruncate(result_reg, - double_input, - single_scratch, - scratch1, - scratch2, - scratch3); + __ ECMAToInt32VFP(result_reg, double_input, double_scratch, + scratch1, scratch2, scratch3); } else { - DwVfpRegister double_scratch = double_scratch0(); - __ EmitVFPTruncate(kRoundToMinusInf, - result_reg, - double_input, - scratch1, - double_scratch, - kCheckForInexactConversion); - - // Deoptimize if we had a vfp invalid exception, - // including inexact operation. + __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); + // Deoptimize if the input wasn't a int32 (inside a double). 
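// Rough sketch (an assumption about the helper, not taken from the patch) of
// what an "exact" double-to-int32 conversion has to establish before the
// DeoptimizeIf(ne, ...) below can be skipped:
//   int32_t i  = static_cast<int32_t>(d);         // truncating convert
//   bool exact = (static_cast<double>(i) == d);   // no fraction, no overflow
// On ARM this is typically a vcvt to integer, a vcvt back to double and a
// vcmp, so the flags end up "ne" when the value was not exactly an int32.
// Note that -0.0 passes this equality check (it round-trips to +0.0), which
// is why kBailoutOnMinusZero cases need their own sign test.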
DeoptimizeIf(ne, instr->environment()); } __ bind(&done); @@ -4828,46 +5446,48 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) { } -void LCodeGen::DoCheckMapCommon(Register reg, - Register scratch, +void LCodeGen::DoCheckMapCommon(Register map_reg, Handle<Map> map, CompareMapMode mode, LEnvironment* env) { Label success; - __ CompareMap(reg, scratch, map, &success, mode); + __ CompareMap(map_reg, map, &success, mode); DeoptimizeIf(ne, env); __ bind(&success); } void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - Register scratch = scratch0(); + Register map_reg = scratch0(); LOperand* input = instr->value(); ASSERT(input->IsRegister()); Register reg = ToRegister(input); Label success; SmallMapList* map_set = instr->hydrogen()->map_set(); + __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); for (int i = 0; i < map_set->length() - 1; i++) { Handle<Map> map = map_set->at(i); - __ CompareMap(reg, scratch, map, &success, REQUIRE_EXACT_MAP); + __ CompareMap(map_reg, map, &success, REQUIRE_EXACT_MAP); __ b(eq, &success); } Handle<Map> map = map_set->last(); - DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); + DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment()); __ bind(&success); } void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { - DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); + CpuFeatureScope vfp_scope(masm(), VFP2); + DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); + DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); __ ClampDoubleToUint8(result_reg, value_reg, temp_reg); } void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { + CpuFeatureScope scope(masm(), VFP2); Register unclamped_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampUint8(result_reg, unclamped_reg); @@ -4875,10 +5495,11 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { + CpuFeatureScope scope(masm(), VFP2); Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); - DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); + DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); Label is_smi, done, heap_number; // Both smi and heap number cases are handled. @@ -4893,7 +5514,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { // conversions. __ cmp(input_reg, Operand(factory()->undefined_value())); DeoptimizeIf(ne, instr->environment()); - __ mov(result_reg, Operand(0)); + __ mov(result_reg, Operand::Zero()); __ jmp(&done); // Heap number @@ -4912,31 +5533,31 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { - Register temp1 = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); + ASSERT(instr->temp()->Equals(instr->result())); + Register prototype_reg = ToRegister(instr->temp()); + Register map_reg = ToRegister(instr->temp2()); - Handle<JSObject> holder = instr->holder(); - Handle<JSObject> current_prototype = instr->prototype(); + ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); + ZoneList<Handle<Map> >* maps = instr->maps(); - // Load prototype object. - __ LoadHeapObject(temp1, current_prototype); + ASSERT(prototypes->length() == maps->length()); - // Check prototype maps up to the holder. 
- while (!current_prototype.is_identical_to(holder)) { - DoCheckMapCommon(temp1, temp2, - Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); - current_prototype = - Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); - // Load next prototype object. - __ LoadHeapObject(temp1, current_prototype); + if (instr->hydrogen()->CanOmitPrototypeChecks()) { + for (int i = 0; i < maps->length(); i++) { + prototype_maps_.Add(maps->at(i), info()->zone()); + } + __ LoadHeapObject(prototype_reg, + prototypes->at(prototypes->length() - 1)); + } else { + for (int i = 0; i < prototypes->length(); i++) { + __ LoadHeapObject(prototype_reg, prototypes->at(i)); + __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset)); + DoCheckMapCommon(map_reg, + maps->at(i), + ALLOW_ELEMENT_TRANSITION_MAPS, + instr->environment()); + } } - - // Check the holder map. - DoCheckMapCommon(temp1, temp2, - Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); - DeoptimizeIf(ne, instr->environment()); } @@ -4968,12 +5589,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { // the constructor's prototype changes, but instance size and property // counts remain unchanged (if slack tracking finished). ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); - __ AllocateInNewSpace(instance_size, - result, - scratch, - scratch2, - deferred->entry(), - TAG_OBJECT); + __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(), + TAG_OBJECT); __ bind(deferred->exit()); if (FLAG_debug_code) { @@ -5013,7 +5630,7 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { // TODO(3095996): Get rid of this. For now, we need to make the // result register contain a valid pointer because it is already // contained in the register pointer map. - __ mov(result, Operand(0)); + __ mov(result, Operand::Zero()); PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); __ mov(r0, Operand(Smi::FromInt(instance_size))); @@ -5023,10 +5640,72 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { } +void LCodeGen::DoAllocate(LAllocate* instr) { + class DeferredAllocate: public LDeferredCode { + public: + DeferredAllocate(LCodeGen* codegen, LAllocate* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LAllocate* instr_; + }; + + DeferredAllocate* deferred = + new(zone()) DeferredAllocate(this, instr); + + Register result = ToRegister(instr->result()); + Register scratch = ToRegister(instr->temp1()); + Register scratch2 = ToRegister(instr->temp2()); + + // Allocate memory for the object. 
+ AllocationFlags flags = TAG_OBJECT; + if (instr->hydrogen()->MustAllocateDoubleAligned()) { + flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); + } + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); + } + __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); + } else { + Register size = ToRegister(instr->size()); + __ AllocateInNewSpace(size, + result, + scratch, + scratch2, + deferred->entry(), + flags); + } + + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredAllocate(LAllocate* instr) { + Register size = ToRegister(instr->size()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + __ mov(result, Operand(Smi::FromInt(0))); + + PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + __ SmiTag(size, size); + __ push(size); + CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); + __ StoreToSafepointRegisterSlot(r0, result); +} + + void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { Handle<FixedArray> literals(instr->environment()->closure()->literals()); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); + AllocationSiteMode allocation_site_mode = + instr->hydrogen()->allocation_site_mode(); // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has @@ -5058,8 +5737,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { ASSERT(instr->hydrogen()->depth() == 1); FastCloneShallowArrayStub::Mode mode = FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else if (instr->hydrogen()->depth() > 1) { CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { @@ -5067,10 +5746,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { } else { FastCloneShallowArrayStub::Mode mode = boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS - ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS + : FastCloneShallowArrayStub::CLONE_ELEMENTS; + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -5078,10 +5757,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset) { + int* offset, + AllocationSiteMode mode) { ASSERT(!source.is(r2)); ASSERT(!result.is(r2)); + bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE && + object->map()->CanTrackAllocationSite(); + // Only elements backing stores for non-COW arrays need to be copied. 
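// Aside (editor's sketch of the offset bookkeeping a few lines below, not
// part of the patch): within the one big allocation the copy is laid out as
//   object_offset   : object header + in-object properties (object_size bytes)
//   then, only when create_allocation_site_info, an AllocationSiteInfo slot
//                     (AllocationSiteInfo::kSize bytes: map word + payload)
//   elements_offset : the elements backing store (elements_size bytes)
// Nested objects reachable from properties or elements are appended after
// this block by the recursive EmitDeepCopy calls, which is why *offset is
// advanced past object_size + elements_size before recursing.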
Handle<FixedArrayBase> elements(object->elements()); bool has_elements = elements->length() > 0 && @@ -5091,8 +5774,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // this object and its backing store. int object_offset = *offset; int object_size = object->map()->instance_size(); - int elements_offset = *offset + object_size; int elements_size = has_elements ? elements->Size() : 0; + int elements_offset = *offset + object_size; + if (create_allocation_site_info) { + elements_offset += AllocationSiteInfo::kSize; + *offset += AllocationSiteInfo::kSize; + } + *offset += object_size + elements_size; // Copy object header. @@ -5111,13 +5799,15 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // Copy in-object properties. for (int i = 0; i < inobject_properties; i++) { int total_offset = object_offset + object->GetInObjectPropertyOffset(i); - Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); + Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i), + isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ add(r2, result, Operand(*offset)); __ str(r2, FieldMemOperand(result, total_offset)); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); __ str(r2, FieldMemOperand(result, total_offset)); @@ -5127,6 +5817,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, } } + // Build Allocation Site Info if desired + if (create_allocation_site_info) { + __ mov(r2, Operand(Handle<Map>(isolate()->heap()-> + allocation_site_info_map()))); + __ str(r2, FieldMemOperand(result, object_size)); + __ str(source, FieldMemOperand(result, object_size + kPointerSize)); + } + if (has_elements) { // Copy elements backing store header. __ LoadHeapObject(source, elements); @@ -5156,13 +5854,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); for (int i = 0; i < elements_length; i++) { int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); - Handle<Object> value(fast_elements->get(i)); + Handle<Object> value(fast_elements->get(i), isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ add(r2, result, Operand(*offset)); __ str(r2, FieldMemOperand(result, total_offset)); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(r2, Handle<HeapObject>::cast(value)); __ str(r2, FieldMemOperand(result, total_offset)); @@ -5202,7 +5901,7 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { // Allocate all objects that are part of the literal in one big // allocation. This avoids multiple limit checks. 
Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -5213,7 +5912,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { __ bind(&allocated); int offset = 0; __ LoadHeapObject(r1, instr->hydrogen()->boilerplate()); - EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset); + EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset, + instr->hydrogen()->allocation_site_mode()); ASSERT_EQ(size, offset); } @@ -5224,25 +5924,26 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { instr->hydrogen()->constant_properties(); // Set up the parameters to the stub/runtime call. - __ LoadHeapObject(r4, literals); - __ mov(r3, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); - __ mov(r2, Operand(constant_properties)); + __ LoadHeapObject(r3, literals); + __ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ mov(r1, Operand(constant_properties)); int flags = instr->hydrogen()->fast_elements() ? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags; - __ mov(r1, Operand(Smi::FromInt(flags))); - __ Push(r4, r3, r2, r1); + __ mov(r0, Operand(Smi::FromInt(flags))); // Pick the right runtime function or stub to call. int properties_count = constant_properties->length() / 2; if (instr->hydrogen()->depth() > 1) { + __ Push(r3, r2, r1, r0); CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); } else if (flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ Push(r3, r2, r1, r0); CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); } else { FastCloneShallowObjectStub stub(properties_count); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -5282,7 +5983,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -5316,7 +6017,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { FastNewClosureStub stub(shared_info->language_mode()); __ mov(r1, Operand(shared_info)); __ push(r1); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { __ mov(r2, Operand(shared_info)); __ mov(r1, Operand(pretenure @@ -5358,14 +6059,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Handle<String> type_name) { Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); - if (type_name->Equals(heap()->number_symbol())) { + if (type_name->Equals(heap()->number_string())) { __ JumpIfSmi(input, true_label); __ ldr(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); __ cmp(input, Operand(ip)); final_branch_condition = eq; - } else if (type_name->Equals(heap()->string_symbol())) { + } else if (type_name->Equals(heap()->string_string())) { __ JumpIfSmi(input, false_label); __ CompareObjectType(input, input, scratch, FIRST_NONSTRING_TYPE); __ b(ge, false_label); @@ -5373,17 +6074,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ tst(ip, Operand(1 << Map::kIsUndetectable)); 
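// Aside (not part of the patch): the undetectable bit is tested here because
// undetectable objects and strings must report typeof "undefined" rather than
// "string"; "eq" below therefore means the bit is clear and the input is an
// ordinary string.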
final_branch_condition = eq; - } else if (type_name->Equals(heap()->boolean_symbol())) { + } else if (type_name->Equals(heap()->boolean_string())) { __ CompareRoot(input, Heap::kTrueValueRootIndex); __ b(eq, true_label); __ CompareRoot(input, Heap::kFalseValueRootIndex); final_branch_condition = eq; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) { + } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { __ CompareRoot(input, Heap::kNullValueRootIndex); final_branch_condition = eq; - } else if (type_name->Equals(heap()->undefined_symbol())) { + } else if (type_name->Equals(heap()->undefined_string())) { __ CompareRoot(input, Heap::kUndefinedValueRootIndex); __ b(eq, true_label); __ JumpIfSmi(input, false_label); @@ -5393,7 +6094,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ tst(ip, Operand(1 << Map::kIsUndetectable)); final_branch_condition = ne; - } else if (type_name->Equals(heap()->function_symbol())) { + } else if (type_name->Equals(heap()->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); __ CompareObjectType(input, scratch, input, JS_FUNCTION_TYPE); @@ -5401,14 +6102,21 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ cmp(input, Operand(JS_FUNCTION_PROXY_TYPE)); final_branch_condition = eq; - } else if (type_name->Equals(heap()->object_symbol())) { + } else if (type_name->Equals(heap()->object_string())) { __ JumpIfSmi(input, false_label); if (!FLAG_harmony_typeof) { __ CompareRoot(input, Heap::kNullValueRootIndex); __ b(eq, true_label); } - __ CompareObjectType(input, input, scratch, - FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + if (FLAG_harmony_symbols) { + __ CompareObjectType(input, input, scratch, SYMBOL_TYPE); + __ b(eq, true_label); + __ CompareInstanceType(input, scratch, + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + } else { + __ CompareObjectType(input, input, scratch, + FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + } __ b(lt, false_label); __ CompareInstanceType(input, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); __ b(gt, false_label); @@ -5455,6 +6163,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { void LCodeGen::EnsureSpaceForLazyDeopt() { + if (info()->IsStub()) return; // Ensure that we have enough space after the previous lazy-bailout // instruction for patching the code here. int current_pc = masm()->pc_offset(); @@ -5487,6 +6196,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) { } +void LCodeGen::DoDummyUse(LDummyUse* instr) { + // Nothing to see here, move on! 
+} + + void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { Register object = ToRegister(instr->object()); Register key = ToRegister(instr->key()); @@ -5547,8 +6261,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ cmp(sp, Operand(ip)); __ b(hs, &done); StackCheckStub stub; - PredictableCodeSizeScope predictable(masm_); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); EnsureSpaceForLazyDeopt(); __ bind(&done); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); @@ -5641,7 +6355,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); __ ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); - __ cmp(result, Operand(0)); + __ cmp(result, Operand::Zero()); DeoptimizeIf(eq, instr->environment()); __ bind(&done); @@ -5664,7 +6378,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { Register scratch = scratch0(); Label out_of_object, done; - __ cmp(index, Operand(0)); + __ cmp(index, Operand::Zero()); __ b(lt, &out_of_object); STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize); diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h index 9281537c1e..d1f712ab80 100644 --- a/deps/v8/src/arm/lithium-codegen-arm.h +++ b/deps/v8/src/arm/lithium-codegen-arm.h @@ -54,6 +54,7 @@ class LCodeGen BASE_EMBEDDED { deoptimizations_(4, info->zone()), deopt_jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), + prototype_maps_(0, info->zone()), inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), @@ -61,6 +62,7 @@ class LCodeGen BASE_EMBEDDED { deferred_(8, info->zone()), osr_pc_offset_(-1), last_lazy_deopt_pc_(0), + frame_is_built_(false), safepoints_(info->zone()), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { @@ -76,6 +78,15 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + bool NeedsEagerFrame() const { + return GetStackSlotCount() > 0 || + info()->is_non_deferred_calling() || + !info()->IsStub(); + } + bool NeedsDeferredFrame() const { + return !NeedsEagerFrame() && info()->is_deferred_calling(); + } + // Support for converting LOperands to assembler types. // LOperand must be a register. Register ToRegister(LOperand* op) const; @@ -84,12 +95,12 @@ class LCodeGen BASE_EMBEDDED { Register EmitLoadRegister(LOperand* op, Register scratch); // LOperand must be a double register. - DoubleRegister ToDoubleRegister(LOperand* op) const; + DwVfpRegister ToDoubleRegister(LOperand* op) const; // LOperand is loaded into dbl_scratch, unless already a double register. 
- DoubleRegister EmitLoadDoubleRegister(LOperand* op, - SwVfpRegister flt_scratch, - DoubleRegister dbl_scratch); + DwVfpRegister EmitLoadDoubleRegister(LOperand* op, + SwVfpRegister flt_scratch, + DwVfpRegister dbl_scratch); int ToInteger32(LConstantOperand* op) const; double ToDouble(LConstantOperand* op) const; Operand ToOperand(LOperand* op); @@ -128,10 +139,11 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredAllocateObject(LAllocateObject* instr); + void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); - void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map, + void DoCheckMapCommon(Register map_reg, Handle<Map> map, CompareMapMode mode, LEnvironment* env); // Parallel move support. @@ -193,7 +205,6 @@ class LCodeGen BASE_EMBEDDED { Register temporary2); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - int GetParameterCount() const { return scope()->num_parameters(); } void Abort(const char* reason); void Comment(const char* format, ...); @@ -267,15 +278,17 @@ class LCodeGen BASE_EMBEDDED { LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count); + void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code); void PopulateDeoptimizationData(Handle<Code> code); int DefineDeoptimizationLiteral(Handle<Object> literal); void PopulateDeoptimizationLiteralsWithInlinedFunctions(); Register ToRegister(int index) const; - DoubleRegister ToDoubleRegister(int index) const; + DwVfpRegister ToDoubleRegister(int index) const; // Specific math operations - used from DoUnaryMathOperation. void EmitIntegerMathAbs(LUnaryMathOperation* instr); @@ -308,14 +321,11 @@ class LCodeGen BASE_EMBEDDED { void EmitGoto(int block); void EmitBranch(int left_block, int right_block, Condition cc); void EmitNumberUntagD(Register input, - DoubleRegister result, + DwVfpRegister result, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, - LEnvironment* env); - - void DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand); + LEnvironment* env, + NumberUntagDMode mode); // Emits optimized code for typeof x == "y". Modifies input register. // Returns the condition on which a final split to @@ -355,7 +365,8 @@ class LCodeGen BASE_EMBEDDED { void EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset); + int* offset, + AllocationSiteMode mode); // Emit optimized code for integer division. // Inputs are signed. 
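The NeedsEagerFrame()/NeedsDeferredFrame() helpers added above encode when compiled code gets a stack frame built up front versus lazily inside deferred code. A minimal standalone sketch of that decision logic, using hypothetical stand-in names (CompilationInfoSketch and its fields are illustrative, not V8's CompilationInfo API):

    #include <cstdio>

    struct CompilationInfoSketch {   // hypothetical stand-in, not V8's CompilationInfo
      bool is_stub;
      bool calls_outside_deferred;   // roughly is_non_deferred_calling()
      bool calls_inside_deferred;    // roughly is_deferred_calling()
      int  spill_slots;              // roughly GetStackSlotCount()
    };

    static bool NeedsEagerFrame(const CompilationInfoSketch& info) {
      // Full functions always get a frame; stubs only if they spill to the
      // stack or make calls outside deferred code.
      return info.spill_slots > 0 || info.calls_outside_deferred || !info.is_stub;
    }

    static bool NeedsDeferredFrame(const CompilationInfoSketch& info) {
      return !NeedsEagerFrame(info) && info.calls_inside_deferred;
    }

    int main() {
      CompilationInfoSketch stub = {true, false, true, 0};
      std::printf("eager=%d deferred=%d\n",
                  NeedsEagerFrame(stub), NeedsDeferredFrame(stub));  // eager=0 deferred=1
      return 0;
    }
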
@@ -369,14 +380,24 @@ class LCodeGen BASE_EMBEDDED { LEnvironment* environment); struct JumpTableEntry { - explicit inline JumpTableEntry(Address entry) + inline JumpTableEntry(Address entry, bool frame, bool is_lazy) : label(), - address(entry) { } + address(entry), + needs_frame(frame), + is_lazy_deopt(is_lazy) { } Label label; Address address; + bool needs_frame; + bool is_lazy_deopt; }; void EnsureSpaceForLazyDeopt(); + void DoLoadKeyedExternalArray(LLoadKeyed* instr); + void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); + void DoLoadKeyedFixedArray(LLoadKeyed* instr); + void DoStoreKeyedExternalArray(LStoreKeyed* instr); + void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); + void DoStoreKeyedFixedArray(LStoreKeyed* instr); Zone* zone_; LPlatformChunk* const chunk_; @@ -389,6 +410,7 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LEnvironment*> deoptimizations_; ZoneList<JumpTableEntry> deopt_jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; + ZoneList<Handle<Map> > prototype_maps_; int inlined_function_count_; Scope* const scope_; Status status_; @@ -396,6 +418,7 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; int last_lazy_deopt_pc_; + bool frame_is_built_; // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. @@ -411,6 +434,7 @@ class LCodeGen BASE_EMBEDDED { PushSafepointRegistersScope(LCodeGen* codegen, Safepoint::Kind kind) : codegen_(codegen) { + ASSERT(codegen_->info()->is_calling()); ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); codegen_->expected_safepoint_kind_ = kind; diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc index c100720d89..a65ab7e7d5 100644 --- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc +++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc @@ -171,8 +171,10 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); @@ -192,8 +194,10 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg); } else if (saved_destination_->IsDoubleStackSlot()) { + CpuFeatureScope scope(cgen_->masm(), VFP2); __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_)); } else { UNREACHABLE(); @@ -229,7 +233,8 @@ void LGapResolver::EmitMove(int index) { MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsUint12Encodable()) { - // ip is overwritten while saving the value to the destination. + CpuFeatureScope scope(cgen_->masm(), VFP2); + // ip is overwritten while saving the value to the destination. // Therefore we can't use ip. It is OK if the read from the source // destroys ip, since that happens before the value is read. 
__ vldr(kScratchDoubleReg.low(), source_operand); @@ -267,7 +272,8 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { - DoubleRegister source_register = cgen_->ToDoubleRegister(source); + CpuFeatureScope scope(cgen_->masm(), VFP2); + DwVfpRegister source_register = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { __ vmov(cgen_->ToDoubleRegister(destination), source_register); } else { @@ -276,7 +282,8 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleStackSlot()) { - MemOperand source_operand = cgen_->ToMemOperand(source); + CpuFeatureScope scope(cgen_->masm(), VFP2); + MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { __ vldr(cgen_->ToDoubleRegister(destination), source_operand); } else { diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc index 623bd6a01a..e0e77cfd33 100644 --- a/deps/v8/src/arm/macro-assembler-arm.cc +++ b/deps/v8/src/arm/macro-assembler-arm.cc @@ -290,9 +290,9 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) { } -void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) { +void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) { ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); if (!dst.is(src)) { vmov(dst, src); } @@ -304,7 +304,7 @@ void MacroAssembler::And(Register dst, Register src1, const Operand& src2, if (!src2.is_reg() && !src2.must_output_reloc_info(this) && src2.immediate() == 0) { - mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, cond); + mov(dst, Operand::Zero(), LeaveCC, cond); } else if (!src2.is_single_instruction(this) && !src2.must_output_reloc_info(this) && CpuFeatures::IsSupported(ARMv7) && @@ -410,7 +410,7 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, } tst(dst, Operand(~satval)); b(eq, &done); - mov(dst, Operand(0, RelocInfo::NONE), LeaveCC, mi); // 0 if negative. + mov(dst, Operand::Zero(), LeaveCC, mi); // 0 if negative. mov(dst, Operand(satval), LeaveCC, pl); // satval if positive. bind(&done); } else { @@ -422,6 +422,18 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src, void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index, Condition cond) { + if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && + !Heap::RootCanBeWrittenAfterInitialization(index) && + !predictable_code_size()) { + Handle<Object> root(isolate()->heap()->roots_array_start()[index], + isolate()); + if (!isolate()->heap()->InNewSpace(*root)) { + // The CPU supports fast immediate values, and this root will never + // change. We will load it as a relocatable immediate value. + mov(destination, Operand(root), LeaveCC, cond); + return; + } + } ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond); } @@ -631,20 +643,24 @@ void MacroAssembler::PopSafepointRegisters() { void MacroAssembler::PushSafepointRegistersAndDoubles() { + // Number of d-regs not known at snapshot time. 
+ ASSERT(!Serializer::enabled()); PushSafepointRegisters(); - sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * + sub(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() * kDoubleSize)); - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) { vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); } } void MacroAssembler::PopSafepointRegistersAndDoubles() { - for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) { + // Number of d-regs not known at snapshot time. + ASSERT(!Serializer::enabled()); + for (int i = 0; i < DwVfpRegister::NumAllocatableRegisters(); i++) { vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize); } - add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters * + add(sp, sp, Operand(DwVfpRegister::NumAllocatableRegisters() * kDoubleSize)); PopSafepointRegisters(); } @@ -679,8 +695,10 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { + // Number of d-regs not known at snapshot time. + ASSERT(!Serializer::enabled()); // General purpose registers are pushed last on the stack. - int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize; + int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize; int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; return MemOperand(sp, doubles_size + register_offset); } @@ -699,7 +717,7 @@ void MacroAssembler::Ldrd(Register dst1, Register dst2, // Generate two ldr instructions if ldrd is not available. if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { - CpuFeatures::Scope scope(ARMv7); + CpuFeatureScope scope(this, ARMv7); ldrd(dst1, dst2, src, cond); } else { if ((src.am() == Offset) || (src.am() == NegOffset)) { @@ -741,7 +759,7 @@ void MacroAssembler::Strd(Register src1, Register src2, // Generate two str instructions if strd is not available. if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size()) { - CpuFeatures::Scope scope(ARMv7); + CpuFeatureScope scope(this, ARMv7); strd(src1, src2, dst, cond); } else { MemOperand dst2(dst); @@ -759,15 +777,6 @@ void MacroAssembler::Strd(Register src1, Register src2, } -void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear, - const Register scratch, - const Condition cond) { - vmrs(scratch, cond); - bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond); - vmsr(scratch, cond); -} - - void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, const Condition cond) { @@ -803,19 +812,18 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1, void MacroAssembler::Vmov(const DwVfpRegister dst, const double imm, - const Register scratch, - const Condition cond) { - ASSERT(CpuFeatures::IsEnabled(VFP2)); + const Register scratch) { + ASSERT(IsEnabled(VFP2)); static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation zero(0.0); DoubleRepresentation value(imm); // Handle special values first. 
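Vmov above picks between a plain vmov from kDoubleRegZero and a vneg by comparing raw bit patterns, because +0.0 and -0.0 compare equal as doubles and only the sign bit tells them apart. A standalone illustration in plain C++ (not taken from the patch):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static uint64_t BitsOf(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));  // reinterpret without aliasing issues
      return bits;
    }

    int main() {
      assert(0.0 == -0.0);                            // arithmetically equal
      assert(BitsOf(0.0)  == 0x0000000000000000ULL);  // all bits clear
      assert(BitsOf(-0.0) == 0x8000000000000000ULL);  // only the sign bit set
      return 0;
    }
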
if (value.bits == zero.bits) { - vmov(dst, kDoubleRegZero, cond); + vmov(dst, kDoubleRegZero); } else if (value.bits == minus_zero.bits) { - vneg(dst, kDoubleRegZero, cond); + vneg(dst, kDoubleRegZero); } else { - vmov(dst, imm, scratch, cond); + vmov(dst, imm, scratch); } } @@ -853,7 +861,7 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Reserve room for saved entry sp and code object. sub(sp, sp, Operand(2 * kPointerSize)); if (emit_debug_code()) { - mov(ip, Operand(0)); + mov(ip, Operand::Zero()); str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset)); } mov(ip, Operand(CodeObject())); @@ -867,12 +875,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) { // Optionally save all double registers. if (save_doubles) { - DwVfpRegister first = d0; - DwVfpRegister last = - DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1); - vstm(db_w, sp, first, last); + CpuFeatureScope scope(this, VFP2); + // Check CPU flags for number of registers, setting the Z condition flag. + CheckFor32DRegs(ip); + + // Push registers d0-d15, and possibly d16-d31, on the stack. + // If d16-d31 are not pushed, decrease the stack pointer instead. + vstm(db_w, sp, d16, d31, ne); + sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); + vstm(db_w, sp, d0, d15); // Note that d0 will be accessible at - // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize, + // fp - 2 * kPointerSize - DwVfpRegister::kMaxNumRegisters * kDoubleSize, // since the sp slot and code slot were pushed after the fp. } @@ -927,17 +940,24 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count) { // Optionally restore all double registers. if (save_doubles) { + CpuFeatureScope scope(this, VFP2); // Calculate the stack location of the saved doubles and restore them. const int offset = 2 * kPointerSize; - sub(r3, fp, Operand(offset + DwVfpRegister::kNumRegisters * kDoubleSize)); - DwVfpRegister first = d0; - DwVfpRegister last = - DwVfpRegister::from_code(DwVfpRegister::kNumRegisters - 1); - vldm(ia, r3, first, last); + sub(r3, fp, + Operand(offset + DwVfpRegister::kMaxNumRegisters * kDoubleSize)); + + // Check CPU flags for number of registers, setting the Z condition flag. + CheckFor32DRegs(ip); + + // Pop registers d0-d15, and possibly d16-d31, from r3. + // If d16-d31 are not popped, increase r3 instead. + vldm(ia_w, r3, d0, d15); + vldm(ia_w, r3, d16, d31, ne); + add(r3, r3, Operand(16 * kDoubleSize), LeaveCC, eq); } // Clear top frame. 
- mov(r3, Operand(0, RelocInfo::NONE)); + mov(r3, Operand::Zero()); mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); str(r3, MemOperand(ip)); @@ -956,7 +976,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, } } -void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { +void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(dst, d0); @@ -1205,13 +1225,23 @@ void MacroAssembler::IsObjectJSStringType(Register object, } +void MacroAssembler::IsObjectNameType(Register object, + Register scratch, + Label* fail) { + ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); + ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + cmp(scratch, Operand(LAST_NAME_TYPE)); + b(hi, fail); +} + + #ifdef ENABLE_DEBUGGER_SUPPORT void MacroAssembler::DebugBreak() { - mov(r0, Operand(0, RelocInfo::NONE)); + mov(r0, Operand::Zero()); mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate()))); CEntryStub ces(1); ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); + Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); } #endif @@ -1238,7 +1268,7 @@ void MacroAssembler::PushTryHandler(StackHandler::Kind kind, // Push the frame pointer, context, state, and code object. if (kind == StackHandler::JS_ENTRY) { mov(r7, Operand(Smi::FromInt(0))); // Indicates no context. - mov(ip, Operand(0, RelocInfo::NONE)); // NULL frame pointer. + mov(ip, Operand::Zero()); // NULL frame pointer. stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | ip.bit()); } else { stm(db_w, sp, r5.bit() | r6.bit() | cp.bit() | fp.bit()); @@ -1362,7 +1392,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); // In debug mode, make sure the lexical context is set. #ifdef DEBUG - cmp(scratch, Operand(0, RelocInfo::NONE)); + cmp(scratch, Operand::Zero()); Check(ne, "we should not have an empty lexical context"); #endif @@ -1534,12 +1564,12 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, } -void MacroAssembler::AllocateInNewSpace(int object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::Allocate(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1567,21 +1597,22 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // The values must be adjacent in memory to allow the use of LDM. // Also, assert that the registers are numbered such that the values // are loaded in the correct order. 
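Allocate() above is a bump-pointer allocator: the space's top and limit words are kept adjacent so a single LDM fetches both, the object is carved off at top, and control continues at gc_required when the new top would pass the limit. The same logic as a standalone C++ sketch (illustrative only, not V8 code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct AllocationSpace {
      uintptr_t top;    // next free address
      uintptr_t limit;  // end of the space, stored next to top
    };

    // Returns the address of the new object, or 0 to signal "gc required".
    static uintptr_t BumpAllocate(AllocationSpace* space, size_t object_size) {
      uintptr_t result = space->top;
      uintptr_t new_top = result + object_size;
      if (new_top > space->limit) return 0;  // space exhausted
      space->top = new_top;
      return result;
    }

    int main() {
      AllocationSpace space = {0x1000, 0x1040};
      std::printf("%#lx\n", (unsigned long)BumpAllocate(&space, 0x20));  // 0x1000
      std::printf("%#lx\n", (unsigned long)BumpAllocate(&space, 0x30));  // 0 -> gc required
      return 0;
    }
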
- ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate()); + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + intptr_t top = - reinterpret_cast<intptr_t>(new_space_allocation_top.address()); + reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = - reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); + reinterpret_cast<intptr_t>(allocation_limit.address()); ASSERT((limit - top) == kPointerSize); ASSERT(result.code() < ip.code()); // Set up allocation top address and object size registers. Register topaddr = scratch1; Register obj_size_reg = scratch2; - mov(topaddr, Operand(new_space_allocation_top)); + mov(topaddr, Operand(allocation_top)); Operand obj_size_operand = Operand(object_size); if (!obj_size_operand.is_single_instruction(this)) { // We are about to steal IP, so we need to load this value first @@ -1606,6 +1637,19 @@ void MacroAssembler::AllocateInNewSpace(int object_size, ldr(ip, MemOperand(topaddr, limit - top)); } + if ((flags & DOUBLE_ALIGNMENT) != 0) { + // Align the next allocation. Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); + Label aligned; + b(eq, &aligned); + mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); + str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); + bind(&aligned); + } + // Calculate new top and bail out if new space is exhausted. Use result // to calculate the new top. if (obj_size_operand.is_single_instruction(this)) { @@ -1633,6 +1677,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, Register scratch2, Label* gc_required, AllocationFlags flags) { + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1691,6 +1736,18 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, ldr(ip, MemOperand(topaddr, limit - top)); } + if ((flags & DOUBLE_ALIGNMENT) != 0) { + // Align the next allocation. Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC); + Label aligned; + b(eq, &aligned); + mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); + str(scratch2, MemOperand(result, kDoubleSize / 2, PostIndex)); + bind(&aligned); + } + // Calculate new top and bail out if new space is exhausted. Use result // to calculate the new top. Object size may be in words so a shift is // required to get the number of bytes. @@ -1776,10 +1833,10 @@ void MacroAssembler::AllocateAsciiString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. 
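The DOUBLE_ALIGNMENT branches above keep the allocation pointer double-aligned by spending one pointer-sized filler word, and the string allocations that follow round their sizes up with the usual (n + mask) & ~mask idiom. Both in isolation, as a sketch with illustrative constants (the concrete alignment values are assumptions, not quoted from V8):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Align-up idiom for object sizes.
      const uint32_t kAlignment = 8;              // illustrative value only
      const uint32_t kAlignmentMask = kAlignment - 1;
      uint32_t unaligned_size = 19;
      uint32_t aligned_size = (unaligned_size + kAlignmentMask) & ~kAlignmentMask;
      assert(aligned_size == 24);

      // Double-aligning a bump pointer by skipping one filler word
      // (32-bit layout assumed: 4-byte pointers, 8-byte doubles).
      const uint32_t kPointerSize = 4;
      const uint32_t kDoubleAlignmentMask = 7;
      uint32_t top = 0x1004;                      // pointer-aligned, not double-aligned
      if ((top & kDoubleAlignmentMask) != 0) {
        // The generated code stores the one-pointer filler map at `top` here.
        top += kPointerSize;
      }
      assert((top & kDoubleAlignmentMask) == 0);
      return 0;
    }
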
- ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); ASSERT(kCharSize == 1); add(scratch1, length, - Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize)); + Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize)); and_(scratch1, scratch1, Operand(~kObjectAlignmentMask)); // Allocate ASCII string in new space. @@ -1804,12 +1861,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, @@ -1824,12 +1877,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, @@ -1844,12 +1893,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, @@ -1864,12 +1909,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, @@ -1945,13 +1986,13 @@ void MacroAssembler::CheckFastSmiElements(Register map, void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register key_reg, - Register receiver_reg, Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail) { + Label* fail, + int elements_offset) { Label smi_value, maybe_nan, have_double_value, is_nan, done; Register mantissa_reg = scratch2; Register exponent_reg = scratch3; @@ -1978,8 +2019,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&have_double_value); add(scratch1, elements_reg, Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + str(mantissa_reg, FieldMemOperand( + scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); + uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + + sizeof(kHoleNanLower32); str(exponent_reg, FieldMemOperand(scratch1, offset)); jmp(&done); @@ -1988,7 +2031,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, // it's an Infinity, and the non-NaN code path applies. b(gt, &is_nan); ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); - cmp(mantissa_reg, Operand(0)); + cmp(mantissa_reg, Operand::Zero()); b(eq, &have_double_value); bind(&is_nan); // Load canonical NaN for storing into the double array. 
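StoreNumberToDoubleElements above separates NaN from Infinity by inspecting the exponent and mantissa words before deciding whether to store the canonical NaN. The same classification in portable C++, as a reference sketch (not code from the patch):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // A double is NaN iff its exponent bits are all ones and its mantissa is
    // non-zero; an all-ones exponent with a zero mantissa is +/-Infinity.
    static bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint64_t exponent = (bits >> 52) & 0x7FF;
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      return exponent == 0x7FF && mantissa != 0;
    }

    int main() {
      assert(IsNaNBits(std::nan("")));
      assert(!IsNaNBits(INFINITY));  // all-ones exponent, zero mantissa
      assert(!IsNaNBits(1.5));
      return 0;
    }
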
@@ -2000,7 +2043,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&smi_value); add(scratch1, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - + elements_offset)); add(scratch1, scratch1, Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); // scratch1 is now effective address of the double element @@ -2023,7 +2067,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, scratch4, s2); if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(this, VFP2); vstr(d0, scratch1, 0); } else { str(mantissa_reg, MemOperand(scratch1, 0)); @@ -2172,15 +2216,18 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, } -void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { +void MacroAssembler::CallStub(CodeStub* stub, + TypeFeedbackId ast_id, + Condition cond) { ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. - Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), cond); + Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond); } void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { - ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); - Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); + ASSERT(allow_stub_calls_ || + stub->CompilingCallsToThisStubIsGCSafe(isolate())); + Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, cond); } @@ -2192,13 +2239,13 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, int stack_space) { ExternalReference next_address = - ExternalReference::handle_scope_next_address(); + ExternalReference::handle_scope_next_address(isolate()); const int kNextOffset = 0; const int kLimitOffset = AddressOffset( - ExternalReference::handle_scope_limit_address(), + ExternalReference::handle_scope_limit_address(isolate()), next_address); const int kLevelOffset = AddressOffset( - ExternalReference::handle_scope_level_address(), + ExternalReference::handle_scope_level_address(isolate()), next_address); // Allocate HandleScope in callee-save registers. @@ -2209,19 +2256,35 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, add(r6, r6, Operand(1)); str(r6, MemOperand(r7, kLevelOffset)); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, r0); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PopSafepointRegisters(); + } + // Native call returns to the DirectCEntry stub which redirects to the // return address pushed on stack (could have moved after GC). // DirectCEntry stub itself is generated early and never moves. DirectCEntryStub stub; stub.GenerateCall(this, function); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, r0); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PopSafepointRegisters(); + } + Label promote_scheduled_exception; Label delete_allocated_handles; Label leave_exit_frame; // If result is non-zero, dereference to get the result value // otherwise set it to undefined. 
- cmp(r0, Operand(0)); + cmp(r0, Operand::Zero()); LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); ldr(r0, MemOperand(r0), ne); @@ -2273,7 +2336,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; - return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate()); } @@ -2361,278 +2424,243 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi, } -// Tries to get a signed int32 out of a double precision floating point heap -// number. Rounds towards 0. Branch to 'not_int32' if the double is out of the -// 32bits signed integer range. -void MacroAssembler::ConvertToInt32(Register source, - Register dest, - Register scratch, - Register scratch2, - DwVfpRegister double_scratch, - Label *not_int32) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - sub(scratch, source, Operand(kHeapObjectTag)); - vldr(double_scratch, scratch, HeapNumber::kValueOffset); - vcvt_s32_f64(double_scratch.low(), double_scratch); - vmov(dest, double_scratch.low()); - // Signed vcvt instruction will saturate to the minimum (0x80000000) or - // maximun (0x7fffffff) signed 32bits integer when the double is out of - // range. When substracting one, the minimum signed integer becomes the - // maximun signed integer. - sub(scratch, dest, Operand(1)); - cmp(scratch, Operand(LONG_MAX - 1)); - // If equal then dest was LONG_MAX, if greater dest was LONG_MIN. - b(ge, not_int32); - } else { - // This code is faster for doubles that are in the ranges -0x7fffffff to - // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost to - // the range of signed int32 values that are not Smis. Jumps to the label - // 'not_int32' if the double isn't in the range -0x80000000.0 to - // 0x80000000.0 (excluding the endpoints). - Label right_exponent, done; - // Get exponent word. - ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); - // Get exponent alone in scratch2. - Ubfx(scratch2, - scratch, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); - // Load dest with zero. We use this either for the final shift or - // for the answer. - mov(dest, Operand(0, RelocInfo::NONE)); - // Check whether the exponent matches a 32 bit signed int that is not a Smi. - // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is - // the exponent that we are fastest at and also the highest exponent we can - // handle here. - const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; - // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we - // split it up to avoid a constant pool entry. You can't do that in general - // for cmp because of the overflow flag, but we know the exponent is in the - // range 0-2047 so there is no overflow. - int fudge_factor = 0x400; - sub(scratch2, scratch2, Operand(fudge_factor)); - cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); - // If we have a match of the int32-but-not-Smi exponent then skip some - // logic. - b(eq, &right_exponent); - // If the exponent is higher than that then go to slow case. This catches - // numbers that don't fit in a signed int32, infinities and NaNs. - b(gt, not_int32); - - // We know the exponent is smaller than 30 (biased). If it is less than - // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. - // it rounds to zero. 
- const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; - sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); - // Dest already has a Smi zero. - b(lt, &done); - - // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to - // get how much to shift down. - rsb(dest, scratch2, Operand(30)); - - bind(&right_exponent); - // Get the top bits of the mantissa. - and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); - // Put back the implicit 1. - orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); - // Shift up the mantissa bits to take up the space the exponent used to - // take. We just orred in the implicit bit so that took care of one and - // we want to leave the sign bit 0 so we subtract 2 bits from the shift - // distance. - const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; - mov(scratch2, Operand(scratch2, LSL, shift_distance)); - // Put sign in zero flag. - tst(scratch, Operand(HeapNumber::kSignMask)); - // Get the second half of the double. For some exponents we don't - // actually need this because the bits get shifted out again, but - // it's probably slower to test than just to do it. - ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); - // Shift down 22 bits to get the last 10 bits. - orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); - // Move down according to the exponent. - mov(dest, Operand(scratch, LSR, dest)); - // Fix sign if sign bit was set. - rsb(dest, dest, Operand(0, RelocInfo::NONE), LeaveCC, ne); - bind(&done); - } +void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input, + DwVfpRegister double_scratch) { + ASSERT(!double_input.is(double_scratch)); + ASSERT(CpuFeatures::IsSupported(VFP2)); + CpuFeatureScope scope(this, VFP2); + + vcvt_s32_f64(double_scratch.low(), double_input); + vcvt_f64_s32(double_scratch, double_scratch.low()); + VFPCompareAndSetFlags(double_input, double_scratch); } -void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode, - Register result, - DwVfpRegister double_input, - Register scratch, - DwVfpRegister double_scratch, - CheckForInexactConversion check_inexact) { - ASSERT(!result.is(scratch)); +void MacroAssembler::TryDoubleToInt32Exact(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch) { ASSERT(!double_input.is(double_scratch)); - ASSERT(CpuFeatures::IsSupported(VFP2)); - CpuFeatures::Scope scope(VFP2); - Register prev_fpscr = result; - Label done; + CpuFeatureScope scope(this, VFP2); - // Test for values that can be exactly represented as a signed 32-bit integer. vcvt_s32_f64(double_scratch.low(), double_input); vmov(result, double_scratch.low()); vcvt_f64_s32(double_scratch, double_scratch.low()); VFPCompareAndSetFlags(double_input, double_scratch); - b(eq, &done); - - // Convert to integer, respecting rounding mode. - int32_t check_inexact_conversion = - (check_inexact == kCheckForInexactConversion) ? kVFPInexactExceptionBit : 0; - - // Set custom FPCSR: - // - Set rounding mode. - // - Clear vfp cumulative exception flags. - // - Make sure Flush-to-zero mode control bit is unset. - vmrs(prev_fpscr); - bic(scratch, - prev_fpscr, - Operand(kVFPExceptionMask | - check_inexact_conversion | - kVFPRoundingModeMask | - kVFPFlushToZeroMask)); - // 'Round To Nearest' is encoded by 0b00 so no bits need to be set. - if (rounding_mode != kRoundToNearest) { - orr(scratch, scratch, Operand(rounding_mode)); - } - vmsr(scratch); - - // Convert the argument to an integer. 
- vcvt_s32_f64(double_scratch.low(), - double_input, - (rounding_mode == kRoundToZero) ? kDefaultRoundToZero - : kFPSCRRounding); - - // Retrieve FPSCR. - vmrs(scratch); - // Restore FPSCR. - vmsr(prev_fpscr); - // Move the converted value into the result register. - vmov(result, double_scratch.low()); - // Check for vfp exceptions. - tst(scratch, Operand(kVFPExceptionMask | check_inexact_conversion)); - - bind(&done); } -void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result, - Register input_high, - Register input_low, - Register scratch) { - Label done, normal_exponent, restore_sign; - - // Extract the biased exponent in result. - Ubfx(result, - input_high, - HeapNumber::kExponentShift, - HeapNumber::kExponentBits); +void MacroAssembler::TryInt32Floor(Register result, + DwVfpRegister double_input, + Register input_high, + DwVfpRegister double_scratch, + Label* done, + Label* exact) { + ASSERT(!result.is(input_high)); + ASSERT(!double_input.is(double_scratch)); + ASSERT(CpuFeatures::IsSupported(VFP2)); + CpuFeatureScope scope(this, VFP2); + Label negative, exception; + + // Test for NaN and infinities. + Sbfx(result, input_high, + HeapNumber::kExponentShift, HeapNumber::kExponentBits); + cmp(result, Operand(-1)); + b(eq, &exception); + // Test for values that can be exactly represented as a + // signed 32-bit integer. + TryDoubleToInt32Exact(result, double_input, double_scratch); + // If exact, return (result already fetched). + b(eq, exact); + cmp(input_high, Operand::Zero()); + b(mi, &negative); + + // Input is in ]+0, +inf[. + // If result equals 0x7fffffff input was out of range or + // in ]0x7fffffff, 0x80000000[. We ignore this last case which + // could fits into an int32, that means we always think input was + // out of range and always go to exception. + // If result < 0x7fffffff, go to done, result fetched. + cmn(result, Operand(1)); + b(mi, &exception); + b(done); + + // Input is in ]-inf, -0[. + // If x is a non integer negative number, + // floor(x) <=> round_to_zero(x) - 1. + bind(&negative); + sub(result, result, Operand(1), SetCC); + // If result is still negative, go to done, result fetched. + // Else, we had an overflow and we fall through exception. + b(mi, done); + bind(&exception); +} + + +void MacroAssembler::ECMAConvertNumberToInt32(Register source, + Register result, + Register scratch, + Register input_high, + Register input_low, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2) { + if (CpuFeatures::IsSupported(VFP2)) { + CpuFeatureScope scope(this, VFP2); + vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset)); + ECMAToInt32VFP(result, double_scratch1, double_scratch2, + scratch, input_high, input_low); + } else { + Ldrd(input_low, input_high, + FieldMemOperand(source, HeapNumber::kValueOffset)); + ECMAToInt32NoVFP(result, scratch, input_high, input_low); + } +} - // Check for Infinity and NaNs, which should return 0. - cmp(result, Operand(HeapNumber::kExponentMask)); - mov(result, Operand(0), LeaveCC, eq); - b(eq, &done); - // Express exponent as delta to (number of mantissa bits + 31). 
- sub(result, - result, - Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31), - SetCC); +void MacroAssembler::ECMAToInt32VFP(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Register scratch, + Register input_high, + Register input_low) { + CpuFeatureScope scope(this, VFP2); + ASSERT(!input_high.is(result)); + ASSERT(!input_low.is(result)); + ASSERT(!input_low.is(input_high)); + ASSERT(!scratch.is(result) && + !scratch.is(input_high) && + !scratch.is(input_low)); + ASSERT(!double_input.is(double_scratch)); - // If the delta is strictly positive, all bits would be shifted away, - // which means that we can return 0. - b(le, &normal_exponent); - mov(result, Operand(0)); - b(&done); + Label out_of_range, negate, done; - bind(&normal_exponent); - const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1; - // Calculate shift. - add(scratch, result, Operand(kShiftBase + HeapNumber::kMantissaBits), SetCC); + vcvt_s32_f64(double_scratch.low(), double_input); + vmov(result, double_scratch.low()); - // Save the sign. - Register sign = result; - result = no_reg; - and_(sign, input_high, Operand(HeapNumber::kSignMask)); + // If result is not saturated (0x7fffffff or 0x80000000), we are done. + sub(scratch, result, Operand(1)); + cmp(scratch, Operand(0x7ffffffe)); + b(lt, &done); + vmov(input_low, input_high, double_input); + Ubfx(scratch, input_high, + HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // Load scratch with exponent - 1. This is faster than loading + // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. + sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); + // If exponent is greater than or equal to 84, the 32 less significant + // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), + // the result is 0. + // Compare exponent with 84 (compare exponent - 1 with 83). + cmp(scratch, Operand(83)); + b(ge, &out_of_range); + + // If we reach this code, 31 <= exponent <= 83. + // So, we don't have to handle cases where 0 <= exponent <= 20 for + // which we would need to shift right the high part of the mantissa. + ECMAToInt32Tail(result, scratch, input_high, input_low, + &out_of_range, &negate, &done); +} + + +void MacroAssembler::ECMAToInt32NoVFP(Register result, + Register scratch, + Register input_high, + Register input_low) { + ASSERT(!result.is(scratch)); + ASSERT(!result.is(input_high)); + ASSERT(!result.is(input_low)); + ASSERT(!scratch.is(input_high)); + ASSERT(!scratch.is(input_low)); + ASSERT(!input_high.is(input_low)); + + Label both, out_of_range, negate, done; + + Ubfx(scratch, input_high, + HeapNumber::kExponentShift, HeapNumber::kExponentBits); + // Load scratch with exponent - 1. This is faster than loading + // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value. + sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); + // If exponent is negative, 0 < input < 1, the result is 0. + // If exponent is greater than or equal to 84, the 32 less significant + // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), + // the result is 0. + // This test also catch Nan and infinities which also return 0. + // Compare exponent with 84 (compare exponent - 1 with 83). + cmp(scratch, Operand(83)); + // We do an unsigned comparison so negative numbers are treated as big + // positive number and the two tests above are done in one test. + b(hs, &out_of_range); + + // Load scratch with 20 - exponent (load with 19 - (exponent - 1)). 
+ rsb(scratch, scratch, Operand(19), SetCC); + b(mi, &both); + + // 0 <= exponent <= 20, shift only input_high. + // Scratch contains: 20 - exponent. + Ubfx(result, input_high, + 0, HeapNumber::kMantissaBitsInTopWord); // Set the implicit 1 before the mantissa part in input_high. - orr(input_high, - input_high, - Operand(1 << HeapNumber::kMantissaBitsInTopWord)); - // Shift the mantissa bits to the correct position. - // We don't need to clear non-mantissa bits as they will be shifted away. - // If they weren't, it would mean that the answer is in the 32bit range. - mov(input_high, Operand(input_high, LSL, scratch)); - - // Replace the shifted bits with bits from the lower mantissa word. - Label pos_shift, shift_done; - rsb(scratch, scratch, Operand(32), SetCC); - b(&pos_shift, ge); - - // Negate scratch. - rsb(scratch, scratch, Operand(0)); - mov(input_low, Operand(input_low, LSL, scratch)); - b(&shift_done); - - bind(&pos_shift); - mov(input_low, Operand(input_low, LSR, scratch)); + orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); + mov(result, Operand(result, LSR, scratch)); + b(&negate); - bind(&shift_done); - orr(input_high, input_high, Operand(input_low)); - // Restore sign if necessary. - cmp(sign, Operand(0)); - result = sign; - sign = no_reg; - rsb(result, input_high, Operand(0), LeaveCC, ne); - mov(result, input_high, LeaveCC, eq); - bind(&done); + bind(&both); + // Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP. + rsb(scratch, scratch, Operand(19)); + ECMAToInt32Tail(result, scratch, input_high, input_low, + &out_of_range, &negate, &done); } -void MacroAssembler::EmitECMATruncate(Register result, - DwVfpRegister double_input, - SwVfpRegister single_scratch, - Register scratch, - Register input_high, - Register input_low) { - CpuFeatures::Scope scope(VFP2); - ASSERT(!input_high.is(result)); - ASSERT(!input_low.is(result)); - ASSERT(!input_low.is(input_high)); - ASSERT(!scratch.is(result) && - !scratch.is(input_high) && - !scratch.is(input_low)); - ASSERT(!single_scratch.is(double_input.low()) && - !single_scratch.is(double_input.high())); +void MacroAssembler::ECMAToInt32Tail(Register result, + Register scratch, + Register input_high, + Register input_low, + Label* out_of_range, + Label* negate, + Label* done) { + Label only_low; + + // On entry, scratch contains exponent - 1. + // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). + rsb(scratch, scratch, Operand(51), SetCC); + b(ls, &only_low); + // 21 <= exponent <= 51, shift input_low and input_high + // to generate the result. + mov(input_low, Operand(input_low, LSR, scratch)); + // Scratch contains: 52 - exponent. + // We needs: exponent - 20. + // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. + rsb(scratch, scratch, Operand(32)); + Ubfx(result, input_high, + 0, HeapNumber::kMantissaBitsInTopWord); + // Set the implicit 1 before the mantissa part in input_high. + orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord)); + orr(result, input_low, Operand(result, LSL, scratch)); + b(negate); - Label done; + bind(out_of_range); + mov(result, Operand::Zero()); + b(done); - // Clear cumulative exception flags. - ClearFPSCRBits(kVFPExceptionMask, scratch); - // Try a conversion to a signed integer. - vcvt_s32_f64(single_scratch, double_input); - vmov(result, single_scratch); - // Retrieve he FPSCR. - vmrs(scratch); - // Check for overflow and NaNs. 
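ECMAToInt32VFP, ECMAToInt32NoVFP and ECMAToInt32Tail above implement ECMA-262 9.5 ToInt32 by shifting the raw mantissa words according to the exponent. As a reference for what that bookkeeping has to produce, here is the same conversion in portable C++ (a sketch of the specified semantics, not V8 code):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // ECMA-262 ToInt32: truncate toward zero, reduce modulo 2^32, then
    // reinterpret the low 32 bits as a signed value.
    static int32_t ToInt32(double d) {
      if (!std::isfinite(d)) return 0;                 // NaN, +/-Infinity -> 0
      double truncated = std::trunc(d);                // round toward zero
      double modulo = std::fmod(truncated, 4294967296.0);
      if (modulo < 0) modulo += 4294967296.0;          // bring into [0, 2^32)
      int64_t value = static_cast<int64_t>(modulo);
      if (value >= 2147483648LL) value -= 4294967296LL;  // wrap into int32 range
      return static_cast<int32_t>(value);
    }

    int main() {
      std::printf("%d\n", ToInt32(1.9));             // 1
      std::printf("%d\n", ToInt32(-1.9));            // -1
      std::printf("%d\n", ToInt32(4294967299.0));    // 3  (2^32 + 3)
      std::printf("%d\n", ToInt32(2147483648.0));    // -2147483648  (2^31)
      std::printf("%d\n", ToInt32(std::nan("")));    // 0
      return 0;
    }
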
- tst(scratch, Operand(kVFPOverflowExceptionBit | - kVFPUnderflowExceptionBit | - kVFPInvalidOpExceptionBit)); - // If we had no exceptions we are done. - b(eq, &done); + bind(&only_low); + // 52 <= exponent <= 83, shift only input_low. + // On entry, scratch contains: 52 - exponent. + rsb(scratch, scratch, Operand::Zero()); + mov(result, Operand(input_low, LSL, scratch)); - // Load the double value and perform a manual truncation. - vmov(input_low, input_high, double_input); - EmitOutOfInt32RangeTruncate(result, - input_high, - input_low, - scratch); - bind(&done); + bind(negate); + // If input was positive, input_high ASR 31 equals 0 and + // input_high LSR 31 equals zero. + // New result = (result eor 0) + 0 = result. + // If the input was negative, we have to negate the result. + // Input_high ASR 31 equals 0xffffffff and input_high LSR 31 equals 1. + // New result = (result eor 0xffffffff) + 1 = 0 - result. + eor(result, result, Operand(input_high, ASR, 31)); + add(result, result, Operand(input_high, LSR, 31)); + + bind(done); } @@ -2687,7 +2715,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); mov(r0, Operand(function->nargs)); mov(r1, Operand(ExternalReference(function, isolate()))); - CEntryStub stub(1, kSaveFPRegs); + SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) + ? kSaveFPRegs + : kDontSaveFPRegs; + CEntryStub stub(1, mode); CallStub(&stub); } @@ -2730,7 +2761,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { #endif mov(r1, Operand(builtin)); CEntryStub stub(1); - Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); } @@ -2982,6 +3013,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) { } +void MacroAssembler::LoadArrayFunction(Register function) { + // Load the global or builtins object from the current context. + ldr(function, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + // Load the global context from the global or builtins object. + ldr(function, + FieldMemOperand(function, GlobalObject::kGlobalContextOffset)); + // Load the array function from the native context. + ldr(function, + MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); +} + + void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch) { @@ -3089,6 +3133,20 @@ void MacroAssembler::AssertString(Register object) { } +void MacroAssembler::AssertName(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + tst(object, Operand(kSmiTagMask)); + Check(ne, "Operand is a smi and not a name"); + push(object); + ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); + CompareInstanceType(object, object, LAST_NAME_TYPE); + pop(object); + Check(le, "Operand is not a name"); + } +} + + void MacroAssembler::AssertRootValue(Register src, Heap::RootListIndex root_value_index, @@ -3158,13 +3216,8 @@ void MacroAssembler::AllocateHeapNumber(Register result, TaggingMode tagging_mode) { // Allocate an object in the heap for the heap number and tag it as a heap // object. - AllocateInNewSpace(HeapNumber::kSize, - result, - scratch1, - scratch2, - gc_required, - tagging_mode == TAG_RESULT ? TAG_OBJECT : - NO_ALLOCATION_FLAGS); + Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, + tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); // Store heap number map in the allocated object. 
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); @@ -3224,7 +3277,7 @@ void MacroAssembler::CopyBytes(Register src, // Align src before copying in word size chunks. bind(&align_loop); - cmp(length, Operand(0)); + cmp(length, Operand::Zero()); b(eq, &done); bind(&align_loop_1); tst(src, Operand(kPointerSize - 1)); @@ -3259,7 +3312,7 @@ void MacroAssembler::CopyBytes(Register src, // Copy the last bytes if any left. bind(&byte_loop); - cmp(length, Operand(0)); + cmp(length, Operand::Zero()); b(eq, &done); bind(&byte_loop_1); ldrb(scratch, MemOperand(src, 1, PostIndex)); @@ -3297,7 +3350,7 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. // Order of the next two lines is important: zeros register // can be the same as source register. Move(scratch, source); - mov(zeros, Operand(0, RelocInfo::NONE)); + mov(zeros, Operand::Zero()); // Top 16. tst(scratch, Operand(0xffff0000)); add(zeros, zeros, Operand(16), LeaveCC, eq); @@ -3321,6 +3374,13 @@ void MacroAssembler::CountLeadingZeros(Register zeros, // Answer. } +void MacroAssembler::CheckFor32DRegs(Register scratch) { + mov(scratch, Operand(ExternalReference::cpu_features())); + ldr(scratch, MemOperand(scratch)); + tst(scratch, Operand(1u << VFP32DREGS)); +} + + void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( Register first, Register second, @@ -3359,9 +3419,9 @@ int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, if (use_eabi_hardfloat()) { // In the hard floating point calling convention, we can use // all double registers to pass doubles. - if (num_double_arguments > DoubleRegister::kNumRegisters) { + if (num_double_arguments > DoubleRegister::NumRegisters()) { stack_passed_words += - 2 * (num_double_arguments - DoubleRegister::kNumRegisters); + 2 * (num_double_arguments - DoubleRegister::NumRegisters()); } } else { // In the soft floating point calling convention, every double @@ -3402,7 +3462,7 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, } -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { +void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { Move(d0, dreg); @@ -3412,8 +3472,8 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { } -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, - DoubleRegister dreg2) { +void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1, + DwVfpRegister dreg2) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { if (dreg2.is(d0)) { @@ -3431,7 +3491,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, } -void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, +void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg, Register reg) { ASSERT(CpuFeatures::IsSupported(VFP2)); if (use_eabi_hardfloat()) { @@ -3684,7 +3744,7 @@ void MacroAssembler::EnsureNotWhite( // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. 
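The length trick described above relies on the one-bit smi tag being zero: a smi-encoded string length is literally length * 2, which is already the byte size of a two-byte (UC16) string, and a single shift recovers the byte size of a one-byte string. In isolation (sketch; assumes the smi layout the ASSERT just below checks, kSmiTag == 0 and kSmiTagSize == 1):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kSmiTagSize = 1;                   // one tag bit, tag value 0
      uint32_t length = 7;
      uint32_t smi_length = length << kSmiTagSize;      // how the length field is stored
      assert(smi_length == 2 * length);                 // byte size of a UC16 string
      assert((smi_length >> kSmiTagSize) == length);    // char count / ASCII byte size
      return 0;
    }
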
- ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); + ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); ASSERT(kSmiTag == 0 && kSmiTagSize == 1); ldr(ip, FieldMemOperand(value, String::kLengthOffset)); tst(instance_type, Operand(kStringEncodingMask)); @@ -3714,8 +3774,8 @@ void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { void MacroAssembler::ClampDoubleToUint8(Register result_reg, - DoubleRegister input_reg, - DoubleRegister temp_double_reg) { + DwVfpRegister input_reg, + DwVfpRegister temp_double_reg) { Label above_zero; Label done; Label in_bounds; @@ -3725,7 +3785,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg, b(gt, &above_zero); // Double value is less than zero, NaN or Inf, return 0. - mov(result_reg, Operand(0)); + mov(result_reg, Operand::Zero()); b(al, &done); // Double value is >= 255, return 255. @@ -3808,6 +3868,29 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { } +void MacroAssembler::TestJSArrayForAllocationSiteInfo( + Register receiver_reg, + Register scratch_reg) { + Label no_info_available; + ExternalReference new_space_start = + ExternalReference::new_space_start(isolate()); + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + add(scratch_reg, receiver_reg, + Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag)); + cmp(scratch_reg, Operand(new_space_start)); + b(lt, &no_info_available); + mov(ip, Operand(new_space_allocation_top)); + ldr(ip, MemOperand(ip)); + cmp(scratch_reg, ip); + b(gt, &no_info_available); + ldr(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize)); + cmp(scratch_reg, + Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); + bind(&no_info_available); +} + + #ifdef DEBUG bool AreAliased(Register reg1, Register reg2, @@ -3834,7 +3917,6 @@ bool AreAliased(Register reg1, CodePatcher::CodePatcher(byte* address, int instructions) : address_(address), - instructions_(instructions), size_(instructions * Assembler::kInstrSize), masm_(NULL, address, size_ + Assembler::kGap) { // Create a new macro assembler pointing to the address of the code to patch. diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h index e3e39a3879..5cbe995d76 100644 --- a/deps/v8/src/arm/macro-assembler-arm.h +++ b/deps/v8/src/arm/macro-assembler-arm.h @@ -54,20 +54,6 @@ inline Operand SmiUntagOperand(Register object) { const Register cp = { 8 }; // JavaScript context pointer const Register kRootRegister = { 10 }; // Roots array pointer. -// Flags used for the AllocateInNewSpace functions. -enum AllocationFlags { - // No special flags. - NO_ALLOCATION_FLAGS = 0, - // Return the pointer to the allocated already tagged as a heap object. - TAG_OBJECT = 1 << 0, - // The content of the result register already contains the allocation top in - // new space. - RESULT_CONTAINS_TOP = 1 << 1, - // Specify that the requested size of the space to allocate is specified in - // words instead of bytes. - SIZE_IN_WORDS = 1 << 2 -}; - // Flags used for AllocateHeapNumber enum TaggingMode { // Tag the result. @@ -178,7 +164,7 @@ class MacroAssembler: public Assembler { // Register move. May do nothing if the registers are identical. 
void Move(Register dst, Handle<Object> value); void Move(Register dst, Register src, Condition cond = al); - void Move(DoubleRegister dst, DoubleRegister src); + void Move(DwVfpRegister dst, DwVfpRegister src); // Load an object from the root table. void LoadRoot(Register destination, @@ -322,6 +308,7 @@ class MacroAssembler: public Assembler { // Push a handle. void Push(Handle<Object> handle); + void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } // Push two registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2, Condition cond = al) { @@ -473,11 +460,6 @@ class MacroAssembler: public Assembler { const MemOperand& dst, Condition cond = al); - // Clear specified FPSCR bits. - void ClearFPSCRBits(const uint32_t bits_to_clear, - const Register scratch, - const Condition cond = al); - // Compare double values and move the result to the normal condition flags. void VFPCompareAndSetFlags(const DwVfpRegister src1, const DwVfpRegister src2, @@ -498,8 +480,7 @@ class MacroAssembler: public Assembler { void Vmov(const DwVfpRegister dst, const double imm, - const Register scratch = no_reg, - const Condition cond = al); + const Register scratch = no_reg); // Enter exit frame. // stack_space - extra stack space, used for alignment before call to C. @@ -533,6 +514,7 @@ class MacroAssembler: public Assembler { bool can_have_holes); void LoadGlobalFunction(int index, Register function); + void LoadArrayFunction(Register function); // Load the initial map from the global function. The registers // function and map can be the same, function is then overwritten. @@ -596,6 +578,10 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); + void IsObjectNameType(Register object, + Register scratch, + Label* fail); + #ifdef ENABLE_DEBUGGER_SUPPORT // --------------------------------------------------------------------------- // Debugger Support @@ -679,19 +665,20 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Allocation support - // Allocate an object in new space. The object_size is specified - // either in bytes or in words if the allocation flag SIZE_IN_WORDS - // is passed. If the new space is exhausted control continues at the - // gc_required label. The allocated object is returned in result. If - // the flag tag_allocated_object is true the result is tagged as as - // a heap object. All registers are clobbered also when control - // continues at the gc_required label. - void AllocateInNewSpace(int object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags); + // Allocate an object in new space or old pointer space. The object_size is + // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS + // is passed. If the space is exhausted control continues at the gc_required + // label. The allocated object is returned in result. If the flag + // tag_allocated_object is true the result is tagged as as a heap object. + // All registers are clobbered also when control continues at the gc_required + // label. + void Allocate(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + void AllocateInNewSpace(Register object_size, Register result, Register scratch1, @@ -831,14 +818,14 @@ class MacroAssembler: public Assembler { // case scratch2, scratch3 and scratch4 are unmodified. 
void StoreNumberToDoubleElements(Register value_reg, Register key_reg, - Register receiver_reg, // All regs below here overwritten. Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail); + Label* fail, + int elements_offset = 0); // Compare an object's map with the specified map and its transitioned // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are @@ -893,12 +880,15 @@ class MacroAssembler: public Assembler { // Load and check the instance type of an object for being a string. // Loads the type into the second argument register. - // Returns a condition that will be enabled if the object was a string. + // Returns a condition that will be enabled if the object was a string + // and the passed-in condition passed. If the passed-in condition failed + // then flags remain unchanged. Condition IsObjectStringType(Register obj, - Register type) { - ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset)); - ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); - tst(type, Operand(kIsNotStringMask)); + Register type, + Condition cond = al) { + ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); + ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); + tst(type, Operand(kIsNotStringMask), cond); ASSERT_EQ(0, kStringTag); return eq; } @@ -944,51 +934,55 @@ class MacroAssembler: public Assembler { Register scratch1, SwVfpRegister scratch2); - // Convert the HeapNumber pointed to by source to a 32bits signed integer - // dest. If the HeapNumber does not fit into a 32bits signed integer branch - // to not_int32 label. If VFP3 is available double_scratch is used but not - // scratch2. - void ConvertToInt32(Register source, - Register dest, - Register scratch, - Register scratch2, - DwVfpRegister double_scratch, - Label *not_int32); - - // Truncates a double using a specific rounding mode, and writes the value - // to the result register. - // Clears the z flag (ne condition) if an overflow occurs. - // If kCheckForInexactConversion is passed, the z flag is also cleared if the - // conversion was inexact, i.e. if the double value could not be converted - // exactly to a 32-bit integer. - void EmitVFPTruncate(VFPRoundingMode rounding_mode, - Register result, - DwVfpRegister double_input, - Register scratch, - DwVfpRegister double_scratch, - CheckForInexactConversion check - = kDontCheckForInexactConversion); - - // Helper for EmitECMATruncate. - // This will truncate a floating-point value outside of the signed 32bit - // integer range to a 32bit signed integer. - // Expects the double value loaded in input_high and input_low. - // Exits with the answer in 'result'. - // Note that this code does not work for values in the 32bit range! - void EmitOutOfInt32RangeTruncate(Register result, - Register input_high, - Register input_low, - Register scratch); + // Check if a double can be exactly represented as a signed 32-bit integer. + // Z flag set to one if true. + void TestDoubleIsInt32(DwVfpRegister double_input, + DwVfpRegister double_scratch); + + // Try to convert a double to a signed 32-bit integer. + // Z flag set to one and result assigned if the conversion is exact. + void TryDoubleToInt32Exact(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch); + + // Floor a double and writes the value to the result register. + // Go to exact if the conversion is exact (to be able to test -0), + // fall through calling code if an overflow occurred, else go to done. 
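TestDoubleIsInt32 and TryDoubleToInt32Exact, declared above, both answer the question "does this double hold an exact 32-bit integer value?". A plain C++ sketch of that predicate, ignoring the -0.0 distinction that the assembly handles separately via the exact label:

#include <cstdint>

bool DoubleIsInt32(double value, int32_t* out) {
  // Reject NaN and anything outside the int32 range up front; the cast below
  // would otherwise be undefined behaviour.
  if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
  int32_t as_int = static_cast<int32_t>(value);             // truncate toward zero
  if (static_cast<double>(as_int) != value) return false;   // had a fractional part
  *out = as_int;
  return true;
}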
+ void TryInt32Floor(Register result, + DwVfpRegister double_input, + Register input_high, + DwVfpRegister double_scratch, + Label* done, + Label* exact); + + // Performs a truncating conversion of a heap floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. + void ECMAConvertNumberToInt32(Register source, + Register result, + Register scratch, + Register input_high, + Register input_low, + DwVfpRegister double_scratch1, + DwVfpRegister double_scratch2); // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. See ECMA-262 9.5: ToInt32. // Exits with 'result' holding the answer and all other registers clobbered. - void EmitECMATruncate(Register result, - DwVfpRegister double_input, - SwVfpRegister single_scratch, + void ECMAToInt32VFP(Register result, + DwVfpRegister double_input, + DwVfpRegister double_scratch, + Register scratch, + Register input_high, + Register input_low); + + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. + void ECMAToInt32NoVFP(Register result, Register scratch, - Register scratch2, - Register scratch3); + Register input_high, + Register input_low); // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz // instruction. On pre-ARM5 hardware this routine gives the wrong answer @@ -999,11 +993,18 @@ class MacroAssembler: public Assembler { Register source, Register scratch); + // Check whether d16-d31 are available on the CPU. The result is given by the + // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. + void CheckFor32DRegs(Register scratch); + + // --------------------------------------------------------------------------- // Runtime calls // Call a code stub. - void CallStub(CodeStub* stub, Condition cond = al); + void CallStub(CodeStub* stub, + TypeFeedbackId ast_id = TypeFeedbackId::None(), + Condition cond = al); // Call a code stub. void TailCallStub(CodeStub* stub, Condition cond = al); @@ -1054,9 +1055,9 @@ class MacroAssembler: public Assembler { // whether soft or hard floating point ABI is used. These functions // abstract parameter passing for the three different ways we call // C functions from generated code. - void SetCallCDoubleArguments(DoubleRegister dreg); - void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2); - void SetCallCDoubleArguments(DoubleRegister dreg, Register reg); + void SetCallCDoubleArguments(DwVfpRegister dreg); + void SetCallCDoubleArguments(DwVfpRegister dreg1, DwVfpRegister dreg2); + void SetCallCDoubleArguments(DwVfpRegister dreg, Register reg); // Calls a C function and cleans up the space for arguments allocated // by PrepareCallCFunction. The called function is not allowed to trigger a @@ -1072,7 +1073,7 @@ class MacroAssembler: public Assembler { int num_reg_arguments, int num_double_arguments); - void GetCFunctionDoubleResult(const DoubleRegister dst); + void GetCFunctionDoubleResult(const DwVfpRegister dst); // Calls an API function. Allocates HandleScope, extracts returned value // from handle and propagates exceptions. Restores context. stack_space @@ -1202,7 +1203,7 @@ class MacroAssembler: public Assembler { // Souce and destination can be the same register. void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); - // Jump the register contains a smi. 
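The ECMAToInt32VFP / ECMAToInt32NoVFP helpers declared above implement the conversion specified by ECMA-262 9.5 (ToInt32) in assembly. As a reference model of the specified semantics only (the generated code obviously never calls fmod):

#include <cmath>
#include <cstdint>

int32_t ToInt32(double value) {
  if (std::isnan(value) || std::isinf(value) || value == 0.0) return 0;
  double truncated = std::trunc(value);                 // round toward zero
  double modulo = std::fmod(truncated, 4294967296.0);   // take the value mod 2^32
  if (modulo < 0.0) modulo += 4294967296.0;
  if (modulo >= 2147483648.0) modulo -= 4294967296.0;   // map [2^31, 2^32) to negatives
  return static_cast<int32_t>(modulo);
}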
+ // Jump if the register contains a smi. inline void JumpIfSmi(Register value, Label* smi_label) { tst(value, Operand(kSmiTagMask)); b(eq, smi_label); @@ -1221,9 +1222,12 @@ class MacroAssembler: public Assembler { void AssertNotSmi(Register object); void AssertSmi(Register object); - // Abort execution if argument is a string, enabled via --debug-code. + // Abort execution if argument is not a string, enabled via --debug-code. void AssertString(Register object); + // Abort execution if argument is not a name, enabled via --debug-code. + void AssertName(Register object); + // Abort execution if argument is not the root value with the given index, // enabled via --debug-code. void AssertRootValue(Register src, @@ -1285,8 +1289,8 @@ class MacroAssembler: public Assembler { void ClampUint8(Register output_reg, Register input_reg); void ClampDoubleToUint8(Register result_reg, - DoubleRegister input_reg, - DoubleRegister temp_double_reg); + DwVfpRegister input_reg, + DwVfpRegister temp_double_reg); void LoadInstanceDescriptors(Register map, Register descriptors); @@ -1309,6 +1313,15 @@ class MacroAssembler: public Assembler { // in r0. Assumes that any other register can be used as a scratch. void CheckEnumCache(Register null_value, Label* call_runtime); + // AllocationSiteInfo support. Arrays may have an associated + // AllocationSiteInfo object that can be checked for in order to pretransition + // to another type. + // On entry, receiver_reg should point to the array object. + // scratch_reg gets clobbered. + // If allocation info is present, condition flags are set to eq + void TestJSArrayForAllocationSiteInfo(Register receiver_reg, + Register scratch_reg); + private: void CallCFunctionHelper(Register function, int num_reg_arguments, @@ -1350,6 +1363,16 @@ class MacroAssembler: public Assembler { // it. See the implementation for register usage. void JumpToHandlerEntry(); + // Helper for ECMAToInt32VFP and ECMAToInt32NoVFP. + // It is expected that 31 <= exponent <= 83, and scratch is exponent - 1. + void ECMAToInt32Tail(Register result, + Register scratch, + Register input_high, + Register input_low, + Label* out_of_range, + Label* negate, + Label* done); + // Compute memory operands for safepoint stack slots. static int SafepointRegisterStackIndex(int reg_code); MemOperand SafepointRegisterSlot(Register reg); @@ -1361,9 +1384,9 @@ class MacroAssembler: public Assembler { // This handle will be patched with the code object on installation. Handle<Object> code_object_; - // Needs access to SafepointRegisterStackIndex for optimized frame + // Needs access to SafepointRegisterStackIndex for compiled frame // traversal. - friend class OptimizedFrame; + friend class StandardFrame; }; @@ -1392,7 +1415,6 @@ class CodePatcher { private: byte* address_; // The address of the code being patched. - int instructions_; // Number of instructions of the expected patch size. int size_; // Number of bytes of the expected patch size. MacroAssembler masm_; // Macro assembler used to generate the code. }; diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc index 17b8677842..0cb80c0ac2 100644 --- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc +++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc @@ -204,7 +204,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { Label not_at_start; // Did we start the match at the start of the string at all? 
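The JumpIfSmi test shown above relies on V8's pointer tagging: with kSmiTag == 0 and kSmiTagSize == 1 (asserted in the .cc file earlier in this diff), a small integer is stored shifted left by one with bit 0 clear, while heap object pointers carry a set low bit. A standalone model of the check:

#include <cstdint>

constexpr intptr_t kSmiTagMask = 1;

bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }

int32_t SmiValue(intptr_t tagged) {
  return static_cast<int32_t>(tagged >> 1);  // undo the one-bit tag shift
}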
__ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); BranchOrBacktrack(ne, ¬_at_start); // If we did, are we still at the start of the input? @@ -219,7 +219,7 @@ void RegExpMacroAssemblerARM::CheckAtStart(Label* on_at_start) { void RegExpMacroAssemblerARM::CheckNotAtStart(Label* on_not_at_start) { // Did we start the match at the start of the string at all? __ ldr(r0, MemOperand(frame_pointer(), kStartIndex)); - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); BranchOrBacktrack(ne, on_not_at_start); // If we did, are we still at the start of the input? __ ldr(r1, MemOperand(frame_pointer(), kInputStart)); @@ -261,7 +261,7 @@ void RegExpMacroAssemblerARM::CheckCharacters(Vector<const uc16> str, for (int i = 0; i < str.length(); i++) { if (mode_ == ASCII) { __ ldrb(r1, MemOperand(r0, char_size(), PostIndex)); - ASSERT(str[i] <= String::kMaxAsciiCharCode); + ASSERT(str[i] <= String::kMaxOneByteCharCode); __ cmp(r1, Operand(str[i])); } else { __ ldrh(r1, MemOperand(r0, char_size(), PostIndex)); @@ -337,8 +337,13 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( __ b(ne, &fail); __ sub(r3, r3, Operand('a')); __ cmp(r3, Operand('z' - 'a')); // Is r3 a lowercase letter? - __ b(hi, &fail); - + __ b(ls, &loop_check); // In range 'a'-'z'. + // Latin-1: Check for values in range [224,254] but not 247. + __ sub(r3, r3, Operand(224 - 'a')); + __ cmp(r3, Operand(254 - 224)); + __ b(hi, &fail); // Weren't Latin-1 letters. + __ cmp(r3, Operand(247 - 224)); // Check for 247. + __ b(eq, &fail); __ bind(&loop_check); __ cmp(r0, r1); @@ -385,7 +390,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase( } // Check if function returned non-zero for success or zero for failure. - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); BranchOrBacktrack(eq, on_no_match); // On success, increment position by length of capture. __ add(current_input_offset(), current_input_offset(), Operand(r4)); @@ -508,7 +513,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable( Handle<ByteArray> table, Label* on_bit_set) { __ mov(r0, Operand(table)); - if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) { + if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { __ and_(r1, current_character(), Operand(kTableSize - 1)); __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag)); } else { @@ -517,7 +522,7 @@ void RegExpMacroAssemblerARM::CheckBitInTable( Operand(ByteArray::kHeaderSize - kHeapObjectTag)); } __ ldrb(r0, MemOperand(r0, r1)); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); BranchOrBacktrack(ne, on_bit_set); } @@ -530,29 +535,23 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type, case 's': // Match space-characters if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. + // One byte space characters are '\t'..'\r', ' ' and \u00a0. Label success; __ cmp(current_character(), Operand(' ')); __ b(eq, &success); // Check range 0x09..0x0d __ sub(r0, current_character(), Operand('\t')); __ cmp(r0, Operand('\r' - '\t')); - BranchOrBacktrack(hi, on_no_match); + __ b(ls, &success); + // \u00a0 (NBSP). + __ cmp(r0, Operand(0x00a0 - '\t')); + BranchOrBacktrack(ne, on_no_match); __ bind(&success); return true; } return false; case 'S': - // Match non-space characters. - if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. 
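The case-insensitive back-reference fast path above (presumably after both characters have been lower-cased with the usual OR-with-0x20 trick) accepts a pair only if the lowered character is an ASCII letter or a Latin-1 letter in [224, 254] other than 247, the division sign. A standalone predicate with the same acceptance set:

#include <cstdint>

// 'lowered' is assumed to be the character after OR-ing in 0x20.
bool IsOneByteCaseFoldableLetter(uint8_t lowered) {
  if (lowered >= 'a' && lowered <= 'z') return true;  // ASCII a-z
  if (lowered == 0xF7) return false;                  // 247: division sign, not a letter
  return lowered >= 0xE0 && lowered <= 0xFE;          // Latin-1 224..254
}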
- __ cmp(current_character(), Operand(' ')); - BranchOrBacktrack(eq, on_no_match); - __ sub(r0, current_character(), Operand('\t')); - __ cmp(r0, Operand('\r' - '\t')); - BranchOrBacktrack(ls, on_no_match); - return true; - } + // The emitted code for generic character classes is good enough. return false; case 'd': // Match ASCII digits ('0'..'9') @@ -613,7 +612,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type, ExternalReference map = ExternalReference::re_word_character_map(); __ mov(r0, Operand(map)); __ ldrb(r0, MemOperand(r0, current_character())); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); BranchOrBacktrack(eq, on_no_match); return true; } @@ -627,7 +626,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type, ExternalReference map = ExternalReference::re_word_character_map(); __ mov(r0, Operand(map)); __ ldrb(r0, MemOperand(r0, current_character())); - __ cmp(r0, Operand(0)); + __ cmp(r0, Operand::Zero()); BranchOrBacktrack(ne, on_no_match); if (mode_ != ASCII) { __ bind(&done); @@ -675,7 +674,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Set frame pointer in space for it if this is not a direct call // from generated code. __ add(frame_pointer(), sp, Operand(4 * kPointerSize)); - __ mov(r0, Operand(0, RelocInfo::NONE)); + __ mov(r0, Operand::Zero()); __ push(r0); // Make room for success counter and initialize it to 0. __ push(r0); // Make room for "position - 1" constant (value is irrelevant). // Check if we have space on the stack for registers. @@ -700,7 +699,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { __ bind(&stack_limit_hit); CallCheckStackGuardState(r0); - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); // If returned value is non-zero, we exit with the returned value as result. __ b(ne, &return_r0); @@ -728,7 +727,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { Label load_char_start_regexp, start_regexp; // Load newline if index is at start, previous character otherwise. - __ cmp(r1, Operand(0, RelocInfo::NONE)); + __ cmp(r1, Operand::Zero()); __ b(ne, &load_char_start_regexp); __ mov(current_character(), Operand('\n'), LeaveCC, eq); __ jmp(&start_regexp); @@ -834,7 +833,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { // Not a zero-length match, restart. __ b(ne, &load_char_start_regexp); // Offset from the end is zero if we already reached the end. - __ cmp(current_input_offset(), Operand(0)); + __ cmp(current_input_offset(), Operand::Zero()); __ b(eq, &exit_label_); // Advance current position after a zero-length match. __ add(current_input_offset(), @@ -873,7 +872,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { SafeCallTarget(&check_preempt_label_); CallCheckStackGuardState(r0); - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); // If returning non-zero, we should end execution with the given // result as return value. __ b(ne, &return_r0); @@ -900,7 +899,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) { __ CallCFunction(grow_stack, num_arguments); // If return NULL, we have failed to grow the stack, and // must exit with a stack-overflow exception. - __ cmp(r0, Operand(0, RelocInfo::NONE)); + __ cmp(r0, Operand::Zero()); __ b(eq, &exit_with_exception); // Otherwise use return value as new stack pointer. 
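The 'w' and 'W' classes above classify a character by indexing a 256-byte table (re_word_character_map) and branching on whether the loaded byte is zero. A sketch of such a table for the ECMAScript word characters [A-Za-z0-9_]; the exact table contents are not part of this hunk:

#include <array>
#include <cstdint>

std::array<uint8_t, 256> BuildWordCharacterMap() {
  std::array<uint8_t, 256> map{};  // all entries start at 0 (non-word)
  for (int c = '0'; c <= '9'; ++c) map[c] = 1;
  for (int c = 'A'; c <= 'Z'; ++c) map[c] = 1;
  for (int c = 'a'; c <= 'z'; ++c) map[c] = 1;
  map['_'] = 1;
  return map;
}

// Mirrors the generated code: load one byte, test it against zero.
bool IsWordCharacter(const std::array<uint8_t, 256>& map, uint8_t c) {
  return map[c] != 0;
}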
__ mov(backtrack_stackpointer(), r0); @@ -1150,7 +1149,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); // Current string. - bool is_ascii = subject->IsAsciiRepresentationUnderneath(); + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); ASSERT(re_code->instruction_start() <= *return_address); ASSERT(*return_address <= @@ -1181,7 +1180,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address, } // String might have changed. - if (subject_tmp->IsAsciiRepresentation() != is_ascii) { + if (subject_tmp->IsOneByteRepresentation() != is_ascii) { // If we changed between an ASCII and an UC16 string, the specialized // code cannot be used, and we need to restart regexp matching from // scratch (including, potentially, compiling a new version of the code). diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc index 5b8ba2adae..b7bc839059 100644 --- a/deps/v8/src/arm/simulator-arm.cc +++ b/deps/v8/src/arm/simulator-arm.cc @@ -34,6 +34,7 @@ #include "disasm.h" #include "assembler.h" +#include "codegen.h" #include "arm/constants-arm.h" #include "arm/simulator-arm.h" @@ -398,7 +399,7 @@ void ArmDebugger::Debug() { int32_t words; if (argc == next_arg) { words = 10; - } else if (argc == next_arg + 1) { + } else { if (!GetValue(argv[next_arg], &words)) { words = 10; } @@ -764,8 +765,8 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) { // All registers are initialized to zero to start with // even though s_registers_ & d_registers_ share the same // physical registers in the target. - for (int i = 0; i < num_s_registers; i++) { - vfp_register[i] = 0; + for (int i = 0; i < num_d_registers * 2; i++) { + vfp_registers_[i] = 0; } n_flag_FPSCR_ = false; z_flag_FPSCR_ = false; @@ -900,7 +901,7 @@ double Simulator::get_double_from_register_pair(int reg) { double dm_val = 0.0; // Read the bits from the unsigned integer register_[] array // into the double precision floating point value and return it. - char buffer[2 * sizeof(vfp_register[0])]; + char buffer[2 * sizeof(vfp_registers_[0])]; memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); memcpy(&dm_val, buffer, 2 * sizeof(registers_[0])); return(dm_val); @@ -935,13 +936,13 @@ int32_t Simulator::get_pc() const { // Getting from and setting into VFP registers. 
void Simulator::set_s_register(int sreg, unsigned int value) { ASSERT((sreg >= 0) && (sreg < num_s_registers)); - vfp_register[sreg] = value; + vfp_registers_[sreg] = value; } unsigned int Simulator::get_s_register(int sreg) const { ASSERT((sreg >= 0) && (sreg < num_s_registers)); - return vfp_register[sreg]; + return vfp_registers_[sreg]; } @@ -949,12 +950,12 @@ template<class InputType, int register_size> void Simulator::SetVFPRegister(int reg_index, const InputType& value) { ASSERT(reg_index >= 0); if (register_size == 1) ASSERT(reg_index < num_s_registers); - if (register_size == 2) ASSERT(reg_index < num_d_registers); + if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters()); - char buffer[register_size * sizeof(vfp_register[0])]; - memcpy(buffer, &value, register_size * sizeof(vfp_register[0])); - memcpy(&vfp_register[reg_index * register_size], buffer, - register_size * sizeof(vfp_register[0])); + char buffer[register_size * sizeof(vfp_registers_[0])]; + memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0])); + memcpy(&vfp_registers_[reg_index * register_size], buffer, + register_size * sizeof(vfp_registers_[0])); } @@ -962,13 +963,13 @@ template<class ReturnType, int register_size> ReturnType Simulator::GetFromVFPRegister(int reg_index) { ASSERT(reg_index >= 0); if (register_size == 1) ASSERT(reg_index < num_s_registers); - if (register_size == 2) ASSERT(reg_index < num_d_registers); + if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters()); ReturnType value = 0; - char buffer[register_size * sizeof(vfp_register[0])]; - memcpy(buffer, &vfp_register[register_size * reg_index], - register_size * sizeof(vfp_register[0])); - memcpy(&value, buffer, register_size * sizeof(vfp_register[0])); + char buffer[register_size * sizeof(vfp_registers_[0])]; + memcpy(buffer, &vfp_registers_[register_size * reg_index], + register_size * sizeof(vfp_registers_[0])); + memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0])); return value; } @@ -977,8 +978,8 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) { // from r0-r3 or d0 and d1. void Simulator::GetFpArgs(double* x, double* y) { if (use_eabi_hardfloat()) { - *x = vfp_register[0]; - *y = vfp_register[1]; + *x = vfp_registers_[0]; + *y = vfp_registers_[1]; } else { // We use a char buffer to get around the strict-aliasing rules which // otherwise allow the compiler to optimize away the copy. @@ -996,7 +997,7 @@ void Simulator::GetFpArgs(double* x, double* y) { // from r0 and r1 or d0. void Simulator::GetFpArgs(double* x) { if (use_eabi_hardfloat()) { - *x = vfp_register[0]; + *x = vfp_registers_[0]; } else { // We use a char buffer to get around the strict-aliasing rules which // otherwise allow the compiler to optimize away the copy. @@ -1012,7 +1013,7 @@ void Simulator::GetFpArgs(double* x) { // from r0 and r1 or d0 and one integer value. void Simulator::GetFpArgs(double* x, int32_t* y) { if (use_eabi_hardfloat()) { - *x = vfp_register[0]; + *x = vfp_registers_[0]; *y = registers_[1]; } else { // We use a char buffer to get around the strict-aliasing rules which @@ -1031,10 +1032,10 @@ void Simulator::GetFpArgs(double* x, int32_t* y) { // The return value is either in r0/r1 or d0. void Simulator::SetFpResult(const double& result) { if (use_eabi_hardfloat()) { - char buffer[2 * sizeof(vfp_register[0])]; + char buffer[2 * sizeof(vfp_registers_[0])]; memcpy(buffer, &result, sizeof(buffer)); // Copy result to d0. 
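SetVFPRegister and GetFromVFPRegister above shuttle values through a char buffer with memcpy precisely to sidestep strict-aliasing rules, as the surrounding comments note; each D register occupies two 32-bit slots of the renamed vfp_registers_ array. The same pattern in a minimal standalone form:

#include <cstdint>
#include <cstring>

void StoreDoubleRegister(uint32_t regs[], int d_index, double value) {
  std::memcpy(&regs[d_index * 2], &value, sizeof(value));  // two words per D register
}

double LoadDoubleRegister(const uint32_t regs[], int d_index) {
  double value;
  std::memcpy(&value, &regs[d_index * 2], sizeof(value));
  return value;
}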
- memcpy(vfp_register, buffer, sizeof(buffer)); + memcpy(vfp_registers_, buffer, sizeof(buffer)); } else { char buffer[2 * sizeof(registers_[0])]; memcpy(buffer, &result, sizeof(buffer)); @@ -1387,7 +1388,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { } case ROR: { - UNIMPLEMENTED(); + if (shift_amount == 0) { + *carry_out = c_flag_; + } else { + uint32_t left = static_cast<uint32_t>(result) >> shift_amount; + uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount); + result = right | left; + *carry_out = (static_cast<uint32_t>(result) >> 31) != 0; + } break; } @@ -1459,7 +1467,14 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) { } case ROR: { - UNIMPLEMENTED(); + if (shift_amount == 0) { + *carry_out = c_flag_; + } else { + uint32_t left = static_cast<uint32_t>(result) >> shift_amount; + uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount); + result = right | left; + *carry_out = (static_cast<uint32_t>(result) >> 31) != 0; + } break; } @@ -1599,15 +1614,19 @@ void Simulator::HandleVList(Instruction* instr) { address += 1; } else { if (load) { - set_s_register_from_sinteger( - 2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr)); - set_s_register_from_sinteger( - 2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr)); + int32_t data[] = { + ReadW(reinterpret_cast<int32_t>(address), instr), + ReadW(reinterpret_cast<int32_t>(address + 1), instr) + }; + double d; + memcpy(&d, data, 8); + set_d_register_from_double(reg, d); } else { - WriteW(reinterpret_cast<int32_t>(address), - get_sinteger_from_s_register(2 * reg), instr); - WriteW(reinterpret_cast<int32_t>(address + 1), - get_sinteger_from_s_register(2 * reg + 1), instr); + int32_t data[2]; + double d = get_double_from_d_register(reg); + memcpy(data, &d, 8); + WriteW(reinterpret_cast<int32_t>(address), data[0], instr); + WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr); } address += 2; } @@ -1673,18 +1692,18 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { switch (redirection->type()) { case ExternalReference::BUILTIN_FP_FP_CALL: case ExternalReference::BUILTIN_COMPARE_CALL: - arg0 = vfp_register[0]; - arg1 = vfp_register[1]; - arg2 = vfp_register[2]; - arg3 = vfp_register[3]; + arg0 = vfp_registers_[0]; + arg1 = vfp_registers_[1]; + arg2 = vfp_registers_[2]; + arg3 = vfp_registers_[3]; break; case ExternalReference::BUILTIN_FP_CALL: - arg0 = vfp_register[0]; - arg1 = vfp_register[1]; + arg0 = vfp_registers_[0]; + arg1 = vfp_registers_[1]; break; case ExternalReference::BUILTIN_FP_INT_CALL: - arg0 = vfp_register[0]; - arg1 = vfp_register[1]; + arg0 = vfp_registers_[0]; + arg1 = vfp_registers_[1]; arg2 = get_register(0); break; default: @@ -1762,7 +1781,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { if (::v8::internal::FLAG_trace_sim) { PrintF("Returned %p\n", reinterpret_cast<void *>(*result)); } - set_register(r0, (int32_t) *result); + set_register(r0, reinterpret_cast<int32_t>(*result)); } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { SimulatorRuntimeDirectGetterCall target = reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external); @@ -1779,7 +1798,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { if (::v8::internal::FLAG_trace_sim) { PrintF("Returned %p\n", reinterpret_cast<void *>(*result)); } - set_register(r0, (int32_t) *result); + set_register(r0, reinterpret_cast<int32_t>(*result)); } else { // builtin call. 
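Both GetShiftRm hunks above replace UNIMPLEMENTED() with a rotate-right: the value is rotated by the shift amount, the carry-out becomes the new bit 31, and a rotate amount of zero leaves the value and the old carry flag untouched. As a standalone function:

#include <cstdint>

uint32_t RotateRight32(uint32_t value, unsigned amount, bool old_carry, bool* carry_out) {
  amount &= 31;                 // assumes the caller already handles rotates by exactly 32
  if (amount == 0) {
    *carry_out = old_carry;     // value and C flag unchanged
    return value;
  }
  uint32_t rotated = (value >> amount) | (value << (32u - amount));
  *carry_out = (rotated >> 31) != 0;
  return rotated;
}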
ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL); @@ -2764,6 +2783,26 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { double dm_value = get_double_from_d_register(vm); double dd_value = dn_value * dm_value; set_d_register_from_double(vd, dd_value); + } else if ((instr->Opc1Value() == 0x0)) { + // vmla, vmls + const bool is_vmls = (instr->Opc3Value() & 0x1); + + if (instr->SzValue() != 0x1) { + UNREACHABLE(); // Not used by V8. + } + + const double dd_val = get_double_from_d_register(vd); + const double dn_val = get_double_from_d_register(vn); + const double dm_val = get_double_from_d_register(vm); + + // Note: we do the mul and add/sub in separate steps to avoid getting a + // result with too high precision. + set_d_register_from_double(vd, dn_val * dm_val); + if (is_vmls) { + set_d_register_from_double(vd, dd_val - get_double_from_d_register(vd)); + } else { + set_d_register_from_double(vd, dd_val + get_double_from_d_register(vd)); + } } else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) { // vdiv if (instr->SzValue() != 0x1) { @@ -2782,6 +2821,17 @@ void Simulator::DecodeTypeVFP(Instruction* instr) { if ((instr->VCValue() == 0x0) && (instr->VAValue() == 0x0)) { DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(instr); + } else if ((instr->VLValue() == 0x0) && + (instr->VCValue() == 0x1) && + (instr->Bit(23) == 0x0)) { + // vmov (ARM core register to scalar) + int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4); + double dd_value = get_double_from_d_register(vd); + int32_t data[2]; + memcpy(data, &dd_value, 8); + data[instr->Bit(21)] = get_register(instr->RtValue()); + memcpy(&dd_value, data, 8); + set_d_register_from_double(vd, dd_value); } else if ((instr->VLValue() == 0x1) && (instr->VCValue() == 0x0) && (instr->VAValue() == 0x7) && @@ -3055,15 +3105,15 @@ void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) { if (src_precision == kDoublePrecision) { if (unsigned_integer) { - set_d_register_from_double(dst, - static_cast<double>((uint32_t)val)); + set_d_register_from_double( + dst, static_cast<double>(static_cast<uint32_t>(val))); } else { set_d_register_from_double(dst, static_cast<double>(val)); } } else { if (unsigned_integer) { - set_s_register_from_float(dst, - static_cast<float>((uint32_t)val)); + set_s_register_from_float( + dst, static_cast<float>(static_cast<uint32_t>(val))); } else { set_s_register_from_float(dst, static_cast<float>(val)); } @@ -3120,31 +3170,32 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { switch (instr->OpcodeValue()) { case 0x2: // Load and store double to two GP registers - if (instr->Bits(7, 4) != 0x1) { + if (instr->Bits(7, 6) != 0 || instr->Bit(4) != 1) { UNIMPLEMENTED(); // Not used by V8. 
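The simulator's comment above is explicit that the multiply and the add/subtract are rounded separately, so the result matches ARM's non-fused vmla/vmls rather than a fused multiply-add. A compact model; volatile is used here only to keep the compiler from contracting the expression into an fma:

double SimulateVmla(double dd, double dn, double dm) {
  volatile double product = dn * dm;  // rounded to double before the add
  return dd + product;
}

double SimulateVmls(double dd, double dn, double dm) {
  volatile double product = dn * dm;  // rounded to double before the subtract
  return dd - product;
}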
} else { int rt = instr->RtValue(); int rn = instr->RnValue(); - int vm = instr->VmValue(); + int vm = instr->VFPMRegValue(kDoublePrecision); if (instr->HasL()) { - int32_t rt_int_value = get_sinteger_from_s_register(2*vm); - int32_t rn_int_value = get_sinteger_from_s_register(2*vm+1); - - set_register(rt, rt_int_value); - set_register(rn, rn_int_value); + int32_t data[2]; + double d = get_double_from_d_register(vm); + memcpy(data, &d, 8); + set_register(rt, data[0]); + set_register(rn, data[1]); } else { - int32_t rs_val = get_register(rt); - int32_t rn_val = get_register(rn); - - set_s_register_from_sinteger(2*vm, rs_val); - set_s_register_from_sinteger((2*vm+1), rn_val); + int32_t data[] = { get_register(rt), get_register(rn) }; + double d; + memcpy(&d, data, 8); + set_d_register_from_double(vm, d); } } break; case 0x8: - case 0xC: { // Load and store double to memory. + case 0xA: + case 0xC: + case 0xE: { // Load and store double to memory. int rn = instr->RnValue(); - int vd = instr->VdValue(); + int vd = instr->VFPDRegValue(kDoublePrecision); int offset = instr->Immed8Value(); if (!instr->HasU()) { offset = -offset; @@ -3152,18 +3203,29 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) { int32_t address = get_register(rn) + 4 * offset; if (instr->HasL()) { // Load double from memory: vldr. - set_s_register_from_sinteger(2*vd, ReadW(address, instr)); - set_s_register_from_sinteger(2*vd + 1, ReadW(address + 4, instr)); + int32_t data[] = { + ReadW(address, instr), + ReadW(address + 4, instr) + }; + double val; + memcpy(&val, data, 8); + set_d_register_from_double(vd, val); } else { // Store double to memory: vstr. - WriteW(address, get_sinteger_from_s_register(2*vd), instr); - WriteW(address + 4, get_sinteger_from_s_register(2*vd + 1), instr); + int32_t data[2]; + double val = get_double_from_d_register(vd); + memcpy(data, &val, 8); + WriteW(address, data[0], instr); + WriteW(address + 4, data[1], instr); } break; } case 0x4: case 0x5: + case 0x6: + case 0x7: case 0x9: + case 0xB: // Load/store multiple double from memory: vldm/vstm. HandleVList(instr); break; @@ -3273,33 +3335,7 @@ void Simulator::Execute() { } -int32_t Simulator::Call(byte* entry, int argument_count, ...) { - va_list parameters; - va_start(parameters, argument_count); - // Set up arguments - - // First four arguments passed in registers. - ASSERT(argument_count >= 4); - set_register(r0, va_arg(parameters, int32_t)); - set_register(r1, va_arg(parameters, int32_t)); - set_register(r2, va_arg(parameters, int32_t)); - set_register(r3, va_arg(parameters, int32_t)); - - // Remaining arguments passed on stack. - int original_stack = get_register(sp); - // Compute position of stack on entry to generated code. - int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); - if (OS::ActivationFrameAlignment() != 0) { - entry_stack &= -OS::ActivationFrameAlignment(); - } - // Store remaining arguments on stack, from low to high memory. - intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); - for (int i = 4; i < argument_count; i++) { - stack_argument[i - 4] = va_arg(parameters, int32_t); - } - va_end(parameters); - set_register(sp, entry_stack); - +void Simulator::CallInternal(byte* entry) { // Prepare to execute the code at entry set_register(pc, reinterpret_cast<int32_t>(entry)); // Put down marker for end of simulation. The simulator will stop simulation @@ -3353,6 +3389,37 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) 
{ set_register(r9, r9_val); set_register(r10, r10_val); set_register(r11, r11_val); +} + + +int32_t Simulator::Call(byte* entry, int argument_count, ...) { + va_list parameters; + va_start(parameters, argument_count); + // Set up arguments + + // First four arguments passed in registers. + ASSERT(argument_count >= 4); + set_register(r0, va_arg(parameters, int32_t)); + set_register(r1, va_arg(parameters, int32_t)); + set_register(r2, va_arg(parameters, int32_t)); + set_register(r3, va_arg(parameters, int32_t)); + + // Remaining arguments passed on stack. + int original_stack = get_register(sp); + // Compute position of stack on entry to generated code. + int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)); + if (OS::ActivationFrameAlignment() != 0) { + entry_stack &= -OS::ActivationFrameAlignment(); + } + // Store remaining arguments on stack, from low to high memory. + intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); + for (int i = 4; i < argument_count; i++) { + stack_argument[i - 4] = va_arg(parameters, int32_t); + } + va_end(parameters); + set_register(sp, entry_stack); + + CallInternal(entry); // Pop stack passed arguments. CHECK_EQ(entry_stack, get_register(sp)); @@ -3363,6 +3430,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { } +double Simulator::CallFP(byte* entry, double d0, double d1) { + if (use_eabi_hardfloat()) { + set_d_register_from_double(0, d0); + set_d_register_from_double(1, d1); + } else { + int buffer[2]; + ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0)); + memcpy(buffer, &d0, sizeof(d0)); + set_dw_register(0, buffer); + memcpy(buffer, &d1, sizeof(d1)); + set_dw_register(2, buffer); + } + CallInternal(entry); + if (use_eabi_hardfloat()) { + return get_double_from_d_register(0); + } else { + return get_double_from_register_pair(0); + } +} + + uintptr_t Simulator::PushAddress(uintptr_t address) { int new_sp = get_register(sp) - sizeof(uintptr_t); uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h index abc91bbc42..907a590665 100644 --- a/deps/v8/src/arm/simulator-arm.h +++ b/deps/v8/src/arm/simulator-arm.h @@ -142,7 +142,9 @@ class Simulator { num_s_registers = 32, d0 = 0, d1, d2, d3, d4, d5, d6, d7, d8, d9, d10, d11, d12, d13, d14, d15, - num_d_registers = 16 + d16, d17, d18, d19, d20, d21, d22, d23, + d24, d25, d26, d27, d28, d29, d30, d31, + num_d_registers = 32 }; explicit Simulator(Isolate* isolate); @@ -205,6 +207,8 @@ class Simulator { // generated RegExp code with 7 parameters. This is a convenience function, // which sets up the simulator state and grabs the result on return. int32_t Call(byte* entry, int argument_count, ...); + // Alternative: call a 2-argument double function. + double CallFP(byte* entry, double d0, double d1); // Push an address onto the JS stack. uintptr_t PushAddress(uintptr_t address); @@ -356,6 +360,8 @@ class Simulator { template<class InputType, int register_size> void SetVFPRegister(int reg_index, const InputType& value); + void CallInternal(byte* entry); + // Architecture state. // Saturating instructions require a Q flag to indicate saturation. // There is currently no way to read the CPSR directly, and thus read the Q @@ -367,7 +373,7 @@ class Simulator { bool v_flag_; // VFP architecture state. 
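The new CallFP entry point above marshals its two double arguments either into d0/d1 (hard-float EABI) or, bit for bit, into the core register pairs r0:r1 and r2:r3 (soft-float). A sketch of the soft-float packing on a little-endian target, purely illustrative:

#include <cstdint>
#include <cstring>

// Splits a double into the two 32-bit words placed in an even/odd core
// register pair (low word in the even register on little-endian ARM).
void PackDoubleForSoftFloatCall(double value, int32_t* even_reg, int32_t* odd_reg) {
  int32_t words[2];
  static_assert(sizeof(words) == sizeof(value), "double must be 64 bits");
  std::memcpy(words, &value, sizeof(value));
  *even_reg = words[0];
  *odd_reg = words[1];
}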
- unsigned int vfp_register[num_s_registers]; + unsigned int vfp_registers_[num_d_registers * 2]; bool n_flag_FPSCR_; bool z_flag_FPSCR_; bool c_flag_FPSCR_; diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc index 9fc39d4ad8..3350c56c16 100644 --- a/deps/v8/src/arm/stub-cache-arm.cc +++ b/deps/v8/src/arm/stub-cache-arm.cc @@ -102,12 +102,7 @@ static void ProbeTable(Isolate* isolate, uint32_t mask = Code::kFlagsNotUsedInLookup; ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); __ bic(flags_reg, flags_reg, Operand(mask)); - // Using cmn and the negative instead of cmp means we can use movw. - if (flags < 0) { - __ cmn(flags_reg, Operand(-flags)); - } else { - __ cmp(flags_reg, Operand(flags)); - } + __ cmp(flags_reg, Operand(flags)); __ b(ne, &miss); #ifdef DEBUG @@ -130,14 +125,14 @@ static void ProbeTable(Isolate* isolate, // the property. This function may return false negatives, so miss_label // must always call a backup property check that is complete. // This function is safe to call if the receiver has fast properties. -// Name must be a symbol and receiver must be a heap object. +// Name must be unique and receiver must be a heap object. static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, Label* miss_label, Register receiver, - Handle<String> name, + Handle<Name> name, Register scratch0, Register scratch1) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsUniqueName()); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); @@ -173,13 +168,13 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - StringDictionaryLookupStub::GenerateNegativeLookup(masm, - miss_label, - &done, - receiver, - properties, - name, - scratch1); + NameDictionaryLookupStub::GenerateNegativeLookup(masm, + miss_label, + &done, + receiver, + properties, + name, + scratch1); __ bind(&done); __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); } @@ -228,7 +223,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ JumpIfSmi(receiver, &miss); // Get the map of the receiver and compute the hash. - __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset)); + __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ add(scratch, scratch, Operand(ip)); uint32_t mask = kPrimaryTableSize - 1; @@ -320,26 +315,19 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( } -// Load a fast property out of a holder object (src). In-object properties -// are loaded directly otherwise the property is loaded from the properties -// fixed array. -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - Handle<JSObject> holder, - int index) { - // Adjust for the number of properties stored in the holder. - index -= holder->map()->inobject_properties(); - if (index < 0) { - // Get the property straight out of the holder. - int offset = holder->map()->instance_size() + (index * kPointerSize); - __ ldr(dst, FieldMemOperand(src, offset)); - } else { +void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm, + Register dst, + Register src, + bool inobject, + int index) { + int offset = index * kPointerSize; + if (!inobject) { // Calculate the offset into the properties array. 
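GenerateProbe above forms the primary stub-cache index by adding the name's hash field to the receiver's map pointer and masking with kPrimaryTableSize - 1; the visible hunk stops there, and the real code goes on to mix in the code flags as well. A simplified model of just the step shown, with the table size left as a parameter since its value does not appear in this hunk:

#include <cstdint>

uint32_t PrimaryStubCacheIndex(uint32_t name_hash_field,
                               uintptr_t receiver_map,
                               uint32_t table_size /* power of two */) {
  uint32_t hash = name_hash_field + static_cast<uint32_t>(receiver_map);
  return hash & (table_size - 1);
}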
- int offset = index * kPointerSize + FixedArray::kHeaderSize; + offset = offset + FixedArray::kHeaderSize; __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); - __ ldr(dst, FieldMemOperand(dst, offset)); + src = dst; } + __ ldr(dst, FieldMemOperand(src, offset)); } @@ -437,12 +425,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Handle<JSObject> object, int index, Handle<Map> transition, - Handle<String> name, + Handle<Name> name, Register receiver_reg, Register name_reg, + Register value_reg, Register scratch1, Register scratch2, - Label* miss_label) { + Label* miss_label, + Label* miss_restore_name) { // r0 : value Label exit; @@ -479,17 +469,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, holder = JSObject::cast(holder->GetPrototype()); } while (holder->GetPrototype()->IsJSObject()); } - // We need an extra register, push - __ push(name_reg); - Label miss_pop, done_check; CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg, - scratch1, scratch2, name, &miss_pop); - __ jmp(&done_check); - __ bind(&miss_pop); - __ pop(name_reg); - __ jmp(miss_label); - __ bind(&done_check); - __ pop(name_reg); + scratch1, scratch2, name, miss_restore_name); } // Stub never generated for non-global objects that require access @@ -536,14 +517,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, if (index < 0) { // Set the property straight into the object. int offset = object->map()->instance_size() + (index * kPointerSize); - __ str(r0, FieldMemOperand(receiver_reg, offset)); + __ str(value_reg, FieldMemOperand(receiver_reg, offset)); // Skip updating write barrier if storing a smi. - __ JumpIfSmi(r0, &exit); + __ JumpIfSmi(value_reg, &exit); // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. - __ mov(name_reg, r0); + __ mov(name_reg, value_reg); __ RecordWriteField(receiver_reg, offset, name_reg, @@ -556,14 +537,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Get the properties array __ ldr(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ str(r0, FieldMemOperand(scratch1, offset)); + __ str(value_reg, FieldMemOperand(scratch1, offset)); // Skip updating write barrier if storing a smi. - __ JumpIfSmi(r0, &exit); + __ JumpIfSmi(value_reg, &exit); // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ mov(name_reg, r0); + __ mov(name_reg, value_reg); __ RecordWriteField(scratch1, offset, name_reg, @@ -573,17 +554,19 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } // Return the value (register r0). + ASSERT(value_reg.is(r0)); __ bind(&exit); __ Ret(); } -void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { - ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC); - Handle<Code> code = (kind == Code::LOAD_IC) - ? masm->isolate()->builtins()->LoadIC_Miss() - : masm->isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(code, RelocInfo::CODE_TARGET); +void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ mov(this->name(), Operand(name)); + } } @@ -697,7 +680,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // Pass the additional arguments. 
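DoGenerateFastPropertyLoad and GenerateStoreField above branch on the same layout question: a fast property either lives at a fixed offset inside the object itself, or in the separate properties backing array. A conceptual model with hypothetical types, not V8's actual object layout:

#include <cstddef>

struct FastObject {
  void* in_object_slots[4];        // hypothetical in-object property slots
  void** out_of_object_properties; // hypothetical out-of-line backing store
};

void* LoadFastProperty(const FastObject* obj, bool in_object, int index) {
  return in_object ? obj->in_object_slots[index]
                   : obj->out_of_object_properties[index];
}

void StoreFastProperty(FastObject* obj, bool in_object, int index, void* value) {
  if (in_object) {
    obj->in_object_slots[index] = value;
  } else {
    obj->out_of_object_properties[index] = value;
  }
  // The real GenerateStoreField also emits a write barrier here unless the
  // stored value is a smi.
}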
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); - Handle<Object> call_data(api_call_info->data()); + Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { __ Move(r0, api_call_info); __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset)); @@ -730,7 +713,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, __ mov(ip, Operand(argc)); __ str(ip, MemOperand(r0, 2 * kPointerSize)); // v8::Arguments::is_construct_call = 0 - __ mov(ip, Operand(0)); + __ mov(ip, Operand::Zero()); __ str(ip, MemOperand(r0, 3 * kPointerSize)); const int kStackUnwindSpace = argc + kFastApiCallArguments + 1; @@ -759,7 +742,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { void Compile(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, LookupResult* lookup, Register receiver, Register scratch1, @@ -790,7 +773,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch3, Handle<JSObject> interceptor_holder, LookupResult* lookup, - Handle<String> name, + Handle<Name> name, const CallOptimization& optimization, Label* miss_label) { ASSERT(optimization.is_constant_call()); @@ -884,7 +867,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch1, Register scratch2, Register scratch3, - Handle<String> name, + Handle<Name> name, Handle<JSObject> interceptor_holder, Label* miss_label) { Register holder = @@ -941,7 +924,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { // property. static void GenerateCheckPropertyCell(MacroAssembler* masm, Handle<GlobalObject> global, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSGlobalPropertyCell> cell = @@ -961,7 +944,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, static void GenerateCheckPropertyCells(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSObject> current = object; @@ -989,7 +972,7 @@ static void StoreIntAsFloat(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vmov(s0, ival); __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); __ vcvt_f32_s32(s0, s0); @@ -1003,7 +986,7 @@ static void StoreIntAsFloat(MacroAssembler* masm, __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); // Negate value if it is negative. - __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); + __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne); // We have -1, 0 or 1, which we treat specially. Register ival contains // absolute value: it is either equal to 1 (special case of -1 and 1), @@ -1048,39 +1031,8 @@ static void StoreIntAsFloat(MacroAssembler* masm, } -// Convert unsigned integer with specified number of leading zeroes in binary -// representation to IEEE 754 double. -// Integer to convert is passed in register hiword. -// Resulting double is returned in registers hiword:loword. -// This functions does not work correctly for 0. 
-static void GenerateUInt2Double(MacroAssembler* masm, - Register hiword, - Register loword, - Register scratch, - int leading_zeroes) { - const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; - const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; - - const int mantissa_shift_for_hi_word = - meaningful_bits - HeapNumber::kMantissaBitsInTopWord; - - const int mantissa_shift_for_lo_word = - kBitsPerInt - mantissa_shift_for_hi_word; - - __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); - if (mantissa_shift_for_hi_word > 0) { - __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); - __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); - } else { - __ mov(loword, Operand(0, RelocInfo::NONE)); - __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); - } - - // If least significant bit of biased exponent was not 1 it was corrupted - // by most significant bit of mantissa so we should fix that. - if (!(biased_exponent & 1)) { - __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); - } +void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { + __ Jump(code, RelocInfo::CODE_TARGET); } @@ -1094,9 +1046,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, Register holder_reg, Register scratch1, Register scratch2, - Handle<String> name, + Handle<Name> name, int save_at_depth, - Label* miss) { + Label* miss, + PrototypeCheckType check) { + Handle<JSObject> first = object; // Make sure there's no overlap between holder and object registers. ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) @@ -1124,11 +1078,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, if (!current->HasFastProperties() && !current->IsJSGlobalObject() && !current->IsJSGlobalProxy()) { - if (!name->IsSymbol()) { - name = factory()->LookupSymbol(name); + if (!name->IsUniqueName()) { + ASSERT(name->IsString()); + name = factory()->InternalizeString(Handle<String>::cast(name)); } ASSERT(current->property_dictionary()->FindEntry(*name) == - StringDictionary::kNotFound); + NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, scratch2); @@ -1137,9 +1092,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, reg = holder_reg; // From now on the object will be in holder_reg. __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); } else { - Handle<Map> current_map(current->map()); - __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK, - ALLOW_ELEMENT_TRANSITION_MAPS); + Register map_reg = scratch1; + if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) { + Handle<Map> current_map(current->map()); + // CheckMap implicitly loads the map of |reg| into |map_reg|. + __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK, + ALLOW_ELEMENT_TRANSITION_MAPS); + } else { + __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); + } // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global @@ -1152,7 +1113,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, if (heap()->InNewSpace(*prototype)) { // The prototype is in new space; we cannot store a reference to it // in the code. Load it from the map. 
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); } else { // The prototype is in old space; load it directly. __ mov(reg, Operand(prototype)); @@ -1170,9 +1131,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, // Log the check depth. LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1)); - // Check the holder map. - __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss, - DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { + // Check the holder map. + __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss, + DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + } // Perform security check for access to the global object. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); @@ -1190,124 +1153,124 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void StubCompiler::GenerateLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - int index, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the maps haven't changed. - Register reg = CheckPrototypes( - object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - GenerateFastPropertyLoad(masm(), r0, reg, holder, index); - __ Ret(); +void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success, + Label* miss) { + if (!miss->is_unused()) { + __ b(success); + __ bind(miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + } } -void StubCompiler::GenerateLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<JSFunction> value, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the maps haven't changed. - CheckPrototypes( - object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - - // Return the constant value. - __ LoadHeapObject(r0, value); - __ Ret(); -} +Register BaseLoadStubCompiler::CallbackHandlerFrontend( + Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success, + Handle<ExecutableAccessorInfo> callback) { + Label miss; + Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss); -void StubCompiler::GenerateDictionaryLoadCallback(Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - ASSERT(!receiver.is(scratch1)); - ASSERT(!receiver.is(scratch2)); - ASSERT(!receiver.is(scratch3)); - - // Load the properties dictionary. - Register dictionary = scratch1; - __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done; - StringDictionaryLookupStub::GeneratePositiveLookup(masm(), - miss, + if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { + ASSERT(!reg.is(scratch2())); + ASSERT(!reg.is(scratch3())); + ASSERT(!reg.is(scratch4())); + + // Load the properties dictionary. + Register dictionary = scratch4(); + __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); + + // Probe the dictionary. 
+ Label probe_done; + NameDictionaryLookupStub::GeneratePositiveLookup(masm(), + &miss, &probe_done, dictionary, - name_reg, - scratch2, - scratch3); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch3 contains the - // pointer into the dictionary. Check that the value is the callback. - Register pointer = scratch3; - const int kElementsStartOffset = StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ ldr(scratch2, FieldMemOperand(pointer, kValueOffset)); - __ cmp(scratch2, Operand(callback)); - __ b(ne, miss); + this->name(), + scratch2(), + scratch3()); + __ bind(&probe_done); + + // If probing finds an entry in the dictionary, scratch3 contains the + // pointer into the dictionary. Check that the value is the callback. + Register pointer = scratch3(); + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kValueOffset = kElementsStartOffset + kPointerSize; + __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); + __ cmp(scratch2(), Operand(callback)); + __ b(ne, &miss); + } + + HandlerFrontendFooter(success, &miss); + return reg; } -void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); +void BaseLoadStubCompiler::NonexistentHandlerFrontend( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Label* success, + Handle<GlobalObject> global) { + Label miss; - // Check that the maps haven't changed. - Register reg = CheckPrototypes(object, receiver, holder, scratch1, - scratch2, scratch3, name, miss); + Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss); - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - GenerateDictionaryLoadCallback( - reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss); + // If the last object in the prototype chain is a global object, + // check that the global property cell is empty. + if (!global.is_null()) { + GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss); } + if (!last->HasFastProperties()) { + __ ldr(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset)); + __ ldr(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset)); + __ cmp(scratch2(), Operand(isolate()->factory()->null_value())); + __ b(ne, &miss); + } + + HandlerFrontendFooter(success, &miss); +} + + +void BaseLoadStubCompiler::GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex index) { + GenerateFastPropertyLoad(masm(), r0, reg, holder, index); + __ Ret(); +} + + +void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) { + // Return the constant value. + __ LoadHeapObject(r0, value); + __ Ret(); +} + + +void BaseLoadStubCompiler::GenerateLoadCallback( + Register reg, + Handle<ExecutableAccessorInfo> callback) { // Build AccessorInfo::args_ list on the stack and push property name below // the exit frame to make GC aware of them and store pointers to them. 
- __ push(receiver); - __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_ + __ push(receiver()); + __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_ if (heap()->InNewSpace(callback->data())) { - __ Move(scratch3, callback); - __ ldr(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); + __ Move(scratch3(), callback); + __ ldr(scratch3(), FieldMemOperand(scratch3(), + ExecutableAccessorInfo::kDataOffset)); } else { - __ Move(scratch3, Handle<Object>(callback->data())); + __ Move(scratch3(), Handle<Object>(callback->data(), + callback->GetIsolate())); } - __ Push(reg, scratch3); - __ mov(scratch3, Operand(ExternalReference::isolate_address())); - __ Push(scratch3, name_reg); - __ mov(r0, sp); // r0 = Handle<String> + __ Push(reg, scratch3()); + __ mov(scratch3(), Operand(ExternalReference::isolate_address())); + __ Push(scratch3(), name()); + __ mov(r0, sp); // r0 = Handle<Name> const int kApiStackSpace = 1; FrameScope frame_scope(masm(), StackFrame::MANUAL); @@ -1315,7 +1278,7 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, // Create AccessorInfo instance on the stack above the exit frame with // scratch2 (internal::Object** args_) as the data. - __ str(scratch2, MemOperand(sp, 1 * kPointerSize)); + __ str(scratch2(), MemOperand(sp, 1 * kPointerSize)); __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo& const int kStackUnwindSpace = 5; @@ -1329,22 +1292,15 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, } -void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<String> name, - Label* miss) { +void BaseLoadStubCompiler::GenerateLoadInterceptor( + Register holder_reg, + Handle<JSObject> object, + Handle<JSObject> interceptor_holder, + LookupResult* lookup, + Handle<Name> name) { ASSERT(interceptor_holder->HasNamedInterceptor()); ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added // later. @@ -1353,8 +1309,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, if (lookup->IsField()) { compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && - lookup->GetCallbackObject()->IsAccessorInfo()) { - AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); + lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { + ExecutableAccessorInfo* callback = + ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); compile_followup_inline = callback->getter() != NULL && callback->IsCompatibleReceiver(*object); } @@ -1364,17 +1321,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. 
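Recapping the pushes above, the stack handed to the getter API call ends up laid out as follows (top of stack last): scratch2() is left pointing at the receiver slot, which becomes AccessorInfo::args_, and r0 points at the name slot.

//   sp + 4 * kPointerSize : receiver        <- scratch2() (AccessorInfo::args_)
//   sp + 3 * kPointerSize : holder (reg)
//   sp + 2 * kPointerSize : callback data
//   sp + 1 * kPointerSize : isolate address
//   sp + 0 * kPointerSize : property name   <- r0, passed as Handle<Name>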
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, - name, miss); - ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); + ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); - bool must_preserve_receiver_reg = !receiver.is(holder_reg) && + bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); // Save necessary data before invoking an interceptor. @@ -1382,93 +1336,42 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, { FrameScope frame_scope(masm(), StackFrame::INTERNAL); if (must_preserve_receiver_reg) { - __ Push(receiver, holder_reg, name_reg); + __ Push(receiver(), holder_reg, this->name()); } else { - __ Push(holder_reg, name_reg); + __ Push(holder_reg, this->name()); } // Invoke an interceptor. Note: map checks from receiver to // interceptor's holder has been compiled before (see a caller // of this method.) CompileCallLoadPropertyWithInterceptor(masm(), - receiver, + receiver(), holder_reg, - name_reg, + this->name(), interceptor_holder); // Check if interceptor provided a value for property. If it's // the case, return immediately. Label interceptor_failed; - __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); - __ cmp(r0, scratch1); + __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex); + __ cmp(r0, scratch1()); __ b(eq, &interceptor_failed); frame_scope.GenerateLeaveFrame(); __ Ret(); __ bind(&interceptor_failed); - __ pop(name_reg); + __ pop(this->name()); __ pop(holder_reg); if (must_preserve_receiver_reg) { - __ pop(receiver); + __ pop(receiver()); } // Leave the internal frame. } - // Check that the maps from interceptor's holder to lookup's holder - // haven't changed. And load lookup's holder into |holder| register. - if (must_perfrom_prototype_check) { - holder_reg = CheckPrototypes(interceptor_holder, - holder_reg, - Handle<JSObject>(lookup->holder()), - scratch1, - scratch2, - scratch3, - name, - miss); - } - if (lookup->IsField()) { - // We found FIELD property in prototype chain of interceptor's holder. - // Retrieve a field from field's holder. - GenerateFastPropertyLoad(masm(), r0, holder_reg, - Handle<JSObject>(lookup->holder()), - lookup->GetFieldIndex()); - __ Ret(); - } else { - // We found CALLBACKS property in prototype chain of interceptor's - // holder. - ASSERT(lookup->type() == CALLBACKS); - Handle<AccessorInfo> callback( - AccessorInfo::cast(lookup->GetCallbackObject())); - ASSERT(callback->getter() != NULL); - - // Tail call to runtime. - // Important invariant in CALLBACKS case: the code above must be - // structured to never clobber |receiver| register. - __ Move(scratch2, callback); - // holder_reg is either receiver or scratch1. 
- if (!receiver.is(holder_reg)) { - ASSERT(scratch1.is(holder_reg)); - __ Push(receiver, holder_reg); - } else { - __ push(receiver); - __ push(holder_reg); - } - __ ldr(scratch3, - FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); - __ mov(scratch1, Operand(ExternalReference::isolate_address())); - __ Push(scratch3, scratch1, scratch2, name_reg); - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty), - masm()->isolate()); - __ TailCallExternalReference(ref, 6, 1); - } + GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. - Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, - name, miss); - PushInterceptorArguments(masm(), receiver, holder_reg, - name_reg, interceptor_holder); + PushInterceptorArguments(masm(), receiver(), holder_reg, + this->name(), interceptor_holder); ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), @@ -1478,7 +1381,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, } -void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { +void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { __ cmp(r2, Operand(name)); __ b(ne, miss); @@ -1488,7 +1391,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Label* miss) { ASSERT(holder->IsGlobalObject()); @@ -1545,8 +1448,8 @@ void CallStubCompiler::GenerateMissBranch() { Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object, Handle<JSObject> holder, - int index, - Handle<String> name) { + PropertyIndex index, + Handle<Name> name) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address @@ -1618,7 +1521,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( Label call_builtin; if (argc == 1) { // Otherwise fall through to call the builtin. - Label attempt_to_grow_elements; + Label attempt_to_grow_elements, with_write_barrier, check_double; Register elements = r6; Register end_elements = r5; @@ -1629,10 +1532,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, - &call_builtin, + &check_double, DONT_DO_SMI_CHECK); - // Get the array's length into r0 and calculate new length. __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); STATIC_ASSERT(kSmiTagSize == 1); @@ -1647,7 +1549,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ b(gt, &attempt_to_grow_elements); // Check if value is a smi. - Label with_write_barrier; __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); __ JumpIfNotSmi(r4, &with_write_barrier); @@ -1667,6 +1568,40 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ Drop(argc + 1); __ Ret(); + __ bind(&check_double); + + // Check that the elements are in fast mode and writable. + __ CheckMap(elements, + r0, + Heap::kFixedDoubleArrayMapRootIndex, + &call_builtin, + DONT_DO_SMI_CHECK); + + // Get the array's length into r0 and calculate new length. 
+ __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + __ add(r0, r0, Operand(Smi::FromInt(argc))); + + // Get the elements' length. + __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); + + // Check if we could survive without allocation. + __ cmp(r0, r4); + __ b(gt, &call_builtin); + + __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); + __ StoreNumberToDoubleElements( + r4, r0, elements, r3, r5, r2, r9, + &call_builtin, argc * kDoubleSize); + + // Save new length. + __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + + // Check for a smi. + __ Drop(argc + 1); + __ Ret(); + __ bind(&with_write_barrier); __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); @@ -1678,6 +1613,11 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(&not_fast_object); __ CheckFastSmiElements(r3, r7, &call_builtin); + + __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset)); + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); + __ cmp(r7, ip); + __ b(eq, &call_builtin); // edx: receiver // r3: map Label try_holey_map; @@ -1688,7 +1628,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( &try_holey_map); __ mov(r2, receiver); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); __ jmp(&fast_object); __ bind(&try_holey_map); @@ -1699,7 +1641,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( &call_builtin); __ mov(r2, receiver); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); __ bind(&fast_object); } else { __ CheckFastObjectElements(r3, r3, &call_builtin); @@ -1922,8 +1866,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall( r0, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - r0, holder, r1, r3, r4, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + r0, holder, r1, r3, r4, name, &miss); Register receiver = r1; Register index = r4; @@ -2002,8 +1947,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall( r0, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - r0, holder, r1, r3, r4, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + r0, holder, r1, r3, r4, name, &miss); Register receiver = r0; Register index = r4; @@ -2033,7 +1979,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall( if (index_out_of_range.is_linked()) { __ bind(&index_out_of_range); - __ LoadRoot(r0, Heap::kEmptyStringRootIndex); + __ LoadRoot(r0, Heap::kempty_stringRootIndex); __ Drop(argc + 1); __ Ret(); } @@ -2140,7 +2086,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( return Handle<Code>::null(); } - CpuFeatures::Scope scope_vfp2(VFP2); + CpuFeatureScope scope_vfp2(masm(), VFP2); const int argc = arguments().immediate(); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call.
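The new check_double branch above gives the Array.prototype.push stub a fast path for FAST_DOUBLE_ELEMENTS backing stores. The following is a simplified runtime-level model of the condition it inlines, not the generated code itself (the stub never calls back into C++, and TryFastDoublePush is an invented name):

// Illustrative model of the single-argument push onto a double-elements array.
static bool TryFastDoublePush(JSArray* array, Object* arg) {
  FixedDoubleArray* elements = FixedDoubleArray::cast(array->elements());
  int new_length = Smi::cast(array->length())->value() + 1;
  if (new_length > elements->length()) return false;  // would have to grow
  if (!arg->IsNumber()) return false;                  // defer to the builtin
  elements->set(new_length - 1, arg->Number());        // store unboxed double
  array->set_length(Smi::FromInt(new_length));
  return true;
}

Anything outside these conditions (growth needed, non-number argument, element-kind transitions) falls through to &call_builtin, just as in the smi and object paths.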
@@ -2173,10 +2119,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); - Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return; - - // If vfp3 is enabled, we use the fpu rounding with the RM (round towards - // minus infinity) mode. + Label smi_check, just_return; // Load the HeapNumber value. // We will need access to the value in the core registers, so we load it @@ -2186,73 +2129,46 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset)); __ vmov(d1, r4, r5); - // Backup FPSCR. - __ vmrs(r3); - // Set custom FPCSR: - // - Set rounding mode to "Round towards Minus Infinity" - // (i.e. bits [23:22] = 0b10). - // - Clear vfp cumulative exception flags (bits [3:0]). - // - Make sure Flush-to-zero mode control bit is unset (bit 22). - __ bic(r9, r3, - Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); - __ orr(r9, r9, Operand(kRoundToMinusInf)); - __ vmsr(r9); - - // Convert the argument to an integer. - __ vcvt_s32_f64(s0, d1, kFPSCRRounding); - - // Use vcvt latency to start checking for special cases. - // Get the argument exponent and clear the sign bit. - __ bic(r6, r5, Operand(HeapNumber::kSignMask)); - __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord)); - - // Retrieve FPSCR and check for vfp exceptions. - __ vmrs(r9); - __ tst(r9, Operand(kVFPExceptionMask)); - __ b(&no_vfp_exception, eq); - - // Check for NaN, Infinity, and -Infinity. + // Check for NaN, Infinities and -0. // They are invariant through a Math.Floor call, so just // return the original argument. - __ sub(r7, r6, Operand(HeapNumber::kExponentMask - >> HeapNumber::kMantissaBitsInTopWord), SetCC); - __ b(&restore_fpscr_and_return, eq); - // We had an overflow or underflow in the conversion. Check if we - // have a big exponent. - __ cmp(r7, Operand(HeapNumber::kMantissaBits)); - // If greater or equal, the argument is already round and in r0. - __ b(&restore_fpscr_and_return, ge); - __ b(&wont_fit_smi); - - __ bind(&no_vfp_exception); - // Move the result back to general purpose register r0. - __ vmov(r0, s0); - // Check if the result fits into a smi. + __ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits); + __ cmp(r3, Operand(-1)); + __ b(eq, &just_return); + __ eor(r3, r5, Operand(0x80000000u)); + __ orr(r3, r3, r4, SetCC); + __ b(eq, &just_return); + // Test for values that can be exactly represented as a + // signed 32-bit integer. + __ TryDoubleToInt32Exact(r0, d1, d2); + // If exact, check smi + __ b(eq, &smi_check); + __ cmp(r5, Operand(0)); + + // If input is in ]+0, +inf[, the cmp has cleared overflow and negative + // (V=0 and N=0), the two following instructions won't execute and + // we fall through smi_check to check if the result can fit into a smi. + + // If input is in ]-inf, -0[, sub one and, go to slow if we have + // an overflow. Else we fall through smi check. + // Hint: if x is a negative, non integer number, + // floor(x) <=> round_to_zero(x) - 1. + __ sub(r0, r0, Operand(1), SetCC, mi); + __ b(vs, &slow); + + __ bind(&smi_check); + // Check if the result can fit into an smi. If we had an overflow, + // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi. __ add(r1, r0, Operand(0x40000000), SetCC); - __ b(&wont_fit_smi, mi); + // If result doesn't fit into an smi, branch to slow. + __ b(&slow, mi); // Tag the result. 
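The rewritten Math.floor fast path no longer saves and restores the FPSCR to force round-to-minus-infinity; it returns NaN, infinities and -0 unchanged, accepts exact int32 conversions directly, and for negative non-integers uses truncate-toward-zero minus one. A scalar model of the remaining logic (illustrative only; FloorFastPathModel is an invented name, and it assumes the NaN/±Inf/-0 and out-of-int32-range cases have already been filtered out, as the stub does before and around TryDoubleToInt32Exact):

// Sketch of the floor fast path for finite inputs already known to be in
// int32 range. *bailout is set where the stub would branch to &slow.
static int32_t FloorFastPathModel(double x, bool* bailout) {
  int32_t t = static_cast<int32_t>(x);          // truncate toward zero
  if (static_cast<double>(t) != x && x < 0.0) {
    t -= 1;                                     // floor(x) == trunc(x) - 1 for x < 0
  }
  // Smi range check: mirrors "add r1, r0, #0x40000000" + branch on minus,
  // i.e. results outside [-2^30, 2^30 - 1] cannot be tagged and bail out.
  if (t < -(1 << 30) || t > (1 << 30) - 1) {
    *bailout = true;
    return 0;
  }
  return t;
}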
- STATIC_ASSERT(kSmiTag == 0); __ mov(r0, Operand(r0, LSL, kSmiTagSize)); - // Check for -0. - __ cmp(r0, Operand(0, RelocInfo::NONE)); - __ b(&restore_fpscr_and_return, ne); - // r5 already holds the HeapNumber exponent. - __ tst(r5, Operand(HeapNumber::kSignMask)); - // If our HeapNumber is negative it was -0, so load its address and return. - // Else r0 is loaded with 0, so we can also just return. - __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne); - - __ bind(&restore_fpscr_and_return); - // Restore FPSCR and return. - __ vmsr(r3); + __ bind(&just_return); __ Drop(argc + 1); __ Ret(); - __ bind(&wont_fit_smi); - // Restore FPCSR and fall to slow case. - __ vmsr(r3); - __ bind(&slow); // Tail call the full function. We do not have to patch the receiver // because the function makes no use of it. @@ -2418,23 +2334,15 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( } -Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, - Handle<JSObject> holder, - Handle<JSFunction> function, - Handle<String> name, - CheckType check) { +void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Label* success) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address // ----------------------------------- - if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, - Handle<JSGlobalPropertyCell>::null(), - function, name); - // A null handle means bail out to the regular compiler code below. - if (!code.is_null()) return code; - } - Label miss; GenerateNameCheck(name, &miss); @@ -2468,78 +2376,94 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, break; case STRING_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - // Check that the object is a two-byte string or a symbol. - __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); - __ b(ge, &miss); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::STRING_FUNCTION_INDEX, r0, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - r0, holder, r3, r1, r4, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + // Check that the object is a string. + __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); + __ b(ge, &miss); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::STRING_FUNCTION_INDEX, r0, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + r0, holder, r3, r1, r4, name, &miss); break; - case NUMBER_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a smi or a heap number. - __ JumpIfSmi(r1, &fast); - __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE); - __ b(ne, &miss); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - r0, holder, r3, r1, r4, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. 
- __ jmp(&miss); - } + case SYMBOL_CHECK: + // Check that the object is a symbol. + __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE); + __ b(ne, &miss); break; - case BOOLEAN_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a boolean. - __ LoadRoot(ip, Heap::kTrueValueRootIndex); - __ cmp(r1, ip); - __ b(eq, &fast); - __ LoadRoot(ip, Heap::kFalseValueRootIndex); - __ cmp(r1, ip); - __ b(ne, &miss); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - r0, holder, r3, r1, r4, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + case NUMBER_CHECK: { + Label fast; + // Check that the object is a smi or a heap number. + __ JumpIfSmi(r1, &fast); + __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE); + __ b(ne, &miss); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + r0, holder, r3, r1, r4, name, &miss); break; + } + case BOOLEAN_CHECK: { + Label fast; + // Check that the object is a boolean. + __ LoadRoot(ip, Heap::kTrueValueRootIndex); + __ cmp(r1, ip); + __ b(eq, &fast); + __ LoadRoot(ip, Heap::kFalseValueRootIndex); + __ cmp(r1, ip); + __ b(ne, &miss); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + r0, holder, r3, r1, r4, name, &miss); + break; + } } + __ b(success); + + // Handle call cache miss. + __ bind(&miss); + GenerateMissBranch(); +} + + +void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; __ InvokeFunction( function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); +} - // Handle call cache miss. - __ bind(&miss); - GenerateMissBranch(); + +Handle<Code> CallStubCompiler::CompileCallConstant( + Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Handle<JSFunction> function) { + if (HasCustomCallGenerator(function)) { + Handle<Code> code = CompileCustomCall(object, holder, + Handle<JSGlobalPropertyCell>::null(), + function, Handle<String>::cast(name)); + // A null handle means bail out to the regular compiler code below. + if (!code.is_null()) return code; + } + + Label success; + + CompileHandlerFrontend(object, holder, name, check, &success); + __ bind(&success); + CompileHandlerBackend(function); // Return the generated code. 
return GetCode(function); @@ -2548,7 +2472,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address @@ -2589,13 +2513,14 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, Handle<JSFunction> function, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // -- r2 : name // -- lr : return address // ----------------------------------- if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, cell, function, name); + Handle<Code> code = CompileCustomCall( + object, holder, cell, function, Handle<String>::cast(name)); // A null handle means bail out to the regular compiler code below. if (!code.is_null()) return code; } @@ -2642,58 +2567,23 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( } -Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; - - GenerateStoreField(masm(), - object, - index, - transition, - name, - r1, r2, r3, r4, - &miss); - __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? Code::FIELD - : Code::MAP_TRANSITION, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<String> name, - Handle<JSObject> receiver, + Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- + Handle<ExecutableAccessorInfo> callback) { Label miss; // Check that the maps haven't changed. - __ JumpIfSmi(r1, &miss); - CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss); + __ JumpIfSmi(receiver(), &miss); + CheckPrototypes(object, receiver(), holder, + scratch1(), scratch2(), scratch3(), name, &miss); // Stub never generated for non-global objects that require access checks. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); - __ push(r1); // receiver + __ push(receiver()); // receiver __ mov(ip, Operand(callback)); // callback info - __ Push(ip, r2, r0); + __ Push(ip, this->name(), value()); // Do tail-call to the runtime system. ExternalReference store_callback_property = @@ -2703,11 +2593,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. 
- return GetCode(Code::CALLBACKS, name); + return GetICCode(kind(), Code::CALLBACKS, name); } @@ -2756,62 +2645,28 @@ void StoreStubCompiler::GenerateStoreViaSetter( #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreViaSetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> setter) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(r1, &miss); - CheckPrototypes(receiver, r1, holder, r3, r4, r5, name, &miss); - - GenerateStoreViaSetter(masm(), setter); - - __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> receiver, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- + Handle<JSObject> object, + Handle<Name> name) { Label miss; // Check that the map of the object hasn't changed. - __ CheckMap(r1, r3, Handle<Map>(receiver->map()), &miss, + __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform global security token check if needed. - if (receiver->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(r1, r3, &miss); + if (object->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); } // Stub is never generated for non-global objects that require access // checks. - ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded()); + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - __ Push(r1, r2, r0); // Receiver, name, value. + __ Push(receiver(), this->name(), value()); - __ mov(r0, Operand(Smi::FromInt(strict_mode_))); - __ push(r0); // strict mode + __ mov(scratch1(), Operand(Smi::FromInt(strict_mode()))); + __ push(scratch1()); // strict mode // Do tail-call to the runtime system. ExternalReference store_ic_property = @@ -2821,133 +2676,117 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); + return GetICCode(kind(), Code::INTERCEPTOR, name); } Handle<Code> StoreStubCompiler::CompileStoreGlobal( Handle<GlobalObject> object, Handle<JSGlobalPropertyCell> cell, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- + Handle<Name> name) { Label miss; // Check that the map of the global has not changed. - __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); - __ cmp(r3, Operand(Handle<Map>(object->map()))); + __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); + __ cmp(scratch1(), Operand(Handle<Map>(object->map()))); __ b(ne, &miss); // Check that the value in the cell is not the hole. 
If it is, this // cell could have been deleted and reintroducing the global needs // to update the property details in the property dictionary of the // global object. We bail out to the runtime system to do that. - __ mov(r4, Operand(cell)); - __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); - __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); - __ cmp(r5, r6); + __ mov(scratch1(), Operand(cell)); + __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex); + __ ldr(scratch3(), + FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); + __ cmp(scratch3(), scratch2()); __ b(eq, &miss); // Store the value in the cell. - __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset)); + __ str(value(), + FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); // Cells are always rescanned, so no write barrier here. Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3); + __ IncrementCounter( + counters->named_store_global_inline(), 1, scratch1(), scratch2()); __ Ret(); // Handle store cache miss. __ bind(&miss); - __ IncrementCounter(counters->named_store_global_inline_miss(), 1, r4, r3); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + __ IncrementCounter( + counters->named_store_global_inline_miss(), 1, scratch1(), scratch2()); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, name); + return GetICCode(kind(), Code::NORMAL, name); } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> last) { - // ----------- S t a t e ------------- - // -- r0 : receiver - // -- lr : return address - // ----------------------------------- - Label miss; - - // Check that receiver is not a smi. - __ JumpIfSmi(r0, &miss); +Handle<Code> LoadStubCompiler::CompileLoadNonexistent( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Handle<GlobalObject> global) { + Label success; - // Check the maps of the full prototype chain. - CheckPrototypes(object, r0, last, r3, r1, r4, name, &miss); - - // If the last object in the prototype chain is a global object, - // check that the global property cell is empty. - if (last->IsGlobalObject()) { - GenerateCheckPropertyCell( - masm(), Handle<GlobalObject>::cast(last), name, r1, &miss); - } + NonexistentHandlerFrontend(object, last, name, &success, global); + __ bind(&success); // Return undefined if maps of the full prototype chain are still the // same and no global property with this name contains a value. __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); __ Ret(); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::NONEXISTENT, factory()->empty_string()); + return GetCode(kind(), Code::NONEXISTENT, name); } -Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - int index, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- r0 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; +Register* LoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. 
+ static Register registers[] = { r0, r2, r3, r1, r4, r5 }; + return registers; +} - GenerateLoadField(object, holder, r0, r3, r1, r4, index, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::FIELD, name); +Register* KeyedLoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + static Register registers[] = { r1, r0, r2, r3, r4, r5 }; + return registers; } -Handle<Code> LoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- r0 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; - GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, r5, callback, name, - &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); +Register* StoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. + static Register registers[] = { r1, r2, r0, r3, r4, r5 }; + return registers; +} - // Return the generated code. - return GetCode(Code::CALLBACKS, name); + +Register* KeyedStoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. + static Register registers[] = { r2, r1, r0, r3, r4, r5 }; + return registers; +} + + +void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ cmp(name_reg, Operand(name)); + __ b(ne, miss); +} + + +void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ cmp(name_reg, Operand(name)); + __ b(ne, miss); } @@ -2988,90 +2827,18 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadViaGetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> getter) { - // ----------- S t a t e ------------- - // -- r0 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(r0, &miss); - CheckPrototypes(receiver, r0, holder, r3, r4, r1, name, &miss); - - GenerateLoadViaGetter(masm(), getter); - - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<JSFunction> value, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- r0 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; - - GenerateLoadConstant(object, holder, r0, r3, r1, r4, value, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. 
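These registers() tables establish a fixed per-kind register convention that the shared, platform-independent stub compiler code presumably consumes through accessors instead of hard-coded r0..r5. The indexing below is a hypothetical illustration of that idea, not the actual V8 declarations:

// Hypothetical accessors over the per-kind register tables above.
// Load/KeyedLoad:   { receiver, name, scratch1, scratch2, scratch3, scratch4 }
// Store/KeyedStore: { receiver, name, value,    scratch1, scratch2, scratch3 }
Register receiver() { return registers()[0]; }
Register name()     { return registers()[1]; }
// For load stubs, registers()[2..5] back scratch1()..scratch4();
// for store stubs, registers()[2] backs value() and [3..5] the scratches.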
- return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- r0 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; - - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - GenerateLoadInterceptor(object, holder, &lookup, r0, r2, r3, r1, r4, name, - &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); -} - - Handle<Code> LoadStubCompiler::CompileLoadGlobal( Handle<JSObject> object, - Handle<GlobalObject> holder, + Handle<GlobalObject> global, Handle<JSGlobalPropertyCell> cell, - Handle<String> name, + Handle<Name> name, bool is_dont_delete) { - // ----------- S t a t e ------------- - // -- r0 : receiver - // -- r2 : name - // -- lr : return address - // ----------------------------------- - Label miss; + Label success, miss; - // Check that the map of the global has not changed. - __ JumpIfSmi(r0, &miss); - CheckPrototypes(object, r0, holder, r3, r4, r1, name, &miss); + __ CheckMap( + receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK); + HandlerFrontendHeader( + object, receiver(), Handle<JSObject>::cast(global), name, &miss); // Get the value from the cell. __ mov(r3, Operand(cell)); @@ -3084,301 +2851,49 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal( __ b(eq, &miss); } - __ mov(r0, r4); + HandlerFrontendFooter(&success, &miss); + __ bind(&success); + Counters* counters = masm()->isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); + __ mov(r0, r4); __ Ret(); - __ bind(&miss); - __ IncrementCounter(counters->named_load_global_stub_miss(), 1, r1, r3); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::NORMAL, name); + return GetICCode(kind(), Code::NORMAL, name); } -Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - int index) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ cmp(r0, Operand(name)); - __ b(ne, &miss); - - GenerateLoadField(receiver, holder, r1, r2, r3, r4, index, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::FIELD, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. 
- __ cmp(r0, Operand(name)); - __ b(ne, &miss); - - GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, r5, callback, name, - &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> value) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ cmp(r0, Operand(name)); - __ b(ne, &miss); - - GenerateLoadConstant(receiver, holder, r1, r2, r3, r4, value, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor( - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ cmp(r0, Operand(name)); - __ b(ne, &miss); - - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name, - &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::INTERCEPTOR, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ cmp(r0, Operand(name)); - __ b(ne, &miss); - - GenerateLoadArrayLength(masm(), r1, r2, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3); - - // Check the key is the cached one. - __ cmp(r0, Operand(name)); - __ b(ne, &miss); - - GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3); - - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- +Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC( + MapHandleList* receiver_maps, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); - - // Check the name hasn't changed. 
- __ cmp(r0, Operand(name)); - __ b(ne, &miss); - - GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - ElementsKind elements_kind = receiver_map->elements_kind(); - Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode(); - - __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string()); -} + if (check == PROPERTY) { + GenerateNameCheck(name, this->name(), &miss); + } - -Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( - MapHandleList* receiver_maps, - CodeHandleList* handler_ics) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss; - __ JumpIfSmi(r1, &miss); + __ JumpIfSmi(receiver(), &miss); + Register map_reg = scratch1(); int receiver_count = receiver_maps->length(); - __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); + __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); for (int current = 0; current < receiver_count; ++current) { __ mov(ip, Operand(receiver_maps->at(current))); - __ cmp(r2, ip); - __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq); + __ cmp(map_reg, ip); + __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq); } __ bind(&miss); - Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : name - // -- r2 : receiver - // -- lr : return address - // ----------------------------------- - Label miss; - - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_store_field(), 1, r3, r4); - - // Check that the name has not changed. - __ cmp(r1, Operand(name)); - __ b(ne, &miss); - - // r3 is used as scratch register. r1 and r2 keep their values if a jump to - // the miss label is generated. - GenerateStoreField(masm(), - object, - index, - transition, - name, - r2, r1, r3, r4, - &miss); - __ bind(&miss); - - __ DecrementCounter(counters->keyed_store_field(), 1, r3, r4); - Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? 
Code::FIELD - : Code::MAP_TRANSITION, name); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // -- r3 : scratch - // ----------------------------------- - ElementsKind elements_kind = receiver_map->elements_kind(); - bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; - Handle<Code> stub = - KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode(); - - __ DispatchMap(r2, r3, receiver_map, stub, DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string()); + InlineCacheState state = + receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC; + return GetICCode(kind(), type, name, state); } @@ -3386,38 +2901,31 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { - // ----------- S t a t e ------------- - // -- r0 : value - // -- r1 : key - // -- r2 : receiver - // -- lr : return address - // -- r3 : scratch - // ----------------------------------- Label miss; - __ JumpIfSmi(r2, &miss); + __ JumpIfSmi(receiver(), &miss); int receiver_count = receiver_maps->length(); - __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); + __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); for (int i = 0; i < receiver_count; ++i) { __ mov(ip, Operand(receiver_maps->at(i))); - __ cmp(r3, ip); + __ cmp(scratch1(), ip); if (transitioned_maps->at(i).is_null()) { __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); } else { Label next_map; __ b(ne, &next_map); - __ mov(r3, Operand(transitioned_maps->at(i))); + __ mov(transition_map(), Operand(transitioned_maps->at(i))); __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); __ bind(&next_map); } } __ bind(&miss); - Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); + return GetICCode( + kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } @@ -3524,7 +3032,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ bind(&next); } else { // Set the property to the constant value. - Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i)); + Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i), + isolate()); __ mov(r2, Operand(constant)); __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); } @@ -3597,9 +3106,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- r0 : key // -- r1 : receiver // ----------------------------------- - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); // Miss case, call the runtime. 
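CompilePolymorphicIC and CompileStorePolymorphic above both emit the same dispatch shape: load the receiver map once, compare it against each cached map, and jump to the matching handler, falling through to the miss builtin. A conceptual C++ model of that selection (illustrative; the real stub is straight-line generated code, and SelectHandler is an invented name):

// Sketch of the map-based dispatch the polymorphic stubs generate.
static Handle<Code> SelectHandler(Map* receiver_map,
                                  MapHandleList* maps,
                                  CodeHandleList* handlers,
                                  Handle<Code> miss_builtin) {
  for (int i = 0; i < maps->length(); ++i) {
    if (*maps->at(i) == receiver_map) return handlers->at(i);
  }
  return miss_builtin;  // TailCallBuiltin(masm(), MissBuiltin(kind()))
}

The resulting code object is then registered as MONOMORPHIC or POLYMORPHIC depending on how many maps it handles, which is what the GetICCode call reflects.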
__ bind(&miss_force_generic); @@ -3609,10 +3116,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- r0 : key // -- r1 : receiver // ----------------------------------- - - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric); } @@ -3654,7 +3158,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, DwVfpRegister double_scratch1, Label* fail) { if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); Label key_ok; // Check for smi or a smi inside a heap number. We convert the heap // number and check if the conversion is exact and fits into the smi @@ -3667,12 +3171,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, DONT_DO_SMI_CHECK); __ sub(ip, key, Operand(kHeapObjectTag)); __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); - __ EmitVFPTruncate(kRoundToZero, - scratch0, - double_scratch0, - scratch1, - double_scratch1, - kCheckForInexactConversion); + __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1); __ b(ne, fail); __ TrySmiTag(scratch0, fail, scratch1); __ mov(key, scratch0); @@ -3684,339 +3183,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, } -void KeyedLoadStubCompiler::GenerateLoadExternalArray( - MacroAssembler* masm, - ElementsKind elements_kind) { - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss_force_generic, slow, failed_allocation; - - Register key = r0; - Register receiver = r1; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); - - __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // r3: elements array - - // Check that the index is in range. - __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); - __ cmp(key, ip); - // Unsigned comparison catches both negative and too-large values. - __ b(hs, &miss_force_generic); - - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); - // r3: base pointer of external storage - - // We are not untagging smi key and instead work with it - // as if it was premultiplied by 2. 
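GenerateSmiKeyCheck now relies on TryDoubleToInt32Exact: a heap-number key is accepted only when it converts to an int32 without rounding and then tags as a smi. A scalar model of that acceptance test (illustrative; KeyConvertsToSmi is an invented name and assumes the input is already within int32 range):

// Sketch: true when `key` may be used as a smi element index by the stub.
static bool KeyConvertsToSmi(double key, int32_t* smi_key) {
  int32_t i = static_cast<int32_t>(key);
  if (static_cast<double>(i) != key) return false;         // inexact conversion
  if (i < -(1 << 30) || i > (1 << 30) - 1) return false;   // outside smi range
  *smi_key = i;
  return true;
}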
- STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); - - Register value = r2; - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ ldrsb(value, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ ldrb(value, MemOperand(r3, key, LSR, 1)); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ ldrsh(value, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ ldrh(value, MemOperand(r3, key, LSL, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ ldr(value, MemOperand(r3, key, LSL, 1)); - break; - case EXTERNAL_FLOAT_ELEMENTS: - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - __ add(r2, r3, Operand(key, LSL, 1)); - __ vldr(s0, r2, 0); - } else { - __ ldr(value, MemOperand(r3, key, LSL, 1)); - } - break; - case EXTERNAL_DOUBLE_ELEMENTS: - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - __ add(r2, r3, Operand(key, LSL, 2)); - __ vldr(d0, r2, 0); - } else { - __ add(r4, r3, Operand(key, LSL, 2)); - // r4: pointer to the beginning of the double we want to load. - __ ldr(r2, MemOperand(r4, 0)); - __ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); - } - break; - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - - // For integer array types: - // r2: value - // For float array type: - // s0: value (if VFP3 is supported) - // r2: value (if VFP3 is not supported) - // For double array type: - // d0: value (if VFP3 is supported) - // r2/r3: value (if VFP3 is not supported) - - if (elements_kind == EXTERNAL_INT_ELEMENTS) { - // For the Int and UnsignedInt array types, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. - Label box_int; - __ cmp(value, Operand(0xC0000000)); - __ b(mi, &box_int); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - __ bind(&box_int); - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't touch r0 or r1 as they are needed if allocation - // fails. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - - __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); - // Now we can use r0 for the result as key is not needed any more. - __ add(r0, r5, Operand(kHeapObjectTag)); - __ vmov(s0, value); - __ vcvt_f64_s32(d0, s0); - __ vstr(d0, r5, HeapNumber::kValueOffset); - __ Ret(); - } else { - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't touch r0 or r1 as they are needed if allocation - // fails. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); - // Now we can use r0 for the result as key is not needed any more. 
- __ mov(r0, r5); - Register dst1 = r1; - Register dst2 = r3; - FloatingPointHelper::Destination dest = - FloatingPointHelper::kCoreRegisters; - FloatingPointHelper::ConvertIntToDouble(masm, - value, - dest, - d0, - dst1, - dst2, - r9, - s0); - __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); - __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { - // The test is different for unsigned int values. Since we need - // the value to be in the range of a positive smi, we can't - // handle either of the top two bits being set in the value. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - Label box_int, done; - __ tst(value, Operand(0xC0000000)); - __ b(ne, &box_int); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - __ bind(&box_int); - __ vmov(s0, value); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all - // registers - also when jumping due to exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); - - __ vcvt_f64_u32(d0, s0); - __ vstr(d0, r2, HeapNumber::kValueOffset); - - __ add(r0, r2, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Check whether unsigned integer fits into smi. - Label box_int_0, box_int_1, done; - __ tst(value, Operand(0x80000000)); - __ b(ne, &box_int_0); - __ tst(value, Operand(0x40000000)); - __ b(ne, &box_int_1); - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - - Register hiword = value; // r2. - Register loword = r3; - - __ bind(&box_int_0); - // Integer does not have leading zeros. - GenerateUInt2Double(masm, hiword, loword, r4, 0); - __ b(&done); - - __ bind(&box_int_1); - // Integer has one leading zero. - GenerateUInt2Double(masm, hiword, loword, r4, 1); - - - __ bind(&done); - // Integer was converted to double in registers hiword:loword. - // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber - // clobbers all registers - also when jumping due to exhausted young - // space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT); - - __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); - __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); - - __ mov(r0, r4); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); - __ vcvt_f64_f32(d0, s0); - __ vstr(d0, r2, HeapNumber::kValueOffset); - - __ add(r0, r2, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT); - // VFP is not available, do manual single to double conversion. 
- - // r2: floating point value (binary32) - // r3: heap number for result - - // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to - // the slow case from here. - __ and_(r0, value, Operand(kBinary32MantissaMask)); - - // Extract exponent to r1. OK to clobber r1 now as there are no jumps to - // the slow case from here. - __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); - __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); - - Label exponent_rebiased; - __ teq(r1, Operand(0x00)); - __ b(eq, &exponent_rebiased); - - __ teq(r1, Operand(0xff)); - __ mov(r1, Operand(0x7ff), LeaveCC, eq); - __ b(eq, &exponent_rebiased); - - // Rebias exponent. - __ add(r1, - r1, - Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); - - __ bind(&exponent_rebiased); - __ and_(r2, value, Operand(kBinary32SignMask)); - value = no_reg; - __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); - - // Shift mantissa. - static const int kMantissaShiftForHiWord = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaShiftForLoWord = - kBitsPerInt - kMantissaShiftForHiWord; - - __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); - __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); - - __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); - __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); - - __ mov(r0, r3); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); - __ vstr(d0, r2, HeapNumber::kValueOffset); - - __ add(r0, r2, Operand(kHeapObjectTag)); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use r0 and r1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT); - - __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); - __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); - __ mov(r0, r4); - __ Ret(); - } - - } else { - // Tag integer as smi and return it. - __ mov(r0, Operand(value, LSL, kSmiTagSize)); - __ Ret(); - } - - // Slow case, key and receiver still in r0 and r1. - __ bind(&slow); - __ IncrementCounter( - masm->isolate()->counters()->keyed_load_external_array_slow(), - 1, r2, r3); - - // ---------- S t a t e -------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - - __ Push(r1, r0); - - __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); - - __ bind(&miss_force_generic); - Handle<Code> stub = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(stub, RelocInfo::CODE_TARGET); -} - - void KeyedStoreStubCompiler::GenerateStoreExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -4096,10 +3262,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } FloatingPointHelper::ConvertIntToDouble( masm, r5, destination, - d0, r6, r7, // These are: double_dst, dst1, dst2. + d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent. r4, s2); // These are: scratch2, single_scratch. 
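The non-VFP path deleted above widens an IEEE-754 binary32 value to binary64 using integer operations only: keep the sign bit, rebias the exponent from 127 to 1023 (with exponent fields 0x00 and 0xFF passed through to 0x000 and 0x7FF), and shift the 23-bit mantissa up into the 52-bit field. A rough portable sketch of the same bit manipulation (the helper name is made up for illustration):

    #include <cstdint>

    uint64_t WidenFloat32Bits(uint32_t f) {
      uint64_t sign     = static_cast<uint64_t>(f >> 31) << 63;
      uint32_t exp32    = (f >> 23) & 0xFF;
      uint64_t mantissa = static_cast<uint64_t>(f & 0x007FFFFFu) << (52 - 23);

      uint64_t exp64;
      if (exp32 == 0x00) {
        exp64 = 0x000;                 // zero/subnormal: exponent field stays zero, as in the stub
      } else if (exp32 == 0xFF) {
        exp64 = 0x7FF;                 // infinity and NaN
      } else {
        exp64 = exp32 + (1023 - 127);  // rebias the exponent
      }
      return sign | (exp64 << 52) | mantissa;
    }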
if (destination == FloatingPointHelper::kVFPRegisters) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); __ vstr(d0, r3, 0); } else { __ str(r6, MemOperand(r3, 0)); @@ -4135,7 +3301,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // +/-Infinity into integer arrays basically undefined. For more // reproducible behavior, convert these to zero. if (CpuFeatures::IsSupported(VFP2)) { - CpuFeatures::Scope scope(VFP2); + CpuFeatureScope scope(masm, VFP2); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { // vldr requires offset to be a multiple of 4 so we can not @@ -4155,7 +3321,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // not include -kHeapObjectTag into it. __ sub(r5, value, Operand(kHeapObjectTag)); __ vldr(d0, r5, HeapNumber::kValueOffset); - __ EmitECMATruncate(r5, d0, s2, r6, r7, r9); + __ ECMAToInt32VFP(r5, d0, d1, r6, r7, r9); switch (elements_kind) { case EXTERNAL_BYTE_ELEMENTS: @@ -4263,18 +3429,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // and infinities. All these should be converted to 0. __ mov(r7, Operand(HeapNumber::kExponentMask)); __ and_(r9, r5, Operand(r7), SetCC); - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ mov(r5, Operand::Zero(), LeaveCC, eq); __ b(eq, &done); __ teq(r9, Operand(r7)); - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); + __ mov(r5, Operand::Zero(), LeaveCC, eq); __ b(eq, &done); // Unbias exponent. __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); // If exponent is negative then result is 0. - __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); + __ mov(r5, Operand::Zero(), LeaveCC, mi); __ b(mi, &done); // If exponent is too big then result is minimal value. @@ -4290,14 +3456,14 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); __ b(pl, &sign); - __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); + __ rsb(r9, r9, Operand::Zero()); __ mov(r5, Operand(r5, LSL, r9)); __ rsb(r9, r9, Operand(meaningfull_bits)); __ orr(r5, r5, Operand(r6, LSR, r9)); __ bind(&sign); - __ teq(r7, Operand(0, RelocInfo::NONE)); - __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); + __ teq(r7, Operand::Zero()); + __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne); __ bind(&done); switch (elements_kind) { @@ -4342,9 +3508,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // -- r0 : key // -- r1 : receiver // ----------------------------------- - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); // Miss case, call the runtime. __ bind(&miss_force_generic); @@ -4354,122 +3518,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // -- r0 : key // -- r1 : receiver // ----------------------------------- - - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss_force_generic; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. 
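The ECMAToInt32VFP helper that replaces EmitECMATruncate above implements the ECMA-262 ToInt32 conversion used when a double is stored into an integer-typed external array: NaN and +/-Infinity become 0, and finite values are truncated toward zero and wrapped modulo 2^32 (the non-VFP fallback that follows approximates the same thing with explicit exponent and shift arithmetic). A portable C++ sketch of the conversion:

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32(double value) {
      if (!std::isfinite(value)) return 0;                           // NaN, +/-Infinity
      double wrapped = std::fmod(std::trunc(value), 4294967296.0);   // now in (-2^32, 2^32)
      if (wrapped < 0) wrapped += 4294967296.0;                      // now in [0, 2^32)
      return static_cast<int32_t>(static_cast<uint32_t>(wrapped));   // two's-complement wrap
    }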
- GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic); - - // Get the elements array. - __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); - __ AssertFastElements(r2); - - // Check that the key is within bounds. - __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); - __ cmp(r0, Operand(r3)); - __ b(hs, &miss_force_generic); - - // Load the result and make sure it's not the hole. - __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ ldr(r4, - MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); - __ cmp(r4, ip); - __ b(eq, &miss_force_generic); - __ mov(r0, r4); - __ Ret(); - - __ bind(&miss_force_generic); - Handle<Code> stub = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(stub, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( - MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- lr : return address - // -- r0 : key - // -- r1 : receiver - // ----------------------------------- - Label miss_force_generic, slow_allocate_heapnumber; - - Register key_reg = r0; - Register receiver_reg = r1; - Register elements_reg = r2; - Register heap_number_reg = r2; - Register indexed_double_offset = r3; - Register scratch = r4; - Register scratch2 = r5; - Register scratch3 = r6; - Register heap_number_map = r7; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); - - // Get the elements array. - __ ldr(elements_reg, - FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - - // Check that the key is within bounds. - __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); - __ cmp(key_reg, Operand(scratch)); - __ b(hs, &miss_force_generic); - - // Load the upper word of the double in the fixed array and test for NaN. - __ add(indexed_double_offset, elements_reg, - Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); - uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); - __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); - __ cmp(scratch, Operand(kHoleNanUpper32)); - __ b(&miss_force_generic, eq); - - // Non-NaN. Allocate a new heap number and copy the double value into it. - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, - heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); - - // Don't need to reload the upper 32 bits of the double, it's already in - // scratch. 
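The kHoleNanUpper32 comparison in the removed fast-double-element load above works because a FixedDoubleArray stores raw 64-bit doubles and reserves one specific NaN bit pattern as the hole marker; any other NaN written into such an array is canonicalized first (see canonical_non_hole_nan further down in this patch), so checking only the upper 32 bits of an element is sufficient. A small sketch of that check, with the constant passed in rather than hard-coded since its exact value is V8-internal:

    #include <cstdint>
    #include <cstring>

    bool IsTheHole(double element, uint32_t hole_nan_upper32) {
      uint64_t bits;
      std::memcpy(&bits, &element, sizeof bits);   // reinterpret the double's bit pattern
      return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
    }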
- __ str(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kExponentOffset)); - __ ldr(scratch, FieldMemOperand(indexed_double_offset, - FixedArray::kHeaderSize)); - __ str(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kMantissaOffset)); - - __ mov(r0, heap_number_reg); - __ Ret(); - - __ bind(&slow_allocate_heapnumber); - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); - - __ bind(&miss_force_generic); - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); } @@ -4477,7 +3526,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key @@ -4517,7 +3566,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( } // Compare smis. __ cmp(key_reg, scratch); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { __ b(hs, &grow); } else { __ b(hs, &miss_force_generic); @@ -4562,15 +3611,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ Ret(); __ bind(&miss_force_generic); - Handle<Code> ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic_miss, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Grow the array by a single element if possible. 
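The bounds check in the stub above (cmp key_reg, scratch followed by b(hs, ...)), and the matching one in the double-element stub below, relies on a standard Smi trick: key and length are both tagged by shifting left one bit, tagging preserves order, and a negative key's tagged form wraps around to a very large unsigned value, so a single unsigned comparison rejects negative and out-of-range indexes at once. A tiny sketch of the idea (names are illustrative):

    #include <cstdint>

    bool SmiKeyInBounds(int32_t key, int32_t length) {
      uint32_t tagged_key    = static_cast<uint32_t>(key) << 1;
      uint32_t tagged_length = static_cast<uint32_t>(length) << 1;
      return tagged_key < tagged_length;   // one unsigned compare, as in cmp / b(hs)
    }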
__ bind(&grow); @@ -4588,8 +3634,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ b(ne, &check_capacity); int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow, - TAG_OBJECT); + __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT); __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset)); @@ -4633,8 +3678,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ jmp(&finish_store); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ Jump(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } @@ -4642,15 +3686,18 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( MacroAssembler* masm, bool is_js_array, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- r0 : value // -- r1 : key // -- r2 : receiver // -- lr : return address - // -- r3 : scratch + // -- r3 : scratch (elements backing store) // -- r4 : scratch // -- r5 : scratch + // -- r6 : scratch + // -- r7 : scratch + // -- r9 : scratch // ----------------------------------- Label miss_force_generic, transition_elements_kind, grow, slow; Label finish_store, check_capacity; @@ -4663,6 +3710,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Register scratch2 = r5; Register scratch3 = r6; Register scratch4 = r7; + Register scratch5 = r9; Register length_reg = r7; // This stub is meant to be tail-jumped to, the receiver must already @@ -4684,7 +3732,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. __ cmp(key_reg, scratch1); - if (grow_mode == ALLOW_JSARRAY_GROWTH) { + if (IsGrowStoreMode(store_mode)) { __ b(hs, &grow); } else { __ b(hs, &miss_force_generic); @@ -4693,7 +3741,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ bind(&finish_store); __ StoreNumberToDoubleElements(value_reg, key_reg, - receiver_reg, // All registers after this are overwritten. elements_reg, scratch1, @@ -4705,15 +3752,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Handle store cache miss, replacing the ic with the generic stub. __ bind(&miss_force_generic); - Handle<Code> ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic_miss, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Grow the array by a single element if possible. __ bind(&grow); @@ -4739,11 +3783,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ b(ne, &check_capacity); int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow, - TAG_OBJECT); + __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT); - // Initialize the new FixedDoubleArray. Leave elements unitialized for - // efficiency, they are guaranteed to be initialized before use. 
+ // Initialize the new FixedDoubleArray. __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex); __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset)); __ mov(scratch1, @@ -4751,6 +3793,25 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ str(scratch1, FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); + __ mov(scratch1, elements_reg); + __ StoreNumberToDoubleElements(value_reg, + key_reg, + // All registers after this are overwritten. + scratch1, + scratch2, + scratch3, + scratch4, + scratch5, + &transition_elements_kind); + + __ mov(scratch1, Operand(kHoleNanLower32)); + __ mov(scratch2, Operand(kHoleNanUpper32)); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { + int offset = FixedDoubleArray::OffsetOfElementAt(i); + __ str(scratch1, FieldMemOperand(elements_reg, offset)); + __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize)); + } + // Install the new backing store in the JSArray. __ str(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); @@ -4763,7 +3824,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); __ ldr(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ jmp(&finish_store); + __ Ret(); __ bind(&check_capacity); // Make sure that the backing store can hold additional elements. @@ -4778,8 +3839,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ jmp(&finish_store); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ Jump(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js index 250c30c32b..7cf744bedf 100644 --- a/deps/v8/src/array.js +++ b/deps/v8/src/array.js @@ -413,6 +413,7 @@ function ArrayJoin(separator) { ["Array.prototype.join"]); } + var length = TO_UINT32(this.length); if (IS_UNDEFINED(separator)) { separator = ','; } else if (!IS_STRING(separator)) { @@ -422,7 +423,7 @@ function ArrayJoin(separator) { var result = %_FastAsciiArrayJoin(this, separator); if (!IS_UNDEFINED(result)) return result; - return Join(this, TO_UINT32(this.length), separator, ConvertToString); + return Join(this, length, separator, ConvertToString); } @@ -441,8 +442,8 @@ function ArrayPop() { } n--; var value = this[n]; - this.length = n; delete this[n]; + this.length = n; return value; } @@ -581,7 +582,7 @@ function ArrayShift() { var first = this[0]; - if (IS_ARRAY(this)) { + if (IS_ARRAY(this) && !%IsObserved(this)) { SmartMove(this, 0, 1, len, 0); } else { SimpleMove(this, 0, 1, len, 0); @@ -602,7 +603,7 @@ function ArrayUnshift(arg1) { // length == 1 var len = TO_UINT32(this.length); var num_arguments = %_ArgumentsLength(); - if (IS_ARRAY(this)) { + if (IS_ARRAY(this) && !%IsObserved(this)) { SmartMove(this, 0, 0, len, num_arguments); } else { SimpleMove(this, 0, 0, len, num_arguments); @@ -649,6 +650,7 @@ function ArraySlice(start, end) { if (end_i < start_i) return result; if (IS_ARRAY(this) && + !%IsObserved(this) && (end_i > 1000) && (%EstimateNumberOfElements(this) < end_i)) { SmartSlice(this, start_i, end_i - start_i, len, result); @@ -705,7 +707,9 @@ function ArraySplice(start, delete_count) { var use_simple_splice = true; - if (IS_ARRAY(this) && num_additional_args !== del_count) { + if (IS_ARRAY(this) && + !%IsObserved(this) && + num_additional_args !== del_count) { // If we are only deleting/moving a few 
things near the end of the // array then the simple version is going to be faster, because it // doesn't touch most of the array. @@ -881,7 +885,7 @@ function ArraySort(comparefn) { // of a prototype property. var CopyFromPrototype = function CopyFromPrototype(obj, length) { var max = 0; - for (var proto = obj.__proto__; proto; proto = proto.__proto__) { + for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) { var indices = %GetArrayKeys(proto, length); if (indices.length > 0) { if (indices[0] == -1) { @@ -912,7 +916,7 @@ function ArraySort(comparefn) { // where a prototype of obj has an element. I.e., shadow all prototype // elements in that range. var ShadowPrototypeElements = function(obj, from, to) { - for (var proto = obj.__proto__; proto; proto = proto.__proto__) { + for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) { var indices = %GetArrayKeys(proto, to); if (indices.length > 0) { if (indices[0] == -1) { @@ -982,7 +986,7 @@ function ArraySort(comparefn) { } for (i = length - num_holes; i < length; i++) { // For compatability with Webkit, do not expose elements in the prototype. - if (i in obj.__proto__) { + if (i in %GetPrototype(obj)) { obj[i] = void 0; } else { delete obj[i]; @@ -1549,6 +1553,15 @@ function SetUpArray() { // exposed to user code. // Adding only the functions that are actually used. SetUpLockedPrototype(InternalArray, $Array(), $Array( + "concat", getFunction("concat", ArrayConcat), + "indexOf", getFunction("indexOf", ArrayIndexOf), + "join", getFunction("join", ArrayJoin), + "pop", getFunction("pop", ArrayPop), + "push", getFunction("push", ArrayPush), + "splice", getFunction("splice", ArraySplice) + )); + + SetUpLockedPrototype(InternalPackedArray, $Array(), $Array( "join", getFunction("join", ArrayJoin), "pop", getFunction("pop", ArrayPop), "push", getFunction("push", ArrayPush) diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc index d81d4ae614..8536ca006f 100644 --- a/deps/v8/src/assembler.cc +++ b/deps/v8/src/assembler.cc @@ -91,6 +91,7 @@ namespace internal { struct DoubleConstant BASE_EMBEDDED { double min_int; double one_half; + double minus_one_half; double minus_zero; double zero; double uint8_max_value; @@ -103,18 +104,110 @@ static DoubleConstant double_constants; const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING"; +static bool math_exp_data_initialized = false; +static Mutex* math_exp_data_mutex = NULL; +static double* math_exp_constants_array = NULL; +static double* math_exp_log_table_array = NULL; + // ----------------------------------------------------------------------------- // Implementation of AssemblerBase -AssemblerBase::AssemblerBase(Isolate* isolate) +AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size) : isolate_(isolate), - jit_cookie_(0) { + jit_cookie_(0), + enabled_cpu_features_(0), + emit_debug_code_(FLAG_debug_code), + predictable_code_size_(false) { if (FLAG_mask_constants_with_cookie && isolate != NULL) { jit_cookie_ = V8::RandomPrivate(isolate); } + + if (buffer == NULL) { + // Do our own buffer management. + if (buffer_size <= kMinimalBufferSize) { + buffer_size = kMinimalBufferSize; + if (isolate->assembler_spare_buffer() != NULL) { + buffer = isolate->assembler_spare_buffer(); + isolate->set_assembler_spare_buffer(NULL); + } + } + if (buffer == NULL) buffer = NewArray<byte>(buffer_size); + own_buffer_ = true; + } else { + // Use externally provided buffer instead. 
+ ASSERT(buffer_size > 0); + own_buffer_ = false; + } + buffer_ = static_cast<byte*>(buffer); + buffer_size_ = buffer_size; + + pc_ = buffer_; +} + + +AssemblerBase::~AssemblerBase() { + if (own_buffer_) { + if (isolate() != NULL && + isolate()->assembler_spare_buffer() == NULL && + buffer_size_ == kMinimalBufferSize) { + isolate()->set_assembler_spare_buffer(buffer_); + } else { + DeleteArray(buffer_); + } + } +} + + +// ----------------------------------------------------------------------------- +// Implementation of PredictableCodeSizeScope + +PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler, + int expected_size) + : assembler_(assembler), + expected_size_(expected_size), + start_offset_(assembler->pc_offset()), + old_value_(assembler->predictable_code_size()) { + assembler_->set_predictable_code_size(true); +} + + +PredictableCodeSizeScope::~PredictableCodeSizeScope() { + // TODO(svenpanne) Remove the 'if' when everything works. + if (expected_size_ >= 0) { + CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_); + } + assembler_->set_predictable_code_size(old_value_); +} + + +// ----------------------------------------------------------------------------- +// Implementation of CpuFeatureScope + +#ifdef DEBUG +CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) + : assembler_(assembler) { + ASSERT(CpuFeatures::IsSafeForSnapshot(f)); + old_enabled_ = assembler_->enabled_cpu_features(); + uint64_t mask = static_cast<uint64_t>(1) << f; + // TODO(svenpanne) This special case below doesn't belong here! +#if V8_TARGET_ARCH_ARM + // VFP2 and ARMv7 are implied by VFP3. + if (f == VFP3) { + mask |= + static_cast<uint64_t>(1) << VFP2 | + static_cast<uint64_t>(1) << ARMv7; + } +#endif + assembler_->set_enabled_cpu_features(old_enabled_ | mask); } +CpuFeatureScope::~CpuFeatureScope() { + assembler_->set_enabled_cpu_features(old_enabled_); +} +#endif + + // ----------------------------------------------------------------------------- // Implementation of Label @@ -313,6 +406,7 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) { #ifdef DEBUG byte* begin_pos = pos_; #endif + ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES); ASSERT(rinfo->pc() - last_pc_ >= 0); ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM <= kMaxStandardNonCompactModes); @@ -570,6 +664,15 @@ void RelocIterator::next() { } } } + if (code_age_sequence_ != NULL) { + byte* old_code_age_sequence = code_age_sequence_; + code_age_sequence_ = NULL; + if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) { + rinfo_.data_ = 0; + rinfo_.pc_ = old_code_age_sequence; + return; + } + } done_ = true; } @@ -585,6 +688,12 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) { mode_mask_ = mode_mask; last_id_ = 0; last_position_ = 0; + byte* sequence = code->FindCodeAgeSequence(); + if (sequence != NULL && !Code::IsYoungSequence(sequence)) { + code_age_sequence_ = sequence; + } else { + code_age_sequence_ = NULL; + } if (mode_mask_ == 0) pos_ = end_; next(); } @@ -600,6 +709,7 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) { mode_mask_ = mode_mask; last_id_ = 0; last_position_ = 0; + code_age_sequence_ = NULL; if (mode_mask_ == 0) pos_ = end_; next(); } @@ -609,11 +719,28 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) { // Implementation of RelocInfo +#ifdef DEBUG +bool RelocInfo::RequiresRelocation(const CodeDesc& desc) { + // Ensure there are no code targets or embedded objects present in the + // 
deoptimization entries, they would require relocation after code + // generation. + int mode_mask = RelocInfo::kCodeTargetMask | + RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | + RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) | + RelocInfo::kApplyMask; + RelocIterator it(desc, mode_mask); + return !it.done(); +} +#endif + + #ifdef ENABLE_DISASSEMBLER const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { switch (rmode) { - case RelocInfo::NONE: - return "no reloc"; + case RelocInfo::NONE32: + return "no reloc 32"; + case RelocInfo::NONE64: + return "no reloc 64"; case RelocInfo::EMBEDDED_OBJECT: return "embedded object"; case RelocInfo::CONSTRUCT_CALL: @@ -652,6 +779,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { UNREACHABLE(); #endif return "debug break slot"; + case RelocInfo::CODE_AGE_SEQUENCE: + return "code_age_sequence"; case RelocInfo::NUMBER_OF_MODES: UNREACHABLE(); return "number_of_modes"; @@ -660,7 +789,7 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) { } -void RelocInfo::Print(FILE* out) { +void RelocInfo::Print(Isolate* isolate, FILE* out) { PrintF(out, "%p %s", pc_, RelocModeName(rmode_)); if (IsComment(rmode_)) { PrintF(out, " (%s)", reinterpret_cast<char*>(data_)); @@ -682,11 +811,11 @@ void RelocInfo::Print(FILE* out) { } } else if (IsPosition(rmode_)) { PrintF(out, " (%" V8_PTR_PREFIX "d)", data()); - } else if (rmode_ == RelocInfo::RUNTIME_ENTRY && - Isolate::Current()->deoptimizer_data() != NULL) { + } else if (IsRuntimeEntry(rmode_) && + isolate->deoptimizer_data() != NULL) { // Depotimization bailouts are stored as runtime entries. int id = Deoptimizer::GetDeoptimizationId( - target_address(), Deoptimizer::EAGER); + isolate, target_address(), Deoptimizer::EAGER); if (id != Deoptimizer::kNotDeoptimizationEntry) { PrintF(out, " (deoptimization bailout %d)", id); } @@ -734,11 +863,15 @@ void RelocInfo::Verify() { case INTERNAL_REFERENCE: case CONST_POOL: case DEBUG_BREAK_SLOT: - case NONE: + case NONE32: + case NONE64: break; case NUMBER_OF_MODES: UNREACHABLE(); break; + case CODE_AGE_SEQUENCE: + ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode()); + break; } } #endif // VERIFY_HEAP @@ -750,12 +883,77 @@ void RelocInfo::Verify() { void ExternalReference::SetUp() { double_constants.min_int = kMinInt; double_constants.one_half = 0.5; + double_constants.minus_one_half = -0.5; double_constants.minus_zero = -0.0; double_constants.uint8_max_value = 255; double_constants.zero = 0.0; double_constants.canonical_non_hole_nan = OS::nan_value(); double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64); double_constants.negative_infinity = -V8_INFINITY; + + math_exp_data_mutex = OS::CreateMutex(); +} + + +void ExternalReference::InitializeMathExpData() { + // Early return? + if (math_exp_data_initialized) return; + + math_exp_data_mutex->Lock(); + if (!math_exp_data_initialized) { + // If this is changed, generated code must be adapted too. + const int kTableSizeBits = 11; + const int kTableSize = 1 << kTableSizeBits; + const double kTableSizeDouble = static_cast<double>(kTableSize); + + math_exp_constants_array = new double[9]; + // Input values smaller than this always return 0. + math_exp_constants_array[0] = -708.39641853226408; + // Input values larger than this always return +Infinity. + math_exp_constants_array[1] = 709.78271289338397; + math_exp_constants_array[2] = V8_INFINITY; + // The rest is black magic. Do not attempt to understand it. 
It is + // loosely based on the "expd" function published at: + // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html + const double constant3 = (1 << kTableSizeBits) / log(2.0); + math_exp_constants_array[3] = constant3; + math_exp_constants_array[4] = + static_cast<double>(static_cast<int64_t>(3) << 51); + math_exp_constants_array[5] = 1 / constant3; + math_exp_constants_array[6] = 3.0000000027955394; + math_exp_constants_array[7] = 0.16666666685227835; + math_exp_constants_array[8] = 1; + + math_exp_log_table_array = new double[kTableSize]; + for (int i = 0; i < kTableSize; i++) { + double value = pow(2, i / kTableSizeDouble); + + uint64_t bits = BitCast<uint64_t, double>(value); + bits &= (static_cast<uint64_t>(1) << 52) - 1; + double mantissa = BitCast<double, uint64_t>(bits); + + // <just testing> + uint64_t doublebits; + memcpy(&doublebits, &value, sizeof doublebits); + doublebits &= (static_cast<uint64_t>(1) << 52) - 1; + double mantissa2; + memcpy(&mantissa2, &doublebits, sizeof mantissa2); + CHECK_EQ(mantissa, mantissa2); + // </just testing> + + math_exp_log_table_array[i] = mantissa; + } + + math_exp_data_initialized = true; + } + math_exp_data_mutex->Unlock(); +} + + +void ExternalReference::TearDownMathExpData() { + delete[] math_exp_constants_array; + delete[] math_exp_log_table_array; + delete math_exp_data_mutex; } @@ -874,6 +1072,13 @@ ExternalReference ExternalReference::get_date_field_function( } +ExternalReference ExternalReference::get_make_code_young_function( + Isolate* isolate) { + return ExternalReference(Redirect( + isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung))); +} + + ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) { return ExternalReference(isolate->date_cache()->stamp_address()); } @@ -900,6 +1105,20 @@ ExternalReference ExternalReference::compute_output_frames_function( } +ExternalReference ExternalReference::log_enter_external_function( + Isolate* isolate) { + return ExternalReference( + Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal))); +} + + +ExternalReference ExternalReference::log_leave_external_function( + Isolate* isolate) { + return ExternalReference( + Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal))); +} + + ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) { return ExternalReference(isolate->keyed_lookup_cache()->keys_address()); } @@ -969,18 +1188,35 @@ ExternalReference ExternalReference::new_space_allocation_limit_address( } -ExternalReference ExternalReference::handle_scope_level_address() { - return ExternalReference(HandleScope::current_level_address()); +ExternalReference ExternalReference::old_pointer_space_allocation_top_address( + Isolate* isolate) { + return ExternalReference( + isolate->heap()->OldPointerSpaceAllocationTopAddress()); +} + + +ExternalReference ExternalReference::old_pointer_space_allocation_limit_address( + Isolate* isolate) { + return ExternalReference( + isolate->heap()->OldPointerSpaceAllocationLimitAddress()); } -ExternalReference ExternalReference::handle_scope_next_address() { - return ExternalReference(HandleScope::current_next_address()); +ExternalReference ExternalReference::handle_scope_level_address( + Isolate* isolate) { + return ExternalReference(HandleScope::current_level_address(isolate)); } -ExternalReference ExternalReference::handle_scope_limit_address() { - return ExternalReference(HandleScope::current_limit_address()); +ExternalReference ExternalReference::handle_scope_next_address( + Isolate* 
isolate) { + return ExternalReference(HandleScope::current_next_address(isolate)); +} + + +ExternalReference ExternalReference::handle_scope_limit_address( + Isolate* isolate) { + return ExternalReference(HandleScope::current_limit_address(isolate)); } @@ -1018,6 +1254,12 @@ ExternalReference ExternalReference::address_of_one_half() { } +ExternalReference ExternalReference::address_of_minus_one_half() { + return ExternalReference( + reinterpret_cast<void*>(&double_constants.minus_one_half)); +} + + ExternalReference ExternalReference::address_of_minus_zero() { return ExternalReference( reinterpret_cast<void*>(&double_constants.minus_zero)); @@ -1186,12 +1428,45 @@ ExternalReference ExternalReference::math_log_double_function( } +ExternalReference ExternalReference::math_exp_constants(int constant_index) { + ASSERT(math_exp_data_initialized); + return ExternalReference( + reinterpret_cast<void*>(math_exp_constants_array + constant_index)); +} + + +ExternalReference ExternalReference::math_exp_log_table() { + ASSERT(math_exp_data_initialized); + return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array)); +} + + ExternalReference ExternalReference::page_flags(Page* page) { return ExternalReference(reinterpret_cast<Address>(page) + MemoryChunk::kFlagsOffset); } +ExternalReference ExternalReference::ForDeoptEntry(Address entry) { + return ExternalReference(entry); +} + + +double power_helper(double x, double y) { + int y_int = static_cast<int>(y); + if (y == y_int) { + return power_double_int(x, y_int); // Returns 1 if exponent is 0. + } + if (y == 0.5) { + return (isinf(x)) ? V8_INFINITY : fast_sqrt(x + 0.0); // Convert -0 to +0. + } + if (y == -0.5) { + return (isinf(x)) ? 0 : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0. + } + return power_double_double(x, y); +} + + // Helper function to compute x^y, where y is known to be an // integer. Uses binary decomposition to limit the number of // multiplications; see the discussion in "Hacker's Delight" by Henry @@ -1212,7 +1487,8 @@ double power_double_int(double x, int y) { double power_double_double(double x, double y) { -#ifdef __MINGW64_VERSION_MAJOR +#if defined(__MINGW64_VERSION_MAJOR) && \ + (!defined(__MINGW64_VERSION_RC) || __MINGW64_VERSION_RC < 1) // MinGW64 has a custom implementation for pow. This handles certain // special cases that are different. 
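The new power_helper above special-cases exponents of 0.5 and -0.5 instead of calling pow directly, and both branches are careful about IEEE-754 edge cases: adding 0.0 turns -0 into +0 so that the square root of negative zero comes out as +0 (as Math.pow requires, since sqrt(-0.0) is -0.0), and infinite bases are handled explicitly because sqrt(-Infinity) would be NaN while Math.pow(-Infinity, 0.5) must be +Infinity. A small sketch of the y == 0.5 branch:

    #include <cmath>

    double PowHalf(double x) {
      // Math.pow(+/-Infinity, 0.5) is +Infinity, but sqrt(-Infinity) would be NaN.
      if (std::isinf(x)) return INFINITY;
      // -0.0 + 0.0 == +0.0, so this also maps Math.pow(-0, 0.5) to +0.
      return std::sqrt(x + 0.0);
    }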
if ((x == 0.0 || isinf(x)) && isfinite(y)) { @@ -1330,6 +1606,10 @@ void PositionsRecorder::RecordPosition(int pos) { gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false); } #endif + LOG_CODE_EVENT(assembler_->isolate(), + CodeLinePosInfoAddPositionEvent(jit_handler_data_, + assembler_->pc_offset(), + pos)); } @@ -1342,6 +1622,11 @@ void PositionsRecorder::RecordStatementPosition(int pos) { gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true); } #endif + LOG_CODE_EVENT(assembler_->isolate(), + CodeLinePosInfoAddStatementPositionEvent( + jit_handler_data_, + assembler_->pc_offset(), + pos)); } diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h index a0e55cc814..e26b5254df 100644 --- a/deps/v8/src/assembler.h +++ b/deps/v8/src/assembler.h @@ -56,18 +56,81 @@ struct StatsCounter; class AssemblerBase: public Malloced { public: - explicit AssemblerBase(Isolate* isolate); + AssemblerBase(Isolate* isolate, void* buffer, int buffer_size); + virtual ~AssemblerBase(); Isolate* isolate() const { return isolate_; } - int jit_cookie() { return jit_cookie_; } + int jit_cookie() const { return jit_cookie_; } + + bool emit_debug_code() const { return emit_debug_code_; } + void set_emit_debug_code(bool value) { emit_debug_code_ = value; } + + bool predictable_code_size() const { return predictable_code_size_; } + void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + + uint64_t enabled_cpu_features() const { return enabled_cpu_features_; } + void set_enabled_cpu_features(uint64_t features) { + enabled_cpu_features_ = features; + } + bool IsEnabled(CpuFeature f) { + return (enabled_cpu_features_ & (static_cast<uint64_t>(1) << f)) != 0; + } // Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for // cross-snapshotting. static void QuietNaN(HeapObject* nan) { } + int pc_offset() const { return static_cast<int>(pc_ - buffer_); } + + static const int kMinimalBufferSize = 4*KB; + + protected: + // The buffer into which code and relocation info are generated. It could + // either be owned by the assembler or be provided externally. + byte* buffer_; + int buffer_size_; + bool own_buffer_; + + // The program counter, which points into the buffer above and moves forward. + byte* pc_; + private: Isolate* isolate_; int jit_cookie_; + uint64_t enabled_cpu_features_; + bool emit_debug_code_; + bool predictable_code_size_; +}; + + +// Avoids using instructions that vary in size in unpredictable ways between the +// snapshot and the running VM. +class PredictableCodeSizeScope { + public: + PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size); + ~PredictableCodeSizeScope(); + + private: + AssemblerBase* assembler_; + int expected_size_; + int start_offset_; + bool old_value_; +}; + + +// Enable a specified feature within a scope. +class CpuFeatureScope BASE_EMBEDDED { + public: +#ifdef DEBUG + CpuFeatureScope(AssemblerBase* assembler, CpuFeature f); + ~CpuFeatureScope(); + + private: + AssemblerBase* assembler_; + uint64_t old_enabled_; +#else + CpuFeatureScope(AssemblerBase* assembler, CpuFeature f) {} +#endif }; @@ -210,7 +273,14 @@ class RelocInfo BASE_EMBEDDED { // add more as needed // Pseudo-types NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding. - NONE, // never recorded + NONE32, // never recorded 32-bit value + NONE64, // never recorded 64-bit value + CODE_AGE_SEQUENCE, // Not stored in RelocInfo array, used explictly by + // code aging. 
+ FIRST_REAL_RELOC_MODE = CODE_TARGET, + LAST_REAL_RELOC_MODE = CONST_POOL, + FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE, + LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE, LAST_CODE_ENUM = DEBUG_BREAK, LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL, // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding. @@ -224,7 +294,19 @@ class RelocInfo BASE_EMBEDDED { RelocInfo(byte* pc, Mode rmode, intptr_t data, Code* host) : pc_(pc), rmode_(rmode), data_(data), host_(host) { } + RelocInfo(byte* pc, double data64) + : pc_(pc), rmode_(NONE64), data64_(data64), host_(NULL) { + } + static inline bool IsRealRelocMode(Mode mode) { + return mode >= FIRST_REAL_RELOC_MODE && + mode <= LAST_REAL_RELOC_MODE; + } + static inline bool IsPseudoRelocMode(Mode mode) { + ASSERT(!IsRealRelocMode(mode)); + return mode >= FIRST_PSEUDO_RELOC_MODE && + mode <= LAST_PSEUDO_RELOC_MODE; + } static inline bool IsConstructCall(Mode mode) { return mode == CONSTRUCT_CALL; } @@ -234,6 +316,9 @@ class RelocInfo BASE_EMBEDDED { static inline bool IsEmbeddedObject(Mode mode) { return mode == EMBEDDED_OBJECT; } + static inline bool IsRuntimeEntry(Mode mode) { + return mode == RUNTIME_ENTRY; + } // Is the relocation mode affected by GC? static inline bool IsGCRelocMode(Mode mode) { return mode <= LAST_GCED_ENUM; @@ -262,6 +347,12 @@ class RelocInfo BASE_EMBEDDED { static inline bool IsDebugBreakSlot(Mode mode) { return mode == DEBUG_BREAK_SLOT; } + static inline bool IsNone(Mode mode) { + return mode == NONE32 || mode == NONE64; + } + static inline bool IsCodeAgeSequence(Mode mode) { + return mode == CODE_AGE_SEQUENCE; + } static inline int ModeMask(Mode mode) { return 1 << mode; } // Accessors @@ -269,6 +360,7 @@ class RelocInfo BASE_EMBEDDED { void set_pc(byte* pc) { pc_ = pc; } Mode rmode() const { return rmode_; } intptr_t data() const { return data_; } + double data64() const { return data64_; } Code* host() const { return host_; } // Apply a relocation by delta bytes @@ -281,7 +373,7 @@ class RelocInfo BASE_EMBEDDED { // Read/modify the code target in the branch/call instruction // this relocation applies to; - // can only be called if IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY + // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) INLINE(Address target_address()); INLINE(void set_target_address(Address target, WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); @@ -290,11 +382,16 @@ class RelocInfo BASE_EMBEDDED { INLINE(Object** target_object_address()); INLINE(void set_target_object(Object* target, WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); + INLINE(Address target_runtime_entry(Assembler* origin)); + INLINE(void set_target_runtime_entry(Address target, + WriteBarrierMode mode = + UPDATE_WRITE_BARRIER)); INLINE(JSGlobalPropertyCell* target_cell()); INLINE(Handle<JSGlobalPropertyCell> target_cell_handle()); INLINE(void set_target_cell(JSGlobalPropertyCell* cell, WriteBarrierMode mode = UPDATE_WRITE_BARRIER)); - + INLINE(Code* code_age_stub()); + INLINE(void set_code_age_stub(Code* stub)); // Read the address of the word containing the target_address in an // instruction stream. What this means exactly is architecture-independent. @@ -344,10 +441,16 @@ class RelocInfo BASE_EMBEDDED { // debugger. INLINE(bool IsPatchedDebugBreakSlotSequence()); +#ifdef DEBUG + // Check whether the given code contains relocation information that + // either is position-relative or movable by the garbage collector. 
+ static bool RequiresRelocation(const CodeDesc& desc); +#endif + #ifdef ENABLE_DISASSEMBLER // Printing static const char* RelocModeName(Mode rmode); - void Print(FILE* out); + void Print(Isolate* isolate, FILE* out); #endif // ENABLE_DISASSEMBLER #ifdef VERIFY_HEAP void Verify(); @@ -366,7 +469,10 @@ class RelocInfo BASE_EMBEDDED { // comment). byte* pc_; Mode rmode_; - intptr_t data_; + union { + intptr_t data_; + double data64_; + }; Code* host_; // Code and Embedded Object pointers on some platforms are stored split // across two consecutive 32-bit instructions. Heap management @@ -487,6 +593,7 @@ class RelocIterator: public Malloced { byte* pos_; byte* end_; + byte* code_age_sequence_; RelocInfo rinfo_; bool done_; int mode_mask_; @@ -546,6 +653,8 @@ class ExternalReference BASE_EMBEDDED { }; static void SetUp(); + static void InitializeMathExpData(); + static void TearDownMathExpData(); typedef void* ExternalReferenceRedirector(void* original, Type type); @@ -595,10 +704,16 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference get_date_field_function(Isolate* isolate); static ExternalReference date_cache_stamp(Isolate* isolate); + static ExternalReference get_make_code_young_function(Isolate* isolate); + // Deoptimization support. static ExternalReference new_deoptimizer_function(Isolate* isolate); static ExternalReference compute_output_frames_function(Isolate* isolate); + // Log support. + static ExternalReference log_enter_external_function(Isolate* isolate); + static ExternalReference log_leave_external_function(Isolate* isolate); + // Static data in the keyed lookup cache. static ExternalReference keyed_lookup_cache_keys(Isolate* isolate); static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate); @@ -634,6 +749,10 @@ class ExternalReference BASE_EMBEDDED { // Used for fast allocation in generated code. static ExternalReference new_space_allocation_top_address(Isolate* isolate); static ExternalReference new_space_allocation_limit_address(Isolate* isolate); + static ExternalReference old_pointer_space_allocation_top_address( + Isolate* isolate); + static ExternalReference old_pointer_space_allocation_limit_address( + Isolate* isolate); static ExternalReference double_fp_operation(Token::Value operation, Isolate* isolate); @@ -641,9 +760,9 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference power_double_double_function(Isolate* isolate); static ExternalReference power_double_int_function(Isolate* isolate); - static ExternalReference handle_scope_next_address(); - static ExternalReference handle_scope_limit_address(); - static ExternalReference handle_scope_level_address(); + static ExternalReference handle_scope_next_address(Isolate* isolate); + static ExternalReference handle_scope_limit_address(Isolate* isolate); + static ExternalReference handle_scope_level_address(Isolate* isolate); static ExternalReference scheduled_exception_address(Isolate* isolate); static ExternalReference address_of_pending_message_obj(Isolate* isolate); @@ -653,6 +772,7 @@ class ExternalReference BASE_EMBEDDED { // Static variables containing common double constants. 
static ExternalReference address_of_min_int(); static ExternalReference address_of_one_half(); + static ExternalReference address_of_minus_one_half(); static ExternalReference address_of_minus_zero(); static ExternalReference address_of_zero(); static ExternalReference address_of_uint8_max_value(); @@ -665,8 +785,15 @@ class ExternalReference BASE_EMBEDDED { static ExternalReference math_tan_double_function(Isolate* isolate); static ExternalReference math_log_double_function(Isolate* isolate); + static ExternalReference math_exp_constants(int constant_index); + static ExternalReference math_exp_log_table(); + static ExternalReference page_flags(Page* page); + static ExternalReference ForDeoptEntry(Address entry); + + static ExternalReference cpu_features(); + Address address() const {return reinterpret_cast<Address>(address_);} #ifdef ENABLE_DEBUGGER_SUPPORT @@ -760,6 +887,7 @@ class PositionsRecorder BASE_EMBEDDED { #ifdef ENABLE_GDB_JIT_INTERFACE gdbjit_lineinfo_ = NULL; #endif + jit_handler_data_ = NULL; } #ifdef ENABLE_GDB_JIT_INTERFACE @@ -779,7 +907,15 @@ class PositionsRecorder BASE_EMBEDDED { return lineinfo; } #endif + void AttachJITHandlerData(void* user_data) { + jit_handler_data_ = user_data; + } + void* DetachJITHandlerData() { + void* old_data = jit_handler_data_; + jit_handler_data_ = NULL; + return old_data; + } // Set current position to pos. void RecordPosition(int pos); @@ -802,6 +938,9 @@ class PositionsRecorder BASE_EMBEDDED { GDBJITLineInfo* gdbjit_lineinfo_; #endif + // Currently jit_handler_data_ is used to store JITHandler-specific data + // over the lifetime of a PositionsRecorder + void* jit_handler_data_; friend class PreservePositionScope; DISALLOW_COPY_AND_ASSIGN(PositionsRecorder); @@ -866,6 +1005,7 @@ inline int NumberOfBitsSet(uint32_t x) { bool EvalComparison(Token::Value op, double op1, double op2); // Computes pow(x, y) with the special cases in the spec for Math.pow. +double power_helper(double x, double y); double power_double_int(double x, int y); double power_double_double(double x, double y); diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc index 52990b8fee..712bfd1b9d 100644 --- a/deps/v8/src/ast.cc +++ b/deps/v8/src/ast.cc @@ -29,6 +29,7 @@ #include <math.h> // For isfinite. #include "builtins.h" +#include "code-stubs.h" #include "conversions.h" #include "hashmap.h" #include "parser.h" @@ -96,13 +97,14 @@ VariableProxy::VariableProxy(Isolate* isolate, position_(position), interface_(interface) { // Names must be canonicalized for fast equality checks. - ASSERT(name->IsSymbol()); + ASSERT(name->IsInternalizedString()); } void VariableProxy::BindTo(Variable* var) { ASSERT(var_ == NULL); // must be bound only once ASSERT(var != NULL); // must bind + ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface())); ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name())); // Ideally CONST-ness should match. 
However, this is very hard to achieve // because we don't know the exact semantics of conflicting (const and @@ -180,8 +182,8 @@ ObjectLiteral::Property::Property(Literal* key, key_ = key; value_ = value; Object* k = *key->handle(); - if (k->IsSymbol() && - isolate->heap()->Proto_symbol()->Equals(String::cast(k))) { + if (k->IsInternalizedString() && + isolate->heap()->proto_string()->Equals(String::cast(k))) { kind_ = PROTOTYPE; } else if (value_->AsMaterializedLiteral() != NULL) { kind_ = MATERIALIZED_LITERAL; @@ -411,12 +413,14 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle, is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this); receiver_types_.Clear(); if (key()->IsPropertyName()) { - if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) { + ArrayLengthStub array_stub(Code::LOAD_IC); + FunctionPrototypeStub proto_stub(Code::LOAD_IC); + StringLengthStub string_stub(Code::LOAD_IC, false); + if (oracle->LoadIsStub(this, &array_stub)) { is_array_length_ = true; - } else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) { + } else if (oracle->LoadIsStub(this, &string_stub)) { is_string_length_ = true; - } else if (oracle->LoadIsBuiltin(this, - Builtins::kLoadIC_FunctionPrototype)) { + } else if (oracle->LoadIsStub(this, &proto_stub)) { is_function_prototype_ = true; } else { Literal* lit_key = key()->AsLiteral(); @@ -429,7 +433,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle, } else if (is_monomorphic_) { receiver_types_.Add(oracle->LoadMonomorphicReceiverType(this), zone); - } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) { + } else if (oracle->LoadIsPolymorphic(this)) { receiver_types_.Reserve(kMaxKeyedPolymorphism, zone); oracle->CollectKeyedReceiverTypes(PropertyFeedbackId(), &receiver_types_); } @@ -451,7 +455,7 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle, } else if (is_monomorphic_) { // Record receiver type for monomorphic keyed stores. receiver_types_.Add(oracle->StoreMonomorphicReceiverType(id), zone); - } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) { + } else if (oracle->StoreIsPolymorphic(id)) { receiver_types_.Reserve(kMaxKeyedPolymorphism, zone); oracle->CollectKeyedReceiverTypes(id, &receiver_types_); } @@ -467,7 +471,7 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle, // Record receiver type for monomorphic keyed stores. 
receiver_types_.Add( oracle->StoreMonomorphicReceiverType(id), zone); - } else if (oracle->StoreIsMegamorphicWithTypeInfo(id)) { + } else if (oracle->StoreIsPolymorphic(id)) { receiver_types_.Reserve(kMaxKeyedPolymorphism, zone); oracle->CollectKeyedReceiverTypes(id, &receiver_types_); } @@ -476,11 +480,12 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle, void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) { TypeInfo info = oracle->SwitchType(this); + if (info.IsUninitialized()) info = TypeInfo::Unknown(); if (info.IsSmi()) { compare_type_ = SMI_ONLY; - } else if (info.IsSymbol()) { - compare_type_ = SYMBOL_ONLY; - } else if (info.IsNonSymbol()) { + } else if (info.IsInternalizedString()) { + compare_type_ = NAME_ONLY; + } else if (info.IsNonInternalizedString()) { compare_type_ = STRING_ONLY; } else if (info.IsNonPrimitive()) { compare_type_ = OBJECT_ONLY; @@ -600,18 +605,7 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) { is_monomorphic_ = oracle->CallNewIsMonomorphic(this); if (is_monomorphic_) { target_ = oracle->GetCallNewTarget(this); - } -} - - -void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) { - TypeInfo info = oracle->CompareType(this); - if (info.IsSmi()) { - compare_type_ = SMI_ONLY; - } else if (info.IsNonPrimitive()) { - compare_type_ = OBJECT_ONLY; - } else { - ASSERT(compare_type_ == NONE); + elements_kind_ = oracle->GetCallNewElementsKind(this); } } @@ -626,14 +620,6 @@ void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) { // ---------------------------------------------------------------------------- // Implementation of AstVisitor -bool AstVisitor::CheckStackOverflow() { - if (stack_overflow_) return true; - StackLimitCheck check(isolate_); - if (!check.HasOverflowed()) return false; - return (stack_overflow_ = true); -} - - void AstVisitor::VisitDeclarations(ZoneList<Declaration*>* declarations) { for (int i = 0; i < declarations->length(); i++) { Visit(declarations->at(i)); @@ -1021,11 +1007,6 @@ CaseClause::CaseClause(Isolate* isolate, add_flag(kDontInline); \ add_flag(kDontSelfOptimize); \ } -#define DONT_INLINE_NODE(NodeType) \ - void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ - increase_node_count(); \ - add_flag(kDontInline); \ - } #define DONT_SELFOPTIMIZE_NODE(NodeType) \ void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \ increase_node_count(); \ @@ -1052,8 +1033,10 @@ REGULAR_NODE(ReturnStatement) REGULAR_NODE(SwitchStatement) REGULAR_NODE(Conditional) REGULAR_NODE(Literal) +REGULAR_NODE(ArrayLiteral) REGULAR_NODE(ObjectLiteral) REGULAR_NODE(RegExpLiteral) +REGULAR_NODE(FunctionLiteral) REGULAR_NODE(Assignment) REGULAR_NODE(Throw) REGULAR_NODE(Property) @@ -1070,25 +1053,20 @@ REGULAR_NODE(CallNew) // LOOKUP variables only result from constructs that cannot be inlined anyway. REGULAR_NODE(VariableProxy) -// We currently do not optimize any modules. Note in particular, that module -// instance objects associated with ModuleLiterals are allocated during -// scope resolution, and references to them are embedded into the code. -// That code may hence neither be cached nor re-compiled. +// We currently do not optimize any modules. 
DONT_OPTIMIZE_NODE(ModuleDeclaration) DONT_OPTIMIZE_NODE(ImportDeclaration) DONT_OPTIMIZE_NODE(ExportDeclaration) DONT_OPTIMIZE_NODE(ModuleVariable) DONT_OPTIMIZE_NODE(ModulePath) DONT_OPTIMIZE_NODE(ModuleUrl) +DONT_OPTIMIZE_NODE(ModuleStatement) DONT_OPTIMIZE_NODE(WithStatement) DONT_OPTIMIZE_NODE(TryCatchStatement) DONT_OPTIMIZE_NODE(TryFinallyStatement) DONT_OPTIMIZE_NODE(DebuggerStatement) DONT_OPTIMIZE_NODE(SharedFunctionInfoLiteral) -DONT_INLINE_NODE(ArrayLiteral) // TODO(1322): Allow materialized literals. -DONT_INLINE_NODE(FunctionLiteral) - DONT_SELFOPTIMIZE_NODE(DoWhileStatement) DONT_SELFOPTIMIZE_NODE(WhileStatement) DONT_SELFOPTIMIZE_NODE(ForStatement) @@ -1103,8 +1081,9 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { // optimize them. add_flag(kDontInline); } else if (node->function()->intrinsic_type == Runtime::INLINE && - (node->name()->IsEqualTo(CStrVector("_ArgumentsLength")) || - node->name()->IsEqualTo(CStrVector("_Arguments")))) { + (node->name()->IsOneByteEqualTo( + STATIC_ASCII_VECTOR("_ArgumentsLength")) || + node->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_Arguments")))) { // Don't inline the %_ArgumentsLength or %_Arguments because their // implementation will not work. There is no stack frame to get them // from. @@ -1114,7 +1093,6 @@ void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) { #undef REGULAR_NODE #undef DONT_OPTIMIZE_NODE -#undef DONT_INLINE_NODE #undef DONT_SELFOPTIMIZE_NODE #undef DONT_CACHE_NODE diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h index 802ac65962..5debc74ebb 100644 --- a/deps/v8/src/ast.h +++ b/deps/v8/src/ast.h @@ -75,6 +75,7 @@ namespace internal { #define STATEMENT_NODE_LIST(V) \ V(Block) \ + V(ModuleStatement) \ V(ExpressionStatement) \ V(EmptyStatement) \ V(IfStatement) \ @@ -522,7 +523,7 @@ class ModuleDeclaration: public Declaration { ModuleDeclaration(VariableProxy* proxy, Module* module, Scope* scope) - : Declaration(proxy, LET, scope), + : Declaration(proxy, MODULE, scope), module_(module) { } @@ -645,6 +646,25 @@ class ModuleUrl: public Module { }; +class ModuleStatement: public Statement { + public: + DECLARE_NODE_TYPE(ModuleStatement) + + VariableProxy* proxy() const { return proxy_; } + Block* body() const { return body_; } + + protected: + ModuleStatement(VariableProxy* proxy, Block* body) + : proxy_(proxy), + body_(body) { + } + + private: + VariableProxy* proxy_; + Block* body_; +}; + + class IterationStatement: public BreakableStatement { public: // Type testing & conversion. 
@@ -948,7 +968,7 @@ class CaseClause: public ZoneObject { TypeFeedbackId CompareId() { return compare_id_; } void RecordTypeFeedback(TypeFeedbackOracle* oracle); bool IsSmiCompare() { return compare_type_ == SMI_ONLY; } - bool IsSymbolCompare() { return compare_type_ == SYMBOL_ONLY; } + bool IsNameCompare() { return compare_type_ == NAME_ONLY; } bool IsStringCompare() { return compare_type_ == STRING_ONLY; } bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; } @@ -960,7 +980,7 @@ class CaseClause: public ZoneObject { enum CompareTypeFeedback { NONE, SMI_ONLY, - SYMBOL_ONLY, + NAME_ONLY, STRING_ONLY, OBJECT_ONLY }; @@ -1151,7 +1171,7 @@ class Literal: public Expression { DECLARE_NODE_TYPE(Literal) virtual bool IsPropertyName() { - if (handle_->IsSymbol()) { + if (handle_->IsInternalizedString()) { uint32_t ignored; return !String::cast(*handle_)->AsArrayIndex(&ignored); } @@ -1163,8 +1183,8 @@ class Literal: public Expression { return Handle<String>::cast(handle_); } - virtual bool ToBooleanIsTrue() { return handle_->ToBoolean()->IsTrue(); } - virtual bool ToBooleanIsFalse() { return handle_->ToBoolean()->IsFalse(); } + virtual bool ToBooleanIsTrue() { return handle_->BooleanValue(); } + virtual bool ToBooleanIsFalse() { return !handle_->BooleanValue(); } // Identity testers. bool IsNull() const { @@ -1417,7 +1437,7 @@ class VariableProxy: public Expression { void MarkAsTrivial() { is_trivial_ = true; } void MarkAsLValue() { is_lvalue_ = true; } - // Bind this proxy to the variable var. + // Bind this proxy to the variable var. Interfaces must match. void BindTo(Variable* var); protected: @@ -1512,6 +1532,22 @@ class Call: public Expression { virtual SmallMapList* GetReceiverTypes() { return &receiver_types_; } virtual bool IsMonomorphic() { return is_monomorphic_; } CheckType check_type() const { return check_type_; } + + void set_string_check(Handle<JSObject> holder) { + holder_ = holder; + check_type_ = STRING_CHECK; + } + + void set_number_check(Handle<JSObject> holder) { + holder_ = holder; + check_type_ = NUMBER_CHECK; + } + + void set_map_check() { + holder_ = Handle<JSObject>::null(); + check_type_ = RECEIVER_MAP_CHECK; + } + Handle<JSFunction> target() { return target_; } // A cache for the holder, set as a side effect of computing the target of the @@ -1575,6 +1611,7 @@ class CallNew: public Expression { Handle<JSFunction> target() { return target_; } BailoutId ReturnId() const { return return_id_; } + ElementsKind elements_kind() const { return elements_kind_; } protected: CallNew(Isolate* isolate, @@ -1586,7 +1623,8 @@ class CallNew: public Expression { arguments_(arguments), pos_(pos), is_monomorphic_(false), - return_id_(GetNextId(isolate)) { } + return_id_(GetNextId(isolate)), + elements_kind_(GetInitialFastElementsKind()) { } private: Expression* expression_; @@ -1597,6 +1635,7 @@ class CallNew: public Expression { Handle<JSFunction> target_; const BailoutId return_id_; + ElementsKind elements_kind_; }; @@ -1777,9 +1816,6 @@ class CompareOperation: public Expression { // Type feedback information. TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); } - void RecordTypeFeedback(TypeFeedbackOracle* oracle); - bool IsSmiCompare() { return compare_type_ == SMI_ONLY; } - bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; } // Match special cases. 
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check); @@ -1796,8 +1832,7 @@ class CompareOperation: public Expression { op_(op), left_(left), right_(right), - pos_(pos), - compare_type_(NONE) { + pos_(pos) { ASSERT(Token::IsCompareOp(op)); } @@ -1806,9 +1841,6 @@ class CompareOperation: public Expression { Expression* left_; Expression* right_; int pos_; - - enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY }; - CompareTypeFeedback compare_type_; }; @@ -2479,40 +2511,51 @@ inline ModuleVariable::ModuleVariable(VariableProxy* proxy) class AstVisitor BASE_EMBEDDED { public: - AstVisitor() : isolate_(Isolate::Current()), stack_overflow_(false) { } + AstVisitor() {} virtual ~AstVisitor() { } // Stack overflow check and dynamic dispatch. - void Visit(AstNode* node) { if (!CheckStackOverflow()) node->Accept(this); } + virtual void Visit(AstNode* node) = 0; // Iteration left-to-right. virtual void VisitDeclarations(ZoneList<Declaration*>* declarations); virtual void VisitStatements(ZoneList<Statement*>* statements); virtual void VisitExpressions(ZoneList<Expression*>* expressions); - // Stack overflow tracking support. - bool HasStackOverflow() const { return stack_overflow_; } - bool CheckStackOverflow(); - - // If a stack-overflow exception is encountered when visiting a - // node, calling SetStackOverflow will make sure that the visitor - // bails out without visiting more nodes. - void SetStackOverflow() { stack_overflow_ = true; } - void ClearStackOverflow() { stack_overflow_ = false; } - // Individual AST nodes. #define DEF_VISIT(type) \ virtual void Visit##type(type* node) = 0; AST_NODE_LIST(DEF_VISIT) #undef DEF_VISIT +}; - protected: - Isolate* isolate() { return isolate_; } - private: - Isolate* isolate_; - bool stack_overflow_; -}; +#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS() \ +public: \ + virtual void Visit(AstNode* node) { \ + if (!CheckStackOverflow()) node->Accept(this); \ + } \ + \ + void SetStackOverflow() { stack_overflow_ = true; } \ + void ClearStackOverflow() { stack_overflow_ = false; } \ + bool HasStackOverflow() const { return stack_overflow_; } \ + \ + bool CheckStackOverflow() { \ + if (stack_overflow_) return true; \ + StackLimitCheck check(isolate_); \ + if (!check.HasOverflowed()) return false; \ + return (stack_overflow_ = true); \ + } \ + \ +private: \ + void InitializeAstVisitor() { \ + isolate_ = Isolate::Current(); \ + stack_overflow_ = false; \ + } \ + Isolate* isolate() { return isolate_; } \ + \ + Isolate* isolate_; \ + bool stack_overflow_ // ---------------------------------------------------------------------------- @@ -2647,6 +2690,11 @@ class AstNodeFactory BASE_EMBEDDED { STATEMENT_WITH_LABELS(SwitchStatement) #undef STATEMENT_WITH_LABELS + ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) { + ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body); + VISIT_AND_RETURN(ModuleStatement, stmt) + } + ExpressionStatement* NewExpressionStatement(Expression* expression) { ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression); VISIT_AND_RETURN(ExpressionStatement, stmt) diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h index 1f0c44a67d..ebca91d27d 100644 --- a/deps/v8/src/atomicops.h +++ b/deps/v8/src/atomicops.h @@ -58,7 +58,7 @@ typedef int32_t Atomic32; #ifdef V8_HOST_ARCH_64_BIT // We need to be able to go between Atomic64 and AtomicWord implicitly. This // means Atomic64 and AtomicWord should be the same type on 64-bit. 
-#if defined(__APPLE__) +#if defined(__ILP32__) || defined(__APPLE__) // MacOS is an exception to the implicit conversion rule above, // because it uses long for intptr_t. typedef int64_t Atomic64; @@ -151,7 +151,9 @@ Atomic64 Release_Load(volatile const Atomic64* ptr); } } // namespace v8::internal // Include our platform specific implementation. -#if defined(_MSC_VER) && \ +#if defined(THREAD_SANITIZER) +#include "atomicops_internals_tsan.h" +#elif defined(_MSC_VER) && \ (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)) #include "atomicops_internals_x86_msvc.h" #elif defined(__APPLE__) && \ diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h new file mode 100644 index 0000000000..6559336ad9 --- /dev/null +++ b/deps/v8/src/atomicops_internals_tsan.h @@ -0,0 +1,335 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +// This file is an internal atomic implementation for compiler-based +// ThreadSanitizer. Use base/atomicops.h instead. + +#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_ +#define V8_ATOMICOPS_INTERNALS_TSAN_H_ + +// This struct is not part of the public API of this module; clients may not +// use it. (However, it's exported via BASE_EXPORT because clients implicitly +// do use it at link time by inlining these functions.) +// Features of this x86. Values may not be correct before main() is run, +// but are set conservatively. +struct AtomicOps_x86CPUFeatureStruct { + bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence + // after acquire compare-and-swap. + bool has_sse2; // Processor has SSE2. 
+}; +extern struct AtomicOps_x86CPUFeatureStruct + AtomicOps_Internalx86CPUFeatures; + +#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") + +namespace v8 { +namespace internal { + +#ifndef TSAN_INTERFACE_ATOMIC_H +#define TSAN_INTERFACE_ATOMIC_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef char __tsan_atomic8; +typedef short __tsan_atomic16; // NOLINT +typedef int __tsan_atomic32; +typedef long __tsan_atomic64; // NOLINT + +typedef enum { + __tsan_memory_order_relaxed = (1 << 0) + 100500, + __tsan_memory_order_consume = (1 << 1) + 100500, + __tsan_memory_order_acquire = (1 << 2) + 100500, + __tsan_memory_order_release = (1 << 3) + 100500, + __tsan_memory_order_acq_rel = (1 << 4) + 100500, + __tsan_memory_order_seq_cst = (1 << 5) + 100500, +} __tsan_memory_order; + +__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a, + __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a, + __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a, + __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a, + __tsan_memory_order mo); + +void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v, + __tsan_memory_order mo); +void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v, + __tsan_memory_order mo); +void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v, + __tsan_memory_order mo); +void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v, + __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile 
__tsan_atomic8* a, + __tsan_atomic8 v, __tsan_memory_order mo); +__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a, + __tsan_atomic16 v, __tsan_memory_order mo); +__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a, + __tsan_atomic32 v, __tsan_memory_order mo); +__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a, + __tsan_atomic64 v, __tsan_memory_order mo); + +int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a, + __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo); +int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a, + __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo); +int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a, + __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo); +int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a, + __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo); + +int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a, + __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo); +int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a, + __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo); +int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a, + __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo); +int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a, + __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo); + +void __tsan_atomic_thread_fence(__tsan_memory_order mo); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // #ifndef TSAN_INTERFACE_ATOMIC_H + +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_relaxed); + return cmp; +} + +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_relaxed); +} + +inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_acquire); +} + +inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr, + Atomic32 new_value) { + return __tsan_atomic32_exchange(ptr, new_value, + __tsan_memory_order_release); +} + +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return increment + __tsan_atomic32_fetch_add(ptr, increment, + __tsan_memory_order_relaxed); +} + +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, + Atomic32 increment) { + return increment + __tsan_atomic32_fetch_add(ptr, increment, + __tsan_memory_order_acq_rel); +} + +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_acquire); + return cmp; +} + +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, + Atomic32 old_value, + Atomic32 new_value) { + Atomic32 cmp = old_value; + __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_release); + return cmp; +} + +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); +} + +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 
value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { + __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); +} + +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { + return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { + return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); +} + +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); + return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_relaxed); + return cmp; +} + +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire); +} + +inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr, + Atomic64 new_value) { + return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release); +} + +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return increment + __tsan_atomic64_fetch_add(ptr, increment, + __tsan_memory_order_relaxed); +} + +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, + Atomic64 increment) { + return increment + __tsan_atomic64_fetch_add(ptr, increment, + __tsan_memory_order_acq_rel); +} + +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); +} + +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed); + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { + __tsan_atomic64_store(ptr, value, __tsan_memory_order_release); +} + +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { + return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { + return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire); +} + +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); + return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed); +} + +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_acquire); + return cmp; +} + +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, + Atomic64 old_value, + Atomic64 new_value) { + Atomic64 cmp = old_value; + __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value, + __tsan_memory_order_release); + return cmp; +} + +inline void MemoryBarrier() { + __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); +} + +} // namespace internal +} // namespace v8 + +#undef ATOMICOPS_COMPILER_BARRIER + +#endif // 
V8_ATOMICOPS_INTERNALS_TSAN_H_ diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc index a368eefe76..58b2ad0387 100644 --- a/deps/v8/src/bootstrapper.cc +++ b/deps/v8/src/bootstrapper.cc @@ -63,8 +63,9 @@ NativesExternalStringResource::NativesExternalStringResource( } -Bootstrapper::Bootstrapper() - : nesting_(0), +Bootstrapper::Bootstrapper(Isolate* isolate) + : isolate_(isolate), + nesting_(0), extensions_cache_(Script::TYPE_EXTENSION), delete_these_non_arrays_on_tear_down_(NULL), delete_these_arrays_on_tear_down_(NULL) { @@ -73,9 +74,7 @@ Bootstrapper::Bootstrapper() Handle<String> Bootstrapper::NativesSourceLookup(int index) { ASSERT(0 <= index && index < Natives::GetBuiltinsCount()); - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - Heap* heap = isolate->heap(); + Heap* heap = isolate_->heap(); if (heap->natives_source_cache()->get(index)->IsUndefined()) { // We can use external strings for the natives. Vector<const char> source = Natives::GetRawScriptSource(index); @@ -84,10 +83,11 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) { source.start(), source.length()); Handle<String> source_code = - factory->NewExternalStringFromAscii(resource); + isolate_->factory()->NewExternalStringFromAscii(resource); heap->natives_source_cache()->set(index, *source_code); } - Handle<Object> cached_source(heap->natives_source_cache()->get(index)); + Handle<Object> cached_source(heap->natives_source_cache()->get(index), + isolate_); return Handle<String>::cast(cached_source); } @@ -147,23 +147,13 @@ class Genesis BASE_EMBEDDED { v8::ExtensionConfiguration* extensions); ~Genesis() { } - Handle<Context> result() { return result_; } - - Genesis* previous() { return previous_; } - Isolate* isolate() const { return isolate_; } Factory* factory() const { return isolate_->factory(); } Heap* heap() const { return isolate_->heap(); } - private: - Handle<Context> native_context_; - Isolate* isolate_; - - // There may be more than one active genesis object: When GC is - // triggered during environment creation there may be weak handle - // processing callbacks which may create new environments. - Genesis* previous_; + Handle<Context> result() { return result_; } + private: Handle<Context> native_context() { return native_context_; } // Creates some basic objects. Used for creating a context from scratch. @@ -205,6 +195,9 @@ class Genesis BASE_EMBEDDED { // Used for creating a context from scratch. void InstallNativeFunctions(); void InstallExperimentalNativeFunctions(); + Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins, + const char* name, + ElementsKind elements_kind); bool InstallNatives(); bool InstallExperimentalNatives(); void InstallBuiltinFunctionIds(); @@ -230,9 +223,11 @@ class Genesis BASE_EMBEDDED { // provided. 
static bool InstallExtensions(Handle<Context> native_context, v8::ExtensionConfiguration* extensions); - static bool InstallExtension(const char* name, + static bool InstallExtension(Isolate* isolate, + const char* name, ExtensionStates* extension_states); - static bool InstallExtension(v8::RegisteredExtension* current, + static bool InstallExtension(Isolate* isolate, + v8::RegisteredExtension* current, ExtensionStates* extension_states); static void InstallSpecialObjects(Handle<Context> native_context); bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins); @@ -268,15 +263,20 @@ class Genesis BASE_EMBEDDED { static bool CompileBuiltin(Isolate* isolate, int index); static bool CompileExperimentalBuiltin(Isolate* isolate, int index); - static bool CompileNative(Vector<const char> name, Handle<String> source); - static bool CompileScriptCached(Vector<const char> name, + static bool CompileNative(Isolate* isolate, + Vector<const char> name, + Handle<String> source); + static bool CompileScriptCached(Isolate* isolate, + Vector<const char> name, Handle<String> source, SourceCodeCache* cache, v8::Extension* extension, Handle<Context> top_context, bool use_runtime_context); + Isolate* isolate_; Handle<Context> result_; + Handle<Context> native_context_; // Function instance maps. Function literal maps are created initially with // a read only prototype for the processing of JS builtins. Later the function @@ -298,14 +298,13 @@ void Bootstrapper::Iterate(ObjectVisitor* v) { Handle<Context> Bootstrapper::CreateEnvironment( - Isolate* isolate, Handle<Object> global_object, v8::Handle<v8::ObjectTemplate> global_template, v8::ExtensionConfiguration* extensions) { - HandleScope scope; - Handle<Context> env; - Genesis genesis(isolate, global_object, global_template, extensions); - env = genesis.result(); + HandleScope scope(isolate_); + Genesis genesis(isolate_, global_object, global_template, extensions); + Handle<Object> context(isolate_->global_handles()->Create(*genesis.result())); + Handle<Context> env = Handle<Context>::cast(context); if (!env.is_null()) { if (InstallExtensions(env, extensions)) { return env; @@ -353,11 +352,11 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target, bool is_ecma_native) { Isolate* isolate = target->GetIsolate(); Factory* factory = isolate->factory(); - Handle<String> symbol = factory->LookupAsciiSymbol(name); + Handle<String> internalized_name = factory->InternalizeUtf8String(name); Handle<Code> call_code = Handle<Code>(isolate->builtins()->builtin(call)); Handle<JSFunction> function = prototype.is_null() ? - factory->NewFunctionWithoutPrototype(symbol, call_code) : - factory->NewFunctionWithPrototype(symbol, + factory->NewFunctionWithoutPrototype(internalized_name, call_code) : + factory->NewFunctionWithPrototype(internalized_name, type, instance_size, prototype, @@ -372,9 +371,9 @@ static Handle<JSFunction> InstallFunction(Handle<JSObject> target, } CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - target, symbol, function, attributes)); + target, internalized_name, function, attributes)); if (is_ecma_native) { - function->shared()->set_instance_class_name(*symbol); + function->shared()->set_instance_class_name(*internalized_name); } function->shared()->set_native(true); return function; @@ -400,19 +399,19 @@ void Genesis::SetFunctionInstanceDescriptor( map->set_instance_descriptors(*descriptors); { // Add length. 
- CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs); + CallbacksDescriptor d(*factory()->length_string(), *length, attribs); map->AppendDescriptor(&d, witness); } { // Add name. - CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs); + CallbacksDescriptor d(*factory()->name_string(), *name, attribs); map->AppendDescriptor(&d, witness); } { // Add arguments. - CallbacksDescriptor d(*factory()->arguments_symbol(), *args, attribs); + CallbacksDescriptor d(*factory()->arguments_string(), *args, attribs); map->AppendDescriptor(&d, witness); } { // Add caller. - CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs); + CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs); map->AppendDescriptor(&d, witness); } if (prototypeMode != DONT_ADD_PROTOTYPE) { @@ -420,7 +419,7 @@ void Genesis::SetFunctionInstanceDescriptor( if (prototypeMode == ADD_WRITEABLE_PROTOTYPE) { attribs = static_cast<PropertyAttributes>(attribs & ~READ_ONLY); } - CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs); + CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs); map->AppendDescriptor(&d, witness); } } @@ -465,7 +464,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) { Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); - Handle<String> object_name = Handle<String>(heap->Object_symbol()); + Handle<String> object_name = Handle<String>(heap->Object_string()); { // --- O b j e c t --- Handle<JSFunction> object_fun = @@ -478,19 +477,35 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) { native_context()->set_object_function(*object_fun); // Allocate a new prototype for the object function. - Handle<JSObject> prototype = factory->NewJSObject( - isolate->object_function(), - TENURED); + Handle<Map> object_prototype_map = + factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); + Handle<DescriptorArray> prototype_descriptors( + factory->NewDescriptorArray(0, 1)); + DescriptorArray::WhitenessWitness witness(*prototype_descriptors); + + Handle<Foreign> object_prototype( + factory->NewForeign(&Accessors::ObjectPrototype)); + PropertyAttributes attribs = static_cast<PropertyAttributes>(DONT_ENUM); + object_prototype_map->set_instance_descriptors(*prototype_descriptors); + + { // Add __proto__. + CallbacksDescriptor d(heap->proto_string(), *object_prototype, attribs); + object_prototype_map->AppendDescriptor(&d, witness); + } + Handle<JSObject> prototype = factory->NewJSObjectFromMap( + object_prototype_map, + TENURED); native_context()->set_initial_object_prototype(*prototype); SetPrototype(object_fun, prototype); } // Allocate the empty function as the prototype for function ECMAScript // 262 15.3.4. 
- Handle<String> symbol = factory->LookupAsciiSymbol("Empty"); + Handle<String> empty_string = + factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty")); Handle<JSFunction> empty_function = - factory->NewFunctionWithoutPrototype(symbol, CLASSIC_MODE); + factory->NewFunctionWithoutPrototype(empty_string, CLASSIC_MODE); // --- E m p t y --- Handle<Code> code = @@ -498,7 +513,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) { Builtins::kEmptyFunction)); empty_function->set_code(*code); empty_function->shared()->set_code(*code); - Handle<String> source = factory->NewStringFromAscii(CStrVector("() {}")); + Handle<String> source = + factory->NewStringFromOneByte(STATIC_ASCII_VECTOR("() {}")); Handle<Script> script = factory->NewScript(source); script->set_type(Smi::FromInt(Script::TYPE_NATIVE)); empty_function->shared()->set_script(*script); @@ -541,19 +557,19 @@ void Genesis::SetStrictFunctionInstanceDescriptor( map->set_instance_descriptors(*descriptors); { // Add length. - CallbacksDescriptor d(*factory()->length_symbol(), *length, attribs); + CallbacksDescriptor d(*factory()->length_string(), *length, attribs); map->AppendDescriptor(&d, witness); } { // Add name. - CallbacksDescriptor d(*factory()->name_symbol(), *name, attribs); + CallbacksDescriptor d(*factory()->name_string(), *name, attribs); map->AppendDescriptor(&d, witness); } { // Add arguments. - CallbacksDescriptor d(*factory()->arguments_symbol(), *arguments, attribs); + CallbacksDescriptor d(*factory()->arguments_string(), *arguments, attribs); map->AppendDescriptor(&d, witness); } { // Add caller. - CallbacksDescriptor d(*factory()->caller_symbol(), *caller, attribs); + CallbacksDescriptor d(*factory()->caller_string(), *caller, attribs); map->AppendDescriptor(&d, witness); } if (prototypeMode != DONT_ADD_PROTOTYPE) { @@ -561,7 +577,7 @@ void Genesis::SetStrictFunctionInstanceDescriptor( if (prototypeMode != ADD_WRITEABLE_PROTOTYPE) { attribs = static_cast<PropertyAttributes>(attribs | READ_ONLY); } - CallbacksDescriptor d(*factory()->prototype_symbol(), *prototype, attribs); + CallbacksDescriptor d(*factory()->prototype_string(), *prototype, attribs); map->AppendDescriptor(&d, witness); } } @@ -570,7 +586,8 @@ void Genesis::SetStrictFunctionInstanceDescriptor( // ECMAScript 5th Edition, 13.2.3 Handle<JSFunction> Genesis::GetThrowTypeErrorFunction() { if (throw_type_error_function.is_null()) { - Handle<String> name = factory()->LookupAsciiSymbol("ThrowTypeError"); + Handle<String> name = factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("ThrowTypeError")); throw_type_error_function = factory()->NewFunctionWithoutPrototype(name, CLASSIC_MODE); Handle<Code> code(isolate()->builtins()->builtin( @@ -645,8 +662,8 @@ static void SetAccessors(Handle<Map> map, void Genesis::PoisonArgumentsAndCaller(Handle<Map> map) { - SetAccessors(map, factory()->arguments_symbol(), GetThrowTypeErrorFunction()); - SetAccessors(map, factory()->caller_symbol(), GetThrowTypeErrorFunction()); + SetAccessors(map, factory()->arguments_string(), GetThrowTypeErrorFunction()); + SetAccessors(map, factory()->caller_string(), GetThrowTypeErrorFunction()); } @@ -674,9 +691,8 @@ void Genesis::CreateRoots() { // closure and extension object later (we need the empty function // and the global object, but in order to create those, we need the // native context). 
- native_context_ = Handle<Context>::cast(isolate()->global_handles()->Create( - *factory()->NewNativeContext())); - AddToWeakNativeContextList(*native_context_); + native_context_ = factory()->NewNativeContext(); + AddToWeakNativeContextList(*native_context()); isolate()->set_context(*native_context()); // Allocate the message listeners object. @@ -713,7 +729,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals( Handle<FunctionTemplateInfo> global_constructor = Handle<FunctionTemplateInfo>( FunctionTemplateInfo::cast(data->constructor())); - Handle<Object> proto_template(global_constructor->prototype_template()); + Handle<Object> proto_template(global_constructor->prototype_template(), + isolate()); if (!proto_template->IsUndefined()) { js_global_template = Handle<ObjectTemplateInfo>::cast(proto_template); @@ -721,7 +738,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals( } if (js_global_template.is_null()) { - Handle<String> name = Handle<String>(heap()->empty_symbol()); + Handle<String> name = Handle<String>(heap()->empty_string()); Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin( Builtins::kIllegal)); js_global_function = @@ -734,7 +751,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals( JSObject::cast(js_global_function->instance_prototype())); CHECK_NOT_EMPTY_HANDLE(isolate(), JSObject::SetLocalPropertyIgnoreAttributes( - prototype, factory()->constructor_symbol(), + prototype, factory()->constructor_string(), isolate()->object_function(), NONE)); } else { Handle<FunctionTemplateInfo> js_global_constructor( @@ -755,7 +772,7 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals( // Step 2: create or re-initialize the global proxy object. Handle<JSFunction> global_proxy_function; if (global_template.IsEmpty()) { - Handle<String> name = Handle<String>(heap()->empty_symbol()); + Handle<String> name = Handle<String>(heap()->empty_string()); Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin( Builtins::kIllegal)); global_proxy_function = @@ -771,7 +788,8 @@ Handle<JSGlobalProxy> Genesis::CreateNewGlobals( factory()->OuterGlobalObject); } - Handle<String> global_name = factory()->LookupAsciiSymbol("global"); + Handle<String> global_name = factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("global")); global_proxy_function->shared()->set_instance_class_name(*global_name); global_proxy_function->initial_map()->set_is_access_check_needed(true); @@ -803,15 +821,16 @@ void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global, void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) { Handle<GlobalObject> inner_global_from_snapshot( - GlobalObject::cast(native_context_->extension())); - Handle<JSBuiltinsObject> builtins_global(native_context_->builtins()); - native_context_->set_extension(*inner_global); - native_context_->set_global_object(*inner_global); - native_context_->set_security_token(*inner_global); + GlobalObject::cast(native_context()->extension())); + Handle<JSBuiltinsObject> builtins_global(native_context()->builtins()); + native_context()->set_extension(*inner_global); + native_context()->set_global_object(*inner_global); + native_context()->set_security_token(*inner_global); static const PropertyAttributes attributes = static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE); ForceSetProperty(builtins_global, - factory()->LookupAsciiSymbol("global"), + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("global")), inner_global, attributes); // Set up the reference from the global object to the builtins object. 
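Most of the bootstrapper churn in the hunks above and below is one mechanical rename: interned strings are no longer called "symbols" but "internalized strings", which frees the word "symbol" for the harmony Symbol support this patch also wires up (symbol.js, FLAG_harmony_symbols). A before/after sketch of a typical call site, using the factory methods that appear in this diff; the property name "example" and the accessor/attribs operands are illustrative only.

    // Old spellings, removed by this patch:
    Handle<String> name = factory()->LookupAsciiSymbol("example");
    CallbacksDescriptor d(*factory()->length_symbol(), *accessor, attribs);

    // New spellings: one-byte literals go through STATIC_ASCII_VECTOR,
    // runtime char* data through InternalizeUtf8String, and the heap/factory
    // getters are renamed *_symbol() -> *_string().
    Handle<String> name = factory()->InternalizeOneByteString(
        STATIC_ASCII_VECTOR("example"));
    CallbacksDescriptor d(*factory()->length_string(), *accessor, attribs);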
@@ -842,7 +861,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); - Handle<String> object_name = Handle<String>(heap->Object_symbol()); + Handle<String> object_name = Handle<String>(heap->Object_string()); CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( inner_global, object_name, @@ -878,7 +897,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, initial_map->set_instance_descriptors(*array_descriptors); { // Add length. - CallbacksDescriptor d(*factory->length_symbol(), *array_length, attribs); + CallbacksDescriptor d(*factory->length_string(), *array_length, attribs); array_function->initial_map()->AppendDescriptor(&d, witness); } @@ -927,7 +946,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, string_map->set_instance_descriptors(*string_descriptors); { // Add length. - CallbacksDescriptor d(*factory->length_symbol(), *string_length, attribs); + CallbacksDescriptor d(*factory->length_string(), *string_length, attribs); string_map->AppendDescriptor(&d, witness); } } @@ -964,28 +983,28 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, { // ECMA-262, section 15.10.7.1. - FieldDescriptor field(heap->source_symbol(), + FieldDescriptor field(heap->source_string(), JSRegExp::kSourceFieldIndex, final); initial_map->AppendDescriptor(&field, witness); } { // ECMA-262, section 15.10.7.2. - FieldDescriptor field(heap->global_symbol(), + FieldDescriptor field(heap->global_string(), JSRegExp::kGlobalFieldIndex, final); initial_map->AppendDescriptor(&field, witness); } { // ECMA-262, section 15.10.7.3. - FieldDescriptor field(heap->ignore_case_symbol(), + FieldDescriptor field(heap->ignore_case_string(), JSRegExp::kIgnoreCaseFieldIndex, final); initial_map->AppendDescriptor(&field, witness); } { // ECMA-262, section 15.10.7.4. - FieldDescriptor field(heap->multiline_symbol(), + FieldDescriptor field(heap->multiline_string(), JSRegExp::kMultilineFieldIndex, final); initial_map->AppendDescriptor(&field, witness); @@ -994,7 +1013,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, // ECMA-262, section 15.10.7.5. PropertyAttributes writable = static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE); - FieldDescriptor field(heap->last_index_symbol(), + FieldDescriptor field(heap->last_index_string(), JSRegExp::kLastIndexFieldIndex, writable); initial_map->AppendDescriptor(&field, witness); @@ -1012,7 +1031,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, proto_map->set_prototype(native_context()->initial_object_prototype()); Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map); proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, - heap->query_colon_symbol()); + heap->query_colon_string()); proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, heap->false_value()); proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, @@ -1049,7 +1068,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, // Make sure we can recognize argument objects at runtime. // This is done by introducing an anonymous function with // class_name equals 'Arguments'. 
- Handle<String> symbol = factory->LookupAsciiSymbol("Arguments"); + Handle<String> arguments_string = factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("Arguments")); Handle<Code> code = Handle<Code>( isolate->builtins()->builtin(Builtins::kIllegal)); Handle<JSObject> prototype = @@ -1057,14 +1077,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, JSObject::cast(native_context()->object_function()->prototype())); Handle<JSFunction> function = - factory->NewFunctionWithPrototype(symbol, + factory->NewFunctionWithPrototype(arguments_string, JS_OBJECT_TYPE, JSObject::kHeaderSize, prototype, code, false); ASSERT(!function->has_initial_map()); - function->shared()->set_instance_class_name(*symbol); + function->shared()->set_instance_class_name(*arguments_string); function->shared()->set_expected_nof_properties(2); Handle<JSObject> result = factory->NewJSObject(function); @@ -1073,22 +1093,22 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, // callee must be added as the second property. CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - result, factory->length_symbol(), + result, factory->length_string(), factory->undefined_value(), DONT_ENUM)); CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - result, factory->callee_symbol(), + result, factory->callee_string(), factory->undefined_value(), DONT_ENUM)); #ifdef DEBUG LookupResult lookup(isolate); - result->LocalLookup(heap->callee_symbol(), &lookup); + result->LocalLookup(heap->callee_string(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex); + ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex); - result->LocalLookup(heap->length_symbol(), &lookup); + result->LocalLookup(heap->length_string(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex); + ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); @@ -1147,17 +1167,17 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, map->set_instance_descriptors(*descriptors); { // length - FieldDescriptor d(*factory->length_symbol(), 0, DONT_ENUM); + FieldDescriptor d(*factory->length_string(), 0, DONT_ENUM); map->AppendDescriptor(&d, witness); } { // callee - CallbacksDescriptor d(*factory->callee_symbol(), + CallbacksDescriptor d(*factory->callee_string(), *callee, attributes); map->AppendDescriptor(&d, witness); } { // caller - CallbacksDescriptor d(*factory->caller_symbol(), + CallbacksDescriptor d(*factory->caller_string(), *caller, attributes); map->AppendDescriptor(&d, witness); @@ -1179,14 +1199,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, // Add length property only for strict mode boilerplate. 
CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( - result, factory->length_symbol(), + result, factory->length_string(), factory->undefined_value(), DONT_ENUM)); #ifdef DEBUG LookupResult lookup(isolate); - result->LocalLookup(heap->length_symbol(), &lookup); + result->LocalLookup(heap->length_string(), &lookup); ASSERT(lookup.IsField()); - ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex); + ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex); ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex); @@ -1201,13 +1221,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, Handle<Code> code = Handle<Code>( isolate->builtins()->builtin(Builtins::kIllegal)); Handle<JSFunction> context_extension_fun = - factory->NewFunction(factory->empty_symbol(), + factory->NewFunction(factory->empty_string(), JS_CONTEXT_EXTENSION_OBJECT_TYPE, JSObject::kHeaderSize, code, true); - Handle<String> name = factory->LookupAsciiSymbol("context_extension"); + Handle<String> name = factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("context_extension")); context_extension_fun->shared()->set_instance_class_name(*name); native_context()->set_context_extension_function(*context_extension_fun); } @@ -1219,7 +1240,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, Handle<Code>(isolate->builtins()->builtin( Builtins::kHandleApiCallAsFunction)); Handle<JSFunction> delegate = - factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE, + factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE, JSObject::kHeaderSize, code, true); native_context()->set_call_as_function_delegate(*delegate); delegate->shared()->DontAdaptArguments(); @@ -1231,7 +1252,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, Handle<Code>(isolate->builtins()->builtin( Builtins::kHandleApiCallAsConstructor)); Handle<JSFunction> delegate = - factory->NewFunction(factory->empty_symbol(), JS_OBJECT_TYPE, + factory->NewFunction(factory->empty_string(), JS_OBJECT_TYPE, JSObject::kHeaderSize, code, true); native_context()->set_call_as_constructor_delegate(*delegate); delegate->shared()->DontAdaptArguments(); @@ -1240,8 +1261,9 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global, // Initialize the out of memory slot. native_context()->set_out_of_memory(heap->false_value()); - // Initialize the data slot. - native_context()->set_data(heap->undefined_value()); + // Initialize the embedder data slot. + Handle<FixedArray> embedder_data = factory->NewFixedArray(2); + native_context()->set_embedder_data(*embedder_data); { // Initialize the random seed slot. 
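The other recurring change in the Bootstrapper and Genesis hunks is isolate plumbing: call sites stop reaching for the thread-local Isolate::Current(), take an Isolate* explicitly (as a parameter or, for Bootstrapper, the new isolate_ field), and construct every HandleScope and Handle against it. A shape-only sketch of the pattern, mirroring the NativesSourceLookup change earlier in this file; isolate_ is assumed to be a member of the enclosing class.

    // Before: the isolate and the handle scope come from thread-local state.
    HandleScope scope;
    Handle<Object> cached(heap->natives_source_cache()->get(index));

    // After: the isolate is named explicitly for the scope and the handle.
    HandleScope scope(isolate_);
    Handle<Object> cached(heap->natives_source_cache()->get(index), isolate_);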
@@ -1286,7 +1308,7 @@ bool Genesis::CompileBuiltin(Isolate* isolate, int index) { Vector<const char> name = Natives::GetScriptName(index); Handle<String> source_code = isolate->bootstrapper()->NativesSourceLookup(index); - return CompileNative(name, source_code); + return CompileNative(isolate, name, source_code); } @@ -1296,23 +1318,25 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) { Handle<String> source_code = factory->NewStringFromAscii( ExperimentalNatives::GetRawScriptSource(index)); - return CompileNative(name, source_code); + return CompileNative(isolate, name, source_code); } -bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) { - HandleScope scope; - Isolate* isolate = source->GetIsolate(); +bool Genesis::CompileNative(Isolate* isolate, + Vector<const char> name, + Handle<String> source) { + HandleScope scope(isolate); #ifdef ENABLE_DEBUGGER_SUPPORT isolate->debugger()->set_compiling_natives(true); #endif // During genesis, the boilerplate for stack overflow won't work until the // environment has been at least partially initialized. Add a stack check // before entering JS code to catch overflow early. - StackLimitCheck check(Isolate::Current()); + StackLimitCheck check(isolate); if (check.HasOverflowed()) return false; - bool result = CompileScriptCached(name, + bool result = CompileScriptCached(isolate, + name, source, NULL, NULL, @@ -1327,20 +1351,21 @@ bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) { } -bool Genesis::CompileScriptCached(Vector<const char> name, +bool Genesis::CompileScriptCached(Isolate* isolate, + Vector<const char> name, Handle<String> source, SourceCodeCache* cache, v8::Extension* extension, Handle<Context> top_context, bool use_runtime_context) { - Factory* factory = source->GetIsolate()->factory(); - HandleScope scope; + Factory* factory = isolate->factory(); + HandleScope scope(isolate); Handle<SharedFunctionInfo> function_info; // If we can't find the function in the cache, we compile a new // function and insert it into the cache. if (cache == NULL || !cache->Lookup(name, &function_info)) { - ASSERT(source->IsAsciiRepresentation()); + ASSERT(source->IsOneByteRepresentation()); Handle<String> script_name = factory->NewStringFromUtf8(name); function_info = Compiler::Compile( source, @@ -1372,7 +1397,8 @@ bool Genesis::CompileScriptCached(Vector<const char> name, Handle<Object> receiver = Handle<Object>(use_runtime_context ? 
top_context->builtins() - : top_context->global_object()); + : top_context->global_object(), + isolate); bool has_pending_exception; Execution::Call(fun, receiver, 0, NULL, &has_pending_exception); if (has_pending_exception) return false; @@ -1380,16 +1406,17 @@ bool Genesis::CompileScriptCached(Vector<const char> name, } -#define INSTALL_NATIVE(Type, name, var) \ - Handle<String> var##_name = factory()->LookupAsciiSymbol(name); \ - Object* var##_native = \ - native_context()->builtins()->GetPropertyNoExceptionThrown( \ - *var##_name); \ +#define INSTALL_NATIVE(Type, name, var) \ + Handle<String> var##_name = \ + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name)); \ + Object* var##_native = \ + native_context()->builtins()->GetPropertyNoExceptionThrown( \ + *var##_name); \ native_context()->set_##var(Type::cast(var##_native)); void Genesis::InstallNativeFunctions() { - HandleScope scope; + HandleScope scope(isolate()); INSTALL_NATIVE(JSFunction, "CreateDate", create_date_fun); INSTALL_NATIVE(JSFunction, "ToNumber", to_number_fun); INSTALL_NATIVE(JSFunction, "ToString", to_string_fun); @@ -1409,19 +1436,87 @@ void Genesis::InstallNativeFunctions() { } void Genesis::InstallExperimentalNativeFunctions() { + if (FLAG_harmony_symbols) { + INSTALL_NATIVE(JSObject, "SymbolDelegate", symbol_delegate); + } if (FLAG_harmony_proxies) { INSTALL_NATIVE(JSFunction, "DerivedHasTrap", derived_has_trap); INSTALL_NATIVE(JSFunction, "DerivedGetTrap", derived_get_trap); INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap); INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate); } + if (FLAG_harmony_observation) { + INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change); + INSTALL_NATIVE(JSFunction, "DeliverChangeRecords", + observers_deliver_changes); + } } #undef INSTALL_NATIVE +Handle<JSFunction> Genesis::InstallInternalArray( + Handle<JSBuiltinsObject> builtins, + const char* name, + ElementsKind elements_kind) { + // --- I n t e r n a l A r r a y --- + // An array constructor on the builtins object that works like + // the public Array constructor, except that its prototype + // doesn't inherit from Object.prototype. + // To be used only for internal work by builtins. Instances + // must not be leaked to user code. + Handle<JSFunction> array_function = + InstallFunction(builtins, + name, + JS_ARRAY_TYPE, + JSArray::kSize, + isolate()->initial_object_prototype(), + Builtins::kInternalArrayCode, + true); + Handle<JSObject> prototype = + factory()->NewJSObject(isolate()->object_function(), TENURED); + SetPrototype(array_function, prototype); + + // TODO(mvstanton): For performance reasons, this code would have to + // be changed to successfully run with FLAG_optimize_constructed_arrays. + // The next checkin to enable FLAG_optimize_constructed_arrays by + // default will address this. + CHECK(!FLAG_optimize_constructed_arrays); + array_function->shared()->set_construct_stub( + isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); + + array_function->shared()->DontAdaptArguments(); + + MaybeObject* maybe_map = array_function->initial_map()->Copy(); + Map* new_map; + if (!maybe_map->To(&new_map)) return Handle<JSFunction>::null(); + new_map->set_elements_kind(elements_kind); + array_function->set_initial_map(new_map); + + // Make "length" magic on instances. 
+ Handle<Map> initial_map(array_function->initial_map()); + Handle<DescriptorArray> array_descriptors( + factory()->NewDescriptorArray(0, 1)); + DescriptorArray::WhitenessWitness witness(*array_descriptors); + + Handle<Foreign> array_length(factory()->NewForeign( + &Accessors::ArrayLength)); + PropertyAttributes attribs = static_cast<PropertyAttributes>( + DONT_ENUM | DONT_DELETE); + initial_map->set_instance_descriptors(*array_descriptors); + + { // Add length. + CallbacksDescriptor d( + *factory()->length_string(), *array_length, attribs); + array_function->initial_map()->AppendDescriptor(&d, witness); + } + + return array_function; +} + + bool Genesis::InstallNatives() { - HandleScope scope; + HandleScope scope(isolate()); // Create a function for the builtins object. Allocate space for the // JavaScript builtins, a reference to the builtins object @@ -1429,11 +1524,12 @@ bool Genesis::InstallNatives() { Handle<Code> code = Handle<Code>( isolate()->builtins()->builtin(Builtins::kIllegal)); Handle<JSFunction> builtins_fun = - factory()->NewFunction(factory()->empty_symbol(), + factory()->NewFunction(factory()->empty_string(), JS_BUILTINS_OBJECT_TYPE, JSBuiltinsObject::kSize, code, true); - Handle<String> name = factory()->LookupAsciiSymbol("builtins"); + Handle<String> name = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins")); builtins_fun->shared()->set_instance_class_name(*name); builtins_fun->initial_map()->set_dictionary_map(true); builtins_fun->initial_map()->set_prototype(heap()->null_value()); @@ -1452,11 +1548,12 @@ bool Genesis::InstallNatives() { // global object. static const PropertyAttributes attributes = static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE); - Handle<String> global_symbol = factory()->LookupAsciiSymbol("global"); - Handle<Object> global_obj(native_context()->global_object()); + Handle<String> global_string = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global")); + Handle<Object> global_obj(native_context()->global_object(), isolate()); CHECK_NOT_EMPTY_HANDLE(isolate(), JSObject::SetLocalPropertyIgnoreAttributes( - builtins, global_symbol, global_obj, attributes)); + builtins, global_string, global_obj, attributes)); // Set up the reference from the global object to the builtins object. JSGlobalObject::cast(native_context()->global_object())-> @@ -1464,7 +1561,7 @@ bool Genesis::InstallNatives() { // Create a bridge function that has context in the native context. 
Handle<JSFunction> bridge = - factory()->NewFunction(factory()->empty_symbol(), + factory()->NewFunction(factory()->empty_string(), factory()->undefined_value()); ASSERT(bridge->context() == *isolate()->native_context()); @@ -1495,41 +1592,52 @@ bool Genesis::InstallNatives() { Handle<Foreign> script_source( factory()->NewForeign(&Accessors::ScriptSource)); Handle<Foreign> script_name(factory()->NewForeign(&Accessors::ScriptName)); - Handle<String> id_symbol(factory()->LookupAsciiSymbol("id")); + Handle<String> id_string(factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("id"))); Handle<Foreign> script_id(factory()->NewForeign(&Accessors::ScriptId)); - Handle<String> line_offset_symbol( - factory()->LookupAsciiSymbol("line_offset")); + Handle<String> line_offset_string( + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("line_offset"))); Handle<Foreign> script_line_offset( factory()->NewForeign(&Accessors::ScriptLineOffset)); - Handle<String> column_offset_symbol( - factory()->LookupAsciiSymbol("column_offset")); + Handle<String> column_offset_string( + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("column_offset"))); Handle<Foreign> script_column_offset( factory()->NewForeign(&Accessors::ScriptColumnOffset)); - Handle<String> data_symbol(factory()->LookupAsciiSymbol("data")); + Handle<String> data_string(factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("data"))); Handle<Foreign> script_data(factory()->NewForeign(&Accessors::ScriptData)); - Handle<String> type_symbol(factory()->LookupAsciiSymbol("type")); + Handle<String> type_string(factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("type"))); Handle<Foreign> script_type(factory()->NewForeign(&Accessors::ScriptType)); - Handle<String> compilation_type_symbol( - factory()->LookupAsciiSymbol("compilation_type")); + Handle<String> compilation_type_string( + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("compilation_type"))); Handle<Foreign> script_compilation_type( factory()->NewForeign(&Accessors::ScriptCompilationType)); - Handle<String> line_ends_symbol(factory()->LookupAsciiSymbol("line_ends")); + Handle<String> line_ends_string(factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("line_ends"))); Handle<Foreign> script_line_ends( factory()->NewForeign(&Accessors::ScriptLineEnds)); - Handle<String> context_data_symbol( - factory()->LookupAsciiSymbol("context_data")); + Handle<String> context_data_string( + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("context_data"))); Handle<Foreign> script_context_data( factory()->NewForeign(&Accessors::ScriptContextData)); - Handle<String> eval_from_script_symbol( - factory()->LookupAsciiSymbol("eval_from_script")); + Handle<String> eval_from_script_string( + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("eval_from_script"))); Handle<Foreign> script_eval_from_script( factory()->NewForeign(&Accessors::ScriptEvalFromScript)); - Handle<String> eval_from_script_position_symbol( - factory()->LookupAsciiSymbol("eval_from_script_position")); + Handle<String> eval_from_script_position_string( + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("eval_from_script_position"))); Handle<Foreign> script_eval_from_script_position( factory()->NewForeign(&Accessors::ScriptEvalFromScriptPosition)); - Handle<String> eval_from_function_name_symbol( - factory()->LookupAsciiSymbol("eval_from_function_name")); + Handle<String> eval_from_function_name_string( + factory()->InternalizeOneByteString( + 
STATIC_ASCII_VECTOR("eval_from_function_name"))); Handle<Foreign> script_eval_from_function_name( factory()->NewForeign(&Accessors::ScriptEvalFromFunctionName)); PropertyAttributes attribs = @@ -1538,67 +1646,67 @@ bool Genesis::InstallNatives() { { CallbacksDescriptor d( - *factory()->source_symbol(), *script_source, attribs); + *factory()->source_string(), *script_source, attribs); script_map->AppendDescriptor(&d, witness); } { - CallbacksDescriptor d(*factory()->name_symbol(), *script_name, attribs); + CallbacksDescriptor d(*factory()->name_string(), *script_name, attribs); script_map->AppendDescriptor(&d, witness); } { - CallbacksDescriptor d(*id_symbol, *script_id, attribs); + CallbacksDescriptor d(*id_string, *script_id, attribs); script_map->AppendDescriptor(&d, witness); } { - CallbacksDescriptor d(*line_offset_symbol, *script_line_offset, attribs); + CallbacksDescriptor d(*line_offset_string, *script_line_offset, attribs); script_map->AppendDescriptor(&d, witness); } { CallbacksDescriptor d( - *column_offset_symbol, *script_column_offset, attribs); + *column_offset_string, *script_column_offset, attribs); script_map->AppendDescriptor(&d, witness); } { - CallbacksDescriptor d(*data_symbol, *script_data, attribs); + CallbacksDescriptor d(*data_string, *script_data, attribs); script_map->AppendDescriptor(&d, witness); } { - CallbacksDescriptor d(*type_symbol, *script_type, attribs); + CallbacksDescriptor d(*type_string, *script_type, attribs); script_map->AppendDescriptor(&d, witness); } { CallbacksDescriptor d( - *compilation_type_symbol, *script_compilation_type, attribs); + *compilation_type_string, *script_compilation_type, attribs); script_map->AppendDescriptor(&d, witness); } { - CallbacksDescriptor d(*line_ends_symbol, *script_line_ends, attribs); + CallbacksDescriptor d(*line_ends_string, *script_line_ends, attribs); script_map->AppendDescriptor(&d, witness); } { CallbacksDescriptor d( - *context_data_symbol, *script_context_data, attribs); + *context_data_string, *script_context_data, attribs); script_map->AppendDescriptor(&d, witness); } { CallbacksDescriptor d( - *eval_from_script_symbol, *script_eval_from_script, attribs); + *eval_from_script_string, *script_eval_from_script, attribs); script_map->AppendDescriptor(&d, witness); } { CallbacksDescriptor d( - *eval_from_script_position_symbol, + *eval_from_script_position_string, *script_eval_from_script_position, attribs); script_map->AppendDescriptor(&d, witness); @@ -1606,7 +1714,7 @@ bool Genesis::InstallNatives() { { CallbacksDescriptor d( - *eval_from_function_name_symbol, + *eval_from_function_name_string, *script_eval_from_function_name, attribs); script_map->AppendDescriptor(&d, witness); @@ -1632,60 +1740,24 @@ bool Genesis::InstallNatives() { native_context()->set_opaque_reference_function(*opaque_reference_fun); } - { // --- I n t e r n a l A r r a y --- - // An array constructor on the builtins object that works like - // the public Array constructor, except that its prototype - // doesn't inherit from Object.prototype. - // To be used only for internal work by builtins. Instances - // must not be leaked to user code. + // InternalArrays should not use Smi-Only array optimizations. There are too + // many places in the C++ runtime code (e.g. RegEx) that assume that + // elements in InternalArrays can be set to non-Smi values without going + // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT + // transition easy to trap. Moreover, they rarely are smi-only. 
+ { Handle<JSFunction> array_function = - InstallFunction(builtins, - "InternalArray", - JS_ARRAY_TYPE, - JSArray::kSize, - isolate()->initial_object_prototype(), - Builtins::kInternalArrayCode, - true); - Handle<JSObject> prototype = - factory()->NewJSObject(isolate()->object_function(), TENURED); - SetPrototype(array_function, prototype); - - array_function->shared()->set_construct_stub( - isolate()->builtins()->builtin(Builtins::kArrayConstructCode)); - array_function->shared()->DontAdaptArguments(); - - // InternalArrays should not use Smi-Only array optimizations. There are too - // many places in the C++ runtime code (e.g. RegEx) that assume that - // elements in InternalArrays can be set to non-Smi values without going - // through a common bottleneck that would make the SMI_ONLY -> FAST_ELEMENT - // transition easy to trap. Moreover, they rarely are smi-only. - MaybeObject* maybe_map = array_function->initial_map()->Copy(); - Map* new_map; - if (!maybe_map->To(&new_map)) return false; - new_map->set_elements_kind(FAST_HOLEY_ELEMENTS); - array_function->set_initial_map(new_map); - - // Make "length" magic on instances. - Handle<Map> initial_map(array_function->initial_map()); - Handle<DescriptorArray> array_descriptors( - factory()->NewDescriptorArray(0, 1)); - DescriptorArray::WhitenessWitness witness(*array_descriptors); - - Handle<Foreign> array_length(factory()->NewForeign( - &Accessors::ArrayLength)); - PropertyAttributes attribs = static_cast<PropertyAttributes>( - DONT_ENUM | DONT_DELETE); - initial_map->set_instance_descriptors(*array_descriptors); - - { // Add length. - CallbacksDescriptor d( - *factory()->length_symbol(), *array_length, attribs); - array_function->initial_map()->AppendDescriptor(&d, witness); - } - + InstallInternalArray(builtins, "InternalArray", FAST_HOLEY_ELEMENTS); + if (array_function.is_null()) return false; native_context()->set_internal_array_function(*array_function); } + { + Handle<JSFunction> array_function = + InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS); + if (array_function.is_null()) return false; + } + if (FLAG_disable_native_files) { PrintF("Warning: Running without installed natives!\n"); return true; @@ -1713,9 +1785,10 @@ bool Genesis::InstallNatives() { HeapObject::cast(string_function->initial_map()->prototype())->map()); // Install Function.prototype.call and apply. 
- { Handle<String> key = factory()->function_class_symbol(); + { Handle<String> key = factory()->function_class_string(); Handle<JSFunction> function = - Handle<JSFunction>::cast(GetProperty(isolate()->global_object(), key)); + Handle<JSFunction>::cast( + GetProperty(isolate(), isolate()->global_object(), key)); Handle<JSObject> proto = Handle<JSObject>(JSObject::cast(function->instance_prototype())); @@ -1776,7 +1849,7 @@ bool Genesis::InstallNatives() { JSFunction* array_function = native_context()->array_function(); Handle<DescriptorArray> array_descriptors( array_function->initial_map()->instance_descriptors()); - String* length = heap()->length_symbol(); + String* length = heap()->length_string(); int old = array_descriptors->SearchWithCache( length, array_function->initial_map()); ASSERT(old != DescriptorArray::kNotFound); @@ -1786,14 +1859,14 @@ bool Genesis::InstallNatives() { initial_map->AppendDescriptor(&desc, witness); } { - FieldDescriptor index_field(heap()->index_symbol(), + FieldDescriptor index_field(heap()->index_string(), JSRegExpResult::kIndexIndex, NONE); initial_map->AppendDescriptor(&index_field, witness); } { - FieldDescriptor input_field(heap()->input_symbol(), + FieldDescriptor input_field(heap()->input_string(), JSRegExpResult::kInputIndex, NONE); initial_map->AppendDescriptor(&input_field, witness); @@ -1818,6 +1891,11 @@ bool Genesis::InstallExperimentalNatives() { for (int i = ExperimentalNatives::GetDebuggerCount(); i < ExperimentalNatives::GetBuiltinsCount(); i++) { + if (FLAG_harmony_symbols && + strcmp(ExperimentalNatives::GetScriptName(i).start(), + "native symbol.js") == 0) { + if (!CompileExperimentalBuiltin(isolate(), i)) return false; + } if (FLAG_harmony_proxies && strcmp(ExperimentalNatives::GetScriptName(i).start(), "native proxy.js") == 0) { @@ -1828,6 +1906,11 @@ bool Genesis::InstallExperimentalNatives() { "native collection.js") == 0) { if (!CompileExperimentalBuiltin(isolate(), i)) return false; } + if (FLAG_harmony_observation && + strcmp(ExperimentalNatives::GetScriptName(i).start(), + "native object-observe.js") == 0) { + if (!CompileExperimentalBuiltin(isolate(), i)) return false; + } } InstallExperimentalNativeFunctions(); @@ -1839,18 +1922,19 @@ bool Genesis::InstallExperimentalNatives() { static Handle<JSObject> ResolveBuiltinIdHolder( Handle<Context> native_context, const char* holder_expr) { - Factory* factory = native_context->GetIsolate()->factory(); + Isolate* isolate = native_context->GetIsolate(); + Factory* factory = isolate->factory(); Handle<GlobalObject> global(native_context->global_object()); const char* period_pos = strchr(holder_expr, '.'); if (period_pos == NULL) { - return Handle<JSObject>::cast( - GetProperty(global, factory->LookupAsciiSymbol(holder_expr))); + return Handle<JSObject>::cast(GetProperty( + isolate, global, factory->InternalizeUtf8String(holder_expr))); } ASSERT_EQ(".prototype", period_pos); Vector<const char> property(holder_expr, static_cast<int>(period_pos - holder_expr)); Handle<JSFunction> function = Handle<JSFunction>::cast( - GetProperty(global, factory->LookupSymbol(property))); + GetProperty(isolate, global, factory->InternalizeUtf8String(property))); return Handle<JSObject>(JSObject::cast(function->prototype())); } @@ -1859,7 +1943,7 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder, const char* function_name, BuiltinFunctionId id) { Factory* factory = holder->GetIsolate()->factory(); - Handle<String> name = factory->LookupAsciiSymbol(function_name); + Handle<String> name = 
factory->InternalizeUtf8String(function_name); Object* function_object = holder->GetProperty(*name)->ToObjectUnchecked(); Handle<JSFunction> function(JSFunction::cast(function_object)); function->shared()->set_function_data(Smi::FromInt(id)); @@ -1867,7 +1951,7 @@ static void InstallBuiltinFunctionId(Handle<JSObject> holder, void Genesis::InstallBuiltinFunctionIds() { - HandleScope scope; + HandleScope scope(isolate()); #define INSTALL_BUILTIN_ID(holder_expr, fun_name, name) \ { \ Handle<JSObject> holder = ResolveBuiltinIdHolder( \ @@ -1932,10 +2016,9 @@ void Genesis::InitializeNormalizedMapCaches() { bool Bootstrapper::InstallExtensions(Handle<Context> native_context, v8::ExtensionConfiguration* extensions) { - Isolate* isolate = native_context->GetIsolate(); - BootstrapperActive active; - SaveContext saved_context(isolate); - isolate->set_context(*native_context); + BootstrapperActive active(this); + SaveContext saved_context(isolate_); + isolate_->set_context(*native_context); if (!Genesis::InstallExtensions(native_context, extensions)) return false; Genesis::InstallSpecialObjects(native_context); return true; @@ -1945,12 +2028,13 @@ bool Bootstrapper::InstallExtensions(Handle<Context> native_context, void Genesis::InstallSpecialObjects(Handle<Context> native_context) { Isolate* isolate = native_context->GetIsolate(); Factory* factory = isolate->factory(); - HandleScope scope; + HandleScope scope(isolate); Handle<JSGlobalObject> global(JSGlobalObject::cast( native_context->global_object())); // Expose the natives in global if a name for it is specified. if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) { - Handle<String> natives = factory->LookupAsciiSymbol(FLAG_expose_natives_as); + Handle<String> natives = + factory->InternalizeUtf8String(FLAG_expose_natives_as); CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( global, natives, @@ -1960,8 +2044,10 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) { Handle<Object> Error = GetProperty(global, "Error"); if (Error->IsJSObject()) { - Handle<String> name = factory->LookupAsciiSymbol("stackTraceLimit"); - Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit)); + Handle<String> name = factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("stackTraceLimit")); + Handle<Smi> stack_trace_limit( + Smi::FromInt(FLAG_stack_trace_limit), isolate); CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( Handle<JSObject>::cast(Error), name, @@ -1971,7 +2057,7 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) { #ifdef ENABLE_DEBUGGER_SUPPORT // Expose the debug global object in global if a name for it is specified. if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) { - Debug* debug = Isolate::Current()->debug(); + Debug* debug = isolate->debug(); // If loading fails we just bail out without installing the // debugger but without tanking the whole context. 
if (!debug->Load()) return; @@ -1982,8 +2068,9 @@ void Genesis::InstallSpecialObjects(Handle<Context> native_context) { native_context->security_token()); Handle<String> debug_string = - factory->LookupAsciiSymbol(FLAG_expose_debug_as); - Handle<Object> global_proxy(debug->debug_context()->global_proxy()); + factory->InternalizeUtf8String(FLAG_expose_debug_as); + Handle<Object> global_proxy( + debug->debug_context()->global_proxy(), isolate); CHECK_NOT_EMPTY_HANDLE(isolate, JSObject::SetLocalPropertyIgnoreAttributes( global, debug_string, global_proxy, DONT_ENUM)); @@ -2020,26 +2107,22 @@ void Genesis::ExtensionStates::set_state(RegisteredExtension* extension, bool Genesis::InstallExtensions(Handle<Context> native_context, v8::ExtensionConfiguration* extensions) { - // TODO(isolates): Extensions on multiple isolates may take a little more - // effort. (The external API reads 'ignore'-- does that mean - // we can break the interface?) - - + Isolate* isolate = native_context->GetIsolate(); ExtensionStates extension_states; // All extensions have state UNVISITED. // Install auto extensions. v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension(); while (current != NULL) { if (current->extension()->auto_enable()) - InstallExtension(current, &extension_states); + InstallExtension(isolate, current, &extension_states); current = current->next(); } - if (FLAG_expose_gc) InstallExtension("v8/gc", &extension_states); + if (FLAG_expose_gc) InstallExtension(isolate, "v8/gc", &extension_states); if (FLAG_expose_externalize_string) { - InstallExtension("v8/externalize", &extension_states); + InstallExtension(isolate, "v8/externalize", &extension_states); } if (FLAG_track_gc_object_stats) { - InstallExtension("v8/statistics", &extension_states); + InstallExtension(isolate, "v8/statistics", &extension_states); } if (extensions == NULL) return true; @@ -2047,7 +2130,7 @@ bool Genesis::InstallExtensions(Handle<Context> native_context, int count = v8::ImplementationUtilities::GetNameCount(extensions); const char** names = v8::ImplementationUtilities::GetNames(extensions); for (int i = 0; i < count; i++) { - if (!InstallExtension(names[i], &extension_states)) + if (!InstallExtension(isolate, names[i], &extension_states)) return false; } @@ -2057,7 +2140,8 @@ bool Genesis::InstallExtensions(Handle<Context> native_context, // Installs a named extension. This methods is unoptimized and does // not scale well if we want to support a large number of extensions. 
-bool Genesis::InstallExtension(const char* name, +bool Genesis::InstallExtension(Isolate* isolate, + const char* name, ExtensionStates* extension_states) { v8::RegisteredExtension* current = v8::RegisteredExtension::first_extension(); // Loop until we find the relevant extension @@ -2071,13 +2155,14 @@ bool Genesis::InstallExtension(const char* name, "v8::Context::New()", "Cannot find required extension"); return false; } - return InstallExtension(current, extension_states); + return InstallExtension(isolate, current, extension_states); } -bool Genesis::InstallExtension(v8::RegisteredExtension* current, +bool Genesis::InstallExtension(Isolate* isolate, + v8::RegisteredExtension* current, ExtensionStates* extension_states) { - HandleScope scope; + HandleScope scope(isolate); if (extension_states->get_state(current) == INSTALLED) return true; // The current node has already been visited so there must be a @@ -2092,19 +2177,21 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current, v8::Extension* extension = current->extension(); // Install the extension's dependencies for (int i = 0; i < extension->dependency_count(); i++) { - if (!InstallExtension(extension->dependencies()[i], extension_states)) + if (!InstallExtension(isolate, + extension->dependencies()[i], + extension_states)) { return false; + } } - Isolate* isolate = Isolate::Current(); Handle<String> source_code = isolate->factory()->NewExternalStringFromAscii(extension->source()); - bool result = CompileScriptCached( - CStrVector(extension->name()), - source_code, - isolate->bootstrapper()->extensions_cache(), - extension, - Handle<Context>(isolate->context()), - false); + bool result = CompileScriptCached(isolate, + CStrVector(extension->name()), + source_code, + isolate->bootstrapper()->extensions_cache(), + extension, + Handle<Context>(isolate->context()), + false); ASSERT(isolate->has_pending_exception() != result); if (!result) { // We print out the name of the extension that fail to install. 
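As background for the dependency-driven installation above, a minimal sketch of how an embedder declares an extension against the v8.h API of roughly this vintage; the extension names, the "my/base" dependency, and the log() function are illustrative assumptions, not part of this patch.

#include <v8.h>

// Native function handed to scripts that use this extension.
static v8::Handle<v8::Value> Log(const v8::Arguments& args) {
  // ... forward args[0] to some logging sink ...
  return v8::Undefined();
}

static const char* kLoggerDeps[] = { "my/base" };

class LoggerExtension : public v8::Extension {
 public:
  LoggerExtension()
      : v8::Extension("my/logger",
                      "native function log();",  // source compiled lazily, as above
                      1, kLoggerDeps) {}         // installed only after "my/base"
  virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction(
      v8::Handle<v8::String> name) {
    return v8::FunctionTemplate::New(Log);
  }
};

// Registration only makes the extension known; Genesis::InstallExtensions()
// compiles it into a context when it is auto-enabled or named in the
// v8::ExtensionConfiguration passed to Context::New().
static LoggerExtension logger_extension;
static v8::DeclareExtension logger_declaration(&logger_extension);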
@@ -2122,11 +2209,11 @@ bool Genesis::InstallExtension(v8::RegisteredExtension* current, bool Genesis::InstallJSBuiltins(Handle<JSBuiltinsObject> builtins) { - HandleScope scope; - Factory* factory = builtins->GetIsolate()->factory(); + HandleScope scope(isolate()); for (int i = 0; i < Builtins::NumberOfJavaScriptBuiltins(); i++) { Builtins::JavaScript id = static_cast<Builtins::JavaScript>(i); - Handle<String> name = factory->LookupAsciiSymbol(Builtins::GetName(id)); + Handle<String> name = + factory()->InternalizeUtf8String(Builtins::GetName(id)); Object* function_object = builtins->GetPropertyNoExceptionThrown(*name); Handle<JSFunction> function = Handle<JSFunction>(JSFunction::cast(function_object)); @@ -2196,21 +2283,22 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, PropertyDetails details = descs->GetDetails(i); switch (details.type()) { case FIELD: { - HandleScope inner; - Handle<String> key = Handle<String>(descs->GetKey(i)); + HandleScope inner(isolate()); + Handle<Name> key = Handle<Name>(descs->GetKey(i)); int index = descs->GetFieldIndex(i); - Handle<Object> value = Handle<Object>(from->FastPropertyAt(index)); - CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(), + Handle<Object> value = Handle<Object>(from->FastPropertyAt(index), + isolate()); + CHECK_NOT_EMPTY_HANDLE(isolate(), JSObject::SetLocalPropertyIgnoreAttributes( to, key, value, details.attributes())); break; } case CONSTANT_FUNCTION: { - HandleScope inner; - Handle<String> key = Handle<String>(descs->GetKey(i)); + HandleScope inner(isolate()); + Handle<Name> key = Handle<Name>(descs->GetKey(i)); Handle<JSFunction> fun = Handle<JSFunction>(descs->GetConstantFunction(i)); - CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(), + CHECK_NOT_EMPTY_HANDLE(isolate(), JSObject::SetLocalPropertyIgnoreAttributes( to, key, fun, details.attributes())); break; @@ -2220,11 +2308,11 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, to->LocalLookup(descs->GetKey(i), &result); // If the property is already there we skip it if (result.IsFound()) continue; - HandleScope inner; + HandleScope inner(isolate()); ASSERT(!to->HasFastProperties()); // Add to dictionary. - Handle<String> key = Handle<String>(descs->GetKey(i)); - Handle<Object> callbacks(descs->GetCallbacksObject(i)); + Handle<Name> key = Handle<Name>(descs->GetKey(i)); + Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate()); PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, details.descriptor_index()); @@ -2243,25 +2331,27 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from, } } } else { - Handle<StringDictionary> properties = - Handle<StringDictionary>(from->property_dictionary()); + Handle<NameDictionary> properties = + Handle<NameDictionary>(from->property_dictionary()); int capacity = properties->Capacity(); for (int i = 0; i < capacity; i++) { Object* raw_key(properties->KeyAt(i)); if (properties->IsKey(raw_key)) { - ASSERT(raw_key->IsString()); + ASSERT(raw_key->IsName()); // If the property is already there we skip it. LookupResult result(isolate()); - to->LocalLookup(String::cast(raw_key), &result); + to->LocalLookup(Name::cast(raw_key), &result); if (result.IsFound()) continue; // Set the property. 
- Handle<String> key = Handle<String>(String::cast(raw_key)); - Handle<Object> value = Handle<Object>(properties->ValueAt(i)); + Handle<Name> key = Handle<Name>(Name::cast(raw_key)); + Handle<Object> value = Handle<Object>(properties->ValueAt(i), + isolate()); if (value->IsJSGlobalPropertyCell()) { - value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value()); + value = Handle<Object>(JSGlobalPropertyCell::cast(*value)->value(), + isolate()); } PropertyDetails details = properties->DetailsAt(i); - CHECK_NOT_EMPTY_HANDLE(to->GetIsolate(), + CHECK_NOT_EMPTY_HANDLE(isolate(), JSObject::SetLocalPropertyIgnoreAttributes( to, key, value, details.attributes())); } @@ -2281,8 +2371,8 @@ void Genesis::TransferIndexedProperties(Handle<JSObject> from, void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) { - HandleScope outer; - Factory* factory = from->GetIsolate()->factory(); + HandleScope outer(isolate()); + Factory* factory = isolate()->factory(); ASSERT(!from->IsJSArray()); ASSERT(!to->IsJSArray()); @@ -2316,28 +2406,27 @@ void Genesis::MakeFunctionInstancePrototypeWritable() { Genesis::Genesis(Isolate* isolate, Handle<Object> global_object, v8::Handle<v8::ObjectTemplate> global_template, - v8::ExtensionConfiguration* extensions) : isolate_(isolate) { + v8::ExtensionConfiguration* extensions) + : isolate_(isolate), + active_(isolate->bootstrapper()) { result_ = Handle<Context>::null(); // If V8 isn't running and cannot be initialized, just return. if (!V8::IsRunning() && !V8::Initialize(NULL)) return; // Before creating the roots we must save the context and restore it // on all function exits. - HandleScope scope; SaveContext saved_context(isolate); // During genesis, the boilerplate for stack overflow won't work until the // environment has been at least partially initialized. Add a stack check // before entering JS code to catch overflow early. 
- StackLimitCheck check(Isolate::Current()); + StackLimitCheck check(isolate); if (check.HasOverflowed()) return; - Handle<Context> new_context = Snapshot::NewContextFromSnapshot(); - if (!new_context.is_null()) { - native_context_ = - Handle<Context>::cast(isolate->global_handles()->Create(*new_context)); - AddToWeakNativeContextList(*native_context_); - isolate->set_context(*native_context_); + native_context_ = Snapshot::NewContextFromSnapshot(); + if (!native_context().is_null()) { + AddToWeakNativeContextList(*native_context()); + isolate->set_context(*native_context()); isolate->counters()->contexts_created_by_snapshot()->Increment(); Handle<GlobalObject> inner_global; Handle<JSGlobalProxy> global_proxy = @@ -2373,7 +2462,7 @@ Genesis::Genesis(Isolate* isolate, InitializeExperimentalGlobal(); if (!InstallExperimentalNatives()) return; - result_ = native_context_; + result_ = native_context(); } diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h index 179e65c354..e33415eeb9 100644 --- a/deps/v8/src/bootstrapper.h +++ b/deps/v8/src/bootstrapper.h @@ -54,8 +54,8 @@ class SourceCodeCache BASE_EMBEDDED { bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) { for (int i = 0; i < cache_->length(); i+=2) { - SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i)); - if (str->IsEqualTo(name)) { + SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i)); + if (str->IsUtf8EqualTo(name)) { *handle = Handle<SharedFunctionInfo>( SharedFunctionInfo::cast(cache_->get(i + 1))); return true; @@ -65,7 +65,7 @@ class SourceCodeCache BASE_EMBEDDED { } void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) { - HandleScope scope; + HandleScope scope(shared->GetIsolate()); int length = cache_->length(); Handle<FixedArray> new_array = FACTORY->NewFixedArray(length + 2, TENURED); @@ -95,7 +95,6 @@ class Bootstrapper { // Creates a JavaScript Global Context with initial object graph. // The returned value is a global handle casted to V8Environment*. Handle<Context> CreateEnvironment( - Isolate* isolate, Handle<Object> global_object, v8::Handle<v8::ObjectTemplate> global_template, v8::ExtensionConfiguration* extensions); @@ -132,6 +131,7 @@ class Bootstrapper { SourceCodeCache* extensions_cache() { return &extensions_cache_; } private: + Isolate* isolate_; typedef int NestingCounterType; NestingCounterType nesting_; SourceCodeCache extensions_cache_; @@ -144,7 +144,7 @@ class Bootstrapper { friend class Isolate; friend class NativesExternalStringResource; - Bootstrapper(); + explicit Bootstrapper(Isolate* isolate); DISALLOW_COPY_AND_ASSIGN(Bootstrapper); }; @@ -152,15 +152,18 @@ class Bootstrapper { class BootstrapperActive BASE_EMBEDDED { public: - BootstrapperActive() { - ++Isolate::Current()->bootstrapper()->nesting_; + explicit BootstrapperActive(Bootstrapper* bootstrapper) + : bootstrapper_(bootstrapper) { + ++bootstrapper_->nesting_; } ~BootstrapperActive() { - --Isolate::Current()->bootstrapper()->nesting_; + --bootstrapper_->nesting_; } private: + Bootstrapper* bootstrapper_; + DISALLOW_COPY_AND_ASSIGN(BootstrapperActive); }; diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc index df70cd4fc7..f8d562b34b 100644 --- a/deps/v8/src/builtins.cc +++ b/deps/v8/src/builtins.cc @@ -150,7 +150,7 @@ static inline bool CalledAsConstructor(Isolate* isolate) { // Calculate the result using a full stack frame iterator and check // that the state of the stack is as we assume it to be in the // code below. 
- StackFrameIterator it; + StackFrameIterator it(isolate); ASSERT(it.frame()->is_exit()); it.Advance(); StackFrame* frame = it.frame(); @@ -186,9 +186,67 @@ BUILTIN(EmptyFunction) { } +#define CONVERT_ARG_STUB_CALLER_ARGS(name) \ + Arguments* name = reinterpret_cast<Arguments*>(args[0]); + + +RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure) { + CONVERT_ARG_STUB_CALLER_ARGS(caller_args); + // ASSERT(args.length() == 3); + Handle<JSFunction> function = args.at<JSFunction>(1); + Handle<Object> type_info = args.at<Object>(2); + + JSArray* array = NULL; + bool holey = false; + if (caller_args->length() == 1 && (*caller_args)[0]->IsSmi()) { + int value = Smi::cast((*caller_args)[0])->value(); + holey = (value > 0 && value < JSObject::kInitialMaxFastElementArray); + } + + MaybeObject* maybe_array; + if (*type_info != isolate->heap()->undefined_value()) { + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(*type_info); + if (cell->value()->IsSmi()) { + Smi* smi = Smi::cast(cell->value()); + ElementsKind to_kind = static_cast<ElementsKind>(smi->value()); + if (holey && !IsFastHoleyElementsKind(to_kind)) { + to_kind = GetHoleyElementsKind(to_kind); + // Update the allocation site info to reflect the advice alteration. + cell->set_value(Smi::FromInt(to_kind)); + } + + AllocationSiteMode mode = AllocationSiteInfo::GetMode(to_kind); + if (mode == TRACK_ALLOCATION_SITE) { + maybe_array = isolate->heap()->AllocateEmptyJSArrayWithAllocationSite( + to_kind, type_info); + } else { + maybe_array = isolate->heap()->AllocateEmptyJSArray(to_kind); + } + if (!maybe_array->To(&array)) return maybe_array; + } + } + + ASSERT(function->has_initial_map()); + ElementsKind kind = function->initial_map()->elements_kind(); + if (holey) { + kind = GetHoleyElementsKind(kind); + } + + if (array == NULL) { + maybe_array = isolate->heap()->AllocateEmptyJSArray(kind); + if (!maybe_array->To(&array)) return maybe_array; + } + + maybe_array = ArrayConstructInitializeElements(array, caller_args); + if (maybe_array->IsFailure()) return maybe_array; + return array; +} + + static MaybeObject* ArrayCodeGenericCommon(Arguments* args, Isolate* isolate, JSFunction* constructor) { + ASSERT(args->length() >= 1); Heap* heap = isolate->heap(); isolate->counters()->array_function_runtime()->Increment(); @@ -197,8 +255,29 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, array = JSArray::cast((*args)[0]); // Initialize elements and length in case later allocations fail so that the // array object is initialized in a valid state. 
- array->set_length(Smi::FromInt(0)); - array->set_elements(heap->empty_fixed_array()); + MaybeObject* maybe_array = array->Initialize(0); + if (maybe_array->IsFailure()) return maybe_array; + + if (FLAG_optimize_constructed_arrays) { + AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(array); + ElementsKind to_kind = array->GetElementsKind(); + if (info != NULL && info->GetElementsKindPayload(&to_kind)) { + if (IsMoreGeneralElementsKindTransition(array->GetElementsKind(), + to_kind)) { + // We have advice that we should change the elements kind + if (FLAG_trace_track_allocation_sites) { + PrintF("AllocationSiteInfo: pre-transitioning array %p(%s->%s)\n", + reinterpret_cast<void*>(array), + ElementsKindToString(array->GetElementsKind()), + ElementsKindToString(to_kind)); + } + + maybe_array = array->TransitionElementsKind(to_kind); + if (maybe_array->IsFailure()) return maybe_array; + } + } + } + if (!FLAG_smi_only_arrays) { Context* native_context = isolate->context()->native_context(); if (array->GetElementsKind() == GetInitialFastElementsKind() && @@ -215,97 +294,10 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args, if (!maybe_obj->To(&array)) return maybe_obj; } - // Optimize the case where there is one argument and the argument is a - // small smi. - if (args->length() == 2) { - Object* obj = (*args)[1]; - if (obj->IsSmi()) { - int len = Smi::cast(obj)->value(); - if (len >= 0 && len < JSObject::kInitialMaxFastElementArray) { - Object* fixed_array; - { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len); - if (!maybe_obj->ToObject(&fixed_array)) return maybe_obj; - } - ElementsKind elements_kind = array->GetElementsKind(); - if (!IsFastHoleyElementsKind(elements_kind)) { - elements_kind = GetHoleyElementsKind(elements_kind); - MaybeObject* maybe_array = - array->TransitionElementsKind(elements_kind); - if (maybe_array->IsFailure()) return maybe_array; - } - // We do not use SetContent to skip the unnecessary elements type check. - array->set_elements(FixedArray::cast(fixed_array)); - array->set_length(Smi::cast(obj)); - return array; - } - } - // Take the argument as the length. - { MaybeObject* maybe_obj = array->Initialize(0); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - return array->SetElementsLength((*args)[1]); - } - - // Optimize the case where there are no parameters passed. - if (args->length() == 1) { - return array->Initialize(JSArray::kPreallocatedArrayElements); - } - - // Set length and elements on the array. - int number_of_elements = args->length() - 1; - MaybeObject* maybe_object = - array->EnsureCanContainElements(args, 1, number_of_elements, - ALLOW_CONVERTED_DOUBLE_ELEMENTS); - if (maybe_object->IsFailure()) return maybe_object; - - // Allocate an appropriately typed elements array. 
- MaybeObject* maybe_elms; - ElementsKind elements_kind = array->GetElementsKind(); - if (IsFastDoubleElementsKind(elements_kind)) { - maybe_elms = heap->AllocateUninitializedFixedDoubleArray( - number_of_elements); - } else { - maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements); - } - FixedArrayBase* elms; - if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms; - - // Fill in the content - switch (array->GetElementsKind()) { - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_SMI_ELEMENTS: { - FixedArray* smi_elms = FixedArray::cast(elms); - for (int index = 0; index < number_of_elements; index++) { - smi_elms->set(index, (*args)[index+1], SKIP_WRITE_BARRIER); - } - break; - } - case FAST_HOLEY_ELEMENTS: - case FAST_ELEMENTS: { - AssertNoAllocation no_gc; - WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - FixedArray* object_elms = FixedArray::cast(elms); - for (int index = 0; index < number_of_elements; index++) { - object_elms->set(index, (*args)[index+1], mode); - } - break; - } - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: { - FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms); - for (int index = 0; index < number_of_elements; index++) { - double_elms->set(index, (*args)[index+1]->Number()); - } - break; - } - default: - UNREACHABLE(); - break; - } - - array->set_elements(elms); - array->set_length(Smi::FromInt(number_of_elements)); - return array; + Arguments adjusted_arguments(args->length() - 1, args->arguments() - 1); + ASSERT(adjusted_arguments.length() < 1 || + adjusted_arguments[0] == (*args)[1]); + return ArrayConstructInitializeElements(array, &adjusted_arguments); } @@ -325,23 +317,15 @@ BUILTIN(ArrayCodeGeneric) { } -static void MoveElements(Heap* heap, - AssertNoAllocation* no_gc, - FixedArray* dst, - int dst_index, - FixedArray* src, - int src_index, - int len) { +static void MoveDoubleElements(FixedDoubleArray* dst, + int dst_index, + FixedDoubleArray* src, + int src_index, + int len) { if (len == 0) return; - ASSERT(dst->map() != HEAP->fixed_cow_array_map()); memmove(dst->data_start() + dst_index, src->data_start() + src_index, - len * kPointerSize); - WriteBarrierMode mode = dst->GetWriteBarrierMode(*no_gc); - if (mode == UPDATE_WRITE_BARRIER) { - heap->RecordWrites(dst->address(), dst->OffsetOfElementAt(dst_index), len); - } - heap->incremental_marking()->RecordWrites(dst); + len * kDoubleSize); } @@ -351,24 +335,39 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) { } -static FixedArray* LeftTrimFixedArray(Heap* heap, - FixedArray* elms, - int to_trim) { +static void FillWithHoles(FixedDoubleArray* dst, int from, int to) { + for (int i = from; i < to; i++) { + dst->set_the_hole(i); + } +} + + +static FixedArrayBase* LeftTrimFixedArray(Heap* heap, + FixedArrayBase* elms, + int to_trim) { + Map* map = elms->map(); + int entry_size; + if (elms->IsFixedArray()) { + entry_size = kPointerSize; + } else { + entry_size = kDoubleSize; + } ASSERT(elms->map() != HEAP->fixed_cow_array_map()); // For now this trick is only applied to fixed arrays in new and paged space. // In large object space the object's start must coincide with chunk // and thus the trick is just not applicable. 
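A heap-free sketch of the left-trim trick described in the comment above, assuming pointer-sized slots (the double-array case scales offsets by entry_size, and the filler-object and marking fix-ups done by the real code are only noted in comments):

#include <cstdint>

// |former_start| points at the first header slot; slot 0 holds the map word
// and slot 1 the length, as the STATIC_ASSERTs below require. Returns where
// the trimmed array now begins.
static intptr_t* LeftTrimSketch(intptr_t* former_start,
                                intptr_t len,
                                intptr_t to_trim) {
  former_start[to_trim] = former_start[0];    // re-install the map word
  former_start[to_trim + 1] = len - to_trim;  // and the shrunken length
  // The real LeftTrimFixedArray also writes a filler object over the vacated
  // prefix and keeps marking / live-byte accounting consistent.
  return former_start + to_trim;
}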
ASSERT(!HEAP->lo_space()->Contains(elms)); - STATIC_ASSERT(FixedArray::kMapOffset == 0); - STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize); - STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize); + STATIC_ASSERT(FixedArrayBase::kMapOffset == 0); + STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize); + STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize); Object** former_start = HeapObject::RawField(elms, 0); const int len = elms->length(); - if (to_trim > FixedArray::kHeaderSize / kPointerSize && + if (to_trim * entry_size > FixedArrayBase::kHeaderSize && + elms->IsFixedArray() && !heap->new_space()->Contains(elms)) { // If we are doing a big trim in old space then we zap the space that was // formerly part of the array so that the GC (aided by the card-based @@ -382,14 +381,15 @@ static FixedArray* LeftTrimFixedArray(Heap* heap, // Technically in new space this write might be omitted (except for // debug mode which iterates through the heap), but to play safer // we still do it. - heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize); + heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size); - former_start[to_trim] = heap->fixed_array_map(); - former_start[to_trim + 1] = Smi::FromInt(len - to_trim); + int new_start_index = to_trim * (entry_size / kPointerSize); + former_start[new_start_index] = map; + former_start[new_start_index + 1] = Smi::FromInt(len - to_trim); // Maintain marking consistency for HeapObjectIterator and // IncrementalMarking. - int size_delta = to_trim * kPointerSize; + int size_delta = to_trim * entry_size; if (heap->marking()->TransferMark(elms->address(), elms->address() + size_delta)) { MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta); @@ -397,8 +397,8 @@ static FixedArray* LeftTrimFixedArray(Heap* heap, HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(), elms->address() + size_delta)); - return FixedArray::cast(HeapObject::FromAddress( - elms->address() + to_trim * kPointerSize)); + return FixedArrayBase::cast(HeapObject::FromAddress( + elms->address() + to_trim * entry_size)); } @@ -427,19 +427,14 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( Map* map = elms->map(); if (map == heap->fixed_array_map()) { if (args == NULL || array->HasFastObjectElements()) return elms; - if (array->HasFastDoubleElements()) { - ASSERT(elms == heap->empty_fixed_array()); - MaybeObject* maybe_transition = - array->TransitionElementsKind(FAST_ELEMENTS); - if (maybe_transition->IsFailure()) return maybe_transition; - return elms; - } } else if (map == heap->fixed_cow_array_map()) { MaybeObject* maybe_writable_result = array->EnsureWritableFastElements(); if (args == NULL || array->HasFastObjectElements() || - maybe_writable_result->IsFailure()) { + !maybe_writable_result->To(&elms)) { return maybe_writable_result; } + } else if (map == heap->fixed_double_array_map()) { + if (args == NULL) return elms; } else { return NULL; } @@ -449,13 +444,28 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements( int args_length = args->length(); if (first_added_arg >= args_length) return array->elements(); - MaybeObject* maybe_array = array->EnsureCanContainElements( - args, - first_added_arg, - args_length - first_added_arg, - DONT_ALLOW_DOUBLE_ELEMENTS); - if (maybe_array->IsFailure()) return maybe_array; - return array->elements(); + ElementsKind origin_kind = array->map()->elements_kind(); + ASSERT(!IsFastObjectElementsKind(origin_kind)); + ElementsKind target_kind = origin_kind; 
+ int arg_count = args->length() - first_added_arg; + Object** arguments = args->arguments() - first_added_arg - (arg_count - 1); + for (int i = 0; i < arg_count; i++) { + Object* arg = arguments[i]; + if (arg->IsHeapObject()) { + if (arg->IsHeapNumber()) { + target_kind = FAST_DOUBLE_ELEMENTS; + } else { + target_kind = FAST_ELEMENTS; + break; + } + } + } + if (target_kind != origin_kind) { + MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind); + if (maybe_failure->IsFailure()) return maybe_failure; + return array->elements(); + } + return elms; } @@ -499,127 +509,191 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin( BUILTIN(ArrayPush) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); - if (maybe_elms_obj == NULL) { - return CallJsBuiltin(isolate, "ArrayPush", args); - } - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1); + if (maybe_elms_obj == NULL) { + return CallJsBuiltin(isolate, "ArrayPush", args); } - FixedArray* elms = FixedArray::cast(elms_obj); + if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; + JSArray* array = JSArray::cast(receiver); + ASSERT(!array->map()->is_observed()); - int len = Smi::cast(array->length())->value(); - int to_add = args.length() - 1; - if (to_add == 0) { - return Smi::FromInt(len); - } - // Currently fixed arrays cannot grow too big, so - // we should never hit this case. - ASSERT(to_add <= (Smi::kMaxValue - len)); + ElementsKind kind = array->GetElementsKind(); - int new_length = len + to_add; + if (IsFastSmiOrObjectElementsKind(kind)) { + FixedArray* elms = FixedArray::cast(elms_obj); - if (new_length > elms->length()) { - // New backing storage is needed. - int capacity = new_length + (new_length >> 1) + 16; - Object* obj; - { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + int len = Smi::cast(array->length())->value(); + int to_add = args.length() - 1; + if (to_add == 0) { + return Smi::FromInt(len); } - FixedArray* new_elms = FixedArray::cast(obj); + // Currently fixed arrays cannot grow too big, so + // we should never hit this case. + ASSERT(to_add <= (Smi::kMaxValue - len)); - ElementsKind kind = array->GetElementsKind(); - CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len); - FillWithHoles(heap, new_elms, new_length, capacity); + int new_length = len + to_add; - elms = new_elms; - } + if (new_length > elms->length()) { + // New backing storage is needed. + int capacity = new_length + (new_length >> 1) + 16; + FixedArray* new_elms; + MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_obj->To(&new_elms)) return maybe_obj; - // Add the provided values. 
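The backing-store growth policy used above (and again in the unshift and splice paths below), as a standalone helper with a few worked values:

// Grow to ~1.5x the required length plus a 16-slot pad, so a long run of
// pushes reallocates only O(log n) times while small arrays skip several
// tiny reallocations entirely.
static int NewBackingStoreCapacity(int new_length) {
  return new_length + (new_length >> 1) + 16;
}
// e.g. new_length = 1 -> 17, 100 -> 166, 1000 -> 1516.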
- AssertNoAllocation no_gc; - WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - for (int index = 0; index < to_add; index++) { - elms->set(index + len, args[index + 1], mode); - } + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, kind, new_elms, 0, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); - if (elms != array->elements()) { - array->set_elements(elms); - } + elms = new_elms; + } - // Set the length. - array->set_length(Smi::FromInt(new_length)); - return Smi::FromInt(new_length); + // Add the provided values. + AssertNoAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + for (int index = 0; index < to_add; index++) { + elms->set(index + len, args[index + 1], mode); + } + + if (elms != array->elements()) { + array->set_elements(elms); + } + + // Set the length. + array->set_length(Smi::FromInt(new_length)); + return Smi::FromInt(new_length); + } else { + int len = Smi::cast(array->length())->value(); + int elms_len = elms_obj->length(); + + int to_add = args.length() - 1; + if (to_add == 0) { + return Smi::FromInt(len); + } + // Currently fixed arrays cannot grow too big, so + // we should never hit this case. + ASSERT(to_add <= (Smi::kMaxValue - len)); + + int new_length = len + to_add; + + FixedDoubleArray* new_elms; + + if (new_length > elms_len) { + // New backing storage is needed. + int capacity = new_length + (new_length >> 1) + 16; + MaybeObject* maybe_obj = + heap->AllocateUninitializedFixedDoubleArray(capacity); + if (!maybe_obj->To(&new_elms)) return maybe_obj; + + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, kind, new_elms, 0, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); + } else { + // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the + // empty_fixed_array. + new_elms = FixedDoubleArray::cast(elms_obj); + } + + // Add the provided values. + AssertNoAllocation no_gc; + int index; + for (index = 0; index < to_add; index++) { + Object* arg = args[index + 1]; + new_elms->set(index + len, arg->Number()); + } + + if (new_elms != array->elements()) { + array->set_elements(new_elms); + } + + // Set the length. + array->set_length(Smi::FromInt(new_length)); + return Smi::FromInt(new_length); + } } BUILTIN(ArrayPop) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; - } - FixedArray* elms = FixedArray::cast(elms_obj); + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args); + if (!maybe_elms->To(&elms_obj)) return maybe_elms; + JSArray* array = JSArray::cast(receiver); + ASSERT(!array->map()->is_observed()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); - // Get top element - MaybeObject* top = elms->get(len - 1); - - // Set the length. - array->set_length(Smi::FromInt(len - 1)); - - if (!top->IsTheHole()) { - // Delete the top element. 
- elms->set_the_hole(len - 1); - return top; + ElementsAccessor* accessor = array->GetElementsAccessor(); + int new_length = len - 1; + MaybeObject* maybe_result; + if (accessor->HasElement(array, array, new_length, elms_obj)) { + maybe_result = accessor->Get(array, array, new_length, elms_obj); + } else { + maybe_result = array->GetPrototype()->GetElement(len - 1); } - - top = array->GetPrototype()->GetElement(len - 1); - - return top; + if (maybe_result->IsFailure()) return maybe_result; + MaybeObject* maybe_failure = + accessor->SetLength(array, Smi::FromInt(new_length)); + if (maybe_failure->IsFailure()) return maybe_failure; + return maybe_result; } BUILTIN(ArrayShift) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArrayShift", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; - } + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms_obj == NULL) + return CallJsBuiltin(isolate, "ArrayShift", args); + if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; + if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayShift", args); } - FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastSmiOrObjectElements()); + ASSERT(!array->map()->is_observed()); int len = Smi::cast(array->length())->value(); if (len == 0) return heap->undefined_value(); // Get first element - Object* first = elms->get(0); + ElementsAccessor* accessor = array->GetElementsAccessor(); + Object* first; + MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj); + if (!maybe_first->To(&first)) return maybe_first; if (first->IsTheHole()) { first = heap->undefined_value(); } - if (!heap->lo_space()->Contains(elms)) { - array->set_elements(LeftTrimFixedArray(heap, elms, 1)); + if (!heap->lo_space()->Contains(elms_obj)) { + array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1)); } else { // Shift the elements. - AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1); - elms->set(len - 1, heap->the_hole_value()); + if (elms_obj->IsFixedArray()) { + FixedArray* elms = FixedArray::cast(elms_obj); + AssertNoAllocation no_gc; + heap->MoveElements(elms, 0, 1, len - 1); + elms->set(len - 1, heap->the_hole_value()); + } else { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + MoveDoubleElements(elms, 0, elms, 1, len - 1); + elms->set_the_hole(len - 1); + } } // Set the length. 
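What the in-place branch above boils down to for a double backing store, as a self-contained sketch; a caller-supplied sentinel stands in for the hole representation:

#include <cstring>

// Move len-1 elements one slot to the left and vacate the last slot.
// memmove is required because the source and destination ranges overlap.
static void ShiftLeftByOne(double* data, int len, double hole_sentinel) {
  if (len <= 0) return;
  if (len > 1) std::memmove(data, data + 1, (len - 1) * sizeof(double));
  data[len - 1] = hole_sentinel;
}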
@@ -632,19 +706,22 @@ BUILTIN(ArrayShift) { BUILTIN(ArrayUnshift) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArrayUnshift", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; - } + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms_obj = + EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0); + if (maybe_elms_obj == NULL) + return CallJsBuiltin(isolate, "ArrayUnshift", args); + if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj; + if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArrayUnshift", args); } - FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastSmiOrObjectElements()); + ASSERT(!array->map()->is_observed()); + if (!array->HasFastSmiOrObjectElements()) { + return CallJsBuiltin(isolate, "ArrayUnshift", args); + } + FixedArray* elms = FixedArray::cast(elms_obj); int len = Smi::cast(array->length())->value(); int to_add = args.length() - 1; @@ -661,19 +738,23 @@ BUILTIN(ArrayUnshift) { if (new_length > elms->length()) { // New backing storage is needed. int capacity = new_length + (new_length >> 1) + 16; - Object* obj; - { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* new_elms = FixedArray::cast(obj); + FixedArray* new_elms; + MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_elms->To(&new_elms)) return maybe_elms; + ElementsKind kind = array->GetElementsKind(); - CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len); - FillWithHoles(heap, new_elms, new_length, capacity); + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, kind, new_elms, to_add, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); + elms = new_elms; array->set_elements(elms); } else { AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, elms, to_add, elms, 0, len); + heap->MoveElements(elms, to_add, 0, len); } // Add the provided values. @@ -692,16 +773,20 @@ BUILTIN(ArrayUnshift) { BUILTIN(ArraySlice) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - FixedArray* elms; + FixedArrayBase* elms; int len = -1; if (receiver->IsJSArray()) { JSArray* array = JSArray::cast(receiver); - if (!array->HasFastSmiOrObjectElements() || - !IsJSArrayFastElementMovingAllowed(heap, array)) { + if (!IsJSArrayFastElementMovingAllowed(heap, array)) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + + if (array->HasFastElements()) { + elms = array->elements(); + } else { return CallJsBuiltin(isolate, "ArraySlice", args); } - elms = FixedArray::cast(array->elements()); len = Smi::cast(array->length())->value(); } else { // Array.slice(arguments, ...) 
is quite a common idiom (notably more @@ -710,15 +795,19 @@ BUILTIN(ArraySlice) { isolate->context()->native_context()->arguments_boilerplate()->map(); bool is_arguments_object_with_fast_elements = - receiver->IsJSObject() - && JSObject::cast(receiver)->map() == arguments_map - && JSObject::cast(receiver)->HasFastSmiOrObjectElements(); + receiver->IsJSObject() && + JSObject::cast(receiver)->map() == arguments_map; if (!is_arguments_object_with_fast_elements) { return CallJsBuiltin(isolate, "ArraySlice", args); } - elms = FixedArray::cast(JSObject::cast(receiver)->elements()); - Object* len_obj = JSObject::cast(receiver) - ->InObjectPropertyAt(Heap::kArgumentsLengthIndex); + JSObject* object = JSObject::cast(receiver); + + if (object->HasFastElements()) { + elms = object->elements(); + } else { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex); if (!len_obj->IsSmi()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -726,12 +815,10 @@ BUILTIN(ArraySlice) { if (len > elms->length()) { return CallJsBuiltin(isolate, "ArraySlice", args); } - for (int i = 0; i < len; i++) { - if (elms->get(i) == heap->the_hole_value()) { - return CallJsBuiltin(isolate, "ArraySlice", args); - } - } } + + JSObject* object = JSObject::cast(receiver); + ASSERT(len >= 0); int n_arguments = args.length() - 1; @@ -744,6 +831,12 @@ BUILTIN(ArraySlice) { Object* arg1 = args[1]; if (arg1->IsSmi()) { relative_start = Smi::cast(arg1)->value(); + } else if (arg1->IsHeapNumber()) { + double start = HeapNumber::cast(arg1)->value(); + if (start < kMinInt || start > kMaxInt) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + relative_start = static_cast<int>(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -751,6 +844,12 @@ BUILTIN(ArraySlice) { Object* arg2 = args[2]; if (arg2->IsSmi()) { relative_end = Smi::cast(arg2)->value(); + } else if (arg2->IsHeapNumber()) { + double end = HeapNumber::cast(arg2)->value(); + if (end < kMinInt || end > kMaxInt) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + relative_end = static_cast<int>(end); } else if (!arg2->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySlice", args); } @@ -765,21 +864,40 @@ BUILTIN(ArraySlice) { int final = (relative_end < 0) ? Max(len + relative_end, 0) : Min(relative_end, len); - ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind(); - // Calculate the length of result array. 
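The index normalization just above follows the Array.prototype.slice algorithm from ES5.1: negative indices count back from the end, then both bounds are clamped to [0, len]. A standalone version with worked cases:

static int ClampSliceIndex(int relative, int len) {
  if (relative < 0) return (len + relative > 0) ? len + relative : 0;
  return (relative < len) ? relative : len;
}
// len = 5: slice(-2)    -> k = 3, final = 5, result_len = 2
// len = 5: slice(1, -1) -> k = 1, final = 4, result_len = 3
// len = 5: slice(7, 9)  -> k = 5, final = 5, result_len = 0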
int result_len = Max(final - k, 0); - MaybeObject* maybe_array = - heap->AllocateJSArrayAndStorage(elements_kind, - result_len, - result_len); + ElementsKind kind = object->GetElementsKind(); + if (IsHoleyElementsKind(kind)) { + bool packed = true; + ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); + for (int i = k; i < final; i++) { + if (!accessor->HasElement(object, object, i, elms)) { + packed = false; + break; + } + } + if (packed) { + kind = GetPackedElementsKind(kind); + } else if (!receiver->IsJSArray()) { + return CallJsBuiltin(isolate, "ArraySlice", args); + } + } + JSArray* result_array; + MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind, + result_len, + result_len); + + AssertNoAllocation no_gc; + if (result_len == 0) return maybe_array; if (!maybe_array->To(&result_array)) return maybe_array; - CopyObjectToObjectElements(elms, elements_kind, k, - FixedArray::cast(result_array->elements()), - elements_kind, 0, result_len); + ElementsAccessor* accessor = object->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, k, kind, result_array->elements(), 0, result_len, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); return result_array; } @@ -788,19 +906,19 @@ BUILTIN(ArraySlice) { BUILTIN(ArraySplice) { Heap* heap = isolate->heap(); Object* receiver = *args.receiver(); - Object* elms_obj; - { MaybeObject* maybe_elms_obj = - EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); - if (maybe_elms_obj == NULL) - return CallJsBuiltin(isolate, "ArraySplice", args); - if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj; + FixedArrayBase* elms_obj; + MaybeObject* maybe_elms = + EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3); + if (maybe_elms == NULL) { + return CallJsBuiltin(isolate, "ArraySplice", args); } + if (!maybe_elms->To(&elms_obj)) return maybe_elms; + if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) { return CallJsBuiltin(isolate, "ArraySplice", args); } - FixedArray* elms = FixedArray::cast(elms_obj); JSArray* array = JSArray::cast(receiver); - ASSERT(array->HasFastSmiOrObjectElements()); + ASSERT(!array->map()->is_observed()); int len = Smi::cast(array->length())->value(); @@ -811,6 +929,12 @@ BUILTIN(ArraySplice) { Object* arg1 = args[1]; if (arg1->IsSmi()) { relative_start = Smi::cast(arg1)->value(); + } else if (arg1->IsHeapNumber()) { + double start = HeapNumber::cast(arg1)->value(); + if (start < kMinInt || start > kMaxInt) { + return CallJsBuiltin(isolate, "ArraySplice", args); + } + relative_start = static_cast<int>(start); } else if (!arg1->IsUndefined()) { return CallJsBuiltin(isolate, "ArraySplice", args); } @@ -840,51 +964,83 @@ BUILTIN(ArraySplice) { actual_delete_count = Min(Max(value, 0), len - actual_start); } + ElementsKind elements_kind = array->GetElementsKind(); + + int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; + int new_length = len - actual_delete_count + item_count; + + // For double mode we do not support changing the length. 
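To make the bookkeeping above concrete, a worked example (values are illustrative) for a call like array.splice(3, 4, a, b) on an array of length 10:

//   n_arguments         = 4                                (3, 4, a, b)
//   item_count          = n_arguments - 2             = 2
//   actual_start        = 3
//   actual_delete_count = min(max(4, 0), 10 - 3)      = 4
//   new_length          = 10 - 4 + 2                  = 8
// Since new_length <= len, the check below lets even a FAST_DOUBLE_ELEMENTS
// array stay on this path; growing a double array (new_length > len) falls
// back to the ArraySplice JS builtin instead.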
+ if (new_length > len && IsFastDoubleElementsKind(elements_kind)) { + return CallJsBuiltin(isolate, "ArraySplice", args); + } + + if (new_length == 0) { + MaybeObject* maybe_array = heap->AllocateJSArrayWithElements( + elms_obj, elements_kind, actual_delete_count); + if (maybe_array->IsFailure()) return maybe_array; + array->set_elements(heap->empty_fixed_array()); + array->set_length(Smi::FromInt(0)); + return maybe_array; + } + JSArray* result_array = NULL; - ElementsKind elements_kind = - JSObject::cast(receiver)->GetElementsKind(); MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(elements_kind, actual_delete_count, actual_delete_count); if (!maybe_array->To(&result_array)) return maybe_array; - { - // Fill newly created array. - CopyObjectToObjectElements(elms, elements_kind, actual_start, - FixedArray::cast(result_array->elements()), - elements_kind, 0, actual_delete_count); + if (actual_delete_count > 0) { + AssertNoAllocation no_gc; + ElementsAccessor* accessor = array->GetElementsAccessor(); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, actual_start, elements_kind, result_array->elements(), + 0, actual_delete_count, elms_obj); + // Cannot fail since the origin and target array are of the same elements + // kind. + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); } - int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0; - int new_length = len - actual_delete_count + item_count; - bool elms_changed = false; if (item_count < actual_delete_count) { // Shrink the array. - const bool trim_array = !heap->lo_space()->Contains(elms) && + const bool trim_array = !heap->lo_space()->Contains(elms_obj) && ((actual_start + item_count) < (len - actual_delete_count - actual_start)); if (trim_array) { const int delta = actual_delete_count - item_count; - { + if (elms_obj->IsFixedDoubleArray()) { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + MoveDoubleElements(elms, delta, elms, 0, actual_start); + } else { + FixedArray* elms = FixedArray::cast(elms_obj); AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start); + heap->MoveElements(elms, delta, 0, actual_start); } - elms = LeftTrimFixedArray(heap, elms, delta); + elms_obj = LeftTrimFixedArray(heap, elms_obj, delta); elms_changed = true; } else { - AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, - elms, actual_start + item_count, - elms, actual_start + actual_delete_count, - (len - actual_delete_count - actual_start)); - FillWithHoles(heap, elms, new_length, len); + if (elms_obj->IsFixedDoubleArray()) { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + MoveDoubleElements(elms, actual_start + item_count, + elms, actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); + FillWithHoles(elms, new_length, len); + } else { + FixedArray* elms = FixedArray::cast(elms_obj); + AssertNoAllocation no_gc; + heap->MoveElements(elms, actual_start + item_count, + actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); + FillWithHoles(heap, elms, new_length, len); + } } } else if (item_count > actual_delete_count) { + FixedArray* elms = FixedArray::cast(elms_obj); // Currently fixed arrays cannot grow too big, so // we should never hit this case. ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len)); @@ -893,48 +1049,60 @@ BUILTIN(ArraySplice) { if (new_length > elms->length()) { // New backing storage is needed. 
int capacity = new_length + (new_length >> 1) + 16; - Object* obj; - { MaybeObject* maybe_obj = - heap->AllocateUninitializedFixedArray(capacity); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } - FixedArray* new_elms = FixedArray::cast(obj); + FixedArray* new_elms; + MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe_obj->To(&new_elms)) return maybe_obj; - { + AssertNoAllocation no_gc; + + ElementsKind kind = array->GetElementsKind(); + ElementsAccessor* accessor = array->GetElementsAccessor(); + if (actual_start > 0) { // Copy the part before actual_start as is. - ElementsKind kind = array->GetElementsKind(); - CopyObjectToObjectElements(elms, kind, 0, - new_elms, kind, 0, actual_start); - const int to_copy = len - actual_delete_count - actual_start; - CopyObjectToObjectElements(elms, kind, - actual_start + actual_delete_count, - new_elms, kind, - actual_start + item_count, to_copy); + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, 0, kind, new_elms, 0, actual_start, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); } - - FillWithHoles(heap, new_elms, new_length, capacity); - - elms = new_elms; + MaybeObject* maybe_failure = accessor->CopyElements( + NULL, actual_start + actual_delete_count, kind, new_elms, + actual_start + item_count, + ElementsAccessor::kCopyToEndAndInitializeToHole, elms); + ASSERT(!maybe_failure->IsFailure()); + USE(maybe_failure); + + elms_obj = new_elms; elms_changed = true; } else { AssertNoAllocation no_gc; - MoveElements(heap, &no_gc, - elms, actual_start + item_count, - elms, actual_start + actual_delete_count, - (len - actual_delete_count - actual_start)); + heap->MoveElements(elms, actual_start + item_count, + actual_start + actual_delete_count, + (len - actual_delete_count - actual_start)); } } - AssertNoAllocation no_gc; - WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); - for (int k = actual_start; k < actual_start + item_count; k++) { - elms->set(k, args[3 + k - actual_start], mode); + if (IsFastDoubleElementsKind(elements_kind)) { + FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj); + for (int k = actual_start; k < actual_start + item_count; k++) { + Object* arg = args[3 + k - actual_start]; + if (arg->IsSmi()) { + elms->set(k, Smi::cast(arg)->value()); + } else { + elms->set(k, HeapNumber::cast(arg)->value()); + } + } + } else { + FixedArray* elms = FixedArray::cast(elms_obj); + AssertNoAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + for (int k = actual_start; k < actual_start + item_count; k++) { + elms->set(k, args[3 + k - actual_start], mode); + } } if (elms_changed) { - array->set_elements(elms); + array->set_elements(elms_obj); } - // Set the length. array->set_length(Smi::FromInt(new_length)); @@ -956,14 +1124,15 @@ BUILTIN(ArrayConcat) { int n_arguments = args.length(); int result_len = 0; ElementsKind elements_kind = GetInitialFastElementsKind(); + bool has_double = false; + bool is_holey = false; for (int i = 0; i < n_arguments; i++) { Object* arg = args[i]; if (!arg->IsJSArray() || - !JSArray::cast(arg)->HasFastSmiOrObjectElements() || + !JSArray::cast(arg)->HasFastElements() || JSArray::cast(arg)->GetPrototype() != array_proto) { return CallJsBuiltin(isolate, "ArrayConcat", args); } - int len = Smi::cast(JSArray::cast(arg)->length())->value(); // We shouldn't overflow when adding another len. 
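A brief gloss on the argument scan that continues in the next hunk, with hedged worked examples (the exact generalization rules live in IsMoreGeneralElementsKindTransition):

// Each argument folds its elements kind into a single result kind, while
// has_double and is_holey are tracked separately and applied afterwards,
// roughly:
//   concat([1, 2] packed smi,  [1.5] packed double) -> FAST_DOUBLE_ELEMENTS
//   concat(holey object array, [1, 2] packed smi)   -> FAST_HOLEY_ELEMENTS
// When a double argument ends up in an object-kind result, the result storage
// is allocated pre-filled with holes, because boxing each double can allocate
// and therefore let the GC observe a partially initialized result array.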
@@ -973,47 +1142,52 @@ BUILTIN(ArrayConcat) { result_len += len; ASSERT(result_len >= 0); - if (result_len > FixedArray::kMaxLength) { + if (result_len > FixedDoubleArray::kMaxLength) { return CallJsBuiltin(isolate, "ArrayConcat", args); } - if (!JSArray::cast(arg)->HasFastSmiElements()) { - if (IsFastSmiElementsKind(elements_kind)) { - if (IsFastHoleyElementsKind(elements_kind)) { - elements_kind = FAST_HOLEY_ELEMENTS; - } else { - elements_kind = FAST_ELEMENTS; - } - } - } - - if (JSArray::cast(arg)->HasFastHoleyElements()) { - elements_kind = GetHoleyElementsKind(elements_kind); + ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind(); + has_double = has_double || IsFastDoubleElementsKind(arg_kind); + is_holey = is_holey || IsFastHoleyElementsKind(arg_kind); + if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) { + elements_kind = arg_kind; } } - // Allocate result. + if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind); + + // If a double array is concatted into a fast elements array, the fast + // elements array needs to be initialized to contain proper holes, since + // boxing doubles may cause incremental marking. + ArrayStorageAllocationMode mode = + has_double && IsFastObjectElementsKind(elements_kind) + ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS; JSArray* result_array; + // Allocate result. MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(elements_kind, result_len, - result_len); + result_len, + mode); if (!maybe_array->To(&result_array)) return maybe_array; if (result_len == 0) return result_array; - // Copy data. - int start_pos = 0; - FixedArray* result_elms(FixedArray::cast(result_array->elements())); + int j = 0; + FixedArrayBase* storage = result_array->elements(); + ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind); for (int i = 0; i < n_arguments; i++) { JSArray* array = JSArray::cast(args[i]); int len = Smi::cast(array->length())->value(); - FixedArray* elms = FixedArray::cast(array->elements()); - CopyObjectToObjectElements(elms, elements_kind, 0, - result_elms, elements_kind, - start_pos, len); - start_pos += len; + ElementsKind from_kind = array->GetElementsKind(); + if (len > 0) { + MaybeObject* maybe_failure = + accessor->CopyElements(array, 0, from_kind, storage, j, len); + if (maybe_failure->IsFailure()) return maybe_failure; + j += len; + } } - ASSERT(start_pos == result_len); + + ASSERT(j == result_len); return result_array; } @@ -1024,7 +1198,7 @@ BUILTIN(ArrayConcat) { BUILTIN(StrictModePoisonPill) { - HandleScope scope; + HandleScope scope(isolate); return isolate->Throw(*isolate->factory()->NewTypeError( "strict_poison_pill", HandleVector<Object>(NULL, 0))); } @@ -1033,12 +1207,28 @@ BUILTIN(StrictModePoisonPill) { // +// Searches the hidden prototype chain of the given object for the first +// object that is an instance of the given type. If no such object can +// be found then Heap::null_value() is returned. +static inline Object* FindHidden(Heap* heap, + Object* object, + FunctionTemplateInfo* type) { + if (object->IsInstanceOf(type)) return object; + Object* proto = object->GetPrototype(heap->isolate()); + if (proto->IsJSObject() && + JSObject::cast(proto)->map()->is_hidden_prototype()) { + return FindHidden(heap, proto, type); + } + return heap->null_value(); +} + + // Returns the holder JSObject if the function can legally be called // with this receiver. Returns Heap::null_value() if the call is // illegal. 
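Note on the ArrayConcat changes above: the result's elements kind is widened to cover every argument, with IsMoreGeneralElementsKindTransition picking the more general of two kinds, and separate flags recording whether any argument is double- or holey-kinded. The sketch below captures the idea on a simplified three-step lattice; the enum, the total ordering via std::max, and the helper names are assumptions for illustration, not V8's ElementsKind machinery.

#include <algorithm>
#include <vector>

// Simplified stand-in for the elements-kind lattice: SMI < DOUBLE < OBJECT.
enum Kind { SMI = 0, DOUBLE = 1, OBJECT = 2 };

struct Input {
  Kind kind;
  bool holey;
};

// Pick a kind general enough for every input and report whether the result
// must use the holey variant of that kind.
Kind MostGeneralKind(const std::vector<Input>& inputs, bool* any_holey) {
  Kind result = SMI;
  *any_holey = false;
  for (const Input& in : inputs) {
    result = std::max(result, in.kind);
    *any_holey = *any_holey || in.holey;
  }
  return result;
}

As the patch's own comment explains, when double arrays are concatenated into an object-kind result the storage is pre-initialized with holes, because boxing the doubles can trigger incremental marking.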
Any arguments that don't fit the expected type is -// overwritten with undefined. Arguments that do fit the expected -// type is overwritten with the object in the prototype chain that -// actually has that type. +// overwritten with undefined. Note that holder and the arguments are +// implicitly rewritten with the first object in the hidden prototype +// chain that actually has the expected type. static inline Object* TypeCheck(Heap* heap, int argc, Object** argv, @@ -1051,15 +1241,10 @@ static inline Object* TypeCheck(Heap* heap, SignatureInfo* sig = SignatureInfo::cast(sig_obj); // If necessary, check the receiver Object* recv_type = sig->receiver(); - Object* holder = recv; if (!recv_type->IsUndefined()) { - for (; holder != heap->null_value(); holder = holder->GetPrototype()) { - if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) { - break; - } - } - if (holder == heap->null_value()) return holder; + holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type)); + if (holder == heap->null_value()) return heap->null_value(); } Object* args_obj = sig->args(); // If there is no argument signature we're done @@ -1072,13 +1257,9 @@ static inline Object* TypeCheck(Heap* heap, if (argtype->IsUndefined()) continue; Object** arg = &argv[-1 - i]; Object* current = *arg; - for (; current != heap->null_value(); current = current->GetPrototype()) { - if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) { - *arg = current; - break; - } - } - if (current == heap->null_value()) *arg = heap->undefined_value(); + current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype)); + if (current == heap->null_value()) current = heap->undefined_value(); + *arg = current; } return holder; } @@ -1249,26 +1430,6 @@ BUILTIN(HandleApiCallAsConstructor) { } -static void Generate_LoadIC_ArrayLength(MacroAssembler* masm) { - LoadIC::GenerateArrayLength(masm); -} - - -static void Generate_LoadIC_StringLength(MacroAssembler* masm) { - LoadIC::GenerateStringLength(masm, false); -} - - -static void Generate_LoadIC_StringWrapperLength(MacroAssembler* masm) { - LoadIC::GenerateStringLength(masm, true); -} - - -static void Generate_LoadIC_FunctionPrototype(MacroAssembler* masm) { - LoadIC::GenerateFunctionPrototype(masm); -} - - static void Generate_LoadIC_Initialize(MacroAssembler* masm) { LoadIC::GenerateInitialize(masm); } @@ -1310,12 +1471,12 @@ static void Generate_KeyedLoadIC_Slow(MacroAssembler* masm) { static void Generate_KeyedLoadIC_Miss(MacroAssembler* masm) { - KeyedLoadIC::GenerateMiss(masm, false); + KeyedLoadIC::GenerateMiss(masm, MISS); } static void Generate_KeyedLoadIC_MissForceGeneric(MacroAssembler* masm) { - KeyedLoadIC::GenerateMiss(masm, true); + KeyedLoadIC::GenerateMiss(masm, MISS_FORCE_GENERIC); } @@ -1376,16 +1537,6 @@ static void Generate_StoreIC_Megamorphic_Strict(MacroAssembler* masm) { } -static void Generate_StoreIC_ArrayLength(MacroAssembler* masm) { - StoreIC::GenerateArrayLength(masm); -} - - -static void Generate_StoreIC_ArrayLength_Strict(MacroAssembler* masm) { - StoreIC::GenerateArrayLength(masm); -} - - static void Generate_StoreIC_GlobalProxy(MacroAssembler* masm) { StoreIC::GenerateGlobalProxy(masm, kNonStrictMode); } @@ -1412,12 +1563,12 @@ static void Generate_KeyedStoreIC_Generic_Strict(MacroAssembler* masm) { static void Generate_KeyedStoreIC_Miss(MacroAssembler* masm) { - KeyedStoreIC::GenerateMiss(masm, false); + KeyedStoreIC::GenerateMiss(masm, MISS); } static void Generate_KeyedStoreIC_MissForceGeneric(MacroAssembler* masm) { - 
KeyedStoreIC::GenerateMiss(masm, true); + KeyedStoreIC::GenerateMiss(masm, MISS_FORCE_GENERIC); } diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h index ca70ae5403..12ed56af79 100644 --- a/deps/v8/src/builtins.h +++ b/deps/v8/src/builtins.h @@ -38,6 +38,25 @@ enum BuiltinExtraArguments { }; +#define CODE_AGE_LIST_WITH_ARG(V, A) \ + V(Quadragenarian, A) \ + V(Quinquagenarian, A) \ + V(Sexagenarian, A) \ + V(Septuagenarian, A) \ + V(Octogenarian, A) + +#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X) + +#define CODE_AGE_LIST(V) \ + CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V) + +#define DECLARE_CODE_AGE_BUILTIN(C, V) \ + V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \ + UNINITIALIZED, Code::kNoExtraICState) \ + V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \ + UNINITIALIZED, Code::kNoExtraICState) + + // Define list of builtins implemented in C++. #define BUILTIN_LIST_C(V) \ V(Illegal, NO_EXTRA_ARGUMENTS) \ @@ -68,6 +87,8 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(InRecompileQueue, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ + V(InstallRecompiledCode, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \ @@ -88,6 +109,8 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ + V(NotifyStubFailure, BUILTIN, UNINITIALIZED, \ + Code::kNoExtraICState) \ V(NotifyOSR, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ \ @@ -113,14 +136,6 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \ Code::kNoExtraICState) \ - V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ - V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \ Code::kNoExtraICState) \ V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, \ @@ -130,48 +145,44 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \ Code::kNoExtraICState) \ - V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \ + V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, \ Code::kNoExtraICState) \ V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \ Code::kNoExtraICState) \ - V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \ + V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC, \ Code::kNoExtraICState) \ - V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC, \ + V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MONOMORPHIC, \ Code::kNoExtraICState) \ \ V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \ Code::kNoExtraICState) \ - V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \ - Code::kNoExtraICState) \ V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \ Code::kNoExtraICState) \ V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \ Code::kNoExtraICState) \ - V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \ + V(StoreIC_GlobalProxy, STORE_IC, GENERIC, \ Code::kNoExtraICState) \ V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \ kStrictMode) \ - V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \ - kStrictMode) \ V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \ kStrictMode) \ V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \ kStrictMode) \ - 
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \ + V(StoreIC_GlobalProxy_Strict, STORE_IC, GENERIC, \ kStrictMode) \ V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, \ kStrictMode) \ \ V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \ Code::kNoExtraICState) \ - V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \ + V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, \ Code::kNoExtraICState) \ \ V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \ kStrictMode) \ - V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \ + V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC, \ kStrictMode) \ - V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \ + V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MONOMORPHIC, \ Code::kNoExtraICState) \ V(TransitionElementsSmiToDouble, BUILTIN, UNINITIALIZED, \ Code::kNoExtraICState) \ @@ -195,36 +206,36 @@ enum BuiltinExtraArguments { Code::kNoExtraICState) \ \ V(OnStackReplacement, BUILTIN, UNINITIALIZED, \ - Code::kNoExtraICState) - + Code::kNoExtraICState) \ + CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V) #ifdef ENABLE_DEBUGGER_SUPPORT // Define list of builtins used by the debugger implemented in assembly. -#define BUILTIN_LIST_DEBUG_A(V) \ - V(Return_DebugBreak, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(LoadIC_DebugBreak, LOAD_IC, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(Slot_DebugBreak, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(PlainReturn_LiveEdit, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) \ - V(FrameDropper_LiveEdit, BUILTIN, DEBUG_BREAK, \ - Code::kNoExtraICState) +#define BUILTIN_LIST_DEBUG_A(V) \ + V(Return_DebugBreak, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(CallFunctionStub_DebugBreak, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(CallFunctionStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(CallConstructStub_DebugBreak, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(CallConstructStub_Recording_DebugBreak, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(LoadIC_DebugBreak, LOAD_IC, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(StoreIC_DebugBreak, STORE_IC, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(Slot_DebugBreak, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(PlainReturn_LiveEdit, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) \ + V(FrameDropper_LiveEdit, BUILTIN, DEBUG_STUB, \ + DEBUG_BREAK) #else #define BUILTIN_LIST_DEBUG_A(V) #endif @@ -263,6 +274,7 @@ enum BuiltinExtraArguments { V(APPLY_PREPARE, 1) \ V(APPLY_OVERFLOW, 1) +MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate); class BuiltinFunctionTable; class ObjectVisitor; @@ -356,6 +368,7 @@ class Builtins { CFunctionId id, BuiltinExtraArguments extra_args); static void Generate_InRecompileQueue(MacroAssembler* masm); + static void 
Generate_InstallRecompiledCode(MacroAssembler* masm); static void Generate_ParallelRecompile(MacroAssembler* masm); static void Generate_JSConstructStubCountdown(MacroAssembler* masm); static void Generate_JSConstructStubGeneric(MacroAssembler* masm); @@ -367,6 +380,7 @@ class Builtins { static void Generate_NotifyDeoptimized(MacroAssembler* masm); static void Generate_NotifyLazyDeoptimized(MacroAssembler* masm); static void Generate_NotifyOSR(MacroAssembler* masm); + static void Generate_NotifyStubFailure(MacroAssembler* masm); static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm); static void Generate_FunctionCall(MacroAssembler* masm); @@ -379,6 +393,14 @@ class Builtins { static void Generate_StringConstructCode(MacroAssembler* masm); static void Generate_OnStackReplacement(MacroAssembler* masm); +#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \ + static void Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm); \ + static void Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm); + CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR) +#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR + static void InitBuiltinFunctionTable(); bool initialized_; diff --git a/deps/v8/src/checks.cc b/deps/v8/src/checks.cc index 320fd6b5ea..a6405ecdec 100644 --- a/deps/v8/src/checks.cc +++ b/deps/v8/src/checks.cc @@ -46,7 +46,8 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) { va_start(arguments, format); i::OS::VPrintError(format, arguments); va_end(arguments); - i::OS::PrintError("\n#\n\n"); + i::OS::PrintError("\n#\n"); + i::OS::DumpBacktrace(); } // First two times we may try to print a stack dump. if (fatal_error_handler_nesting_depth < 3) { diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc new file mode 100644 index 0000000000..ae198bc249 --- /dev/null +++ b/deps/v8/src/code-stubs-hydrogen.cc @@ -0,0 +1,389 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
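Note on the CODE_AGE_LIST / DECLARE_CODE_AGE_BUILTIN macros added to builtins.h above: this is the X-macro pattern, where a single list is replayed against different per-entry macros to stamp out builtin entries and their generator declarations. A minimal standalone example of the pattern, with made-up entries rather than the V8 list:

#include <cstdio>

// Define the list once; V is whatever the caller wants applied per entry.
#define COLOR_LIST(V) \
  V(Red)              \
  V(Green)            \
  V(Blue)

// One expansion: an enum with one value per entry.
#define DECLARE_ENUM(name) k##name,
enum Color { COLOR_LIST(DECLARE_ENUM) kColorCount };
#undef DECLARE_ENUM

// Another expansion: a printable name per entry.
#define DECLARE_NAME(name) #name,
static const char* kColorNames[] = { COLOR_LIST(DECLARE_NAME) };
#undef DECLARE_NAME

int main() {
  for (int i = 0; i < kColorCount; ++i) std::printf("%s\n", kColorNames[i]);
  return 0;
}

builtins.h plays the same trick with an extra argument (CODE_AGE_LIST_WITH_ARG), so each code age expands into two builtin declarations, one for even and one for odd marking.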
+ +#include "v8.h" + +#include "code-stubs.h" +#include "hydrogen.h" +#include "lithium.h" + +namespace v8 { +namespace internal { + + +static LChunk* OptimizeGraph(HGraph* graph) { + Isolate* isolate = graph->isolate(); + AssertNoAllocation no_gc; + NoHandleAllocation no_handles(isolate); + HandleDereferenceGuard no_deref(isolate, HandleDereferenceGuard::DISALLOW); + + ASSERT(graph != NULL); + SmartArrayPointer<char> bailout_reason; + if (!graph->Optimize(&bailout_reason)) { + FATAL(bailout_reason.is_empty() ? "unknown" : *bailout_reason); + } + LChunk* chunk = LChunk::NewChunk(graph); + if (chunk == NULL) { + FATAL(graph->info()->bailout_reason()); + } + return chunk; +} + + +class CodeStubGraphBuilderBase : public HGraphBuilder { + public: + CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub) + : HGraphBuilder(&info_), + arguments_length_(NULL), + info_(stub, isolate), + context_(NULL) { + int major_key = stub->MajorKey(); + descriptor_ = isolate->code_stub_interface_descriptor(major_key); + if (descriptor_->register_param_count_ < 0) { + stub->InitializeInterfaceDescriptor(isolate, descriptor_); + } + parameters_.Reset(new HParameter*[descriptor_->register_param_count_]); + } + virtual bool BuildGraph(); + + protected: + virtual HValue* BuildCodeStub() = 0; + HParameter* GetParameter(int parameter) { + ASSERT(parameter < descriptor_->register_param_count_); + return parameters_[parameter]; + } + HValue* GetArgumentsLength() { + // This is initialized in BuildGraph() + ASSERT(arguments_length_ != NULL); + return arguments_length_; + } + CompilationInfo* info() { return &info_; } + HydrogenCodeStub* stub() { return info_.code_stub(); } + HContext* context() { return context_; } + Isolate* isolate() { return info_.isolate(); } + + private: + SmartArrayPointer<HParameter*> parameters_; + HValue* arguments_length_; + CompilationInfoWithZone info_; + CodeStubInterfaceDescriptor* descriptor_; + HContext* context_; +}; + + +bool CodeStubGraphBuilderBase::BuildGraph() { + if (FLAG_trace_hydrogen) { + const char* name = CodeStub::MajorName(stub()->MajorKey(), false); + PrintF("-----------------------------------------------------------\n"); + PrintF("Compiling stub %s using hydrogen\n", name); + isolate()->GetHTracer()->TraceCompilation(&info_); + } + + Zone* zone = this->zone(); + int param_count = descriptor_->register_param_count_; + HEnvironment* start_environment = graph()->start_environment(); + HBasicBlock* next_block = CreateBasicBlock(start_environment); + current_block()->Goto(next_block); + next_block->SetJoinId(BailoutId::StubEntry()); + set_current_block(next_block); + + HConstant* undefined_constant = new(zone) HConstant( + isolate()->factory()->undefined_value(), Representation::Tagged()); + AddInstruction(undefined_constant); + graph()->set_undefined_constant(undefined_constant); + + for (int i = 0; i < param_count; ++i) { + HParameter* param = + new(zone) HParameter(i, HParameter::REGISTER_PARAMETER); + AddInstruction(param); + start_environment->Bind(i, param); + parameters_[i] = param; + } + + HInstruction* stack_parameter_count; + if (descriptor_->stack_parameter_count_ != NULL) { + ASSERT(descriptor_->environment_length() == (param_count + 1)); + stack_parameter_count = new(zone) HParameter(param_count, + HParameter::REGISTER_PARAMETER); + // it's essential to bind this value to the environment in case of deopt + start_environment->Bind(param_count, stack_parameter_count); + AddInstruction(stack_parameter_count); + arguments_length_ = stack_parameter_count; + 
} else { + ASSERT(descriptor_->environment_length() == param_count); + stack_parameter_count = graph()->GetConstantMinus1(); + arguments_length_ = graph()->GetConstant0(); + } + + context_ = new(zone) HContext(); + AddInstruction(context_); + start_environment->BindContext(context_); + + AddSimulate(BailoutId::StubEntry()); + + HValue* return_value = BuildCodeStub(); + HReturn* hreturn_instruction = new(zone) HReturn(return_value, + context_, + stack_parameter_count); + current_block()->Finish(hreturn_instruction); + return true; +} + +template <class Stub> +class CodeStubGraphBuilder: public CodeStubGraphBuilderBase { + public: + explicit CodeStubGraphBuilder(Stub* stub) + : CodeStubGraphBuilderBase(Isolate::Current(), stub) {} + + protected: + virtual HValue* BuildCodeStub(); + Stub* casted_stub() { return static_cast<Stub*>(stub()); } +}; + + +template <> +HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() { + Zone* zone = this->zone(); + Factory* factory = isolate()->factory(); + + HInstruction* boilerplate = + AddInstruction(new(zone) HLoadKeyed(GetParameter(0), + GetParameter(1), + NULL, + FAST_ELEMENTS)); + + CheckBuilder builder(this, BailoutId::StubEntry()); + builder.CheckNotUndefined(boilerplate); + + int size = JSObject::kHeaderSize + casted_stub()->length() * kPointerSize; + HValue* boilerplate_size = + AddInstruction(new(zone) HInstanceSize(boilerplate)); + HValue* size_in_words = + AddInstruction(new(zone) HConstant(size >> kPointerSizeLog2, + Representation::Integer32())); + builder.CheckIntegerEq(boilerplate_size, size_in_words); + + HValue* size_in_bytes = + AddInstruction(new(zone) HConstant(size, Representation::Integer32())); + HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE; + if (FLAG_pretenure_literals) { + flags = static_cast<HAllocate::Flags>( + flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE); + } + HInstruction* object = + AddInstruction(new(zone) HAllocate(context(), + size_in_bytes, + HType::JSObject(), + flags)); + + for (int i = 0; i < size; i += kPointerSize) { + HInstruction* value = + AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i)); + AddInstruction(new(zone) HStoreNamedField(object, + factory->empty_string(), + value, + true, i)); + AddSimulate(BailoutId::StubEntry()); + } + + builder.End(); + return object; +} + + +Handle<Code> FastCloneShallowObjectStub::GenerateCode() { + CodeStubGraphBuilder<FastCloneShallowObjectStub> builder(this); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(Code::COMPILED_STUB); +} + + +template <> +HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() { + HInstruction* load = BuildUncheckedMonomorphicElementAccess( + GetParameter(0), GetParameter(1), NULL, NULL, + casted_stub()->is_js_array(), casted_stub()->elements_kind(), + false, Representation::Tagged()); + AddInstruction(load); + return load; +} + + +Handle<Code> KeyedLoadFastElementStub::GenerateCode() { + CodeStubGraphBuilder<KeyedLoadFastElementStub> builder(this); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(Code::COMPILED_STUB); +} + + +template <> +HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() { + Zone* zone = this->zone(); + + HValue* js_array = GetParameter(0); + HValue* map = GetParameter(1); + + info()->MarkAsSavesCallerDoubles(); + + AddInstruction(new(zone) HTrapAllocationMemento(js_array)); + + HInstruction* array_length = + AddInstruction(new(zone) HJSArrayLength(js_array, + js_array, + 
HType::Smi())); + + Heap* heap = isolate()->heap(); + const int kMinFreeNewSpaceAfterGC = + ((heap->InitialSemiSpaceSize() - sizeof(FixedArrayBase)) / 2) / + kDoubleSize; + + HConstant* max_alloc_size = + new(zone) HConstant(kMinFreeNewSpaceAfterGC, Representation::Integer32()); + AddInstruction(max_alloc_size); + // Since we're forcing Integer32 representation for this HBoundsCheck, + // there's no need to Smi-check the index. + AddInstruction( + new(zone) HBoundsCheck(array_length, max_alloc_size, + DONT_ALLOW_SMI_KEY, Representation::Integer32())); + + IfBuilder if_builder(this, BailoutId::StubEntry()); + + if_builder.BeginTrue(array_length, graph()->GetConstant0(), Token::EQ); + + // Nothing to do, just change the map. + + if_builder.BeginFalse(); + + HInstruction* elements = + AddInstruction(new(zone) HLoadElements(js_array, js_array)); + + HInstruction* elements_length = + AddInstruction(new(zone) HFixedArrayBaseLength(elements)); + + ElementsKind to_kind = casted_stub()->to_kind(); + HValue* new_elements = + BuildAllocateElements(context(), to_kind, elements_length); + + // Fast elements kinds need to be initialized in case statements below cause a + // garbage collection. + Factory* factory = isolate()->factory(); + + ASSERT(!IsFastSmiElementsKind(to_kind)); + double nan_double = FixedDoubleArray::hole_nan_as_double(); + HValue* hole = IsFastObjectElementsKind(to_kind) + ? AddInstruction(new(zone) HConstant(factory->the_hole_value(), + Representation::Tagged())) + : AddInstruction(new(zone) HConstant(nan_double, + Representation::Double())); + + LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement, + BailoutId::StubEntry()); + + HValue* zero = graph()->GetConstant0(); + HValue* start = IsFastElementsKind(to_kind) ? zero : array_length; + HValue* key = builder.BeginBody(start, elements_length, Token::LT); + + AddInstruction(new(zone) HStoreKeyed(new_elements, key, hole, to_kind)); + AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE); + + builder.EndBody(); + + BuildCopyElements(context(), elements, + casted_stub()->from_kind(), new_elements, + to_kind, array_length); + + AddInstruction(new(zone) HStoreNamedField(js_array, + factory->elements_field_string(), + new_elements, true, + JSArray::kElementsOffset)); + AddSimulate(BailoutId::StubEntry()); + + if_builder.End(); + + AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(), + map, true, JSArray::kMapOffset)); + AddSimulate(BailoutId::StubEntry()); + return js_array; +} + + +template <> +HValue* CodeStubGraphBuilder<ArrayNoArgumentConstructorStub>::BuildCodeStub() { + HInstruction* deopt = new(zone()) HSoftDeoptimize(); + AddInstruction(deopt); + current_block()->MarkAsDeoptimizing(); + return GetParameter(0); +} + + +Handle<Code> ArrayNoArgumentConstructorStub::GenerateCode() { + CodeStubGraphBuilder<ArrayNoArgumentConstructorStub> builder(this); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(Code::COMPILED_STUB); +} + + +template <> +HValue* CodeStubGraphBuilder<ArraySingleArgumentConstructorStub>:: + BuildCodeStub() { + HInstruction* deopt = new(zone()) HSoftDeoptimize(); + AddInstruction(deopt); + current_block()->MarkAsDeoptimizing(); + return GetParameter(0); +} + + +Handle<Code> TransitionElementsKindStub::GenerateCode() { + CodeStubGraphBuilder<TransitionElementsKindStub> builder(this); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(Code::COMPILED_STUB); +} + + +Handle<Code> 
ArraySingleArgumentConstructorStub::GenerateCode() { + CodeStubGraphBuilder<ArraySingleArgumentConstructorStub> builder(this); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(Code::COMPILED_STUB); +} + + +template <> +HValue* CodeStubGraphBuilder<ArrayNArgumentsConstructorStub>::BuildCodeStub() { + HInstruction* deopt = new(zone()) HSoftDeoptimize(); + AddInstruction(deopt); + current_block()->MarkAsDeoptimizing(); + return GetParameter(0); +} + + +Handle<Code> ArrayNArgumentsConstructorStub::GenerateCode() { + CodeStubGraphBuilder<ArrayNArgumentsConstructorStub> builder(this); + LChunk* chunk = OptimizeGraph(builder.CreateGraph()); + return chunk->Codegen(Code::COMPILED_STUB); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc index 7a720592db..95bc1e99cc 100644 --- a/deps/v8/src/code-stubs.cc +++ b/deps/v8/src/code-stubs.cc @@ -37,31 +37,17 @@ namespace v8 { namespace internal { -bool CodeStub::FindCodeInCache(Code** code_out) { - Heap* heap = Isolate::Current()->heap(); - int index = heap->code_stubs()->FindEntry(GetKey()); +bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) { + UnseededNumberDictionary* stubs = isolate->heap()->code_stubs(); + int index = stubs->FindEntry(GetKey()); if (index != UnseededNumberDictionary::kNotFound) { - *code_out = Code::cast(heap->code_stubs()->ValueAt(index)); + *code_out = Code::cast(stubs->ValueAt(index)); return true; } return false; } -void CodeStub::GenerateCode(MacroAssembler* masm) { - // Update the static counter each time a new code stub is generated. - masm->isolate()->counters()->code_stubs()->Increment(); - - // Nested stubs are not allowed for leaves. - AllowStubCallsScope allow_scope(masm, false); - - // Generate the code for the stub. - masm->set_generating_stub(true); - NoCurrentFrameScope scope(masm); - Generate(masm); -} - - SmartArrayPointer<const char> CodeStub::GetName() { char buffer[100]; NoAllocationStringAllocator allocator(buffer, @@ -72,8 +58,7 @@ SmartArrayPointer<const char> CodeStub::GetName() { } -void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) { - Isolate* isolate = masm->isolate(); +void CodeStub::RecordCodeGeneration(Code* code, Isolate* isolate) { SmartArrayPointer<const char> name = GetName(); PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, *name)); GDBJIT(AddCode(GDBJITInterface::STUB, *name, code)); @@ -87,14 +72,50 @@ int CodeStub::GetCodeKind() { } -Handle<Code> CodeStub::GetCode() { +Handle<Code> PlatformCodeStub::GenerateCode() { Isolate* isolate = Isolate::Current(); Factory* factory = isolate->factory(); + + // Generate the new code. + MacroAssembler masm(isolate, NULL, 256); + + { + // Update the static counter each time a new code stub is generated. + isolate->counters()->code_stubs()->Increment(); + + // Nested stubs are not allowed for leaves. + AllowStubCallsScope allow_scope(&masm, false); + + // Generate the code for the stub. + masm.set_generating_stub(true); + NoCurrentFrameScope scope(&masm); + Generate(&masm); + } + + // Create the code object. + CodeDesc desc; + masm.GetCode(&desc); + + // Copy the generated code into a heap object. 
+ Code::Flags flags = Code::ComputeFlags( + static_cast<Code::Kind>(GetCodeKind()), + GetICState(), + GetExtraICState(), + GetStubType(), + GetStubFlags()); + Handle<Code> new_object = factory->NewCode( + desc, flags, masm.CodeObject(), NeedsImmovableCode()); + return new_object; +} + + +Handle<Code> CodeStub::GetCode(Isolate* isolate) { + Factory* factory = isolate->factory(); Heap* heap = isolate->heap(); Code* code; if (UseSpecialCache() - ? FindCodeInSpecialCache(&code) - : FindCodeInCache(&code)) { + ? FindCodeInSpecialCache(&code, isolate) + : FindCodeInCache(&code, isolate)) { ASSERT(IsPregenerated() == code->is_pregenerated()); return Handle<Code>(code); } @@ -102,23 +123,10 @@ Handle<Code> CodeStub::GetCode() { { HandleScope scope(isolate); - // Generate the new code. - MacroAssembler masm(isolate, NULL, 256); - GenerateCode(&masm); - - // Create the code object. - CodeDesc desc; - masm.GetCode(&desc); - - // Copy the generated code into a heap object. - Code::Flags flags = Code::ComputeFlags( - static_cast<Code::Kind>(GetCodeKind()), - GetICState()); - Handle<Code> new_object = factory->NewCode( - desc, flags, masm.CodeObject(), NeedsImmovableCode()); + Handle<Code> new_object = GenerateCode(); new_object->set_major_key(MajorKey()); FinishCode(new_object); - RecordCodeGeneration(*new_object, &masm); + RecordCodeGeneration(*new_object, isolate); #ifdef ENABLE_DISASSEMBLER if (FLAG_print_code_stubs) { @@ -169,20 +177,135 @@ void CodeStub::PrintName(StringStream* stream) { } +void BinaryOpStub::Generate(MacroAssembler* masm) { + // Explicitly allow generation of nested stubs. It is safe here because + // generation code does not use any raw pointers. + AllowStubCallsScope allow_stub_calls(masm, true); + + BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_); + if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) { + // The OddballStub handles a number and an oddball, not two oddballs. 
+ operands_type = BinaryOpIC::GENERIC; + } + switch (operands_type) { + case BinaryOpIC::UNINITIALIZED: + GenerateTypeTransition(masm); + break; + case BinaryOpIC::SMI: + GenerateSmiStub(masm); + break; + case BinaryOpIC::INT32: + GenerateInt32Stub(masm); + break; + case BinaryOpIC::NUMBER: + GenerateNumberStub(masm); + break; + case BinaryOpIC::ODDBALL: + GenerateOddballStub(masm); + break; + case BinaryOpIC::STRING: + GenerateStringStub(masm); + break; + case BinaryOpIC::GENERIC: + GenerateGeneric(masm); + break; + default: + UNREACHABLE(); + } +} + + +#define __ ACCESS_MASM(masm) + + +void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { + switch (op_) { + case Token::ADD: + __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); + break; + case Token::SUB: + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); + break; + case Token::MUL: + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); + break; + case Token::DIV: + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); + break; + case Token::MOD: + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); + break; + case Token::BIT_OR: + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); + break; + case Token::BIT_AND: + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); + break; + case Token::BIT_XOR: + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); + break; + case Token::SAR: + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); + break; + case Token::SHR: + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); + break; + case Token::SHL: + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); + break; + default: + UNREACHABLE(); + } +} + + +#undef __ + + +void BinaryOpStub::PrintName(StringStream* stream) { + const char* op_name = Token::Name(op_); + const char* overwrite_name; + switch (mode_) { + case NO_OVERWRITE: overwrite_name = "Alloc"; break; + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; + default: overwrite_name = "UnknownOverwrite"; break; + } + stream->Add("BinaryOpStub_%s_%s_%s+%s", + op_name, + overwrite_name, + BinaryOpIC::GetName(left_type_), + BinaryOpIC::GetName(right_type_)); +} + + +void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { + ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING); + ASSERT(op_ == Token::ADD); + if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) { + GenerateBothStringStub(masm); + return; + } + // Try to add arguments as strings, otherwise, transition to the generic + // BinaryOpIC type. + GenerateAddStrings(masm); + GenerateTypeTransition(masm); +} + + void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) { ASSERT(*known_map_ != NULL); Isolate* isolate = new_object->GetIsolate(); Factory* factory = isolate->factory(); return Map::UpdateCodeCache(known_map_, strict() ? - factory->strict_compare_ic_symbol() : - factory->compare_ic_symbol(), + factory->strict_compare_ic_string() : + factory->compare_ic_string(), new_object); } -bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) { - Isolate* isolate = known_map_->GetIsolate(); +bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) { Factory* factory = isolate->factory(); Code::Flags flags = Code::ComputeFlags( static_cast<Code::Kind>(GetCodeKind()), @@ -191,12 +314,18 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) { Handle<Object> probe( known_map_->FindInCodeCache( strict() ? 
- *factory->strict_compare_ic_symbol() : - *factory->compare_ic_symbol(), - flags)); + *factory->strict_compare_ic_string() : + *factory->compare_ic_string(), + flags), + isolate); if (probe->IsCode()) { *code_out = Code::cast(*probe); - ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ); +#ifdef DEBUG + Token::Value cached_op; + ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL, + &cached_op); + ASSERT(op_ == cached_op); +#endif return true; } return false; @@ -204,7 +333,33 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) { int ICCompareStub::MinorKey() { - return OpField::encode(op_ - Token::EQ) | StateField::encode(state_); + return OpField::encode(op_ - Token::EQ) | + LeftStateField::encode(left_) | + RightStateField::encode(right_) | + HandlerStateField::encode(state_); +} + + +void ICCompareStub::DecodeMinorKey(int minor_key, + CompareIC::State* left_state, + CompareIC::State* right_state, + CompareIC::State* handler_state, + Token::Value* op) { + if (left_state) { + *left_state = + static_cast<CompareIC::State>(LeftStateField::decode(minor_key)); + } + if (right_state) { + *right_state = + static_cast<CompareIC::State>(RightStateField::decode(minor_key)); + } + if (handler_state) { + *handler_state = + static_cast<CompareIC::State>(HandlerStateField::decode(minor_key)); + } + if (op) { + *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ); + } } @@ -213,27 +368,31 @@ void ICCompareStub::Generate(MacroAssembler* masm) { case CompareIC::UNINITIALIZED: GenerateMiss(masm); break; - case CompareIC::SMIS: + case CompareIC::SMI: GenerateSmis(masm); break; - case CompareIC::HEAP_NUMBERS: - GenerateHeapNumbers(masm); + case CompareIC::NUMBER: + GenerateNumbers(masm); break; - case CompareIC::STRINGS: + case CompareIC::STRING: GenerateStrings(masm); break; - case CompareIC::SYMBOLS: - GenerateSymbols(masm); + case CompareIC::INTERNALIZED_STRING: + GenerateInternalizedStrings(masm); break; - case CompareIC::OBJECTS: + case CompareIC::UNIQUE_NAME: + GenerateUniqueNames(masm); + break; + case CompareIC::OBJECT: GenerateObjects(masm); break; - case CompareIC::KNOWN_OBJECTS: + case CompareIC::KNOWN_OBJECT: ASSERT(*known_map_ != NULL); GenerateKnownObjects(masm); break; - default: - UNREACHABLE(); + case CompareIC::GENERIC: + GenerateGeneric(masm); + break; } } @@ -269,36 +428,8 @@ void JSEntryStub::FinishCode(Handle<Code> code) { } -void KeyedLoadElementStub::Generate(MacroAssembler* masm) { - switch (elements_kind_) { - case FAST_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadFastElement(masm); - break; - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(masm); - break; - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case EXTERNAL_PIXEL_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_); - break; - case DICTIONARY_ELEMENTS: - KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm); - break; - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } +void KeyedLoadDictionaryElementStub::Generate(MacroAssembler* masm) { + KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm); } @@ -311,14 +442,14 @@ void 
KeyedStoreElementStub::Generate(MacroAssembler* masm) { KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_, elements_kind_, - grow_mode_); + store_mode_); } break; case FAST_DOUBLE_ELEMENTS: case FAST_HOLEY_DOUBLE_ELEMENTS: KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_js_array_, - grow_mode_); + store_mode_); break; case EXTERNAL_BYTE_ELEMENTS: case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: @@ -446,32 +577,33 @@ bool ToBooleanStub::Types::CanBeUndetectable() const { void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) { Label fail; + AllocationSiteMode mode = AllocationSiteInfo::GetMode(from_, to_); ASSERT(!IsFastHoleyElementsKind(from_) || IsFastHoleyElementsKind(to_)); if (!FLAG_trace_elements_transitions) { if (IsFastSmiOrObjectElementsKind(to_)) { if (IsFastSmiOrObjectElementsKind(from_)) { ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm); + GenerateMapChangeElementsTransition(masm, mode, &fail); } else if (IsFastDoubleElementsKind(from_)) { ASSERT(!IsFastSmiElementsKind(to_)); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail); } else { UNREACHABLE(); } KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_jsarray_, to_, - grow_mode_); + store_mode_); } else if (IsFastSmiElementsKind(from_) && IsFastDoubleElementsKind(to_)) { - ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail); KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(masm, is_jsarray_, - grow_mode_); + store_mode_); } else if (IsFastDoubleElementsKind(from_)) { ASSERT(to_ == FAST_HOLEY_DOUBLE_ELEMENTS); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm); + GenerateMapChangeElementsTransition(masm, mode, &fail); } else { UNREACHABLE(); } @@ -481,6 +613,14 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) { } +void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) { + int i = 0; + for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) { + StubFailureTrampolineStub(i).GetCode(isolate); + } +} + + FunctionEntryHook ProfileEntryHookStub::entry_hook_ = NULL; diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h index a843841723..e91b241579 100644 --- a/deps/v8/src/code-stubs.h +++ b/deps/v8/src/code-stubs.h @@ -47,6 +47,10 @@ namespace internal { V(Compare) \ V(CompareIC) \ V(MathPow) \ + V(ArrayLength) \ + V(StringLength) \ + V(FunctionPrototype) \ + V(StoreArrayLength) \ V(RecordWrite) \ V(StoreBufferOverflow) \ V(RegExpExec) \ @@ -69,12 +73,19 @@ namespace internal { V(CEntry) \ V(JSEntry) \ V(KeyedLoadElement) \ + V(ArrayNoArgumentConstructor) \ + V(ArraySingleArgumentConstructor) \ + V(ArrayNArgumentsConstructor) \ V(KeyedStoreElement) \ V(DebuggerStatement) \ - V(StringDictionaryLookup) \ + V(NameDictionaryLookup) \ V(ElementsTransitionAndStore) \ + V(TransitionElementsKind) \ V(StoreArrayLiteralElement) \ - V(ProfileEntryHook) + V(StubFailureTrampoline) \ + V(ProfileEntryHook) \ + /* IC Handler stubs */ \ + V(LoadField) // List of code stubs only used on ARM platforms. #ifdef V8_TARGET_ARCH_ARM @@ -120,7 +131,7 @@ class CodeStub BASE_EMBEDDED { }; // Retrieve the code for the stub. Generate the code if needed. 
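Note on the stub-cache flow in code-stubs.cc above (GetCode now takes an Isolate and delegates lookup to FindCodeInCache): it is a get-or-generate pattern, returning cached code when the stub's key is already present and generating and recording it otherwise. Below is a minimal sketch of that pattern with an ordinary std::unordered_map standing in for V8's heap-backed stub dictionary; all names here are hypothetical.

#include <cstdint>
#include <string>
#include <unordered_map>

// Hypothetical stand-in for a generated code object.
struct Code {
  std::string description;
};

// Return the cached code for `key`, generating and recording it on first use.
// `generate` is whatever callable produces the code on a cache miss.
template <class GenerateFn>
const Code& GetOrGenerate(std::unordered_map<uint32_t, Code>* cache,
                          uint32_t key, GenerateFn generate) {
  auto it = cache->find(key);
  if (it != cache->end()) return it->second;             // cache hit
  return cache->emplace(key, generate()).first->second;  // miss: record
}

// Usage: const Code& c = GetOrGenerate(&stub_cache, stub_key,
//                                      [] { return Code{"compare stub"}; });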
- Handle<Code> GetCode(); + Handle<Code> GetCode(Isolate* isolate); static Major MajorKeyFromKey(uint32_t key) { return static_cast<Major>(MajorKeyBits::decode(key)); @@ -138,18 +149,18 @@ class CodeStub BASE_EMBEDDED { virtual ~CodeStub() {} - bool CompilingCallsToThisStubIsGCSafe() { + bool CompilingCallsToThisStubIsGCSafe(Isolate* isolate) { bool is_pregenerated = IsPregenerated(); Code* code = NULL; - CHECK(!is_pregenerated || FindCodeInCache(&code)); + CHECK(!is_pregenerated || FindCodeInCache(&code, isolate)); return is_pregenerated; } // See comment above, where Instanceof is defined. virtual bool IsPregenerated() { return false; } - static void GenerateStubsAheadOfTime(); - static void GenerateFPStubs(); + static void GenerateStubsAheadOfTime(Isolate* isolate); + static void GenerateFPStubs(Isolate* isolate); // Some stubs put untagged junk on the stack that cannot be scanned by the // GC. This means that we must be statically sure that no GC can occur while @@ -160,22 +171,37 @@ class CodeStub BASE_EMBEDDED { virtual bool SometimesSetsUpAFrame() { return true; } // Lookup the code in the (possibly custom) cache. - bool FindCodeInCache(Code** code_out); + bool FindCodeInCache(Code** code_out, Isolate* isolate); + + // Returns information for computing the number key. + virtual Major MajorKey() = 0; + virtual int MinorKey() = 0; protected: static bool CanUseFPRegisters(); - private: - // Nonvirtual wrapper around the stub-specific Generate function. Call - // this function to set up the macro assembler and generate the code. - void GenerateCode(MacroAssembler* masm); - // Generates the assembler code for the stub. - virtual void Generate(MacroAssembler* masm) = 0; + virtual Handle<Code> GenerateCode() = 0; + + // BinaryOpStub needs to override this. + virtual InlineCacheState GetICState() { + return UNINITIALIZED; + } + virtual Code::ExtraICState GetExtraICState() { + return Code::kNoExtraICState; + } + virtual Code::StubType GetStubType() { + return Code::NORMAL; + } + + // Returns whether the code generated for this stub needs to be allocated as + // a fixed (non-moveable) code object. + virtual bool NeedsImmovableCode() { return false; } + private: // Perform bookkeeping required after code generation when stub code is // initially generated. - void RecordCodeGeneration(Code* code, MacroAssembler* masm); + void RecordCodeGeneration(Code* code, Isolate* isolate); // Finish the code object after it has been generated. virtual void FinishCode(Handle<Code> code) { } @@ -184,25 +210,18 @@ class CodeStub BASE_EMBEDDED { // registering stub in the stub cache. virtual void Activate(Code* code) { } - // Returns information for computing the number key. - virtual Major MajorKey() = 0; - virtual int MinorKey() = 0; - // BinaryOpStub needs to override this. virtual int GetCodeKind(); - // BinaryOpStub needs to override this. - virtual InlineCacheState GetICState() { - return UNINITIALIZED; - } - // Add the code to a specialized cache, specific to an individual // stub type. Please note, this method must add the code object to a // roots object, otherwise we will remove the code during GC. virtual void AddToSpecialCache(Handle<Code> new_object) { } // Find code in a specialized cache, work is delegated to the specific stub. - virtual bool FindCodeInSpecialCache(Code** code_out) { return false; } + virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) { + return false; + } // If a stub uses a special cache override this. 
virtual bool UseSpecialCache() { return false; } @@ -211,10 +230,6 @@ class CodeStub BASE_EMBEDDED { SmartArrayPointer<const char> GetName(); virtual void PrintName(StringStream* stream); - // Returns whether the code generated for this stub needs to be allocated as - // a fixed (non-moveable) code object. - virtual bool NeedsImmovableCode() { return false; } - // Computes the key based on major and minor. uint32_t GetKey() { ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS); @@ -230,6 +245,58 @@ class CodeStub BASE_EMBEDDED { }; +class PlatformCodeStub : public CodeStub { + public: + // Retrieve the code for the stub. Generate the code if needed. + virtual Handle<Code> GenerateCode(); + + virtual int GetCodeKind() { return Code::STUB; } + virtual int GetStubFlags() { return -1; } + + protected: + // Generates the assembler code for the stub. + virtual void Generate(MacroAssembler* masm) = 0; +}; + + +struct CodeStubInterfaceDescriptor { + CodeStubInterfaceDescriptor() + : register_param_count_(-1), + stack_parameter_count_(NULL), + extra_expression_stack_count_(0), + register_params_(NULL) { } + int register_param_count_; + const Register* stack_parameter_count_; + int extra_expression_stack_count_; + Register* register_params_; + Address deoptimization_handler_; + + int environment_length() const { + if (stack_parameter_count_ != NULL) { + return register_param_count_ + 1; + } + return register_param_count_; + } +}; + + +class HydrogenCodeStub : public CodeStub { + public: + // Retrieve the code for the stub. Generate the code if needed. + virtual Handle<Code> GenerateCode() = 0; + + virtual int GetCodeKind() { return Code::COMPILED_STUB; } + + CodeStubInterfaceDescriptor* GetInterfaceDescriptor(Isolate* isolate) { + return isolate->code_stub_interface_descriptor(MajorKey()); + } + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) = 0; +}; + + // Helper interface to prepare to/restore after making runtime calls. class RuntimeCallHelper { public: @@ -287,7 +354,7 @@ class NopRuntimeCallHelper : public RuntimeCallHelper { }; -class StackCheckStub : public CodeStub { +class StackCheckStub : public PlatformCodeStub { public: StackCheckStub() { } @@ -299,7 +366,7 @@ class StackCheckStub : public CodeStub { }; -class InterruptStub : public CodeStub { +class InterruptStub : public PlatformCodeStub { public: InterruptStub() { } @@ -311,7 +378,7 @@ class InterruptStub : public CodeStub { }; -class ToNumberStub: public CodeStub { +class ToNumberStub: public PlatformCodeStub { public: ToNumberStub() { } @@ -323,7 +390,7 @@ class ToNumberStub: public CodeStub { }; -class FastNewClosureStub : public CodeStub { +class FastNewClosureStub : public PlatformCodeStub { public: explicit FastNewClosureStub(LanguageMode language_mode) : language_mode_(language_mode) { } @@ -339,7 +406,7 @@ class FastNewClosureStub : public CodeStub { }; -class FastNewContextStub : public CodeStub { +class FastNewContextStub : public PlatformCodeStub { public: static const int kMaximumSlots = 64; @@ -357,7 +424,7 @@ class FastNewContextStub : public CodeStub { }; -class FastNewBlockContextStub : public CodeStub { +class FastNewBlockContextStub : public PlatformCodeStub { public: static const int kMaximumSlots = 64; @@ -375,20 +442,25 @@ class FastNewBlockContextStub : public CodeStub { }; -class FastCloneShallowArrayStub : public CodeStub { +class FastCloneShallowArrayStub : public PlatformCodeStub { public: // Maximum length of copied elements array. 
static const int kMaximumClonedLength = 8; - enum Mode { CLONE_ELEMENTS, CLONE_DOUBLE_ELEMENTS, COPY_ON_WRITE_ELEMENTS, - CLONE_ANY_ELEMENTS + CLONE_ANY_ELEMENTS, + LAST_CLONE_MODE = CLONE_ANY_ELEMENTS }; - FastCloneShallowArrayStub(Mode mode, int length) + static const int kFastCloneModeCount = LAST_CLONE_MODE + 1; + + FastCloneShallowArrayStub(Mode mode, + AllocationSiteMode allocation_site_mode, + int length) : mode_(mode), + allocation_site_mode_(allocation_site_mode), length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) { ASSERT_GE(length_, 0); ASSERT_LE(length_, kMaximumClonedLength); @@ -398,17 +470,26 @@ class FastCloneShallowArrayStub : public CodeStub { private: Mode mode_; + AllocationSiteMode allocation_site_mode_; int length_; + class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {}; + class ModeBits: public BitField<Mode, 1, 4> {}; + class LengthBits: public BitField<int, 5, 4> {}; + // Ensure data fits within available bits. + STATIC_ASSERT(LAST_ALLOCATION_SITE_MODE == 1); + STATIC_ASSERT(kFastCloneModeCount < 16); + STATIC_ASSERT(kMaximumClonedLength < 16); Major MajorKey() { return FastCloneShallowArray; } int MinorKey() { - ASSERT(mode_ == 0 || mode_ == 1 || mode_ == 2 || mode_ == 3); - return length_ * 4 + mode_; + return AllocationSiteModeBits::encode(allocation_site_mode_) + | ModeBits::encode(mode_) + | LengthBits::encode(length_); } }; -class FastCloneShallowObjectStub : public CodeStub { +class FastCloneShallowObjectStub : public HydrogenCodeStub { public: // Maximum number of properties in copied object. static const int kMaximumClonedProperties = 6; @@ -418,17 +499,25 @@ class FastCloneShallowObjectStub : public CodeStub { ASSERT_LE(length_, kMaximumClonedProperties); } - void Generate(MacroAssembler* masm); + int length() const { return length_; } + + virtual Handle<Code> GenerateCode(); + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); private: int length_; Major MajorKey() { return FastCloneShallowObject; } int MinorKey() { return length_; } + + DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub); }; -class InstanceofStub: public CodeStub { +class InstanceofStub: public PlatformCodeStub { public: enum Flags { kNoFlags = 0, @@ -466,7 +555,7 @@ class InstanceofStub: public CodeStub { }; -class MathPowStub: public CodeStub { +class MathPowStub: public PlatformCodeStub { public: enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK}; @@ -482,150 +571,325 @@ class MathPowStub: public CodeStub { }; -class ICCompareStub: public CodeStub { +class ICStub: public PlatformCodeStub { public: - ICCompareStub(Token::Value op, CompareIC::State state) - : op_(op), state_(state) { - ASSERT(Token::IsCompareOp(op)); + explicit ICStub(Code::Kind kind) : kind_(kind) { } + virtual int GetCodeKind() { return kind_; } + virtual InlineCacheState GetICState() { return MONOMORPHIC; } + + bool Describes(Code* code) { + return GetMajorKey(code) == MajorKey() && code->stub_info() == MinorKey(); + } + + protected: + class KindBits: public BitField<Code::Kind, 0, 4> {}; + virtual void FinishCode(Handle<Code> code) { + code->set_stub_info(MinorKey()); + } + Code::Kind kind() { return kind_; } + + virtual int MinorKey() { + return KindBits::encode(kind_); } + private: + Code::Kind kind_; +}; + + +class ArrayLengthStub: public ICStub { + public: + explicit ArrayLengthStub(Code::Kind kind) : ICStub(kind) { } virtual void Generate(MacroAssembler* masm); - void set_known_map(Handle<Map> map) { known_map_ = map; } + 
private: + virtual CodeStub::Major MajorKey() { return ArrayLength; } +}; + + +class FunctionPrototypeStub: public ICStub { + public: + explicit FunctionPrototypeStub(Code::Kind kind) : ICStub(kind) { } + virtual void Generate(MacroAssembler* masm); private: - class OpField: public BitField<int, 0, 3> { }; - class StateField: public BitField<int, 3, 5> { }; + virtual CodeStub::Major MajorKey() { return FunctionPrototype; } +}; - virtual void FinishCode(Handle<Code> code) { - code->set_compare_state(state_); - code->set_compare_operation(op_ - Token::EQ); + +class StringLengthStub: public ICStub { + public: + StringLengthStub(Code::Kind kind, bool support_wrapper) + : ICStub(kind), support_wrapper_(support_wrapper) { } + virtual void Generate(MacroAssembler* masm); + + private: + STATIC_ASSERT(KindBits::kSize == 4); + class WrapperModeBits: public BitField<bool, 4, 1> {}; + virtual CodeStub::Major MajorKey() { return StringLength; } + virtual int MinorKey() { + return KindBits::encode(kind()) | WrapperModeBits::encode(support_wrapper_); } - virtual CodeStub::Major MajorKey() { return CompareIC; } - virtual int MinorKey(); + bool support_wrapper_; +}; - virtual int GetCodeKind() { return Code::COMPARE_IC; } - void GenerateSmis(MacroAssembler* masm); - void GenerateHeapNumbers(MacroAssembler* masm); - void GenerateSymbols(MacroAssembler* masm); - void GenerateStrings(MacroAssembler* masm); - void GenerateObjects(MacroAssembler* masm); - void GenerateMiss(MacroAssembler* masm); - void GenerateKnownObjects(MacroAssembler* masm); +class StoreICStub: public ICStub { + public: + StoreICStub(Code::Kind kind, StrictModeFlag strict_mode) + : ICStub(kind), strict_mode_(strict_mode) { } - bool strict() const { return op_ == Token::EQ_STRICT; } - Condition GetCondition() const { return CompareIC::ComputeCondition(op_); } + protected: + virtual Code::ExtraICState GetExtraICState() { + return strict_mode_; + } - virtual void AddToSpecialCache(Handle<Code> new_object); - virtual bool FindCodeInSpecialCache(Code** code_out); - virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; } + private: + STATIC_ASSERT(KindBits::kSize == 4); + class StrictModeBits: public BitField<bool, 4, 1> {}; + virtual int MinorKey() { + return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_); + } - Token::Value op_; - CompareIC::State state_; - Handle<Map> known_map_; + StrictModeFlag strict_mode_; }; -// Flags that control the compare stub code generation. 
-enum CompareFlags { - NO_COMPARE_FLAGS = 0, - NO_SMI_COMPARE_IN_STUB = 1 << 0, - NO_NUMBER_COMPARE_IN_STUB = 1 << 1, - CANT_BOTH_BE_NAN = 1 << 2 +class StoreArrayLengthStub: public StoreICStub { + public: + explicit StoreArrayLengthStub(Code::Kind kind, StrictModeFlag strict_mode) + : StoreICStub(kind, strict_mode) { } + virtual void Generate(MacroAssembler* masm); + + private: + virtual CodeStub::Major MajorKey() { return StoreArrayLength; } }; -enum NaNInformation { - kBothCouldBeNaN, - kCantBothBeNaN +class HandlerStub: public ICStub { + public: + explicit HandlerStub(Code::Kind kind) : ICStub(kind) { } + virtual int GetCodeKind() { return Code::STUB; } + virtual int GetStubFlags() { return kind(); } }; -class CompareStub: public CodeStub { +class LoadFieldStub: public HandlerStub { public: - CompareStub(Condition cc, - bool strict, - CompareFlags flags, - Register lhs, - Register rhs) : - cc_(cc), - strict_(strict), - never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0), - include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0), - include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0), - lhs_(lhs), - rhs_(rhs) { } - - CompareStub(Condition cc, - bool strict, - CompareFlags flags) : - cc_(cc), - strict_(strict), - never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0), - include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0), - include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0), - lhs_(no_reg), - rhs_(no_reg) { } + LoadFieldStub(Register reg, bool inobject, int index) + : HandlerStub(Code::LOAD_IC), + reg_(reg), + inobject_(inobject), + index_(index) { } + virtual void Generate(MacroAssembler* masm); - void Generate(MacroAssembler* masm); + protected: + virtual Code::StubType GetStubType() { return Code::FIELD; } private: - Condition cc_; - bool strict_; - // Only used for 'equal' comparisons. Tells the stub that we already know - // that at least one side of the comparison is not NaN. This allows the - // stub to use object identity in the positive case. We ignore it when - // generating the minor key for other comparisons to avoid creating more - // stubs. - bool never_nan_nan_; - // Do generate the number comparison code in the stub. Stubs without number - // comparison code is used when the number comparison has been inlined, and - // the stub will be called if one of the operands is not a number. - bool include_number_compare_; - - // Generate the comparison code for two smi operands in the stub. - bool include_smi_compare_; - - // Register holding the left hand side of the comparison if the stub gives - // a choice, no_reg otherwise. - - Register lhs_; - // Register holding the right hand side of the comparison if the stub gives - // a choice, no_reg otherwise. - Register rhs_; - - // Encoding of the minor key in 16 bits. 
- class StrictField: public BitField<bool, 0, 1> {}; - class NeverNanNanField: public BitField<bool, 1, 1> {}; - class IncludeNumberCompareField: public BitField<bool, 2, 1> {}; - class IncludeSmiCompareField: public BitField<bool, 3, 1> {}; - class RegisterField: public BitField<bool, 4, 1> {}; - class ConditionField: public BitField<int, 5, 11> {}; - - Major MajorKey() { return Compare; } + STATIC_ASSERT(KindBits::kSize == 4); + class RegisterBits: public BitField<int, 4, 6> {}; + class InobjectBits: public BitField<bool, 10, 1> {}; + class IndexBits: public BitField<int, 11, 11> {}; + virtual CodeStub::Major MajorKey() { return LoadField; } + virtual int MinorKey() { + return KindBits::encode(kind()) + | RegisterBits::encode(reg_.code()) + | InobjectBits::encode(inobject_) + | IndexBits::encode(index_); + } - int MinorKey(); + Register reg_; + bool inobject_; + int index_; +}; - virtual int GetCodeKind() { return Code::COMPARE_IC; } - virtual void FinishCode(Handle<Code> code) { - code->set_compare_state(CompareIC::GENERIC); + +class BinaryOpStub: public PlatformCodeStub { + public: + BinaryOpStub(Token::Value op, OverwriteMode mode) + : op_(op), + mode_(mode), + platform_specific_bit_(false), + left_type_(BinaryOpIC::UNINITIALIZED), + right_type_(BinaryOpIC::UNINITIALIZED), + result_type_(BinaryOpIC::UNINITIALIZED) { + Initialize(); + ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); } - // Branch to the label if the given object isn't a symbol. - void BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch); + BinaryOpStub( + int key, + BinaryOpIC::TypeInfo left_type, + BinaryOpIC::TypeInfo right_type, + BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) + : op_(OpBits::decode(key)), + mode_(ModeBits::decode(key)), + platform_specific_bit_(PlatformSpecificBits::decode(key)), + left_type_(left_type), + right_type_(right_type), + result_type_(result_type) { } + + static void decode_types_from_minor_key(int minor_key, + BinaryOpIC::TypeInfo* left_type, + BinaryOpIC::TypeInfo* right_type, + BinaryOpIC::TypeInfo* result_type) { + *left_type = + static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key)); + *right_type = + static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key)); + *result_type = + static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key)); + } + + static Token::Value decode_op_from_minor_key(int minor_key) { + return static_cast<Token::Value>(OpBits::decode(minor_key)); + } + + enum SmiCodeGenerateHeapNumberResults { + ALLOW_HEAPNUMBER_RESULTS, + NO_HEAPNUMBER_RESULTS + }; + + private: + Token::Value op_; + OverwriteMode mode_; + bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM. + + // Operand type information determined at runtime. + BinaryOpIC::TypeInfo left_type_; + BinaryOpIC::TypeInfo right_type_; + BinaryOpIC::TypeInfo result_type_; - // Unfortunately you have to run without snapshots to see most of these - // names in the profile since most compare stubs end up in the snapshot. virtual void PrintName(StringStream* stream); + + // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM. 
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {}; + class OpBits: public BitField<Token::Value, 2, 7> {}; + class PlatformSpecificBits: public BitField<bool, 9, 1> {}; + class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; + class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; + class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {}; + + Major MajorKey() { return BinaryOp; } + int MinorKey() { + return OpBits::encode(op_) + | ModeBits::encode(mode_) + | PlatformSpecificBits::encode(platform_specific_bit_) + | LeftTypeBits::encode(left_type_) + | RightTypeBits::encode(right_type_) + | ResultTypeBits::encode(result_type_); + } + + + // Platform-independent implementation. + void Generate(MacroAssembler* masm); + void GenerateCallRuntime(MacroAssembler* masm); + + // Platform-independent signature, platform-specific implementation. + void Initialize(); + void GenerateAddStrings(MacroAssembler* masm); + void GenerateBothStringStub(MacroAssembler* masm); + void GenerateGeneric(MacroAssembler* masm); + void GenerateGenericStub(MacroAssembler* masm); + void GenerateNumberStub(MacroAssembler* masm); + void GenerateInt32Stub(MacroAssembler* masm); + void GenerateLoadArguments(MacroAssembler* masm); + void GenerateOddballStub(MacroAssembler* masm); + void GenerateRegisterArgsPush(MacroAssembler* masm); + void GenerateReturn(MacroAssembler* masm); + void GenerateSmiStub(MacroAssembler* masm); + void GenerateStringStub(MacroAssembler* masm); + void GenerateTypeTransition(MacroAssembler* masm); + void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); + void GenerateUninitializedStub(MacroAssembler* masm); + + // Entirely platform-specific methods are defined as static helper + // functions in the <arch>/code-stubs-<arch>.cc files. 
+ + virtual int GetCodeKind() { return Code::BINARY_OP_IC; } + + virtual InlineCacheState GetICState() { + return BinaryOpIC::ToState(Max(left_type_, right_type_)); + } + + virtual void FinishCode(Handle<Code> code) { + code->set_stub_info(MinorKey()); + } + + friend class CodeGenerator; +}; + + +class ICCompareStub: public PlatformCodeStub { + public: + ICCompareStub(Token::Value op, + CompareIC::State left, + CompareIC::State right, + CompareIC::State handler) + : op_(op), + left_(left), + right_(right), + state_(handler) { + ASSERT(Token::IsCompareOp(op)); + } + + virtual void Generate(MacroAssembler* masm); + + void set_known_map(Handle<Map> map) { known_map_ = map; } + + static void DecodeMinorKey(int minor_key, + CompareIC::State* left_state, + CompareIC::State* right_state, + CompareIC::State* handler_state, + Token::Value* op); + + static CompareIC::State CompareState(int minor_key) { + return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key)); + } + + private: + class OpField: public BitField<int, 0, 3> { }; + class LeftStateField: public BitField<int, 3, 4> { }; + class RightStateField: public BitField<int, 7, 4> { }; + class HandlerStateField: public BitField<int, 11, 4> { }; + + virtual void FinishCode(Handle<Code> code) { + code->set_stub_info(MinorKey()); + } + + virtual CodeStub::Major MajorKey() { return CompareIC; } + virtual int MinorKey(); + + virtual int GetCodeKind() { return Code::COMPARE_IC; } + + void GenerateSmis(MacroAssembler* masm); + void GenerateNumbers(MacroAssembler* masm); + void GenerateInternalizedStrings(MacroAssembler* masm); + void GenerateStrings(MacroAssembler* masm); + void GenerateUniqueNames(MacroAssembler* masm); + void GenerateObjects(MacroAssembler* masm); + void GenerateMiss(MacroAssembler* masm); + void GenerateKnownObjects(MacroAssembler* masm); + void GenerateGeneric(MacroAssembler* masm); + + bool strict() const { return op_ == Token::EQ_STRICT; } + Condition GetCondition() const { return CompareIC::ComputeCondition(op_); } + + virtual void AddToSpecialCache(Handle<Code> new_object); + virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate); + virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; } + + Token::Value op_; + CompareIC::State left_; + CompareIC::State right_; + CompareIC::State state_; + Handle<Map> known_map_; }; -class CEntryStub : public CodeStub { +class CEntryStub : public PlatformCodeStub { public: explicit CEntryStub(int result_size, SaveFPRegsMode save_doubles = kDontSaveFPRegs) @@ -638,7 +902,7 @@ class CEntryStub : public CodeStub { // their code generation. On machines that always have gp registers (x64) we // can generate both variants ahead of time. 
virtual bool IsPregenerated(); - static void GenerateAheadOfTime(); + static void GenerateAheadOfTime(Isolate* isolate); private: void GenerateCore(MacroAssembler* masm, @@ -659,7 +923,7 @@ class CEntryStub : public CodeStub { }; -class JSEntryStub : public CodeStub { +class JSEntryStub : public PlatformCodeStub { public: JSEntryStub() { } @@ -693,7 +957,7 @@ class JSConstructEntryStub : public JSEntryStub { }; -class ArgumentsAccessStub: public CodeStub { +class ArgumentsAccessStub: public PlatformCodeStub { public: enum Type { READ_ELEMENT, @@ -720,7 +984,7 @@ class ArgumentsAccessStub: public CodeStub { }; -class RegExpExecStub: public CodeStub { +class RegExpExecStub: public PlatformCodeStub { public: RegExpExecStub() { } @@ -732,7 +996,7 @@ class RegExpExecStub: public CodeStub { }; -class RegExpConstructResultStub: public CodeStub { +class RegExpConstructResultStub: public PlatformCodeStub { public: RegExpConstructResultStub() { } @@ -744,7 +1008,7 @@ class RegExpConstructResultStub: public CodeStub { }; -class CallFunctionStub: public CodeStub { +class CallFunctionStub: public PlatformCodeStub { public: CallFunctionStub(int argc, CallFunctionFlags flags) : argc_(argc), flags_(flags) { } @@ -785,7 +1049,7 @@ class CallFunctionStub: public CodeStub { }; -class CallConstructStub: public CodeStub { +class CallConstructStub: public PlatformCodeStub { public: explicit CallConstructStub(CallFunctionFlags flags) : flags_(flags) {} @@ -860,6 +1124,13 @@ class StringCharCodeAtGenerator { void GenerateSlow(MacroAssembler* masm, const RuntimeCallHelper& call_helper); + // Skip handling slow case and directly jump to bailout. + void SkipSlow(MacroAssembler* masm, Label* bailout) { + masm->bind(&index_not_smi_); + masm->bind(&call_runtime_); + masm->jmp(bailout); + } + private: Register object_; Register index_; @@ -900,6 +1171,12 @@ class StringCharFromCodeGenerator { void GenerateSlow(MacroAssembler* masm, const RuntimeCallHelper& call_helper); + // Skip handling slow case and directly jump to bailout. + void SkipSlow(MacroAssembler* masm, Label* bailout) { + masm->bind(&slow_case_); + masm->jmp(bailout); + } + private: Register code_; Register result_; @@ -942,13 +1219,25 @@ class StringCharAtGenerator { // Generates the fast case code. On the fallthrough path |result| // register contains the result. - void GenerateFast(MacroAssembler* masm); + void GenerateFast(MacroAssembler* masm) { + char_code_at_generator_.GenerateFast(masm); + char_from_code_generator_.GenerateFast(masm); + } // Generates the slow case code. Must not be naturally // reachable. Expected to be put after a ret instruction (e.g., in // deferred code). Always jumps back to the fast case. void GenerateSlow(MacroAssembler* masm, - const RuntimeCallHelper& call_helper); + const RuntimeCallHelper& call_helper) { + char_code_at_generator_.GenerateSlow(masm, call_helper); + char_from_code_generator_.GenerateSlow(masm, call_helper); + } + + // Skip handling slow case and directly jump to bailout. 
+ void SkipSlow(MacroAssembler* masm, Label* bailout) { + char_code_at_generator_.SkipSlow(masm, bailout); + char_from_code_generator_.SkipSlow(masm, bailout); + } private: StringCharCodeAtGenerator char_code_at_generator_; @@ -976,60 +1265,180 @@ class AllowStubCallsScope { }; -class KeyedLoadElementStub : public CodeStub { +class KeyedLoadDictionaryElementStub : public PlatformCodeStub { public: - explicit KeyedLoadElementStub(ElementsKind elements_kind) - : elements_kind_(elements_kind) - { } + KeyedLoadDictionaryElementStub() {} Major MajorKey() { return KeyedLoadElement; } - int MinorKey() { return elements_kind_; } + int MinorKey() { return DICTIONARY_ELEMENTS; } void Generate(MacroAssembler* masm); private: - ElementsKind elements_kind_; + DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub); +}; + + +class KeyedLoadFastElementStub : public HydrogenCodeStub { + public: + KeyedLoadFastElementStub(bool is_js_array, ElementsKind elements_kind) { + bit_field_ = ElementsKindBits::encode(elements_kind) | + IsJSArrayBits::encode(is_js_array); + } + + bool is_js_array() const { + return IsJSArrayBits::decode(bit_field_); + } - DISALLOW_COPY_AND_ASSIGN(KeyedLoadElementStub); + ElementsKind elements_kind() const { + return ElementsKindBits::decode(bit_field_); + } + + virtual Handle<Code> GenerateCode(); + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); + + private: + class IsJSArrayBits: public BitField<bool, 8, 1> {}; + class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; + uint32_t bit_field_; + + Major MajorKey() { return KeyedLoadElement; } + int MinorKey() { return bit_field_; } + + DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub); }; -class KeyedStoreElementStub : public CodeStub { +class TransitionElementsKindStub : public HydrogenCodeStub { + public: + TransitionElementsKindStub(ElementsKind from_kind, + ElementsKind to_kind) { + bit_field_ = FromKindBits::encode(from_kind) | + ToKindBits::encode(to_kind); + } + + ElementsKind from_kind() const { + return FromKindBits::decode(bit_field_); + } + + ElementsKind to_kind() const { + return ToKindBits::decode(bit_field_); + } + + virtual Handle<Code> GenerateCode(); + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); + + private: + class FromKindBits: public BitField<ElementsKind, 8, 8> {}; + class ToKindBits: public BitField<ElementsKind, 0, 8> {}; + uint32_t bit_field_; + + Major MajorKey() { return TransitionElementsKind; } + int MinorKey() { return bit_field_; } + + DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub); +}; + + +class ArrayNoArgumentConstructorStub : public HydrogenCodeStub { + public: + ArrayNoArgumentConstructorStub() { + } + + Major MajorKey() { return ArrayNoArgumentConstructor; } + int MinorKey() { return 0; } + + virtual Handle<Code> GenerateCode(); + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); + + private: + DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub); +}; + + +class ArraySingleArgumentConstructorStub : public HydrogenCodeStub { + public: + ArraySingleArgumentConstructorStub() { + } + + Major MajorKey() { return ArraySingleArgumentConstructor; } + int MinorKey() { return 0; } + + virtual Handle<Code> GenerateCode(); + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); + + private: + 
DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub); +}; + + +class ArrayNArgumentsConstructorStub : public HydrogenCodeStub { + public: + ArrayNArgumentsConstructorStub() { + } + + Major MajorKey() { return ArrayNArgumentsConstructor; } + int MinorKey() { return 0; } + + virtual Handle<Code> GenerateCode(); + + virtual void InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor); + + private: + DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub); +}; + + +class KeyedStoreElementStub : public PlatformCodeStub { public: KeyedStoreElementStub(bool is_js_array, ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) + KeyedAccessStoreMode store_mode) : is_js_array_(is_js_array), elements_kind_(elements_kind), - grow_mode_(grow_mode), + store_mode_(store_mode), fp_registers_(CanUseFPRegisters()) { } Major MajorKey() { return KeyedStoreElement; } int MinorKey() { return ElementsKindBits::encode(elements_kind_) | IsJSArrayBits::encode(is_js_array_) | - GrowModeBits::encode(grow_mode_) | + StoreModeBits::encode(store_mode_) | FPRegisters::encode(fp_registers_); } void Generate(MacroAssembler* masm); private: - class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; - class GrowModeBits: public BitField<KeyedAccessGrowMode, 8, 1> {}; - class IsJSArrayBits: public BitField<bool, 9, 1> {}; - class FPRegisters: public BitField<bool, 10, 1> {}; + class ElementsKindBits: public BitField<ElementsKind, 0, 8> {}; + class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {}; + class IsJSArrayBits: public BitField<bool, 12, 1> {}; + class FPRegisters: public BitField<bool, 13, 1> {}; bool is_js_array_; ElementsKind elements_kind_; - KeyedAccessGrowMode grow_mode_; + KeyedAccessStoreMode store_mode_; bool fp_registers_; DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub); }; -class ToBooleanStub: public CodeStub { +class ToBooleanStub: public PlatformCodeStub { public: enum Type { UNDEFINED, @@ -1053,6 +1462,9 @@ class ToBooleanStub: public CodeStub { bool IsEmpty() const { return set_.IsEmpty(); } bool Contains(Type type) const { return set_.Contains(type); } + bool ContainsAnyOf(Types types) const { + return set_.ContainsAnyOf(types.set_); + } void Add(Type type) { set_.Add(type); } byte ToByte() const { return set_.ToIntegral(); } void Print(StringStream* stream) const; @@ -1096,25 +1508,25 @@ class ToBooleanStub: public CodeStub { }; -class ElementsTransitionAndStoreStub : public CodeStub { +class ElementsTransitionAndStoreStub : public PlatformCodeStub { public: ElementsTransitionAndStoreStub(ElementsKind from, ElementsKind to, bool is_jsarray, StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode) + KeyedAccessStoreMode store_mode) : from_(from), to_(to), is_jsarray_(is_jsarray), strict_mode_(strict_mode), - grow_mode_(grow_mode) {} + store_mode_(store_mode) {} private: - class FromBits: public BitField<ElementsKind, 0, 8> {}; - class ToBits: public BitField<ElementsKind, 8, 8> {}; - class IsJSArrayBits: public BitField<bool, 16, 1> {}; - class StrictModeBits: public BitField<StrictModeFlag, 17, 1> {}; - class GrowModeBits: public BitField<KeyedAccessGrowMode, 18, 1> {}; + class FromBits: public BitField<ElementsKind, 0, 8> {}; + class ToBits: public BitField<ElementsKind, 8, 8> {}; + class IsJSArrayBits: public BitField<bool, 16, 1> {}; + class StrictModeBits: public BitField<StrictModeFlag, 17, 1> {}; + class StoreModeBits: public BitField<KeyedAccessStoreMode, 18, 4> {}; Major MajorKey() { return ElementsTransitionAndStore; 
} int MinorKey() { @@ -1122,7 +1534,7 @@ class ElementsTransitionAndStoreStub : public CodeStub { ToBits::encode(to_) | IsJSArrayBits::encode(is_jsarray_) | StrictModeBits::encode(strict_mode_) | - GrowModeBits::encode(grow_mode_); + StoreModeBits::encode(store_mode_); } void Generate(MacroAssembler* masm); @@ -1131,13 +1543,13 @@ class ElementsTransitionAndStoreStub : public CodeStub { ElementsKind to_; bool is_jsarray_; StrictModeFlag strict_mode_; - KeyedAccessGrowMode grow_mode_; + KeyedAccessStoreMode store_mode_; DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub); }; -class StoreArrayLiteralElementStub : public CodeStub { +class StoreArrayLiteralElementStub : public PlatformCodeStub { public: StoreArrayLiteralElementStub() : fp_registers_(CanUseFPRegisters()) { } @@ -1156,7 +1568,30 @@ class StoreArrayLiteralElementStub : public CodeStub { }; -class ProfileEntryHookStub : public CodeStub { +class StubFailureTrampolineStub : public PlatformCodeStub { + public: + static const int kMaxExtraExpressionStackCount = 1; + + explicit StubFailureTrampolineStub(int extra_expression_stack_count) + : extra_expression_stack_count_(extra_expression_stack_count) {} + + virtual bool IsPregenerated() { return true; } + + static void GenerateAheadOfTime(Isolate* isolate); + + private: + Major MajorKey() { return StubFailureTrampoline; } + int MinorKey() { return extra_expression_stack_count_; } + + void Generate(MacroAssembler* masm); + + int extra_expression_stack_count_; + + DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub); +}; + + +class ProfileEntryHookStub : public PlatformCodeStub { public: explicit ProfileEntryHookStub() {} @@ -1171,6 +1606,8 @@ class ProfileEntryHookStub : public CodeStub { // non-NULL hook. static bool SetFunctionEntryHook(FunctionEntryHook entry_hook); + static bool HasEntryHook() { return entry_hook_ != NULL; } + private: static void EntryHookTrampoline(intptr_t function, intptr_t stack_pointer); diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc index 0163580e90..90ab2b5a20 100644 --- a/deps/v8/src/codegen.cc +++ b/deps/v8/src/codegen.cc @@ -76,16 +76,22 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) { if (FLAG_trace_codegen || print_source || print_ast) { PrintF("*** Generate code for %s function: ", ftype); - info->function()->name()->ShortPrint(); + if (info->IsStub()) { + const char* name = + CodeStub::MajorName(info->code_stub()->MajorKey(), true); + PrintF("%s", name == NULL ? "<unknown>" : name); + } else { + info->function()->name()->ShortPrint(); + } PrintF(" ***\n"); } - if (print_source) { + if (!info->IsStub() && print_source) { PrintF("--- Source from AST ---\n%s\n", PrettyPrinter().PrintProgram(info->function())); } - if (print_ast) { + if (!info->IsStub() && print_ast) { PrintF("--- AST ---\n%s\n", AstPrinter().PrintProgram(info->function())); } @@ -107,6 +113,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm, if (!code.is_null()) { isolate->counters()->total_compiled_code_size()->Increment( code->instruction_size()); + code->set_prologue_offset(info->prologue_offset()); } return code; } @@ -116,23 +123,29 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) { #ifdef ENABLE_DISASSEMBLER bool print_code = Isolate::Current()->bootstrapper()->IsActive() ? 
FLAG_print_builtin_code - : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code)); + : (FLAG_print_code || + (info->IsStub() && FLAG_print_code_stubs) || + (info->IsOptimizing() && FLAG_print_opt_code)); if (print_code) { // Print the source code if available. FunctionLiteral* function = info->function(); - Handle<Script> script = info->script(); - if (!script->IsUndefined() && !script->source()->IsUndefined()) { - PrintF("--- Raw source ---\n"); - StringInputBuffer stream(String::cast(script->source())); - stream.Seek(function->start_position()); - // fun->end_position() points to the last character in the stream. We - // need to compensate by adding one to calculate the length. - int source_len = - function->end_position() - function->start_position() + 1; - for (int i = 0; i < source_len; i++) { - if (stream.has_more()) PrintF("%c", stream.GetNext()); + if (code->kind() != Code::COMPILED_STUB) { + Handle<Script> script = info->script(); + if (!script->IsUndefined() && !script->source()->IsUndefined()) { + PrintF("--- Raw source ---\n"); + ConsStringIteratorOp op; + StringCharacterStream stream(String::cast(script->source()), + &op, + function->start_position()); + // fun->end_position() points to the last character in the stream. We + // need to compensate by adding one to calculate the length. + int source_len = + function->end_position() - function->start_position() + 1; + for (int i = 0; i < source_len; i++) { + if (stream.HasMore()) PrintF("%c", stream.GetNext()); + } + PrintF("\n\n"); } - PrintF("\n\n"); } if (info->IsOptimizing()) { if (FLAG_print_unopt_code) { @@ -144,7 +157,12 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) { } else { PrintF("--- Code ---\n"); } - code->Disassemble(*function->debug_name()->ToCString()); + if (info->IsStub()) { + CodeStub::Major major_key = info->code_stub()->MajorKey(); + code->Disassemble(CodeStub::MajorName(major_key, false)); + } else { + code->Disassemble(*function->debug_name()->ToCString()); + } } #endif // ENABLE_DISASSEMBLER } @@ -153,12 +171,13 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) { bool CodeGenerator::ShouldGenerateLog(Expression* type) { ASSERT(type != NULL); Isolate* isolate = Isolate::Current(); - if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) { + if (!isolate->logger()->is_logging() && + !isolate->cpu_profiler()->is_profiling()) { return false; } Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle()); if (FLAG_log_regexp) { - if (name->IsEqualTo(CStrVector("regexp"))) + if (name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("regexp"))) return true; } return false; diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h index 08a777f2ad..09907c4a20 100644 --- a/deps/v8/src/codegen.h +++ b/deps/v8/src/codegen.h @@ -90,19 +90,41 @@ namespace internal { typedef double (*UnaryMathFunction)(double x); UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type); +UnaryMathFunction CreateExpFunction(); UnaryMathFunction CreateSqrtFunction(); class ElementsTransitionGenerator : public AllStatic { public: - static void GenerateMapChangeElementsTransition(MacroAssembler* masm); - static void GenerateSmiToDouble(MacroAssembler* masm, Label* fail); - static void GenerateDoubleToObject(MacroAssembler* masm, Label* fail); + // If |mode| is set to DONT_TRACK_ALLOCATION_SITE, + // |allocation_site_info_found| may be NULL. 
+ static void GenerateMapChangeElementsTransition(MacroAssembler* masm, + AllocationSiteMode mode, + Label* allocation_site_info_found); + static void GenerateSmiToDouble(MacroAssembler* masm, + AllocationSiteMode mode, + Label* fail); + static void GenerateDoubleToObject(MacroAssembler* masm, + AllocationSiteMode mode, + Label* fail); private: DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator); }; + +class SeqStringSetCharGenerator : public AllStatic { + public: + static void Generate(MacroAssembler* masm, + String::Encoding encoding, + Register string, + Register index, + Register value); + private: + DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator); +}; + + } } // namespace v8::internal #endif // V8_CODEGEN_H_ diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js index d36fe18fa0..c872efbb30 100644 --- a/deps/v8/src/collection.js +++ b/deps/v8/src/collection.js @@ -88,6 +88,25 @@ function SetDelete(key) { } +function SetGetSize() { + if (!IS_SET(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Set.prototype.size', this]); + } + return %SetGetSize(this); +} + + +function SetClear() { + if (!IS_SET(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Set.prototype.clear', this]); + } + // Replace the internal table with a new empty table. + %SetInitialize(this); +} + + function MapConstructor() { if (%_IsConstructCall()) { %MapInitialize(this); @@ -145,6 +164,25 @@ function MapDelete(key) { } +function MapGetSize() { + if (!IS_MAP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Map.prototype.size', this]); + } + return %MapGetSize(this); +} + + +function MapClear() { + if (!IS_MAP(this)) { + throw MakeTypeError('incompatible_method_receiver', + ['Map.prototype.clear', this]); + } + // Replace the internal table with a new empty table. + %MapInitialize(this); +} + + function WeakMapConstructor() { if (%_IsConstructCall()) { %WeakMapInitialize(this); @@ -159,7 +197,7 @@ function WeakMapGet(key) { throw MakeTypeError('incompatible_method_receiver', ['WeakMap.prototype.get', this]); } - if (!IS_SPEC_OBJECT(key)) { + if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) { throw %MakeTypeError('invalid_weakmap_key', [this, key]); } return %WeakMapGet(this, key); @@ -171,7 +209,7 @@ function WeakMapSet(key, value) { throw MakeTypeError('incompatible_method_receiver', ['WeakMap.prototype.set', this]); } - if (!IS_SPEC_OBJECT(key)) { + if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) { throw %MakeTypeError('invalid_weakmap_key', [this, key]); } return %WeakMapSet(this, key, value); @@ -183,7 +221,7 @@ function WeakMapHas(key) { throw MakeTypeError('incompatible_method_receiver', ['WeakMap.prototype.has', this]); } - if (!IS_SPEC_OBJECT(key)) { + if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) { throw %MakeTypeError('invalid_weakmap_key', [this, key]); } return %WeakMapHas(this, key); @@ -195,7 +233,7 @@ function WeakMapDelete(key) { throw MakeTypeError('incompatible_method_receiver', ['WeakMap.prototype.delete', this]); } - if (!IS_SPEC_OBJECT(key)) { + if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) { throw %MakeTypeError('invalid_weakmap_key', [this, key]); } return %WeakMapDelete(this, key); @@ -215,18 +253,22 @@ function WeakMapDelete(key) { %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM); // Set up the non-enumerable functions on the Set prototype object. 
+ InstallGetter($Set.prototype, "size", SetGetSize); InstallFunctions($Set.prototype, DONT_ENUM, $Array( "add", SetAdd, "has", SetHas, - "delete", SetDelete + "delete", SetDelete, + "clear", SetClear )); // Set up the non-enumerable functions on the Map prototype object. + InstallGetter($Map.prototype, "size", MapGetSize); InstallFunctions($Map.prototype, DONT_ENUM, $Array( "get", MapGet, "set", MapSet, "has", MapHas, - "delete", MapDelete + "delete", MapDelete, + "clear", MapClear )); // Set up the WeakMap constructor function. diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc index c0645760b3..7ace2f7db0 100644 --- a/deps/v8/src/compilation-cache.cc +++ b/deps/v8/src/compilation-cache.cc @@ -67,7 +67,7 @@ CompilationCache::~CompilationCache() {} static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) { CALL_HEAP_FUNCTION(isolate, - CompilationCacheTable::Allocate(size), + CompilationCacheTable::Allocate(isolate->heap(), size), CompilationCacheTable); } @@ -98,7 +98,7 @@ void CompilationSubCache::Age() { void CompilationSubCache::IterateFunctions(ObjectVisitor* v) { - Object* undefined = isolate()->heap()->raw_unchecked_undefined_value(); + Object* undefined = isolate()->heap()->undefined_value(); for (int i = 0; i < generations_; i++) { if (tables_[i] != undefined) { reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v); diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc index 86374371e9..2c4dae5d4e 100644 --- a/deps/v8/src/compiler.cc +++ b/deps/v8/src/compiler.cc @@ -33,6 +33,7 @@ #include "codegen.h" #include "compilation-cache.h" #include "debug.h" +#include "deoptimizer.h" #include "full-codegen.h" #include "gdb-jit.h" #include "hydrogen.h" @@ -52,57 +53,69 @@ namespace internal { CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone) - : isolate_(script->GetIsolate()), - flags_(LanguageModeField::encode(CLASSIC_MODE)), - function_(NULL), - scope_(NULL), - global_scope_(NULL), + : flags_(LanguageModeField::encode(CLASSIC_MODE)), script_(script), - extension_(NULL), - pre_parse_data_(NULL), - osr_ast_id_(BailoutId::None()), - zone_(zone), - deferred_handles_(NULL) { - Initialize(BASE); + osr_ast_id_(BailoutId::None()) { + Initialize(script->GetIsolate(), BASE, zone); } CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone) - : isolate_(shared_info->GetIsolate()), - flags_(LanguageModeField::encode(CLASSIC_MODE) | - IsLazy::encode(true)), - function_(NULL), - scope_(NULL), - global_scope_(NULL), + : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)), shared_info_(shared_info), script_(Handle<Script>(Script::cast(shared_info->script()))), - extension_(NULL), - pre_parse_data_(NULL), - osr_ast_id_(BailoutId::None()), - zone_(zone), - deferred_handles_(NULL) { - Initialize(BASE); + osr_ast_id_(BailoutId::None()) { + Initialize(script_->GetIsolate(), BASE, zone); } CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone) - : isolate_(closure->GetIsolate()), - flags_(LanguageModeField::encode(CLASSIC_MODE) | - IsLazy::encode(true)), - function_(NULL), - scope_(NULL), - global_scope_(NULL), + : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)), closure_(closure), shared_info_(Handle<SharedFunctionInfo>(closure->shared())), script_(Handle<Script>(Script::cast(shared_info_->script()))), - extension_(NULL), - pre_parse_data_(NULL), context_(closure->context()), - osr_ast_id_(BailoutId::None()), - zone_(zone), - 
deferred_handles_(NULL) { - Initialize(BASE); + osr_ast_id_(BailoutId::None()) { + Initialize(script_->GetIsolate(), BASE, zone); +} + + +CompilationInfo::CompilationInfo(HydrogenCodeStub* stub, + Isolate* isolate, Zone* zone) + : flags_(LanguageModeField::encode(CLASSIC_MODE) | + IsLazy::encode(true)), + osr_ast_id_(BailoutId::None()) { + Initialize(isolate, STUB, zone); + code_stub_ = stub; +} + + +void CompilationInfo::Initialize(Isolate* isolate, Mode mode, Zone* zone) { + isolate_ = isolate; + function_ = NULL; + scope_ = NULL; + global_scope_ = NULL; + extension_ = NULL; + pre_parse_data_ = NULL; + zone_ = zone; + deferred_handles_ = NULL; + code_stub_ = NULL; + prologue_offset_ = kPrologueOffsetNotSet; + opt_count_ = shared_info().is_null() ? 0 : shared_info()->opt_count(); + if (mode == STUB) { + mode_ = STUB; + return; + } + mode_ = V8::UseCrankshaft() ? mode : NONOPT; + if (script_->type()->value() == Script::TYPE_NATIVE) { + MarkAsNative(); + } + if (!shared_info_.is_null()) { + ASSERT(language_mode() == CLASSIC_MODE); + SetLanguageMode(shared_info_->language_mode()); + } + set_bailout_reason("unknown"); } @@ -111,6 +124,33 @@ CompilationInfo::~CompilationInfo() { } +int CompilationInfo::num_parameters() const { + if (IsStub()) { + return 0; + } else { + return scope()->num_parameters(); + } +} + + +int CompilationInfo::num_heap_slots() const { + if (IsStub()) { + return 0; + } else { + return scope()->num_heap_slots(); + } +} + + +Code::Flags CompilationInfo::flags() const { + if (IsStub()) { + return Code::ComputeFlags(Code::COMPILED_STUB); + } else { + return Code::ComputeFlags(Code::OPTIMIZED_FUNCTION); + } +} + + // Disable optimization for the rest of the compilation pipeline. void CompilationInfo::DisableOptimization() { bool is_optimizable_closure = @@ -194,6 +234,11 @@ void OptimizingCompiler::RecordOptimizationStats() { code_size, compilation_time); } + if (FLAG_hydrogen_stats) { + isolate()->GetHStatistics()->IncrementSubtotals(time_taken_to_create_graph_, + time_taken_to_optimize_, + time_taken_to_codegen_); + } } @@ -233,7 +278,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { // Fall back to using the full code generator if it's not possible // to use the Hydrogen-based optimizing compiler. We already have // generated code for this from the shared function object. - if (AlwaysFullCompiler(info()->isolate())) { + if (AlwaysFullCompiler(isolate())) { info()->SetCode(code); return SetLastStatus(BAILED_OUT); } @@ -242,7 +287,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { // the optimizing compiler. const int kMaxOptCount = FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000; - if (info()->shared_info()->opt_count() > kMaxOptCount) { + if (info()->opt_count() > kMaxOptCount) { info()->set_bailout_reason("optimized too many times"); return AbortOptimization(); } @@ -273,8 +318,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { if (*FLAG_hydrogen_filter != '\0') { Vector<const char> filter = CStrVector(FLAG_hydrogen_filter); if ((filter[0] == '-' - && name->IsEqualTo(filter.SubVector(1, filter.length()))) - || (filter[0] != '-' && !name->IsEqualTo(filter))) { + && name->IsUtf8EqualTo(filter.SubVector(1, filter.length()))) + || (filter[0] != '-' && !name->IsUtf8EqualTo(filter))) { info()->SetCode(code); return SetLastStatus(BAILED_OUT); } @@ -284,10 +329,9 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { // doesn't have deoptimization support. 
Alternatively, we may decide to // run the full code generator to get a baseline for the compile-time // performance of the hydrogen-based compiler. - Timer t(this, &time_taken_to_create_graph_); bool should_recompile = !info()->shared_info()->has_deoptimization_support(); if (should_recompile || FLAG_hydrogen_stats) { - HPhase phase(HPhase::kFullCodeGen); + HPhase phase(HPhase::kFullCodeGen, isolate()); CompilationInfoWithZone unoptimized(info()->shared_info()); // Note that we use the same AST that we will use for generating the // optimized code. @@ -317,17 +361,18 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { if (FLAG_trace_hydrogen) { PrintF("-----------------------------------------------------------\n"); PrintF("Compiling method %s using hydrogen\n", *name->ToCString()); - HTracer::Instance()->TraceCompilation(info()->function()); + isolate()->GetHTracer()->TraceCompilation(info()); } Handle<Context> native_context( info()->closure()->context()->native_context()); oracle_ = new(info()->zone()) TypeFeedbackOracle( - code, native_context, info()->isolate(), info()->zone()); - graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_); - HPhase phase(HPhase::kTotal); + code, native_context, isolate(), info()->zone()); + graph_builder_ = new(info()->zone()) HOptimizedGraphBuilder(info(), oracle_); + + Timer t(this, &time_taken_to_create_graph_); graph_ = graph_builder_->CreateGraph(); - if (info()->isolate()->has_pending_exception()) { + if (isolate()->has_pending_exception()) { info()->SetCode(Handle<Code>::null()); return SetLastStatus(FAILED); } @@ -350,7 +395,8 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() { OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() { AssertNoAllocation no_gc; - NoHandleAllocation no_handles; + NoHandleAllocation no_handles(isolate()); + HandleDereferenceGuard no_deref(isolate(), HandleDereferenceGuard::DISALLOW); ASSERT(last_status() == SUCCEEDED); Timer t(this, &time_taken_to_optimize_); @@ -371,15 +417,17 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() { OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() { ASSERT(last_status() == SUCCEEDED); - Timer timer(this, &time_taken_to_codegen_); - ASSERT(chunk_ != NULL); - ASSERT(graph_ != NULL); - Handle<Code> optimized_code = chunk_->Codegen(); - if (optimized_code.is_null()) { - info()->set_bailout_reason("code generation failed"); - return AbortOptimization(); + { // Scope for timer. + Timer timer(this, &time_taken_to_codegen_); + ASSERT(chunk_ != NULL); + ASSERT(graph_ != NULL); + Handle<Code> optimized_code = chunk_->Codegen(Code::OPTIMIZED_FUNCTION); + if (optimized_code.is_null()) { + info()->set_bailout_reason("code generation failed"); + return AbortOptimization(); + } + info()->SetCode(optimized_code); } - info()->SetCode(optimized_code); RecordOptimizationStats(); return SetLastStatus(SUCCEEDED); } @@ -390,6 +438,8 @@ static bool GenerateCode(CompilationInfo* info) { !info->IsCompilingForDebugging() && info->IsOptimizing(); if (is_optimizing) { + Logger::TimerEventScope timer( + info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous); return MakeCrankshaftCode(info); } else { if (info->IsOptimizing()) { @@ -397,6 +447,8 @@ static bool GenerateCode(CompilationInfo* info) { // BASE or NONOPT. 
info->DisableOptimization(); } + Logger::TimerEventScope timer( + info->isolate(), Logger::TimerEventScope::v8_compile_full_code); return FullCodeGenerator::MakeCode(info); } } @@ -425,6 +477,13 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) { #endif +static bool DebuggerWantsEagerCompilation(CompilationInfo* info, + bool allow_lazy_without_ctx = false) { + return LiveEditFunctionTracker::IsActive(info->isolate()) || + (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx); +} + + static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { Isolate* isolate = info->isolate(); ZoneScope zone_scope(info->zone(), DELETE_ON_EXIT); @@ -432,7 +491,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { ASSERT(!isolate->native_context().is_null()); Handle<Script> script = info->script(); - script->set_context_data((*isolate->native_context())->data()); + // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile? + FixedArray* array = isolate->native_context()->embedder_data(); + script->set_context_data(array->get(0)); #ifdef ENABLE_DEBUGGER_SUPPORT if (info->is_eval()) { @@ -460,8 +521,9 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) { // Only allow non-global compiles for eval. ASSERT(info->is_eval() || info->is_global()); ParsingFlags flags = kNoParsingFlags; - if (info->pre_parse_data() != NULL || - String::cast(script->source())->length() > FLAG_min_preparse_length) { + if ((info->pre_parse_data() != NULL || + String::cast(script->source())->length() > FLAG_min_preparse_length) && + !DebuggerWantsEagerCompilation(info)) { flags = kAllowLazy; } if (!ParserApi::Parse(info, flags)) { @@ -620,6 +682,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source, Handle<Context> context, bool is_global, LanguageMode language_mode, + ParseRestriction restriction, int scope_position) { Isolate* isolate = source->GetIsolate(); int source_length = source->length(); @@ -646,6 +709,7 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source, info.MarkAsEval(); if (is_global) info.MarkAsGlobal(); info.SetLanguageMode(language_mode); + info.SetParseRestriction(restriction); info.SetContext(context); result = MakeFunctionInfo(&info); if (!result.is_null()) { @@ -688,7 +752,7 @@ static bool InstallFullCode(CompilationInfo* info) { Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(), info->zone()); shared->set_scope_info(*scope_info); - shared->set_code(*code); + shared->ReplaceCode(*code); if (!function.is_null()) { function->ReplaceCode(*code); ASSERT(!function->IsOptimized()); @@ -821,7 +885,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) { if (info->IsOptimizing()) { Handle<Code> code = info->code(); - ASSERT(shared->scope_info() != ScopeInfo::Empty()); + ASSERT(shared->scope_info() != ScopeInfo::Empty(isolate)); info->closure()->ReplaceCode(*code); InsertCodeIntoOptimizedCodeMap(info); return true; @@ -837,10 +901,14 @@ bool Compiler::CompileLazy(CompilationInfo* info) { void Compiler::RecompileParallel(Handle<JSFunction> closure) { - if (closure->IsInRecompileQueue()) return; ASSERT(closure->IsMarkedForParallelRecompilation()); Isolate* isolate = closure->GetIsolate(); + // Here we prepare compile data for the parallel recompilation thread, but + // this still happens synchronously and interrupts execution. 
+ Logger::TimerEventScope timer( + isolate, Logger::TimerEventScope::v8_recompile_synchronous); + if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) { if (FLAG_trace_parallel_recompilation) { PrintF(" ** Compilation queue, will retry opting on next run.\n"); @@ -849,7 +917,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) { } SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure)); - VMState state(isolate, PARALLEL_COMPILER_PROLOGUE); + VMState state(isolate, PARALLEL_COMPILER); PostponeInterruptsScope postpone(isolate); Handle<SharedFunctionInfo> shared = info->shared_info(); @@ -860,7 +928,9 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) { { CompilationHandleScope handle_scope(*info); - if (InstallCodeFromOptimizedCodeMap(*info)) return; + if (InstallCodeFromOptimizedCodeMap(*info)) { + return; + } if (ParserApi::Parse(*info, kNoParsingFlags)) { LanguageMode language_mode = info->function()->language_mode(); @@ -873,11 +943,12 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) { new(info->zone()) OptimizingCompiler(*info); OptimizingCompiler::Status status = compiler->CreateGraph(); if (status == OptimizingCompiler::SUCCEEDED) { - isolate->optimizing_compiler_thread()->QueueForOptimization(compiler); - shared->code()->set_profiler_ticks(0); - closure->ReplaceCode(isolate->builtins()->builtin( - Builtins::kInRecompileQueue)); info.Detach(); + shared->code()->set_profiler_ticks(0); + // Do a scavenge to put off the next scavenge as far as possible. + // This may ease the issue that GVN blocks the next scavenge. + isolate->heap()->CollectGarbage(NEW_SPACE, "parallel recompile"); + isolate->optimizing_compiler_thread()->QueueForOptimization(compiler); } else if (status == OptimizingCompiler::BAILED_OUT) { isolate->clear_pending_exception(); InstallFullCode(*info); @@ -886,14 +957,44 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) { } } - if (isolate->has_pending_exception()) { - isolate->clear_pending_exception(); + if (shared->code()->stack_check_patched_for_osr()) { + // At this point we either put the function on recompilation queue or + // aborted optimization. In either case we want to continue executing + // the unoptimized code without running into OSR. If the unoptimized + // code has been patched for OSR, unpatch it. + InterruptStub interrupt_stub; + Handle<Code> check_code = interrupt_stub.GetCode(isolate); + Handle<Code> replacement_code = + isolate->builtins()->OnStackReplacement(); + Deoptimizer::RevertStackCheckCode(shared->code(), + *check_code, + *replacement_code); } + + if (isolate->has_pending_exception()) isolate->clear_pending_exception(); } void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) { SmartPointer<CompilationInfo> info(optimizing_compiler->info()); + // The function may have already been optimized by OSR. Simply continue. + // Except when OSR already disabled optimization for some reason. 
+ if (info->shared_info()->optimization_disabled()) { + info->SetCode(Handle<Code>(info->shared_info()->code())); + InstallFullCode(*info); + if (FLAG_trace_parallel_recompilation) { + PrintF(" ** aborting optimization for "); + info->closure()->PrintName(); + PrintF(" as it has been disabled.\n"); + } + ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode()); + return; + } + + Isolate* isolate = info->isolate(); + VMState state(isolate, PARALLEL_COMPILER); + Logger::TimerEventScope timer( + isolate, Logger::TimerEventScope::v8_recompile_synchronous); // If crankshaft succeeded, install the optimized code else install // the unoptimized code. OptimizingCompiler::Status status = optimizing_compiler->last_status(); @@ -910,16 +1011,25 @@ void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) { InstallCodeCommon(*info); if (status == OptimizingCompiler::SUCCEEDED) { Handle<Code> code = info->code(); - ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty()); + ASSERT(info->shared_info()->scope_info() != ScopeInfo::Empty(isolate)); info->closure()->ReplaceCode(*code); if (info->shared_info()->SearchOptimizedCodeMap( info->closure()->context()->native_context()) == -1) { InsertCodeIntoOptimizedCodeMap(*info); } + if (FLAG_trace_parallel_recompilation) { + PrintF(" ** Optimized code for "); + info->closure()->PrintName(); + PrintF(" installed.\n"); + } } else { info->SetCode(Handle<Code>(info->shared_info()->code())); InstallFullCode(*info); } + // Optimized code is finally replacing unoptimized code. Reset the latter's + // profiler ticks to prevent too soon re-opt after a deopt. + info->shared_info()->code()->set_profiler_ticks(0); + ASSERT(!info->closure()->IsMarkedForInstallingRecompiledCode()); } @@ -931,7 +1041,8 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, info.SetScope(literal->scope()); info.SetLanguageMode(literal->scope()->language_mode()); - LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal); + Isolate* isolate = info.isolate(); + LiveEditFunctionTracker live_edit_tracker(isolate, literal); // Determine if the function can be lazily compiled. This is necessary to // allow some of our builtin JS files to be lazily compiled. These // builtins cannot be handled lazily by the parser, since we have to know @@ -943,14 +1054,13 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal, // Debug::FindSharedFunctionInfoInScript. bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext(); bool allow_lazy = literal->AllowsLazyCompilation() && - !LiveEditFunctionTracker::IsActive(info.isolate()) && - (!info.isolate()->DebuggerHasBreakPoints() || allow_lazy_without_ctx); + !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx); - Handle<ScopeInfo> scope_info(ScopeInfo::Empty()); + Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate)); // Generate code if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) { - Handle<Code> code = info.isolate()->builtins()->LazyCompile(); + Handle<Code> code = isolate->builtins()->LazyCompile(); info.SetCode(code); } else if (GenerateCode(&info)) { ASSERT(!info.code().is_null()); @@ -1024,7 +1134,7 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag, // script name and line number. Check explicitly whether logging is // enabled as finding the line number is not free. 
if (info->isolate()->logger()->is_logging_code_events() || - CpuProfiler::is_profiling(info->isolate())) { + info->isolate()->cpu_profiler()->is_profiling()) { Handle<Script> script = info->script(); Handle<Code> code = info->code(); if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile)) diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h index af9459566d..5e69661b4d 100644 --- a/deps/v8/src/compiler.h +++ b/deps/v8/src/compiler.h @@ -35,7 +35,17 @@ namespace v8 { namespace internal { +static const int kPrologueOffsetNotSet = -1; + class ScriptDataImpl; +class HydrogenCodeStub; + +// ParseRestriction is used to restrict the set of valid statements in a +// unit of compilation. Restriction violations cause a syntax error. +enum ParseRestriction { + NO_PARSE_RESTRICTION, // All expressions are allowed. + ONLY_SINGLE_FUNCTION_LITERAL // Only a single FunctionLiteral expression. +}; // CompilationInfo encapsulates some information known at compile time. It // is constructed based on the resources available at compile-time. @@ -44,16 +54,15 @@ class CompilationInfo { CompilationInfo(Handle<Script> script, Zone* zone); CompilationInfo(Handle<SharedFunctionInfo> shared_info, Zone* zone); CompilationInfo(Handle<JSFunction> closure, Zone* zone); + CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone); - virtual ~CompilationInfo(); + ~CompilationInfo(); Isolate* isolate() { ASSERT(Isolate::Current() == isolate_); return isolate_; } - Zone* zone() { - return zone_; - } + Zone* zone() { return zone_; } bool is_lazy() const { return IsLazy::decode(flags_); } bool is_eval() const { return IsEval::decode(flags_); } bool is_global() const { return IsGlobal::decode(flags_); } @@ -70,10 +79,15 @@ class CompilationInfo { Handle<JSFunction> closure() const { return closure_; } Handle<SharedFunctionInfo> shared_info() const { return shared_info_; } Handle<Script> script() const { return script_; } + HydrogenCodeStub* code_stub() {return code_stub_; } v8::Extension* extension() const { return extension_; } ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; } Handle<Context> context() const { return context_; } BailoutId osr_ast_id() const { return osr_ast_id_; } + int opt_count() const { return opt_count_; } + int num_parameters() const; + int num_heap_slots() const; + Code::Flags flags() const; void MarkAsEval() { ASSERT(!is_lazy()); @@ -96,9 +110,47 @@ class CompilationInfo { void MarkAsNative() { flags_ |= IsNative::encode(true); } + bool is_native() const { return IsNative::decode(flags_); } + + bool is_calling() const { + return is_deferred_calling() || is_non_deferred_calling(); + } + + void MarkAsDeferredCalling() { + flags_ |= IsDeferredCalling::encode(true); + } + + bool is_deferred_calling() const { + return IsDeferredCalling::decode(flags_); + } + + void MarkAsNonDeferredCalling() { + flags_ |= IsNonDeferredCalling::encode(true); + } + + bool is_non_deferred_calling() const { + return IsNonDeferredCalling::decode(flags_); + } + + void MarkAsSavesCallerDoubles() { + flags_ |= SavesCallerDoubles::encode(true); + } + + bool saves_caller_doubles() const { + return SavesCallerDoubles::decode(flags_); + } + + void SetParseRestriction(ParseRestriction restriction) { + flags_ = ParseRestricitonField::update(flags_, restriction); + } + + ParseRestriction parse_restriction() const { + return ParseRestricitonField::decode(flags_); + } + void SetFunction(FunctionLiteral* literal) { ASSERT(function_ == NULL); function_ = literal; @@ -149,6 +201,7 @@ class 
CompilationInfo { // Accessors for the different compilation modes. bool IsOptimizing() const { return mode_ == OPTIMIZE; } bool IsOptimizable() const { return mode_ == BASE; } + bool IsStub() const { return mode_ == STUB; } void SetOptimizing(BailoutId osr_ast_id) { SetMode(OPTIMIZE); osr_ast_id_ = osr_ast_id; @@ -186,6 +239,16 @@ class CompilationInfo { const char* bailout_reason() const { return bailout_reason_; } void set_bailout_reason(const char* reason) { bailout_reason_ = reason; } + int prologue_offset() const { + ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_); + return prologue_offset_; + } + + void set_prologue_offset(int prologue_offset) { + ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_); + prologue_offset_ = prologue_offset; + } + private: Isolate* isolate_; @@ -197,21 +260,11 @@ class CompilationInfo { enum Mode { BASE, OPTIMIZE, - NONOPT + NONOPT, + STUB }; - void Initialize(Mode mode) { - mode_ = V8::UseCrankshaft() ? mode : NONOPT; - ASSERT(!script_.is_null()); - if (script_->type()->value() == Script::TYPE_NATIVE) { - MarkAsNative(); - } - if (!shared_info_.is_null()) { - ASSERT(language_mode() == CLASSIC_MODE); - SetLanguageMode(shared_info_->language_mode()); - } - set_bailout_reason("unknown"); - } + void Initialize(Isolate* isolate, Mode mode, Zone* zone); void SetMode(Mode mode) { ASSERT(V8::UseCrankshaft()); @@ -237,7 +290,16 @@ class CompilationInfo { // If compiling for debugging produce just full code matching the // initial mode setting. class IsCompilingForDebugging: public BitField<bool, 8, 1> {}; - + // If the compiled code contains calls that require building a frame + class IsCalling: public BitField<bool, 9, 1> {}; + // If the compiled code contains calls that require building a frame + class IsDeferredCalling: public BitField<bool, 10, 1> {}; + // If the compiled code contains calls that require building a frame + class IsNonDeferredCalling: public BitField<bool, 11, 1> {}; + // If the compiled code saves double caller registers that it clobbers. + class SavesCallerDoubles: public BitField<bool, 12, 1> {}; + // If the set of valid statements is restricted. + class ParseRestricitonField: public BitField<ParseRestriction, 13, 1> {}; unsigned flags_; @@ -249,6 +311,8 @@ class CompilationInfo { Scope* scope_; // The global scope provided as a convenience. Scope* global_scope_; + // For compiled stubs, the stub object + HydrogenCodeStub* code_stub_; // The compiled code. Handle<Code> code_; @@ -285,6 +349,12 @@ class CompilationInfo { const char* bailout_reason_; + int prologue_offset_; + + // A copy of shared_info()->opt_count() to avoid handle deref + // during graph optimization. 
+ int opt_count_; + DISALLOW_COPY_AND_ASSIGN(CompilationInfo); }; @@ -305,6 +375,10 @@ class CompilationInfoWithZone: public CompilationInfo { : CompilationInfo(closure, &zone_), zone_(closure->GetIsolate()), zone_scope_(&zone_, DELETE_ON_EXIT) {} + explicit CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate) + : CompilationInfo(stub, isolate, &zone_), + zone_(isolate), + zone_scope_(&zone_, DELETE_ON_EXIT) {} private: Zone zone_; @@ -330,7 +404,7 @@ class CompilationHandleScope BASE_EMBEDDED { class HGraph; -class HGraphBuilder; +class HOptimizedGraphBuilder; class LChunk; // A helper class that calls the three compilation phases in @@ -362,6 +436,7 @@ class OptimizingCompiler: public ZoneObject { Status last_status() const { return last_status_; } CompilationInfo* info() const { return info_; } + Isolate* isolate() const { return info()->isolate(); } MUST_USE_RESULT Status AbortOptimization() { info_->AbortOptimization(); @@ -372,7 +447,7 @@ class OptimizingCompiler: public ZoneObject { private: CompilationInfo* info_; TypeFeedbackOracle* oracle_; - HGraphBuilder* graph_builder_; + HOptimizedGraphBuilder* graph_builder_; HGraph* graph_; LChunk* chunk_; int64_t time_taken_to_create_graph_; @@ -441,6 +516,7 @@ class Compiler : public AllStatic { Handle<Context> context, bool is_global, LanguageMode language_mode, + ParseRestriction restriction, int scope_position); // Compile from function info (used for lazy compilation). Returns true on diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc index 93c9795404..5edbc5ac2d 100644 --- a/deps/v8/src/contexts.cc +++ b/deps/v8/src/contexts.cc @@ -55,6 +55,15 @@ JSBuiltinsObject* Context::builtins() { } +Context* Context::global_context() { + Context* current = this; + while (!current->IsGlobalContext()) { + current = current->previous(); + } + return current; +} + + Context* Context::native_context() { // Fast case: the global object for this context has been set. In // that case, the global object has a direct pointer to the global @@ -183,6 +192,10 @@ Handle<Object> Context::Lookup(Handle<String> name, ? IMMUTABLE_CHECK_INITIALIZED_HARMONY : IMMUTABLE_IS_INITIALIZED_HARMONY; break; + case MODULE: + *attributes = READ_ONLY; + *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY; + break; case DYNAMIC: case DYNAMIC_GLOBAL: case DYNAMIC_LOCAL: @@ -251,8 +264,6 @@ void Context::AddOptimizedFunction(JSFunction* function) { } } - CHECK(function->next_function_link()->IsUndefined()); - // Check that the context belongs to the weak native contexts list. bool found = false; Object* context = GetHeap()->native_contexts_list(); @@ -265,6 +276,16 @@ void Context::AddOptimizedFunction(JSFunction* function) { } CHECK(found); #endif + + // If the function link field is already used then the function was + // enqueued as a code flushing candidate and we remove it now. 
+ if (!function->next_function_link()->IsUndefined()) { + CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher(); + flusher->EvictCandidate(function); + } + + ASSERT(function->next_function_link()->IsUndefined()); + function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST)); set(OPTIMIZED_FUNCTIONS_LIST, function); } @@ -306,14 +327,11 @@ void Context::ClearOptimizedFunctions() { Handle<Object> Context::ErrorMessageForCodeGenerationFromStrings() { - Handle<Object> result(error_message_for_code_gen_from_strings()); - if (result->IsUndefined()) { - const char* error = - "Code generation from strings disallowed for this context"; - Isolate* isolate = Isolate::Current(); - result = isolate->factory()->NewStringFromAscii(i::CStrVector(error)); - } - return result; + Handle<Object> result(error_message_for_code_gen_from_strings(), + GetIsolate()); + if (!result->IsUndefined()) return result; + return GetIsolate()->factory()->NewStringFromAscii(i::CStrVector( + "Code generation from strings disallowed for this context")); } @@ -322,7 +340,7 @@ bool Context::IsBootstrappingOrValidParentContext( Object* object, Context* child) { // During bootstrapping we allow all objects to pass as // contexts. This is necessary to fix circular dependencies. - if (Isolate::Current()->bootstrapper()->IsActive()) return true; + if (child->GetIsolate()->bootstrapper()->IsActive()) return true; if (!object->IsContext()) return false; Context* context = Context::cast(object); return context->IsNativeContext() || context->IsGlobalContext() || diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h index 378185f947..cd7ed6adc5 100644 --- a/deps/v8/src/contexts.h +++ b/deps/v8/src/contexts.h @@ -152,16 +152,19 @@ enum BindingFlags { V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \ V(MAP_CACHE_INDEX, Object, map_cache) \ - V(CONTEXT_DATA_INDEX, Object, data) \ + V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \ V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \ V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \ error_message_for_code_gen_from_strings) \ + V(SYMBOL_DELEGATE_INDEX, JSObject, symbol_delegate) \ V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \ to_complete_property_descriptor) \ V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \ V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \ V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \ - V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \ + V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \ + V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \ + V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \ V(RANDOM_SEED_INDEX, ByteArray, random_seed) // JSFunctions are pairs (context, function code), sometimes also called @@ -281,14 +284,17 @@ class Context: public FixedArray { OPAQUE_REFERENCE_FUNCTION_INDEX, CONTEXT_EXTENSION_FUNCTION_INDEX, OUT_OF_MEMORY_INDEX, - CONTEXT_DATA_INDEX, + EMBEDDER_DATA_INDEX, ALLOW_CODE_GEN_FROM_STRINGS_INDEX, ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, + SYMBOL_DELEGATE_INDEX, TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, DERIVED_HAS_TRAP_INDEX, DERIVED_GET_TRAP_INDEX, DERIVED_SET_TRAP_INDEX, - PROXY_ENUMERATE, + PROXY_ENUMERATE_INDEX, + OBSERVERS_NOTIFY_CHANGE_INDEX, + OBSERVERS_DELIVER_CHANGES_INDEX, RANDOM_SEED_INDEX, // Properties from here are treated as weak references by the full GC. 
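A note on the flag encoding introduced in compiler.h above: the new IsDeferredCalling, IsNonDeferredCalling, SavesCallerDoubles and ParseRestricitonField members follow V8's BitField pattern, in which each flag class owns a fixed bit range inside the single flags_ word and exposes encode/decode/update helpers. The following is a minimal standalone sketch of that pattern, assuming illustrative names and bit positions rather than V8's actual template or layout:

// Minimal sketch of the BitField flag-packing pattern (not V8's real template).
#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitField {
  static uint32_t mask() { return ((1u << size) - 1u) << shift; }
  static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
  static T decode(uint32_t word) { return static_cast<T>((word & mask()) >> shift); }
  static uint32_t update(uint32_t word, T value) { return (word & ~mask()) | encode(value); }
};

enum ParseRestriction { NO_PARSE_RESTRICTION, ONLY_SINGLE_FUNCTION_LITERAL };

// Hypothetical layout: one bit per boolean flag, one bit for the restriction.
typedef BitField<bool, 0, 1> SavesCallerDoubles;
typedef BitField<bool, 1, 1> IsDeferredCalling;
typedef BitField<ParseRestriction, 2, 1> ParseRestrictionField;

int main() {
  uint32_t flags = 0;
  flags |= SavesCallerDoubles::encode(true);
  flags = ParseRestrictionField::update(flags, ONLY_SINGLE_FUNCTION_LITERAL);
  assert(SavesCallerDoubles::decode(flags));
  assert(!IsDeferredCalling::decode(flags));
  assert(ParseRestrictionField::decode(flags) == ONLY_SINGLE_FUNCTION_LITERAL);
  return 0;
}

Packing the compile-time flags this way keeps CompilationInfo at a single word of flag storage while still giving each query (is_deferred_calling(), saves_caller_doubles(), parse_restriction()) a cheap mask-and-shift accessor.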
@@ -341,6 +347,9 @@ class Context: public FixedArray { // The builtins object. JSBuiltinsObject* builtins(); + // Get the innermost global context by traversing the context chain. + Context* global_context(); + // Compute the native context by traversing the context chain. Context* native_context(); @@ -450,6 +459,9 @@ class Context: public FixedArray { static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid); static bool IsBootstrappingOrGlobalObject(Object* object); #endif + + STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize); + STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex); }; } } // namespace v8::internal diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h index e272fe6c08..7edaf22407 100644 --- a/deps/v8/src/conversions-inl.h +++ b/deps/v8/src/conversions-inl.h @@ -212,7 +212,7 @@ double InternalStringToIntDouble(UnicodeCache* unicode_cache, } // Rounding up may cause overflow. - if ((number & ((int64_t)1 << 53)) != 0) { + if ((number & (static_cast<int64_t>(1) << 53)) != 0) { exponent++; number >>= 1; } @@ -481,9 +481,9 @@ double InternalStringToDouble(UnicodeCache* unicode_cache, sign = NEGATIVE; } - static const char kInfinitySymbol[] = "Infinity"; - if (*current == kInfinitySymbol[0]) { - if (!SubStringEquals(¤t, end, kInfinitySymbol)) { + static const char kInfinityString[] = "Infinity"; + if (*current == kInfinityString[0]) { + if (!SubStringEquals(¤t, end, kInfinityString)) { return JunkStringValue(); } diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc index 811c0aa2e6..7c8265e981 100644 --- a/deps/v8/src/counters.cc +++ b/deps/v8/src/counters.cc @@ -81,17 +81,22 @@ void HistogramTimer::Start() { stop_time_ = 0; start_time_ = OS::Ticks(); } + if (FLAG_log_internal_timer_events) { + LOG(Isolate::Current(), TimerEvent(Logger::START, histogram_.name_)); + } } // Stop the timer and record the results. void HistogramTimer::Stop() { if (histogram_.Enabled()) { stop_time_ = OS::Ticks(); - // Compute the delta between start and stop, in milliseconds. 
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000; histogram_.AddSample(milliseconds); } + if (FLAG_log_internal_timer_events) { + LOG(Isolate::Current(), TimerEvent(Logger::END, histogram_.name_)); + } } } } // namespace v8::internal diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc index 3cbac77858..7979eb4d21 100644 --- a/deps/v8/src/cpu-profiler.cc +++ b/deps/v8/src/cpu-profiler.cc @@ -39,7 +39,6 @@ namespace v8 { namespace internal { -static const int kEventsBufferSize = 256 * KB; static const int kTickSamplesBufferChunkSize = 64 * KB; static const int kTickSamplesBufferChunksCount = 16; static const int kProfilerStackSize = 64 * KB; @@ -58,7 +57,7 @@ ProfilerEventsProcessor::ProfilerEventsProcessor(ProfileGenerator* generator) void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, const char* prefix, - String* name, + Name* name, Address start) { if (FilterOutCodeCreateEvent(tag)) return; CodeEventsContainer evt_rec; @@ -74,7 +73,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag, void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag, - String* name, + Name* name, String* resource_name, int line_number, Address start, @@ -259,109 +258,66 @@ void ProfilerEventsProcessor::Run() { } -void CpuProfiler::StartProfiling(const char* title) { - ASSERT(Isolate::Current()->cpu_profiler() != NULL); - Isolate::Current()->cpu_profiler()->StartCollectingProfile(title); -} - - -void CpuProfiler::StartProfiling(String* title) { - ASSERT(Isolate::Current()->cpu_profiler() != NULL); - Isolate::Current()->cpu_profiler()->StartCollectingProfile(title); -} - - -CpuProfile* CpuProfiler::StopProfiling(const char* title) { - Isolate* isolate = Isolate::Current(); - return is_profiling(isolate) ? - isolate->cpu_profiler()->StopCollectingProfile(title) : NULL; -} - - -CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) { - Isolate* isolate = Isolate::Current(); - return is_profiling(isolate) ? - isolate->cpu_profiler()->StopCollectingProfile( - security_token, title) : NULL; -} - - int CpuProfiler::GetProfilesCount() { - ASSERT(Isolate::Current()->cpu_profiler() != NULL); // The count of profiles doesn't depend on a security token. 
- return Isolate::Current()->cpu_profiler()->profiles_->Profiles( - TokenEnumerator::kNoSecurityToken)->length(); + return profiles_->Profiles(TokenEnumerator::kNoSecurityToken)->length(); } CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) { - ASSERT(Isolate::Current()->cpu_profiler() != NULL); - CpuProfiler* profiler = Isolate::Current()->cpu_profiler(); - const int token = profiler->token_enumerator_->GetTokenId(security_token); - return profiler->profiles_->Profiles(token)->at(index); + const int token = token_enumerator_->GetTokenId(security_token); + return profiles_->Profiles(token)->at(index); } CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) { - ASSERT(Isolate::Current()->cpu_profiler() != NULL); - CpuProfiler* profiler = Isolate::Current()->cpu_profiler(); - const int token = profiler->token_enumerator_->GetTokenId(security_token); - return profiler->profiles_->GetProfile(token, uid); + const int token = token_enumerator_->GetTokenId(security_token); + return profiles_->GetProfile(token, uid); } -TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) { - if (CpuProfiler::is_profiling(isolate)) { - return isolate->cpu_profiler()->processor_->TickSampleEvent(); - } else { - return NULL; - } +TickSample* CpuProfiler::TickSampleEvent() { + if (is_profiling_) return processor_->TickSampleEvent(); + return NULL; } void CpuProfiler::DeleteAllProfiles() { - Isolate* isolate = Isolate::Current(); - ASSERT(isolate->cpu_profiler() != NULL); - if (is_profiling(isolate)) { - isolate->cpu_profiler()->StopProcessor(); - } - isolate->cpu_profiler()->ResetProfiles(); + if (is_profiling_) StopProcessor(); + ResetProfiles(); } void CpuProfiler::DeleteProfile(CpuProfile* profile) { - ASSERT(Isolate::Current()->cpu_profiler() != NULL); - Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile); + profiles_->RemoveProfile(profile); delete profile; } bool CpuProfiler::HasDetachedProfiles() { - ASSERT(Isolate::Current()->cpu_profiler() != NULL); - return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles(); + return profiles_->HasDetachedProfiles(); } -void CpuProfiler::CallbackEvent(String* name, Address entry_point) { - Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent( +void CpuProfiler::CallbackEvent(Name* name, Address entry_point) { + processor_->CallbackCreateEvent( Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point); } void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, const char* comment) { - Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent( + processor_->CodeCreateEvent( tag, comment, code->address(), code->ExecutableSize()); } void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, String* name) { - Isolate* isolate = Isolate::Current(); - isolate->cpu_profiler()->processor_->CodeCreateEvent( + Code* code, Name* name) { + processor_->CodeCreateEvent( tag, name, - isolate->heap()->empty_string(), + isolate_->heap()->empty_string(), v8::CpuProfileNode::kNoLineNumberInfo, code->address(), code->ExecutableSize(), @@ -372,12 +328,11 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - String* name) { - Isolate* isolate = Isolate::Current(); - isolate->cpu_profiler()->processor_->CodeCreateEvent( + Name* name) { + processor_->CodeCreateEvent( tag, name, - isolate->heap()->empty_string(), + 
isolate_->heap()->empty_string(), v8::CpuProfileNode::kNoLineNumberInfo, code->address(), code->ExecutableSize(), @@ -389,7 +344,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, String* source, int line) { - Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent( + processor_->CodeCreateEvent( tag, shared->DebugName(), source, @@ -402,7 +357,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code, int args_count) { - Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent( + processor_->CodeCreateEvent( tag, args_count, code->address(), @@ -411,7 +366,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, void CpuProfiler::CodeMoveEvent(Address from, Address to) { - Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to); + processor_->CodeMoveEvent(from, to); } @@ -420,19 +375,18 @@ void CpuProfiler::CodeDeleteEvent(Address from) { void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) { - CpuProfiler* profiler = Isolate::Current()->cpu_profiler(); - profiler->processor_->SharedFunctionInfoMoveEvent(from, to); + processor_->SharedFunctionInfoMoveEvent(from, to); } -void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) { - Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent( +void CpuProfiler::GetterCallbackEvent(Name* name, Address entry_point) { + processor_->CallbackCreateEvent( Logger::CALLBACK_TAG, "get ", name, entry_point); } void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) { - Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent( + processor_->RegExpCodeCreateEvent( Logger::REG_EXP_TAG, "RegExp: ", source, @@ -441,14 +395,15 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) { } -void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) { - Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent( +void CpuProfiler::SetterCallbackEvent(Name* name, Address entry_point) { + processor_->CallbackCreateEvent( Logger::CALLBACK_TAG, "set ", name, entry_point); } -CpuProfiler::CpuProfiler() - : profiles_(new CpuProfilesCollection()), +CpuProfiler::CpuProfiler(Isolate* isolate) + : isolate_(isolate), + profiles_(new CpuProfilesCollection()), next_profile_uid_(1), token_enumerator_(new TokenEnumerator()), generator_(NULL), @@ -469,43 +424,41 @@ void CpuProfiler::ResetProfiles() { profiles_ = new CpuProfilesCollection(); } -void CpuProfiler::StartCollectingProfile(const char* title) { - if (profiles_->StartProfiling(title, next_profile_uid_++)) { +void CpuProfiler::StartProfiling(const char* title, bool record_samples) { + if (profiles_->StartProfiling(title, next_profile_uid_++, record_samples)) { StartProcessorIfNotStarted(); } processor_->AddCurrentStack(); } -void CpuProfiler::StartCollectingProfile(String* title) { - StartCollectingProfile(profiles_->GetName(title)); +void CpuProfiler::StartProfiling(String* title, bool record_samples) { + StartProfiling(profiles_->GetName(title), record_samples); } void CpuProfiler::StartProcessorIfNotStarted() { if (processor_ == NULL) { - Isolate* isolate = Isolate::Current(); - // Disable logging when using the new implementation. 
- saved_logging_nesting_ = isolate->logger()->logging_nesting_; - isolate->logger()->logging_nesting_ = 0; + saved_logging_nesting_ = isolate_->logger()->logging_nesting_; + isolate_->logger()->logging_nesting_ = 0; generator_ = new ProfileGenerator(profiles_); processor_ = new ProfilerEventsProcessor(generator_); - NoBarrier_Store(&is_profiling_, true); + is_profiling_ = true; processor_->Start(); // Enumerate stuff we already have in the heap. - if (isolate->heap()->HasBeenSetUp()) { + if (isolate_->heap()->HasBeenSetUp()) { if (!FLAG_prof_browser_mode) { bool saved_log_code_flag = FLAG_log_code; FLAG_log_code = true; - isolate->logger()->LogCodeObjects(); + isolate_->logger()->LogCodeObjects(); FLAG_log_code = saved_log_code_flag; } - isolate->logger()->LogCompiledFunctions(); - isolate->logger()->LogAccessorCallbacks(); + isolate_->logger()->LogCompiledFunctions(); + isolate_->logger()->LogAccessorCallbacks(); } // Enable stack sampling. - Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_); + Sampler* sampler = reinterpret_cast<Sampler*>(isolate_->logger()->ticker_); if (!sampler->IsActive()) { sampler->Start(); need_to_stop_sampler_ = true; @@ -515,7 +468,8 @@ void CpuProfiler::StartProcessorIfNotStarted() { } -CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { +CpuProfile* CpuProfiler::StopProfiling(const char* title) { + if (!is_profiling_) return NULL; const double actual_sampling_rate = generator_->actual_sampling_rate(); StopProcessorIfLastProfile(title); CpuProfile* result = @@ -529,8 +483,8 @@ CpuProfile* CpuProfiler::StopCollectingProfile(const char* title) { } -CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token, - String* title) { +CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) { + if (!is_profiling_) return NULL; const double actual_sampling_rate = generator_->actual_sampling_rate(); const char* profile_title = profiles_->GetName(title); StopProcessorIfLastProfile(profile_title); @@ -545,14 +499,14 @@ void CpuProfiler::StopProcessorIfLastProfile(const char* title) { void CpuProfiler::StopProcessor() { - Logger* logger = Isolate::Current()->logger(); + Logger* logger = isolate_->logger(); Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_); sampler->DecreaseProfilingDepth(); if (need_to_stop_sampler_) { sampler->Stop(); need_to_stop_sampler_ = false; } - NoBarrier_Store(&is_profiling_, false); + is_profiling_ = false; processor_->Stop(); processor_->Join(); delete processor_; @@ -563,20 +517,4 @@ void CpuProfiler::StopProcessor() { } -void CpuProfiler::SetUp() { - Isolate* isolate = Isolate::Current(); - if (isolate->cpu_profiler() == NULL) { - isolate->set_cpu_profiler(new CpuProfiler()); - } -} - - -void CpuProfiler::TearDown() { - Isolate* isolate = Isolate::Current(); - if (isolate->cpu_profiler() != NULL) { - delete isolate->cpu_profiler(); - } - isolate->set_cpu_profiler(NULL); -} - } } // namespace v8::internal diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h index 9cd4484209..89d9c81c15 100644 --- a/deps/v8/src/cpu-profiler.h +++ b/deps/v8/src/cpu-profiler.h @@ -134,10 +134,10 @@ class ProfilerEventsProcessor : public Thread { // Events adding methods. Called by VM threads. 
void CallbackCreateEvent(Logger::LogEventsAndTags tag, - const char* prefix, String* name, + const char* prefix, Name* name, Address start); void CodeCreateEvent(Logger::LogEventsAndTags tag, - String* name, + Name* name, String* resource_name, int line_number, Address start, unsigned size, Address shared); @@ -184,86 +184,71 @@ class ProfilerEventsProcessor : public Thread { unsigned enqueue_order_; }; -} } // namespace v8::internal - -#define PROFILE(isolate, Call) \ - LOG_CODE_EVENT(isolate, Call); \ - do { \ - if (v8::internal::CpuProfiler::is_profiling(isolate)) { \ - v8::internal::CpuProfiler::Call; \ - } \ +#define PROFILE(IsolateGetter, Call) \ + do { \ + Isolate* cpu_profiler_isolate = (IsolateGetter); \ + LOG_CODE_EVENT(cpu_profiler_isolate, Call); \ + CpuProfiler* cpu_profiler = cpu_profiler_isolate->cpu_profiler(); \ + if (cpu_profiler->is_profiling()) { \ + cpu_profiler->Call; \ + } \ } while (false) -namespace v8 { -namespace internal { - - -// TODO(isolates): isolatify this class. class CpuProfiler { public: - static void SetUp(); - static void TearDown(); - - static void StartProfiling(const char* title); - static void StartProfiling(String* title); - static CpuProfile* StopProfiling(const char* title); - static CpuProfile* StopProfiling(Object* security_token, String* title); - static int GetProfilesCount(); - static CpuProfile* GetProfile(Object* security_token, int index); - static CpuProfile* FindProfile(Object* security_token, unsigned uid); - static void DeleteAllProfiles(); - static void DeleteProfile(CpuProfile* profile); - static bool HasDetachedProfiles(); + explicit CpuProfiler(Isolate* isolate); + ~CpuProfiler(); + + void StartProfiling(const char* title, bool record_samples = false); + void StartProfiling(String* title, bool record_samples); + CpuProfile* StopProfiling(const char* title); + CpuProfile* StopProfiling(Object* security_token, String* title); + int GetProfilesCount(); + CpuProfile* GetProfile(Object* security_token, int index); + CpuProfile* FindProfile(Object* security_token, unsigned uid); + void DeleteAllProfiles(); + void DeleteProfile(CpuProfile* profile); + bool HasDetachedProfiles(); // Invoked from stack sampler (thread or signal handler.) - static TickSample* TickSampleEvent(Isolate* isolate); + TickSample* TickSampleEvent(); // Must be called via PROFILE macro, otherwise will crash when // profiling is not enabled. 
- static void CallbackEvent(String* name, Address entry_point); - static void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, const char* comment); - static void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, String* name); - static void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, - SharedFunctionInfo* shared, - String* name); - static void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, + void CallbackEvent(Name* name, Address entry_point); + void CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, const char* comment); + void CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, Name* name); + void CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, SharedFunctionInfo* shared, - String* source, int line); - static void CodeCreateEvent(Logger::LogEventsAndTags tag, - Code* code, int args_count); - static void CodeMovingGCEvent() {} - static void CodeMoveEvent(Address from, Address to); - static void CodeDeleteEvent(Address from); - static void GetterCallbackEvent(String* name, Address entry_point); - static void RegExpCodeCreateEvent(Code* code, String* source); - static void SetterCallbackEvent(String* name, Address entry_point); - static void SharedFunctionInfoMoveEvent(Address from, Address to); - - // TODO(isolates): this doesn't have to use atomics anymore. - - static INLINE(bool is_profiling(Isolate* isolate)) { - CpuProfiler* profiler = isolate->cpu_profiler(); - return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_); - } + Name* name); + void CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, + SharedFunctionInfo* shared, + String* source, int line); + void CodeCreateEvent(Logger::LogEventsAndTags tag, + Code* code, int args_count); + void CodeMovingGCEvent() {} + void CodeMoveEvent(Address from, Address to); + void CodeDeleteEvent(Address from); + void GetterCallbackEvent(Name* name, Address entry_point); + void RegExpCodeCreateEvent(Code* code, String* source); + void SetterCallbackEvent(Name* name, Address entry_point); + void SharedFunctionInfoMoveEvent(Address from, Address to); + + INLINE(bool is_profiling() const) { return is_profiling_; } private: - CpuProfiler(); - ~CpuProfiler(); - void StartCollectingProfile(const char* title); - void StartCollectingProfile(String* title); void StartProcessorIfNotStarted(); - CpuProfile* StopCollectingProfile(const char* title); - CpuProfile* StopCollectingProfile(Object* security_token, String* title); void StopProcessorIfLastProfile(const char* title); void StopProcessor(); void ResetProfiles(); + Isolate* isolate_; CpuProfilesCollection* profiles_; unsigned next_profile_uid_; TokenEnumerator* token_enumerator_; @@ -271,7 +256,7 @@ class CpuProfiler { ProfilerEventsProcessor* processor_; int saved_logging_nesting_; bool need_to_stop_sampler_; - Atomic32 is_profiling_; + bool is_profiling_; private: DISALLOW_COPY_AND_ASSIGN(CpuProfiler); diff --git a/deps/v8/src/d8-debug.cc b/deps/v8/src/d8-debug.cc index de0faa8ae6..a20de43b76 100644 --- a/deps/v8/src/d8-debug.cc +++ b/deps/v8/src/d8-debug.cc @@ -54,7 +54,9 @@ void HandleDebugEvent(DebugEvent event, Handle<Object> exec_state, Handle<Object> event_data, Handle<Value> data) { - HandleScope scope; + // TODO(svenpanne) There should be a way to retrieve this in the callback. + Isolate* isolate = Isolate::GetCurrent(); + HandleScope scope(isolate); // Check for handled event. 
if (event != Break && event != Exception && event != AfterCompile) { @@ -69,7 +71,7 @@ void HandleDebugEvent(DebugEvent event, Function::Cast(*event_data->Get(to_json_fun_name)); Local<Value> event_json = to_json_fun->Call(event_data, 0, NULL); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate, &try_catch); return; } @@ -77,7 +79,7 @@ void HandleDebugEvent(DebugEvent event, Handle<Object> details = Shell::DebugMessageDetails(Handle<String>::Cast(event_json)); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate, &try_catch); return; } String::Utf8Value str(details->Get(String::New("text"))); @@ -93,7 +95,7 @@ void HandleDebugEvent(DebugEvent event, Local<Object> cmd_processor = Object::Cast(*fun->Call(exec_state, 0, NULL)); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate, &try_catch); return; } @@ -114,7 +116,7 @@ void HandleDebugEvent(DebugEvent event, Handle<Value> request = Shell::DebugCommandToJSONRequest(String::New(command)); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate, &try_catch); continue; } @@ -138,7 +140,7 @@ void HandleDebugEvent(DebugEvent event, args[0] = request; Handle<Value> response_val = fun->Call(cmd_processor, kArgc, args); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate, &try_catch); continue; } Handle<String> response = Handle<String>::Cast(response_val); @@ -146,7 +148,7 @@ void HandleDebugEvent(DebugEvent event, // Convert the debugger response into text details and the running state. Handle<Object> response_details = Shell::DebugMessageDetails(response); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate, &try_catch); continue; } String::Utf8Value text_str(response_details->Get(String::New("text"))); @@ -159,8 +161,8 @@ void HandleDebugEvent(DebugEvent event, } -void RunRemoteDebugger(int port) { - RemoteDebugger debugger(port); +void RunRemoteDebugger(Isolate* isolate, int port) { + RemoteDebugger debugger(isolate, port); debugger.Run(); } @@ -273,15 +275,15 @@ RemoteDebuggerEvent* RemoteDebugger::GetEvent() { void RemoteDebugger::HandleMessageReceived(char* message) { - Locker lock; - HandleScope scope; + Locker lock(isolate_); + HandleScope scope(isolate_); // Print the event details. TryCatch try_catch; Handle<Object> details = Shell::DebugMessageDetails(Handle<String>::Cast(String::New(message))); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate_, &try_catch); PrintPrompt(); return; } @@ -302,15 +304,15 @@ void RemoteDebugger::HandleMessageReceived(char* message) { void RemoteDebugger::HandleKeyboardCommand(char* command) { - Locker lock; - HandleScope scope; + Locker lock(isolate_); + HandleScope scope(isolate_); // Convert the debugger command to a JSON debugger request. TryCatch try_catch; Handle<Value> request = Shell::DebugCommandToJSONRequest(String::New(command)); if (try_catch.HasCaught()) { - Shell::ReportException(&try_catch); + Shell::ReportException(isolate_, &try_catch); PrintPrompt(); return; } diff --git a/deps/v8/src/d8-debug.h b/deps/v8/src/d8-debug.h index aeff3c121c..a6cea2a81b 100644 --- a/deps/v8/src/d8-debug.h +++ b/deps/v8/src/d8-debug.h @@ -43,7 +43,7 @@ void HandleDebugEvent(DebugEvent event, // Start the remove debugger connecting to a V8 debugger agent on the specified // port. 
-void RunRemoteDebugger(int port); +void RunRemoteDebugger(Isolate* isolate, int port); // Forward declerations. class RemoteDebuggerEvent; @@ -53,8 +53,9 @@ class ReceiverThread; // Remote debugging class. class RemoteDebugger { public: - explicit RemoteDebugger(int port) - : port_(port), + explicit RemoteDebugger(Isolate* isolate, int port) + : isolate_(isolate), + port_(port), event_access_(i::OS::CreateMutex()), event_available_(i::OS::CreateSemaphore(0)), head_(NULL), tail_(NULL) {} @@ -79,6 +80,7 @@ class RemoteDebugger { // Get connection to agent in debugged V8. i::Socket* conn() { return conn_; } + Isolate* isolate_; int port_; // Port used to connect to debugger V8. i::Socket* conn_; // Connection to debugger agent in debugged V8. diff --git a/deps/v8/src/d8-posix.cc b/deps/v8/src/d8-posix.cc index 8a278e4e42..1be782a241 100644 --- a/deps/v8/src/d8-posix.cc +++ b/deps/v8/src/d8-posix.cc @@ -449,7 +449,7 @@ static bool WaitForChild(int pid, // Implementation of the system() function (see d8.h for details). Handle<Value> Shell::System(const Arguments& args) { - HandleScope scope; + HandleScope scope(args.GetIsolate()); int read_timeout = -1; int total_timeout = -1; if (!GetTimeouts(args, &read_timeout, &total_timeout)) return v8::Undefined(); diff --git a/deps/v8/src/d8-readline.cc b/deps/v8/src/d8-readline.cc index ed7721c513..5226364c64 100644 --- a/deps/v8/src/d8-readline.cc +++ b/deps/v8/src/d8-readline.cc @@ -25,8 +25,8 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - #include <cstdio> // NOLINT +#include <string.h> // NOLINT #include <readline/readline.h> // NOLINT #include <readline/history.h> // NOLINT @@ -35,7 +35,6 @@ #include "d8.h" - // There are incompatibilities between different versions and different // implementations of readline. This smooths out one known incompatibility. 
#if RL_READLINE_VERSION >= 0x0500 @@ -50,7 +49,7 @@ class ReadLineEditor: public LineEditor { public: ReadLineEditor() : LineEditor(LineEditor::READLINE, "readline") { } virtual Handle<String> Prompt(const char* prompt); - virtual bool Open(); + virtual bool Open(Isolate* isolate); virtual bool Close(); virtual void AddHistory(const char* str); @@ -58,9 +57,13 @@ class ReadLineEditor: public LineEditor { static const int kMaxHistoryEntries; private: +#ifndef V8_SHARED static char** AttemptedCompletion(const char* text, int start, int end); static char* CompletionGenerator(const char* text, int state); +#endif // V8_SHARED static char kWordBreakCharacters[]; + + Isolate* isolate_; }; @@ -74,9 +77,19 @@ const char* ReadLineEditor::kHistoryFileName = ".d8_history"; const int ReadLineEditor::kMaxHistoryEntries = 1000; -bool ReadLineEditor::Open() { +bool ReadLineEditor::Open(Isolate* isolate) { + isolate_ = isolate; + rl_initialize(); + +#ifdef V8_SHARED + // Don't do completion on shared library mode + // http://cnswww.cns.cwru.edu/php/chet/readline/readline.html#SEC24 + rl_bind_key('\t', rl_insert); +#else rl_attempted_completion_function = AttemptedCompletion; +#endif // V8_SHARED + rl_completer_word_break_characters = kWordBreakCharacters; rl_bind_key('\t', rl_complete); using_history(); @@ -122,6 +135,7 @@ void ReadLineEditor::AddHistory(const char* str) { } +#ifndef V8_SHARED char** ReadLineEditor::AttemptedCompletion(const char* text, int start, int end) { @@ -134,12 +148,14 @@ char** ReadLineEditor::AttemptedCompletion(const char* text, char* ReadLineEditor::CompletionGenerator(const char* text, int state) { static unsigned current_index; static Persistent<Array> current_completions; + Isolate* isolate = read_line_editor.isolate_; + Locker lock(isolate); if (state == 0) { HandleScope scope; Local<String> full_text = String::New(rl_line_buffer, rl_point); Handle<Array> completions = - Shell::GetCompletions(String::New(text), full_text); - current_completions = Persistent<Array>::New(completions); + Shell::GetCompletions(isolate, String::New(text), full_text); + current_completions = Persistent<Array>::New(isolate, completions); current_index = 0; } if (current_index < current_completions->Length()) { @@ -150,11 +166,12 @@ char* ReadLineEditor::CompletionGenerator(const char* text, int state) { String::Utf8Value str(str_obj); return strdup(*str); } else { - current_completions.Dispose(); + current_completions.Dispose(isolate); current_completions.Clear(); return NULL; } } +#endif // V8_SHARED } // namespace v8 diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc index b3b1bb8a17..805a0cf1ad 100644 --- a/deps/v8/src/d8.cc +++ b/deps/v8/src/d8.cc @@ -67,42 +67,84 @@ namespace v8 { -LineEditor *LineEditor::first_ = NULL; - -LineEditor::LineEditor(Type type, const char* name) - : type_(type), - name_(name), - next_(first_) { - first_ = this; +static Handle<Value> Throw(const char* message) { + return ThrowException(String::New(message)); } -LineEditor* LineEditor::Get() { - LineEditor* current = first_; - LineEditor* best = current; - while (current != NULL) { - if (current->type_ > best->type_) - best = current; - current = current->next_; +// TODO(rossberg): should replace these by proper uses of HasInstance, +// once we figure out a good way to make the templates global. 
+const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_"; +const char kArrayMarkerPropName[] = "d8::_is_typed_array_"; + + +#define FOR_EACH_SYMBOL(V) \ + V(ArrayBuffer, "ArrayBuffer") \ + V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \ + V(ArrayMarkerPropName, kArrayMarkerPropName) \ + V(buffer, "buffer") \ + V(byteLength, "byteLength") \ + V(byteOffset, "byteOffset") \ + V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \ + V(length, "length") + + +class Symbols { + public: + explicit Symbols(Isolate* isolate) : isolate_(isolate) { + HandleScope scope(isolate); +#define INIT_SYMBOL(name, value) \ + name##_ = Persistent<String>::New(isolate, String::NewSymbol(value)); + FOR_EACH_SYMBOL(INIT_SYMBOL) +#undef INIT_SYMBOL + isolate->SetData(this); + } + + ~Symbols() { +#define DISPOSE_SYMBOL(name, value) name##_.Dispose(isolate_); + FOR_EACH_SYMBOL(DISPOSE_SYMBOL) +#undef DISPOSE_SYMBOL + isolate_->SetData(NULL); // Not really needed, just to be sure... + } + +#define DEFINE_SYMBOL_GETTER(name, value) \ + static Persistent<String> name(Isolate* isolate) { \ + return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \ } - return best; + FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER) +#undef DEFINE_SYMBOL_GETTER + + private: + Isolate* isolate_; +#define DEFINE_MEMBER(name, value) Persistent<String> name##_; + FOR_EACH_SYMBOL(DEFINE_MEMBER) +#undef DEFINE_MEMBER +}; + + +LineEditor *LineEditor::current_ = NULL; + + +LineEditor::LineEditor(Type type, const char* name) + : type_(type), name_(name) { + if (current_ == NULL || current_->type_ < type) current_ = this; } class DumbLineEditor: public LineEditor { public: - DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { } + explicit DumbLineEditor(Isolate* isolate) + : LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { } virtual Handle<String> Prompt(const char* prompt); + private: + Isolate* isolate_; }; -static DumbLineEditor dumb_line_editor; - - Handle<String> DumbLineEditor::Prompt(const char* prompt) { printf("%s", prompt); - return Shell::ReadFromStdin(); + return Shell::ReadFromStdin(isolate_); } @@ -115,7 +157,6 @@ i::Mutex* Shell::context_mutex_(i::OS::CreateMutex()); Persistent<Context> Shell::utility_context_; #endif // V8_SHARED -LineEditor* Shell::console = NULL; Persistent<Context> Shell::evaluation_context_; ShellOptions Shell::options; const char* Shell::kPrompt = "d8> "; @@ -140,7 +181,8 @@ const char* Shell::ToCString(const v8::String::Utf8Value& value) { // Executes a string within the current v8 context. -bool Shell::ExecuteString(Handle<String> source, +bool Shell::ExecuteString(Isolate* isolate, + Handle<String> source, Handle<Value> name, bool print_result, bool report_exceptions) { @@ -149,7 +191,7 @@ bool Shell::ExecuteString(Handle<String> source, #else bool FLAG_debugger = false; #endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT - HandleScope handle_scope; + HandleScope handle_scope(isolate); TryCatch try_catch; options.script_executed = true; if (FLAG_debugger) { @@ -160,7 +202,7 @@ bool Shell::ExecuteString(Handle<String> source, if (script.IsEmpty()) { // Print errors that happened during compilation. if (report_exceptions && !FLAG_debugger) - ReportException(&try_catch); + ReportException(isolate, &try_catch); return false; } else { Handle<Value> result = script->Run(); @@ -168,17 +210,33 @@ bool Shell::ExecuteString(Handle<String> source, ASSERT(try_catch.HasCaught()); // Print errors that happened during execution. 
if (report_exceptions && !FLAG_debugger) - ReportException(&try_catch); + ReportException(isolate, &try_catch); return false; } else { ASSERT(!try_catch.HasCaught()); - if (print_result && !result->IsUndefined()) { - // If all went well and the result wasn't undefined then print - // the returned value. - v8::String::Utf8Value str(result); - size_t count = fwrite(*str, sizeof(**str), str.length(), stdout); - (void) count; // Silence GCC-4.5.x "unused result" warning. - printf("\n"); + if (print_result) { +#if !defined(V8_SHARED) + if (options.test_shell) { +#endif + if (!result->IsUndefined()) { + // If all went well and the result wasn't undefined then print + // the returned value. + v8::String::Utf8Value str(result); + fwrite(*str, sizeof(**str), str.length(), stdout); + printf("\n"); + } +#if !defined(V8_SHARED) + } else { + Context::Scope context_scope(utility_context_); + Handle<Object> global = utility_context_->Global(); + Handle<Value> fun = global->Get(String::New("Stringify")); + Handle<Value> argv[1] = { result }; + Handle<Value> s = Handle<Function>::Cast(fun)->Call(global, 1, argv); + v8::String::Utf8Value str(s); + fwrite(*str, sizeof(**str), str.length(), stdout); + printf("\n"); + } +#endif } return true; } @@ -196,7 +254,7 @@ Handle<Value> Shell::Print(const Arguments& args) { Handle<Value> Shell::Write(const Arguments& args) { for (int i = 0; i < args.Length(); i++) { - HandleScope handle_scope; + HandleScope handle_scope(args.GetIsolate()); if (i != 0) { printf(" "); } @@ -213,36 +271,36 @@ Handle<Value> Shell::Write(const Arguments& args) { Exit(1); } } - return Undefined(); + return Undefined(args.GetIsolate()); } Handle<Value> Shell::EnableProfiler(const Arguments& args) { V8::ResumeProfiler(); - return Undefined(); + return Undefined(args.GetIsolate()); } Handle<Value> Shell::DisableProfiler(const Arguments& args) { V8::PauseProfiler(); - return Undefined(); + return Undefined(args.GetIsolate()); } Handle<Value> Shell::Read(const Arguments& args) { String::Utf8Value file(args[0]); if (*file == NULL) { - return ThrowException(String::New("Error loading file")); + return Throw("Error loading file"); } - Handle<String> source = ReadFile(*file); + Handle<String> source = ReadFile(args.GetIsolate(), *file); if (source.IsEmpty()) { - return ThrowException(String::New("Error loading file")); + return Throw("Error loading file"); } return source; } -Handle<String> Shell::ReadFromStdin() { +Handle<String> Shell::ReadFromStdin(Isolate* isolate) { static const int kBufferSize = 256; char buffer[kBufferSize]; Handle<String> accumulator = String::New(""); @@ -253,7 +311,7 @@ Handle<String> Shell::ReadFromStdin() { // If fgets gets an error, just give up. char* input = NULL; { // Release lock for blocking input. 
- Unlocker unlock(Isolate::GetCurrent()); + Unlocker unlock(isolate); input = fgets(buffer, kBufferSize, stdin); } if (input == NULL) return Handle<String>(); @@ -274,20 +332,24 @@ Handle<String> Shell::ReadFromStdin() { Handle<Value> Shell::Load(const Arguments& args) { for (int i = 0; i < args.Length(); i++) { - HandleScope handle_scope; + HandleScope handle_scope(args.GetIsolate()); String::Utf8Value file(args[i]); if (*file == NULL) { - return ThrowException(String::New("Error loading file")); + return Throw("Error loading file"); } - Handle<String> source = ReadFile(*file); + Handle<String> source = ReadFile(args.GetIsolate(), *file); if (source.IsEmpty()) { - return ThrowException(String::New("Error loading file")); + return Throw("Error loading file"); } - if (!ExecuteString(source, String::New(*file), false, true)) { - return ThrowException(String::New("Error executing file")); + if (!ExecuteString(args.GetIsolate(), + source, + String::New(*file), + false, + true)) { + return Throw("Error executing file"); } } - return Undefined(); + return Undefined(args.GetIsolate()); } static int32_t convertToInt(Local<Value> value_in, TryCatch* try_catch) { @@ -314,7 +376,7 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) { if (try_catch->HasCaught()) return 0; if (raw_value < 0) { - ThrowException(String::New("Array length must not be negative.")); + Throw("Array length must not be negative."); return 0; } @@ -323,41 +385,38 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) { ASSERT(kMaxLength == i::ExternalArray::kMaxLength); #endif // V8_SHARED if (raw_value > static_cast<int32_t>(kMaxLength)) { - ThrowException( - String::New("Array length exceeds maximum length.")); + Throw("Array length exceeds maximum length."); } return raw_value; } -// TODO(rossberg): should replace these by proper uses of HasInstance, -// once we figure out a good way to make the templates global. -const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_"; -const char kArrayMarkerPropName[] = "d8::_is_typed_array_"; - - -Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer, +Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate, + Handle<Object> buffer, int32_t length) { static const int32_t kMaxSize = 0x7fffffff; // Make sure the total size fits into a (signed) int. 
if (length < 0 || length > kMaxSize) { - return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)")); + return Throw("ArrayBuffer exceeds maximum size (2G)"); } uint8_t* data = new uint8_t[length]; if (data == NULL) { - return ThrowException(String::New("Memory allocation failed")); + return Throw("Memory allocation failed"); } memset(data, 0, length); - buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True()); - Persistent<Object> persistent_array = Persistent<Object>::New(buffer); - persistent_array.MakeWeak(data, ExternalArrayWeakCallback); - persistent_array.MarkIndependent(); - V8::AdjustAmountOfExternalAllocatedMemory(length); + buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True()); + Persistent<Object> persistent_array = + Persistent<Object>::New(isolate, buffer); + persistent_array.MakeWeak(isolate, data, ExternalArrayWeakCallback); + persistent_array.MarkIndependent(isolate); + isolate->AdjustAmountOfExternalAllocatedMemory(length); buffer->SetIndexedPropertiesToExternalArrayData( data, v8::kExternalByteArray, length); - buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly); + buffer->Set(Symbols::byteLength(isolate), + Int32::New(length, isolate), + ReadOnly); return buffer; } @@ -373,18 +432,18 @@ Handle<Value> Shell::ArrayBuffer(const Arguments& args) { } if (args.Length() == 0) { - return ThrowException( - String::New("ArrayBuffer constructor must have one argument")); + return Throw("ArrayBuffer constructor must have one argument"); } TryCatch try_catch; int32_t length = convertToUint(args[0], &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); - return CreateExternalArrayBuffer(args.This(), length); + return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length); } -Handle<Object> Shell::CreateExternalArray(Handle<Object> array, +Handle<Object> Shell::CreateExternalArray(Isolate* isolate, + Handle<Object> array, Handle<Object> buffer, ExternalArrayType type, int32_t length, @@ -400,12 +459,22 @@ Handle<Object> Shell::CreateExternalArray(Handle<Object> array, array->SetIndexedPropertiesToExternalArrayData( static_cast<uint8_t*>(data) + byteOffset, type, length); - array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type)); - array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly); - array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly); - array->Set(String::New("length"), Int32::New(length), ReadOnly); - array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size)); - array->Set(String::New("buffer"), buffer, ReadOnly); + array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate), + Int32::New(type, isolate)); + array->Set(Symbols::byteLength(isolate), + Int32::New(byteLength, isolate), + ReadOnly); + array->Set(Symbols::byteOffset(isolate), + Int32::New(byteOffset, isolate), + ReadOnly); + array->Set(Symbols::length(isolate), + Int32::New(length, isolate), + ReadOnly); + array->Set(Symbols::BYTES_PER_ELEMENT(isolate), + Int32::New(element_size, isolate)); + array->Set(Symbols::buffer(isolate), + buffer, + ReadOnly); return array; } @@ -414,6 +483,7 @@ Handle<Object> Shell::CreateExternalArray(Handle<Object> array, Handle<Value> Shell::CreateExternalArray(const Arguments& args, ExternalArrayType type, int32_t element_size) { + Isolate* isolate = args.GetIsolate(); if (!args.IsConstructCall()) { Handle<Value>* rec_args = new Handle<Value>[args.Length()]; for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i]; @@ 
-439,16 +509,15 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, int32_t byteOffset; bool init_from_array = false; if (args.Length() == 0) { - return ThrowException( - String::New("Array constructor must have at least one argument")); + return Throw("Array constructor must have at least one argument"); } if (args[0]->IsObject() && !args[0]->ToObject()->GetHiddenValue( - String::New(kArrayBufferMarkerPropName)).IsEmpty()) { + Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) { // Construct from ArrayBuffer. buffer = args[0]->ToObject(); int32_t bufferLength = - convertToUint(buffer->Get(String::New("byteLength")), &try_catch); + convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() < 2 || args[1]->IsUndefined()) { @@ -457,11 +526,10 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, byteOffset = convertToUint(args[1], &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (byteOffset > bufferLength) { - return ThrowException(String::New("byteOffset out of bounds")); + return Throw("byteOffset out of bounds"); } if (byteOffset % element_size != 0) { - return ThrowException( - String::New("byteOffset must be multiple of element size")); + return Throw("byteOffset must be multiple of element size"); } } @@ -469,23 +537,23 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, byteLength = bufferLength - byteOffset; length = byteLength / element_size; if (byteLength % element_size != 0) { - return ThrowException( - String::New("buffer size must be multiple of element size")); + return Throw("buffer size must be multiple of element size"); } } else { length = convertToUint(args[2], &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); byteLength = length * element_size; if (byteOffset + byteLength > bufferLength) { - return ThrowException(String::New("length out of bounds")); + return Throw("length out of bounds"); } } } else { if (args[0]->IsObject() && - args[0]->ToObject()->Has(String::New("length"))) { + args[0]->ToObject()->Has(Symbols::length(isolate))) { // Construct from array. 
- length = convertToUint( - args[0]->ToObject()->Get(String::New("length")), &try_catch); + Local<Value> value = args[0]->ToObject()->Get(Symbols::length(isolate)); + if (try_catch.HasCaught()) return try_catch.ReThrow(); + length = convertToUint(value, &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); init_from_array = true; } else { @@ -497,21 +565,26 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args, byteOffset = 0; Handle<Object> global = Context::GetCurrent()->Global(); - Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer")); + Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate)); ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction()); - Handle<Value> buffer_args[] = { Uint32::New(byteLength) }; + Handle<Value> buffer_args[] = { Uint32::New(byteLength, isolate) }; Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance( 1, buffer_args); if (try_catch.HasCaught()) return result; buffer = result->ToObject(); } - Handle<Object> array = CreateExternalArray( - args.This(), buffer, type, length, byteLength, byteOffset, element_size); + Handle<Object> array = + CreateExternalArray(isolate, args.This(), buffer, type, length, + byteLength, byteOffset, element_size); if (init_from_array) { Handle<Object> init = args[0]->ToObject(); - for (int i = 0; i < length; ++i) array->Set(i, init->Get(i)); + for (int i = 0; i < length; ++i) { + Local<Value> value = init->Get(i); + if (try_catch.HasCaught()) return try_catch.ReThrow(); + array->Set(i, value); + } } return array; @@ -522,25 +595,23 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) { TryCatch try_catch; if (!args.This()->IsObject()) { - return ThrowException( - String::New("'slice' invoked on non-object receiver")); + return Throw("'slice' invoked on non-object receiver"); } + Isolate* isolate = args.GetIsolate(); Local<Object> self = args.This(); Local<Value> marker = - self->GetHiddenValue(String::New(kArrayBufferMarkerPropName)); + self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate)); if (marker.IsEmpty()) { - return ThrowException( - String::New("'slice' invoked on wrong receiver type")); + return Throw("'slice' invoked on wrong receiver type"); } int32_t length = - convertToUint(self->Get(String::New("byteLength")), &try_catch); + convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() == 0) { - return ThrowException( - String::New("'slice' must have at least one argument")); + return Throw("'slice' must have at least one argument"); } int32_t begin = convertToInt(args[0], &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); @@ -561,7 +632,7 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) { } Local<Function> constructor = Local<Function>::Cast(self->GetConstructor()); - Handle<Value> new_args[] = { Uint32::New(end - begin) }; + Handle<Value> new_args[] = { Uint32::New(end - begin, isolate) }; Handle<Value> result = constructor->NewInstance(1, new_args); if (try_catch.HasCaught()) return result; Handle<Object> buffer = result->ToObject(); @@ -579,32 +650,31 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) { TryCatch try_catch; if (!args.This()->IsObject()) { - return ThrowException( - String::New("'subarray' invoked on non-object receiver")); + return Throw("'subarray' invoked on non-object receiver"); } + Isolate* isolate = args.GetIsolate(); Local<Object> self = args.This(); - Local<Value> 
marker = self->GetHiddenValue(String::New(kArrayMarkerPropName)); + Local<Value> marker = + self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)); if (marker.IsEmpty()) { - return ThrowException( - String::New("'subarray' invoked on wrong receiver type")); + return Throw("'subarray' invoked on wrong receiver type"); } - Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject(); + Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t length = - convertToUint(self->Get(String::New("length")), &try_catch); + convertToUint(self->Get(Symbols::length(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t byteOffset = - convertToUint(self->Get(String::New("byteOffset")), &try_catch); + convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t element_size = - convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch); + convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() == 0) { - return ThrowException( - String::New("'subarray' must have at least one argument")); + return Throw("'subarray' must have at least one argument"); } int32_t begin = convertToInt(args[0], &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); @@ -629,7 +699,7 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) { Local<Function> constructor = Local<Function>::Cast(self->GetConstructor()); Handle<Value> construct_args[] = { - buffer, Uint32::New(byteOffset), Uint32::New(length) + buffer, Uint32::New(byteOffset, isolate), Uint32::New(length, isolate) }; return constructor->NewInstance(3, construct_args); } @@ -639,35 +709,33 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { TryCatch try_catch; if (!args.This()->IsObject()) { - return ThrowException( - String::New("'set' invoked on non-object receiver")); + return Throw("'set' invoked on non-object receiver"); } + Isolate* isolate = args.GetIsolate(); Local<Object> self = args.This(); - Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName)); + Local<Value> marker = + self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)); if (marker.IsEmpty()) { - return ThrowException( - String::New("'set' invoked on wrong receiver type")); + return Throw("'set' invoked on wrong receiver type"); } int32_t length = - convertToUint(self->Get(String::New("length")), &try_catch); + convertToUint(self->Get(Symbols::length(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t element_size = - convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch); + convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (args.Length() == 0) { - return ThrowException( - String::New("'set' must have at least one argument")); + return Throw("'set' must have at least one argument"); } if (!args[0]->IsObject() || - !args[0]->ToObject()->Has(String::New("length"))) { - return ThrowException( - String::New("'set' invoked with non-array argument")); + !args[0]->ToObject()->Has(Symbols::length(isolate))) { + return Throw("'set' invoked with non-array argument"); } Handle<Object> source = args[0]->ToObject(); int32_t source_length = - convertToUint(source->Get(String::New("length")), &try_catch); + 
convertToUint(source->Get(Symbols::length(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t offset; @@ -678,31 +746,32 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { if (try_catch.HasCaught()) return try_catch.ReThrow(); } if (offset + source_length > length) { - return ThrowException(String::New("offset or source length out of bounds")); + return Throw("offset or source length out of bounds"); } int32_t source_element_size; - if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) { + if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) { source_element_size = 0; } else { source_element_size = - convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch); + convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)), + &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); } if (element_size == source_element_size && self->GetConstructor()->StrictEquals(source->GetConstructor())) { // Use memmove on the array buffers. - Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject(); + Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); Handle<Object> source_buffer = - source->Get(String::New("buffer"))->ToObject(); + source->Get(Symbols::buffer(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t byteOffset = - convertToUint(self->Get(String::New("byteOffset")), &try_catch); + convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t source_byteOffset = - convertToUint(source->Get(String::New("byteOffset")), &try_catch); + convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>( @@ -718,10 +787,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { } } else { // Need to copy element-wise to make the right conversions. - Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject(); + Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); Handle<Object> source_buffer = - source->Get(String::New("buffer"))->ToObject(); + source->Get(Symbols::buffer(isolate))->ToObject(); if (try_catch.HasCaught()) return try_catch.ReThrow(); if (buffer->StrictEquals(source_buffer)) { @@ -729,10 +798,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { // This gets a bit tricky in the case of different element sizes // (which, of course, is extremely unlikely to ever occur in practice). int32_t byteOffset = - convertToUint(self->Get(String::New("byteOffset")), &try_catch); + convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); int32_t source_byteOffset = - convertToUint(source->Get(String::New("byteOffset")), &try_catch); + convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch); if (try_catch.HasCaught()) return try_catch.ReThrow(); // Copy as much as we can from left to right. 
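The overlap handling in Shell::ArraySet above follows the usual memmove discipline for a typed-array set() whose source and destination views share a buffer: copy forward while the write position trails the read position, otherwise copy backward so no element is clobbered before it has been read. Below is a minimal JavaScript sketch of that rule, using plain arrays and an illustrative helper name (the shell's actual code additionally has to cope with differing element sizes):

// Overlap-safe copy between two ranges that may share storage.
function overlappingSet(dest, destOffset, src, srcOffset, count) {
  if (destOffset <= srcOffset) {
    // Writing at or behind the read position: a forward copy never
    // overwrites an element that still has to be read.
    for (var i = 0; i < count; i++) {
      dest[destOffset + i] = src[srcOffset + i];
    }
  } else {
    // Writing ahead of the read position: copy backward instead.
    for (var i = count - 1; i >= 0; i--) {
      dest[destOffset + i] = src[srcOffset + i];
    }
  }
  return dest;
}

var a = [0, 1, 2, 3, 4, 5];
overlappingSet(a, 2, a, 1, 4);  // a is now [0, 1, 1, 2, 3, 4]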
@@ -772,17 +841,19 @@ Handle<Value> Shell::ArraySet(const Arguments& args) { } } - return Undefined(); + return Undefined(args.GetIsolate()); } -void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) { - HandleScope scope; +void Shell::ExternalArrayWeakCallback(v8::Isolate* isolate, + Persistent<Value> object, + void* data) { + HandleScope scope(isolate); int32_t length = - object->ToObject()->Get(String::New("byteLength"))->Uint32Value(); - V8::AdjustAmountOfExternalAllocatedMemory(-length); + object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value(); + isolate->AdjustAmountOfExternalAllocatedMemory(-length); delete[] static_cast<uint8_t*>(data); - object.Dispose(); + object.Dispose(isolate); } @@ -835,18 +906,16 @@ Handle<Value> Shell::Uint8ClampedArray(const Arguments& args) { Handle<Value> Shell::Yield(const Arguments& args) { - v8::Unlocker unlocker; - return Undefined(); + v8::Unlocker unlocker(args.GetIsolate()); + return Undefined(args.GetIsolate()); } Handle<Value> Shell::Quit(const Arguments& args) { int exit_code = args[0]->Int32Value(); -#ifndef V8_SHARED OnExit(); -#endif // V8_SHARED exit(exit_code); - return Undefined(); + return Undefined(args.GetIsolate()); } @@ -855,8 +924,8 @@ Handle<Value> Shell::Version(const Arguments& args) { } -void Shell::ReportException(v8::TryCatch* try_catch) { - HandleScope handle_scope; +void Shell::ReportException(Isolate* isolate, v8::TryCatch* try_catch) { + HandleScope handle_scope(isolate); #if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) bool enter_context = !Context::InContext(); if (enter_context) utility_context_->Enter(); @@ -902,8 +971,10 @@ void Shell::ReportException(v8::TryCatch* try_catch) { #ifndef V8_SHARED -Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) { - HandleScope handle_scope; +Handle<Array> Shell::GetCompletions(Isolate* isolate, + Handle<String> text, + Handle<String> full) { + HandleScope handle_scope(isolate); Context::Scope context_scope(utility_context_); Handle<Object> global = utility_context_->Global(); Handle<Value> fun = global->Get(String::New("GetCompletions")); @@ -1044,13 +1115,13 @@ void Shell::AddHistogramSample(void* histogram, int sample) { } -void Shell::InstallUtilityScript() { - Locker lock; - HandleScope scope; +void Shell::InstallUtilityScript(Isolate* isolate) { + Locker lock(isolate); + HandleScope scope(isolate); // If we use the utility context, we have to set the security tokens so that // utility, evaluation and debug context can all access each other. - utility_context_->SetSecurityToken(Undefined()); - evaluation_context_->SetSecurityToken(Undefined()); + utility_context_->SetSecurityToken(Undefined(isolate)); + evaluation_context_->SetSecurityToken(Undefined(isolate)); Context::Scope utility_scope(utility_context_); #ifdef ENABLE_DEBUGGER_SUPPORT @@ -1145,7 +1216,7 @@ Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) { } -Handle<ObjectTemplate> Shell::CreateGlobalTemplate() { +Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) { Handle<ObjectTemplate> global_template = ObjectTemplate::New(); global_template->Set(String::New("print"), FunctionTemplate::New(Print)); global_template->Set(String::New("write"), FunctionTemplate::New(Write)); @@ -1165,7 +1236,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() { // Bind the handlers for external arrays. 
PropertyAttribute attr = static_cast<PropertyAttribute>(ReadOnly | DontDelete); - global_template->Set(String::New("ArrayBuffer"), + global_template->Set(Symbols::ArrayBuffer(isolate), CreateArrayBufferTemplate(ArrayBuffer), attr); global_template->Set(String::New("Int8Array"), CreateArrayTemplate(Int8Array), attr); @@ -1186,12 +1257,6 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() { global_template->Set(String::New("Uint8ClampedArray"), CreateArrayTemplate(Uint8ClampedArray), attr); -#ifdef LIVE_OBJECT_LIST - global_template->Set(String::New("lol_is_enabled"), True()); -#else - global_template->Set(String::New("lol_is_enabled"), False()); -#endif - #if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64) Handle<ObjectTemplate> os_templ = ObjectTemplate::New(); AddOSMethods(os_templ); @@ -1202,7 +1267,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate() { } -void Shell::Initialize() { +void Shell::Initialize(Isolate* isolate) { #ifdef COMPRESS_STARTUP_DATA_BZ2 BZip2Decompressor startup_data_decompressor; int bz2_result = startup_data_decompressor.Decompress(); @@ -1223,12 +1288,15 @@ void Shell::Initialize() { V8::SetAddHistogramSampleFunction(AddHistogramSample); } #endif // V8_SHARED - if (options.test_shell) return; +} + +void Shell::InitializeDebugger(Isolate* isolate) { + if (options.test_shell) return; #ifndef V8_SHARED - Locker lock; - HandleScope scope; - Handle<ObjectTemplate> global_template = CreateGlobalTemplate(); + Locker lock(isolate); + HandleScope scope(isolate); + Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate); utility_context_ = Context::New(NULL, global_template); #ifdef ENABLE_DEBUGGER_SUPPORT @@ -1242,13 +1310,13 @@ void Shell::Initialize() { } -Persistent<Context> Shell::CreateEvaluationContext() { +Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) { #ifndef V8_SHARED // This needs to be a critical section since this is not thread-safe i::ScopedLock lock(context_mutex_); #endif // V8_SHARED // Initialize the global objects - Handle<ObjectTemplate> global_template = CreateGlobalTemplate(); + Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate); Persistent<Context> context = Context::New(NULL, global_template); ASSERT(!context.IsEmpty()); Context::Scope scope(context); @@ -1291,10 +1359,13 @@ int CompareKeys(const void* a, const void* b) { return strcmp(static_cast<const CounterAndKey*>(a)->key, static_cast<const CounterAndKey*>(b)->key); } +#endif // V8_SHARED void Shell::OnExit() { - if (console != NULL) console->Close(); + LineEditor* line_editor = LineEditor::Get(); + if (line_editor) line_editor->Close(); +#ifndef V8_SHARED if (i::FLAG_dump_counters) { int number_of_counters = 0; for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) { @@ -1327,10 +1398,12 @@ void Shell::OnExit() { "-------------+\n"); delete [] counters; } + delete context_mutex_; delete counters_file_; delete counter_map_; -} #endif // V8_SHARED +} + static FILE* FOpen(const char* path, const char* mode) { @@ -1354,9 +1427,9 @@ static FILE* FOpen(const char* path, const char* mode) { } -static char* ReadChars(const char* name, int* size_out) { +static char* ReadChars(Isolate* isolate, const char* name, int* size_out) { // Release the V8 lock while reading files. 
- v8::Unlocker unlocker(Isolate::GetCurrent()); + v8::Unlocker unlocker(isolate); FILE* file = FOpen(name, "rb"); if (file == NULL) return NULL; @@ -1381,24 +1454,27 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) { String::Utf8Value filename(args[0]); int length; if (*filename == NULL) { - return ThrowException(String::New("Error loading file")); + return Throw("Error loading file"); } - uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length)); + uint8_t* data = reinterpret_cast<uint8_t*>( + ReadChars(args.GetIsolate(), *filename, &length)); if (data == NULL) { - return ThrowException(String::New("Error reading file")); + return Throw("Error reading file"); } + Isolate* isolate = args.GetIsolate(); Handle<Object> buffer = Object::New(); - buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True()); - Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer); - persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback); - persistent_buffer.MarkIndependent(); - V8::AdjustAmountOfExternalAllocatedMemory(length); + buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True()); + Persistent<Object> persistent_buffer = + Persistent<Object>::New(isolate, buffer); + persistent_buffer.MakeWeak(isolate, data, ExternalArrayWeakCallback); + persistent_buffer.MarkIndependent(isolate); + isolate->AdjustAmountOfExternalAllocatedMemory(length); buffer->SetIndexedPropertiesToExternalArrayData( data, kExternalUnsignedByteArray, length); - buffer->Set(String::New("byteLength"), - Int32::New(static_cast<int32_t>(length)), ReadOnly); + buffer->Set(Symbols::byteLength(isolate), + Int32::New(static_cast<int32_t>(length), isolate), ReadOnly); return buffer; } @@ -1427,29 +1503,29 @@ static char* ReadWord(char* data) { // Reads a file into a v8 string. -Handle<String> Shell::ReadFile(const char* name) { +Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) { int size = 0; - char* chars = ReadChars(name, &size); + char* chars = ReadChars(isolate, name, &size); if (chars == NULL) return Handle<String>(); - Handle<String> result = String::New(chars); + Handle<String> result = String::New(chars, size); delete[] chars; return result; } -void Shell::RunShell() { - Locker locker; +void Shell::RunShell(Isolate* isolate) { + Locker locker(isolate); Context::Scope context_scope(evaluation_context_); - HandleScope outer_scope; + HandleScope outer_scope(isolate); Handle<String> name = String::New("(d8)"); - console = LineEditor::Get(); + LineEditor* console = LineEditor::Get(); printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name()); - console->Open(); + console->Open(isolate); while (true) { - HandleScope inner_scope; + HandleScope inner_scope(isolate); Handle<String> input = console->Prompt(Shell::kPrompt); if (input.IsEmpty()) break; - ExecuteString(input, name, true, true); + ExecuteString(isolate, input, name, true, true); } printf("\n"); } @@ -1459,9 +1535,9 @@ void Shell::RunShell() { class ShellThread : public i::Thread { public: // Takes ownership of the underlying char array of |files|. 
- ShellThread(int no, char* files) + ShellThread(Isolate* isolate, char* files) : Thread("d8:ShellThread"), - no_(no), files_(files) { } + isolate_(isolate), files_(files) { } ~ShellThread() { delete[] files_; @@ -1469,7 +1545,7 @@ class ShellThread : public i::Thread { virtual void Run(); private: - int no_; + Isolate* isolate_; char* files_; }; @@ -1487,13 +1563,14 @@ void ShellThread::Run() { } // Prepare the context for this thread. - Locker locker; - HandleScope outer_scope; - Persistent<Context> thread_context = Shell::CreateEvaluationContext(); + Locker locker(isolate_); + HandleScope outer_scope(isolate_); + Persistent<Context> thread_context = + Shell::CreateEvaluationContext(isolate_); Context::Scope context_scope(thread_context); while ((ptr != NULL) && (*ptr != '\0')) { - HandleScope inner_scope; + HandleScope inner_scope(isolate_); char* filename = ptr; ptr = ReadWord(ptr); @@ -1502,16 +1579,16 @@ void ShellThread::Run() { continue; } - Handle<String> str = Shell::ReadFile(filename); + Handle<String> str = Shell::ReadFile(isolate_, filename); if (str.IsEmpty()) { printf("File '%s' not found\n", filename); Shell::Exit(1); } - Shell::ExecuteString(str, String::New(filename), false, false); + Shell::ExecuteString(isolate_, str, String::New(filename), false, false); } - thread_context.Dispose(); + thread_context.Dispose(thread_context->GetIsolate()); ptr = next_line; } } @@ -1530,15 +1607,15 @@ SourceGroup::~SourceGroup() { } -void SourceGroup::Execute() { +void SourceGroup::Execute(Isolate* isolate) { for (int i = begin_offset_; i < end_offset_; ++i) { const char* arg = argv_[i]; if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) { // Execute argument given to -e option directly. - HandleScope handle_scope; + HandleScope handle_scope(isolate); Handle<String> file_name = String::New("unnamed"); Handle<String> source = String::New(argv_[i + 1]); - if (!Shell::ExecuteString(source, file_name, false, true)) { + if (!Shell::ExecuteString(isolate, source, file_name, false, true)) { Shell::Exit(1); } ++i; @@ -1546,14 +1623,14 @@ void SourceGroup::Execute() { // Ignore other options. They have been parsed already. } else { // Use all other arguments as names of files to load and run. 
- HandleScope handle_scope; + HandleScope handle_scope(isolate); Handle<String> file_name = String::New(arg); - Handle<String> source = ReadFile(arg); + Handle<String> source = ReadFile(isolate, arg); if (source.IsEmpty()) { printf("Error reading '%s'\n", arg); Shell::Exit(1); } - if (!Shell::ExecuteString(source, file_name, false, true)) { + if (!Shell::ExecuteString(isolate, source, file_name, false, true)) { Shell::Exit(1); } } @@ -1561,9 +1638,9 @@ void SourceGroup::Execute() { } -Handle<String> SourceGroup::ReadFile(const char* name) { +Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) { int size; - char* chars = ReadChars(name, &size); + char* chars = ReadChars(isolate, name, &size); if (chars == NULL) return Handle<String>(); Handle<String> result = String::New(chars, size); delete[] chars; @@ -1588,13 +1665,14 @@ void SourceGroup::ExecuteInThread() { { Isolate::Scope iscope(isolate); Locker lock(isolate); - HandleScope scope; - Persistent<Context> context = Shell::CreateEvaluationContext(); + HandleScope scope(isolate); + Symbols symbols(isolate); + Persistent<Context> context = Shell::CreateEvaluationContext(isolate); { Context::Scope cscope(context); - Execute(); + Execute(isolate); } - context.Dispose(); + context.Dispose(isolate); if (Shell::options.send_idle_notification) { const int kLongIdlePauseInMs = 1000; V8::ContextDisposedNotification(); @@ -1760,21 +1838,21 @@ bool Shell::SetOptions(int argc, char* argv[]) { } -int Shell::RunMain(int argc, char* argv[]) { +int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) { #ifndef V8_SHARED i::List<i::Thread*> threads(1); if (options.parallel_files != NULL) { for (int i = 0; i < options.num_parallel_files; i++) { char* files = NULL; - { Locker lock(Isolate::GetCurrent()); + { Locker lock(isolate); int size = 0; - files = ReadChars(options.parallel_files[i], &size); + files = ReadChars(isolate, options.parallel_files[i], &size); } if (files == NULL) { printf("File list '%s' not found\n", options.parallel_files[i]); Exit(1); } - ShellThread* thread = new ShellThread(threads.length(), files); + ShellThread* thread = new ShellThread(isolate, files); thread->Start(); threads.Add(thread); } @@ -1784,9 +1862,9 @@ int Shell::RunMain(int argc, char* argv[]) { } #endif // V8_SHARED { // NOLINT - Locker lock; - HandleScope scope; - Persistent<Context> context = CreateEvaluationContext(); + Locker lock(isolate); + HandleScope scope(isolate); + Persistent<Context> context = CreateEvaluationContext(isolate); if (options.last_run) { // Keep using the same context in the interactive shell. evaluation_context_ = context; @@ -1794,16 +1872,16 @@ int Shell::RunMain(int argc, char* argv[]) { // If the interactive debugger is enabled make sure to activate // it before running the files passed on the command line. 
if (i::FLAG_debugger) { - InstallUtilityScript(); + InstallUtilityScript(isolate); } #endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT } { Context::Scope cscope(context); - options.isolate_sources[0].Execute(); + options.isolate_sources[0].Execute(isolate); } if (!options.last_run) { - context.Dispose(); + context.Dispose(isolate); if (options.send_idle_notification) { const int kLongIdlePauseInMs = 1000; V8::ContextDisposedNotification(); @@ -1832,7 +1910,7 @@ int Shell::RunMain(int argc, char* argv[]) { } if (threads.length() > 0 && options.use_preemption) { - Locker lock; + Locker lock(isolate); Locker::StopPreemption(); } #endif // V8_SHARED @@ -1842,64 +1920,66 @@ int Shell::RunMain(int argc, char* argv[]) { int Shell::Main(int argc, char* argv[]) { if (!SetOptions(argc, argv)) return 1; - Initialize(); - int result = 0; - if (options.stress_opt || options.stress_deopt) { - Testing::SetStressRunType( - options.stress_opt ? Testing::kStressTypeOpt - : Testing::kStressTypeDeopt); - int stress_runs = Testing::GetStressRuns(); - for (int i = 0; i < stress_runs && result == 0; i++) { - printf("============ Stress %d/%d ============\n", i + 1, stress_runs); - Testing::PrepareStressRun(i); - options.last_run = (i == stress_runs - 1); - result = RunMain(argc, argv); - } - printf("======== Full Deoptimization =======\n"); - Testing::DeoptimizeAll(); + Isolate* isolate = Isolate::GetCurrent(); + DumbLineEditor dumb_line_editor(isolate); + { + Initialize(isolate); + Symbols symbols(isolate); + InitializeDebugger(isolate); + + if (options.stress_opt || options.stress_deopt) { + Testing::SetStressRunType(options.stress_opt + ? Testing::kStressTypeOpt + : Testing::kStressTypeDeopt); + int stress_runs = Testing::GetStressRuns(); + for (int i = 0; i < stress_runs && result == 0; i++) { + printf("============ Stress %d/%d ============\n", i + 1, stress_runs); + Testing::PrepareStressRun(i); + options.last_run = (i == stress_runs - 1); + result = RunMain(isolate, argc, argv); + } + printf("======== Full Deoptimization =======\n"); + Testing::DeoptimizeAll(); #if !defined(V8_SHARED) - } else if (i::FLAG_stress_runs > 0) { - int stress_runs = i::FLAG_stress_runs; - for (int i = 0; i < stress_runs && result == 0; i++) { - printf("============ Run %d/%d ============\n", i + 1, stress_runs); - options.last_run = (i == stress_runs - 1); - result = RunMain(argc, argv); - } + } else if (i::FLAG_stress_runs > 0) { + int stress_runs = i::FLAG_stress_runs; + for (int i = 0; i < stress_runs && result == 0; i++) { + printf("============ Run %d/%d ============\n", i + 1, stress_runs); + options.last_run = (i == stress_runs - 1); + result = RunMain(isolate, argc, argv); + } #endif - } else { - result = RunMain(argc, argv); - } + } else { + result = RunMain(isolate, argc, argv); + } #if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) - // Run remote debugger if requested, but never on --test - if (i::FLAG_remote_debugger && !options.test_shell) { - InstallUtilityScript(); - RunRemoteDebugger(i::FLAG_debugger_port); - return 0; - } + // Run remote debugger if requested, but never on --test + if (i::FLAG_remote_debugger && !options.test_shell) { + InstallUtilityScript(isolate); + RunRemoteDebugger(isolate, i::FLAG_debugger_port); + return 0; + } #endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT - // Run interactive shell if explicitly requested or if no script has been - // executed, but never on --test + // Run interactive shell if explicitly requested or if no script has been + // executed, but never on --test - 
if (( options.interactive_shell - || !options.script_executed ) - && !options.test_shell ) { + if (( options.interactive_shell || !options.script_executed ) + && !options.test_shell ) { #if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT) - if (!i::FLAG_debugger) { - InstallUtilityScript(); - } + if (!i::FLAG_debugger) { + InstallUtilityScript(isolate); + } #endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT - RunShell(); + RunShell(isolate); + } } - V8::Dispose(); -#ifndef V8_SHARED OnExit(); -#endif // V8_SHARED return result; } diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp index a8361e6b4e..cce8f2a1fb 100644 --- a/deps/v8/src/d8.gyp +++ b/deps/v8/src/d8.gyp @@ -45,6 +45,10 @@ 'd8.cc', ], 'conditions': [ + [ 'console=="readline"', { + 'libraries': [ '-lreadline', ], + 'sources': [ 'd8-readline.cc' ], + }], [ 'component!="shared_library"', { 'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ], 'conditions': [ @@ -57,10 +61,6 @@ 'd8_js2c', ], }], - [ 'console=="readline"', { - 'libraries': [ '-lreadline', ], - 'sources': [ 'd8-readline.cc' ], - }], ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \ or OS=="openbsd" or OS=="solaris" or OS=="android")', { 'sources': [ 'd8-posix.cc', ] diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h index a62a81fd9c..621ac74095 100644 --- a/deps/v8/src/d8.h +++ b/deps/v8/src/d8.h @@ -123,17 +123,16 @@ class LineEditor { virtual ~LineEditor() { } virtual Handle<String> Prompt(const char* prompt) = 0; - virtual bool Open() { return true; } + virtual bool Open(Isolate* isolate) { return true; } virtual bool Close() { return true; } virtual void AddHistory(const char* str) { } const char* name() { return name_; } - static LineEditor* Get(); + static LineEditor* Get() { return current_; } private: Type type_; const char* name_; - LineEditor* next_; - static LineEditor* first_; + static LineEditor* current_; }; @@ -158,7 +157,7 @@ class SourceGroup { void End(int offset) { end_offset_ = offset; } - void Execute(); + void Execute(Isolate* isolate); #ifndef V8_SHARED void StartExecuteInThread(); @@ -187,7 +186,7 @@ class SourceGroup { #endif // V8_SHARED void ExitShell(int exit_code); - Handle<String> ReadFile(const char* name); + Handle<String> ReadFile(Isolate* isolate, const char* name); const char** argv_; int begin_offset_; @@ -266,22 +265,24 @@ class Shell : public i::AllStatic { #endif // V8_SHARED public: - static bool ExecuteString(Handle<String> source, + static bool ExecuteString(Isolate* isolate, + Handle<String> source, Handle<Value> name, bool print_result, bool report_exceptions); static const char* ToCString(const v8::String::Utf8Value& value); - static void ReportException(TryCatch* try_catch); - static Handle<String> ReadFile(const char* name); - static Persistent<Context> CreateEvaluationContext(); - static int RunMain(int argc, char* argv[]); + static void ReportException(Isolate* isolate, TryCatch* try_catch); + static Handle<String> ReadFile(Isolate* isolate, const char* name); + static Persistent<Context> CreateEvaluationContext(Isolate* isolate); + static int RunMain(Isolate* isolate, int argc, char* argv[]); static int Main(int argc, char* argv[]); static void Exit(int exit_code); + static void OnExit(); #ifndef V8_SHARED - static Handle<Array> GetCompletions(Handle<String> text, + static Handle<Array> GetCompletions(Isolate* isolate, + Handle<String> text, Handle<String> full); - static void OnExit(); static int* LookupCounter(const char* name); static void* CreateHistogram(const char* name, int min, 
@@ -310,9 +311,9 @@ class Shell : public i::AllStatic { static Handle<Value> DisableProfiler(const Arguments& args); static Handle<Value> Read(const Arguments& args); static Handle<Value> ReadBuffer(const Arguments& args); - static Handle<String> ReadFromStdin(); + static Handle<String> ReadFromStdin(Isolate* isolate); static Handle<Value> ReadLine(const Arguments& args) { - return ReadFromStdin(); + return ReadFromStdin(args.GetIsolate()); } static Handle<Value> Load(const Arguments& args); static Handle<Value> ArrayBuffer(const Arguments& args); @@ -365,7 +366,6 @@ class Shell : public i::AllStatic { static void AddOSMethods(Handle<ObjectTemplate> os_template); - static LineEditor* console; static const char* kPrompt; static ShellOptions options; @@ -382,17 +382,20 @@ class Shell : public i::AllStatic { static i::Mutex* context_mutex_; static Counter* GetCounter(const char* name, bool is_histogram); - static void InstallUtilityScript(); + static void InstallUtilityScript(Isolate* isolate); #endif // V8_SHARED - static void Initialize(); - static void RunShell(); + static void Initialize(Isolate* isolate); + static void InitializeDebugger(Isolate* isolate); + static void RunShell(Isolate* isolate); static bool SetOptions(int argc, char* argv[]); - static Handle<ObjectTemplate> CreateGlobalTemplate(); + static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate); static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback); static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback); - static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer, + static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate, + Handle<Object> buffer, int32_t size); - static Handle<Object> CreateExternalArray(Handle<Object> array, + static Handle<Object> CreateExternalArray(Isolate* isolate, + Handle<Object> array, Handle<Object> buffer, ExternalArrayType type, int32_t length, @@ -402,7 +405,9 @@ class Shell : public i::AllStatic { static Handle<Value> CreateExternalArray(const Arguments& args, ExternalArrayType type, int32_t element_size); - static void ExternalArrayWeakCallback(Persistent<Value> object, void* data); + static void ExternalArrayWeakCallback(Isolate* isolate, + Persistent<Value> object, + void* data); }; diff --git a/deps/v8/src/d8.js b/deps/v8/src/d8.js index 819135add4..1ff0ce8980 100644 --- a/deps/v8/src/d8.js +++ b/deps/v8/src/d8.js @@ -71,7 +71,7 @@ function GetCompletions(global, last, full) { result.push(name); } } - current = ToInspectableObject(current.__proto__); + current = ToInspectableObject(Object.getPrototypeOf(current)); } return result; } @@ -123,10 +123,6 @@ Debug.State = { var trace_compile = false; // Tracing all compile events? var trace_debug_json = false; // Tracing all debug json packets? var last_cmd = ''; -//var lol_is_enabled; // Set to true in d8.cc if LIVE_OBJECT_LIST is defined. -var lol_next_dump_index = 0; -var kDefaultLolLinesToPrintAtATime = 10; -var kMaxLolLinesToPrintAtATime = 1000; var repeat_cmd_line = ''; var is_running = true; // Global variable used to store whether a handle was requested. 
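In the GetCompletions hunk above, d8.js stops reading current.__proto__ and walks the chain with Object.getPrototypeOf(current) instead. The accessor form silently yields undefined for objects that do not inherit from Object.prototype, which the standalone sketch below demonstrates (allPropertyNames is an illustrative helper, not part of d8.js):

// Walk a prototype chain without relying on the __proto__ accessor.
function allPropertyNames(obj) {
  var names = [];
  for (var current = obj; current !== null;
       current = Object.getPrototypeOf(current)) {
    names = names.concat(Object.getOwnPropertyNames(current));
  }
  return names;
}

// __proto__ is an inherited accessor, so it is simply undefined on an
// object created with a null prototype; Object.getPrototypeOf still works.
var dict = Object.create(null);
dict.x = 1;
print(dict.__proto__);               // undefined
print(Object.getPrototypeOf(dict));  // null
print(allPropertyNames(dict));       // x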
@@ -507,13 +503,6 @@ function DebugRequest(cmd_line) { this.request_ = void 0; break; - case 'liveobjectlist': - case 'lol': - if (lol_is_enabled) { - this.request_ = this.lolToJSONRequest_(args, is_repeating); - break; - } - default: throw new Error('Unknown command "' + cmd + '"'); } @@ -558,53 +547,10 @@ DebugRequest.prototype.createRequest = function(command) { }; -// Note: we use detected command repetition as a signal for continuation here. -DebugRequest.prototype.createLOLRequest = function(command, - start_index, - lines_to_dump, - is_continuation) { - if (is_continuation) { - start_index = lol_next_dump_index; - } - - if (lines_to_dump) { - lines_to_dump = parseInt(lines_to_dump); - } else { - lines_to_dump = kDefaultLolLinesToPrintAtATime; - } - if (lines_to_dump > kMaxLolLinesToPrintAtATime) { - lines_to_dump = kMaxLolLinesToPrintAtATime; - } - - // Save the next start_index to dump from: - lol_next_dump_index = start_index + lines_to_dump; - - var request = this.createRequest(command); - request.arguments = {}; - request.arguments.start = start_index; - request.arguments.count = lines_to_dump; - - return request; -}; - - // Create a JSON request for the evaluation command. DebugRequest.prototype.makeEvaluateJSONRequest_ = function(expression) { lookup_handle = null; - if (lol_is_enabled) { - // Check if the expression is a obj id in the form @<obj id>. - var obj_id_match = expression.match(/^@([0-9]+)$/); - if (obj_id_match) { - var obj_id = parseInt(obj_id_match[1]); - // Build a dump request. - var request = this.createRequest('getobj'); - request.arguments = {}; - request.arguments.obj_id = obj_id; - return request.toJSONProtocol(); - } - } - // Check if the expression is a handle id in the form #<handle>#. var handle_match = expression.match(/^#([0-9]*)#$/); if (handle_match) { @@ -1170,10 +1116,6 @@ DebugRequest.prototype.infoCommandToJSONRequest_ = function(args) { // Build a evaluate request from the text command. request = this.createRequest('frame'); last_cmd = 'info args'; - } else if (lol_is_enabled && - args && (args == 'liveobjectlist' || args == 'lol')) { - // Build a evaluate request from the text command. - return this.liveObjectListToJSONRequest_(null); } else { throw new Error('Invalid info arguments.'); } @@ -1224,262 +1166,6 @@ DebugRequest.prototype.gcToJSONRequest_ = function(args) { }; -// Args: [v[erbose]] [<N>] [i[ndex] <i>] [t[ype] <type>] [sp[ace] <space>] -DebugRequest.prototype.lolMakeListRequest = - function(cmd, args, first_arg_index, is_repeating) { - - var request; - var start_index = 0; - var dump_limit = void 0; - var type_filter = void 0; - var space_filter = void 0; - var prop_filter = void 0; - var is_verbose = false; - var i; - - for (i = first_arg_index; i < args.length; i++) { - var arg = args[i]; - // Check for [v[erbose]]: - if (arg === 'verbose' || arg === 'v') { - // Nothing to do. This is already implied by args.length > 3. 
- is_verbose = true; - - // Check for [<N>]: - } else if (arg.match(/^[0-9]+$/)) { - dump_limit = arg; - is_verbose = true; - - // Check for i[ndex] <i>: - } else if (arg === 'index' || arg === 'i') { - i++; - if (args.length < i) { - throw new Error('Missing index after ' + arg + '.'); - } - start_index = parseInt(args[i]); - // The user input start index starts at 1: - if (start_index <= 0) { - throw new Error('Invalid index ' + args[i] + '.'); - } - start_index -= 1; - is_verbose = true; - - // Check for t[ype] <type>: - } else if (arg === 'type' || arg === 't') { - i++; - if (args.length < i) { - throw new Error('Missing type after ' + arg + '.'); - } - type_filter = args[i]; - - // Check for space <heap space name>: - } else if (arg === 'space' || arg === 'sp') { - i++; - if (args.length < i) { - throw new Error('Missing space name after ' + arg + '.'); - } - space_filter = args[i]; - - // Check for property <prop name>: - } else if (arg === 'property' || arg === 'prop') { - i++; - if (args.length < i) { - throw new Error('Missing property name after ' + arg + '.'); - } - prop_filter = args[i]; - - } else { - throw new Error('Unknown args at ' + arg + '.'); - } - } - - // Build the verbose request: - if (is_verbose) { - request = this.createLOLRequest('lol-'+cmd, - start_index, - dump_limit, - is_repeating); - request.arguments.verbose = true; - } else { - request = this.createRequest('lol-'+cmd); - request.arguments = {}; - } - - request.arguments.filter = {}; - if (type_filter) { - request.arguments.filter.type = type_filter; - } - if (space_filter) { - request.arguments.filter.space = space_filter; - } - if (prop_filter) { - request.arguments.filter.prop = prop_filter; - } - - return request; -}; - - -function extractObjId(args) { - var id = args; - id = id.match(/^@([0-9]+)$/); - if (id) { - id = id[1]; - } else { - throw new Error('Invalid obj id ' + args + '.'); - } - return parseInt(id); -} - - -DebugRequest.prototype.lolToJSONRequest_ = function(args, is_repeating) { - var request; - // Use default command if one is not specified: - if (!args) { - args = 'info'; - } - - var orig_args = args; - var first_arg_index; - - var arg, i; - var args = args.split(/\s+/g); - var cmd = args[0]; - var id; - - // Command: <id> [v[erbose]] ... - if (cmd.match(/^[0-9]+$/)) { - // Convert to the padded list command: - // Command: l[ist] <dummy> <id> [v[erbose]] ... 
- - // Insert the implicit 'list' in front and process as normal: - cmd = 'list'; - args.unshift(cmd); - } - - switch(cmd) { - // Command: c[apture] - case 'capture': - case 'c': - request = this.createRequest('lol-capture'); - break; - - // Command: clear|d[elete] <id>|all - case 'clear': - case 'delete': - case 'del': { - if (args.length < 2) { - throw new Error('Missing argument after ' + cmd + '.'); - } else if (args.length > 2) { - throw new Error('Too many arguments after ' + cmd + '.'); - } - id = args[1]; - if (id.match(/^[0-9]+$/)) { - // Delete a specific lol record: - request = this.createRequest('lol-delete'); - request.arguments = {}; - request.arguments.id = parseInt(id); - } else if (id === 'all') { - // Delete all: - request = this.createRequest('lol-reset'); - } else { - throw new Error('Invalid argument after ' + cmd + '.'); - } - break; - } - - // Command: diff <id1> <id2> [<dump options>] - case 'diff': - first_arg_index = 3; - - // Command: list <dummy> <id> [<dump options>] - case 'list': - - // Command: ret[ainers] <obj id> [<dump options>] - case 'retainers': - case 'ret': - case 'retaining-paths': - case 'rp': { - if (cmd === 'ret') cmd = 'retainers'; - else if (cmd === 'rp') cmd = 'retaining-paths'; - - if (!first_arg_index) first_arg_index = 2; - - if (args.length < first_arg_index) { - throw new Error('Too few arguments after ' + cmd + '.'); - } - - var request_cmd = (cmd === 'list') ? 'diff':cmd; - request = this.lolMakeListRequest(request_cmd, - args, - first_arg_index, - is_repeating); - - if (cmd === 'diff') { - request.arguments.id1 = parseInt(args[1]); - request.arguments.id2 = parseInt(args[2]); - } else if (cmd == 'list') { - request.arguments.id1 = 0; - request.arguments.id2 = parseInt(args[1]); - } else { - request.arguments.id = extractObjId(args[1]); - } - break; - } - - // Command: getid - case 'getid': { - request = this.createRequest('lol-getid'); - request.arguments = {}; - request.arguments.address = args[1]; - break; - } - - // Command: inf[o] [<N>] - case 'info': - case 'inf': { - if (args.length > 2) { - throw new Error('Too many arguments after ' + cmd + '.'); - } - // Built the info request: - request = this.createLOLRequest('lol-info', 0, args[1], is_repeating); - break; - } - - // Command: path <obj id 1> <obj id 2> - case 'path': { - request = this.createRequest('lol-path'); - request.arguments = {}; - if (args.length > 2) { - request.arguments.id1 = extractObjId(args[1]); - request.arguments.id2 = extractObjId(args[2]); - } else { - request.arguments.id1 = 0; - request.arguments.id2 = extractObjId(args[1]); - } - break; - } - - // Command: print - case 'print': { - request = this.createRequest('lol-print'); - request.arguments = {}; - request.arguments.id = extractObjId(args[1]); - break; - } - - // Command: reset - case 'reset': { - request = this.createRequest('lol-reset'); - break; - } - - default: - throw new Error('Invalid arguments.'); - } - return request.toJSONProtocol(); -}; - - // Create a JSON request for the threads command. DebugRequest.prototype.threadsCommandToJSONRequest_ = function(args) { // Build a threads request from the text command. 
@@ -1545,7 +1231,6 @@ DebugRequest.prototype.helpCommand_ = function(args) { print('inf[o] br[eak] - prints info about breakpoints in use'); print('inf[o] ar[gs] - prints info about arguments of the current function'); print('inf[o] lo[cals] - prints info about locals in the current function'); - print('inf[o] liveobjectlist|lol - same as \'lol info\''); print(''); print('step [in | next | out| min [step count]]'); print('c[ontinue] - continue executing after a breakpoint'); @@ -1566,49 +1251,6 @@ DebugRequest.prototype.helpCommand_ = function(args) { print(''); print('gc - runs the garbage collector'); print(''); - - if (lol_is_enabled) { - print('liveobjectlist|lol <command> - live object list tracking.'); - print(' where <command> can be:'); - print(' c[apture] - captures a LOL list.'); - print(' clear|del[ete] <id>|all - clears LOL of id <id>.'); - print(' If \'all\' is unspecified instead, will clear all.'); - print(' diff <id1> <id2> [<dump options>]'); - print(' - prints the diff between LOLs id1 and id2.'); - print(' - also see <dump options> below.'); - print(' getid <address> - gets the obj id for the specified address if available.'); - print(' The address must be in hex form prefixed with 0x.'); - print(' inf[o] [<N>] - lists summary info of all LOL lists.'); - print(' If N is specified, will print N items at a time.'); - print(' [l[ist]] <id> [<dump options>]'); - print(' - prints the listing of objects in LOL id.'); - print(' - also see <dump options> below.'); - print(' reset - clears all LOL lists.'); - print(' ret[ainers] <id> [<dump options>]'); - print(' - prints the list of retainers of obj id.'); - print(' - also see <dump options> below.'); - print(' path <id1> <id2> - prints the retaining path from obj id1 to id2.'); - print(' If only one id is specified, will print the path from'); - print(' roots to the specified object if available.'); - print(' print <id> - prints the obj for the specified obj id if available.'); - print(''); - print(' <dump options> includes:'); - print(' [v[erbose]] - do verbose dump.'); - print(' [<N>] - dump N items at a time. Implies verbose dump.'); - print(' If unspecified, N will default to '+ - kDefaultLolLinesToPrintAtATime+'. Max N is '+ - kMaxLolLinesToPrintAtATime+'.'); - print(' [i[ndex] <i>] - start dump from index i. Implies verbose dump.'); - print(' [t[ype] <type>] - filter by type.'); - print(' [sp[ace] <space name>] - filter by heap space where <space name> is one of'); - print(' { cell, code, lo, map, new, old-data, old-pointer }.'); - print(''); - print(' If the verbose option, or an option that implies a verbose dump'); - print(' is specified, then a verbose dump will requested. 
Else, a summary dump'); - print(' will be requested.'); - print(''); - } - print('trace compile'); // hidden command: trace debug json - toggles tracing of debug json packets print(''); @@ -1709,237 +1351,6 @@ function refObjectToString_(protocolPackage, handle) { } -function decodeLolCaptureResponse(body) { - var result; - result = 'Captured live object list '+ body.id + - ': count '+ body.count + ' size ' + body.size; - return result; -} - - -function decodeLolDeleteResponse(body) { - var result; - result = 'Deleted live object list '+ body.id; - return result; -} - - -function digitsIn(value) { - var digits = 0; - if (value === 0) value = 1; - while (value >= 1) { - digits++; - value /= 10; - } - return digits; -} - - -function padding(value, max_digits) { - var padding_digits = max_digits - digitsIn(value); - var padding = ''; - while (padding_digits > 0) { - padding += ' '; - padding_digits--; - } - return padding; -} - - -function decodeLolInfoResponse(body) { - var result; - var lists = body.lists; - var length = lists.length; - var first_index = body.first_index + 1; - var has_more = ((first_index + length) <= body.count); - result = 'captured live object lists'; - if (has_more || (first_index != 1)) { - result += ' ['+ length +' of '+ body.count + - ': starting from '+ first_index +']'; - } - result += ':\n'; - var max_digits = digitsIn(body.count); - var last_count = 0; - var last_size = 0; - for (var i = 0; i < length; i++) { - var entry = lists[i]; - var count = entry.count; - var size = entry.size; - var index = first_index + i; - result += ' [' + padding(index, max_digits) + index + '] id '+ entry.id + - ': count '+ count; - if (last_count > 0) { - result += '(+' + (count - last_count) + ')'; - } - result += ' size '+ size; - if (last_size > 0) { - result += '(+' + (size - last_size) + ')'; - } - result += '\n'; - last_count = count; - last_size = size; - } - result += ' total: '+length+' lists\n'; - if (has_more) { - result += ' -- press <enter> for more --\n'; - } else { - repeat_cmd_line = ''; - } - if (length === 0) result += ' none\n'; - - return result; -} - - -function decodeLolListResponse(body, title) { - - var result; - var total_count = body.count; - var total_size = body.size; - var length; - var max_digits; - var i; - var entry; - var index; - - var max_count_digits = digitsIn(total_count); - var max_size_digits; - - var summary = body.summary; - if (summary) { - - var roots_count = 0; - var found_root = body.found_root || 0; - var found_weak_root = body.found_weak_root || 0; - - // Print the summary result: - result = 'summary of objects:\n'; - length = summary.length; - if (found_root !== 0) { - roots_count++; - } - if (found_weak_root !== 0) { - roots_count++; - } - max_digits = digitsIn(length + roots_count); - max_size_digits = digitsIn(total_size); - - index = 1; - if (found_root !== 0) { - result += ' [' + padding(index, max_digits) + index + '] ' + - ' count '+ 1 + padding(0, max_count_digits) + - ' '+ padding(0, max_size_digits+1) + - ' : <root>\n'; - index++; - } - if (found_weak_root !== 0) { - result += ' [' + padding(index, max_digits) + index + '] ' + - ' count '+ 1 + padding(0, max_count_digits) + - ' '+ padding(0, max_size_digits+1) + - ' : <weak root>\n'; - index++; - } - - for (i = 0; i < length; i++) { - entry = summary[i]; - var count = entry.count; - var size = entry.size; - result += ' [' + padding(index, max_digits) + index + '] ' + - ' count '+ count + padding(count, max_count_digits) + - ' size '+ size + padding(size, max_size_digits) + - 
' : <' + entry.desc + '>\n'; - index++; - } - result += '\n total count: '+(total_count+roots_count)+'\n'; - if (body.size) { - result += ' total size: '+body.size+'\n'; - } - - } else { - // Print the full dump result: - var first_index = body.first_index + 1; - var elements = body.elements; - length = elements.length; - var has_more = ((first_index + length) <= total_count); - result = title; - if (has_more || (first_index != 1)) { - result += ' ['+ length +' of '+ total_count + - ': starting from '+ first_index +']'; - } - result += ':\n'; - if (length === 0) result += ' none\n'; - max_digits = digitsIn(length); - - var max_id = 0; - var max_size = 0; - for (i = 0; i < length; i++) { - entry = elements[i]; - if (entry.id > max_id) max_id = entry.id; - if (entry.size > max_size) max_size = entry.size; - } - var max_id_digits = digitsIn(max_id); - max_size_digits = digitsIn(max_size); - - for (i = 0; i < length; i++) { - entry = elements[i]; - index = first_index + i; - result += ' ['+ padding(index, max_digits) + index +']'; - if (entry.id !== 0) { - result += ' @' + entry.id + padding(entry.id, max_id_digits) + - ': size ' + entry.size + ', ' + - padding(entry.size, max_size_digits) + entry.desc + '\n'; - } else { - // Must be a root or weak root: - result += ' ' + entry.desc + '\n'; - } - } - if (has_more) { - result += ' -- press <enter> for more --\n'; - } else { - repeat_cmd_line = ''; - } - if (length === 0) result += ' none\n'; - } - - return result; -} - - -function decodeLolDiffResponse(body) { - var title = 'objects'; - return decodeLolListResponse(body, title); -} - - -function decodeLolRetainersResponse(body) { - var title = 'retainers for @' + body.id; - return decodeLolListResponse(body, title); -} - - -function decodeLolPathResponse(body) { - return body.path; -} - - -function decodeLolResetResponse(body) { - return 'Reset all live object lists.'; -} - - -function decodeLolGetIdResponse(body) { - if (body.id == 0) { - return 'Address is invalid, or object has been moved or collected'; - } - return 'obj id is @' + body.id; -} - - -function decodeLolPrintResponse(body) { - return body.dump; -} - - // Rounds number 'num' to 'length' decimal places. function roundNumber(num, length) { var factor = Math.pow(10, length); @@ -2276,34 +1687,6 @@ function DebugResponseDetails(response) { } break; - case 'lol-capture': - details.text = decodeLolCaptureResponse(body); - break; - case 'lol-delete': - details.text = decodeLolDeleteResponse(body); - break; - case 'lol-diff': - details.text = decodeLolDiffResponse(body); - break; - case 'lol-getid': - details.text = decodeLolGetIdResponse(body); - break; - case 'lol-info': - details.text = decodeLolInfoResponse(body); - break; - case 'lol-print': - details.text = decodeLolPrintResponse(body); - break; - case 'lol-reset': - details.text = decodeLolResetResponse(body); - break; - case 'lol-retainers': - details.text = decodeLolRetainersResponse(body); - break; - case 'lol-path': - details.text = decodeLolPathResponse(body); - break; - default: details.text = 'Response for unknown command \'' + response.command() + '\'' + @@ -2811,3 +2194,59 @@ function SimpleArrayToJSON_(array) { json += ']'; return json; } + + +// A more universal stringify that supports more types than JSON. +// Used by the d8 shell to output results. 
+var stringifyDepthLimit = 4; // To avoid crashing on cyclic objects + +function Stringify(x, depth) { + if (depth === undefined) + depth = stringifyDepthLimit; + else if (depth === 0) + return "*"; + switch (typeof x) { + case "undefined": + return "undefined"; + case "boolean": + case "number": + case "function": + return x.toString(); + case "string": + return "\"" + x.toString() + "\""; + // TODO(rossberg): add symbol case + case "object": + if (x === null) return "null"; + if (x.constructor && x.constructor.name === "Array") { + var elems = []; + for (var i = 0; i < x.length; ++i) { + elems.push( + {}.hasOwnProperty.call(x, i) ? Stringify(x[i], depth - 1) : ""); + } + return "[" + elems.join(", ") + "]"; + } + try { + var string = String(x); + if (string && string !== "[object Object]") return string; + } catch(e) {} + var props = []; + for (var name in x) { + var desc = Object.getOwnPropertyDescriptor(x, name); + if (desc === void 0) continue; + if ("value" in desc) { + props.push(name + ": " + Stringify(desc.value, depth - 1)); + } + if ("get" in desc) { + var getter = desc.get.toString(); + props.push("get " + name + getter.slice(getter.indexOf('('))); + } + if ("set" in desc) { + var setter = desc.set.toString(); + props.push("set " + name + setter.slice(setter.indexOf('('))); + } + } + return "{" + props.join(", ") + "}"; + default: + return "[crazy non-standard shit]"; + } +} diff --git a/deps/v8/src/data-flow.h b/deps/v8/src/data-flow.h index 71f56e718b..7eeb794fa0 100644 --- a/deps/v8/src/data-flow.h +++ b/deps/v8/src/data-flow.h @@ -199,6 +199,61 @@ class BitVector: public ZoneObject { uint32_t* data_; }; +class GrowableBitVector BASE_EMBEDDED { + public: + class Iterator BASE_EMBEDDED { + public: + Iterator(const GrowableBitVector* target, Zone* zone) + : it_(target->bits_ == NULL + ? new(zone) BitVector(1, zone) + : target->bits_) { } + bool Done() const { return it_.Done(); } + void Advance() { it_.Advance(); } + int Current() const { return it_.Current(); } + private: + BitVector::Iterator it_; + }; + + GrowableBitVector() : bits_(NULL) { } + + bool Contains(int value) const { + if (!InBitsRange(value)) return false; + return bits_->Contains(value); + } + + void Add(int value, Zone* zone) { + EnsureCapacity(value, zone); + bits_->Add(value); + } + + void Union(const GrowableBitVector& other, Zone* zone) { + for (Iterator it(&other, zone); !it.Done(); it.Advance()) { + Add(it.Current(), zone); + } + } + + void Clear() { if (bits_ != NULL) bits_->Clear(); } + + private: + static const int kInitialLength = 1024; + + bool InBitsRange(int value) const { + return bits_ != NULL && bits_->length() > value; + } + + void EnsureCapacity(int value, Zone* zone) { + if (InBitsRange(value)) return; + int new_length = bits_ == NULL ? kInitialLength : bits_->length(); + while (new_length <= value) new_length *= 2; + BitVector* new_bits = new(zone) BitVector(new_length, zone); + if (bits_ != NULL) new_bits->CopyFrom(*bits_); + bits_ = new_bits; + } + + BitVector* bits_; +}; + + } } // namespace v8::internal diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js index a54cb238c5..c75d12c651 100644 --- a/deps/v8/src/date.js +++ b/deps/v8/src/date.js @@ -107,7 +107,7 @@ function MakeDay(year, month, date) { } // Now we rely on year and month being SMIs. 
- return %DateMakeDay(year, month) + date - 1; + return %DateMakeDay(year | 0, month | 0) + date - 1; } diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc index e856222775..811c00e0cc 100644 --- a/deps/v8/src/debug-agent.cc +++ b/deps/v8/src/debug-agent.cc @@ -192,21 +192,14 @@ void DebuggerAgentSession::Run() { } // Convert UTF-8 to UTF-16. - unibrow::Utf8InputBuffer<> buf(msg, StrLength(msg)); - int len = 0; - while (buf.has_more()) { - buf.GetNext(); - len++; - } - ScopedVector<int16_t> temp(len + 1); - buf.Reset(msg, StrLength(msg)); - for (int i = 0; i < len; i++) { - temp[i] = buf.GetNext(); - } + unibrow::Utf8Decoder<128> decoder(msg, StrLength(msg)); + int utf16_length = decoder.Utf16Length(); + ScopedVector<uint16_t> temp(utf16_length + 1); + decoder.WriteUtf16(temp.start(), utf16_length); // Send the request received to the debugger. - v8::Debug::SendCommand(reinterpret_cast<const uint16_t *>(temp.start()), - len, + v8::Debug::SendCommand(temp.start(), + utf16_length, NULL, reinterpret_cast<v8::Isolate*>(agent_->isolate())); diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js index 163a0bd829..7787312ddc 100644 --- a/deps/v8/src/debug-debugger.js +++ b/deps/v8/src/debug-debugger.js @@ -110,7 +110,6 @@ var debugger_flags = { } }, }; -var lol_is_enabled = %HasLOLEnabled(); // Create a new break point object and add it to the list of break points. @@ -1306,9 +1305,12 @@ ProtocolMessage.prototype.setOption = function(name, value) { }; -ProtocolMessage.prototype.failed = function(message) { +ProtocolMessage.prototype.failed = function(message, opt_details) { this.success = false; this.message = message; + if (IS_OBJECT(opt_details)) { + this.error_details = opt_details; + } }; @@ -1355,6 +1357,9 @@ ProtocolMessage.prototype.toJSONProtocol = function() { if (this.message) { json.message = this.message; } + if (this.error_details) { + json.error_details = this.error_details; + } json.running = this.running; return JSON.stringify(json); }; @@ -1427,10 +1432,10 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function( this.scopesRequest_(request, response); } else if (request.command == 'scope') { this.scopeRequest_(request, response); + } else if (request.command == 'setVariableValue') { + this.setVariableValueRequest_(request, response); } else if (request.command == 'evaluate') { this.evaluateRequest_(request, response); - } else if (lol_is_enabled && request.command == 'getobj') { - this.getobjRequest_(request, response); } else if (request.command == 'lookup') { this.lookupRequest_(request, response); } else if (request.command == 'references') { @@ -1460,28 +1465,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function( } else if (request.command == 'gc') { this.gcRequest_(request, response); - // LiveObjectList tools: - } else if (lol_is_enabled && request.command == 'lol-capture') { - this.lolCaptureRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-delete') { - this.lolDeleteRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-diff') { - this.lolDiffRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-getid') { - this.lolGetIdRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-info') { - this.lolInfoRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-reset') { - this.lolResetRequest_(request, response); - } else if (lol_is_enabled && request.command == 
'lol-retainers') { - this.lolRetainersRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-path') { - this.lolPathRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-print') { - this.lolPrintRequest_(request, response); - } else if (lol_is_enabled && request.command == 'lol-stats') { - this.lolStatsRequest_(request, response); - } else { throw new Error('Unknown command "' + request.command + '" in request'); } @@ -1953,11 +1936,12 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) { }; -DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) { +DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ = + function(scope_description) { // Get the frame for which the scope or scopes are requested. // With no frameNumber argument use the currently selected frame. - if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) { - frame_index = request.arguments.frameNumber; + if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) { + frame_index = scope_description.frameNumber; if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) { throw new Error('Invalid frame number'); } @@ -1971,13 +1955,13 @@ DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) { // Gets scope host object from request. It is either a function // ('functionHandle' argument must be specified) or a stack frame // ('frameNumber' may be specified and the current frame is taken by default). -DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ = - function(request) { - if (request.arguments && "functionHandle" in request.arguments) { - if (!IS_NUMBER(request.arguments.functionHandle)) { +DebugCommandProcessor.prototype.resolveScopeHolder_ = + function(scope_description) { + if (scope_description && "functionHandle" in scope_description) { + if (!IS_NUMBER(scope_description.functionHandle)) { throw new Error('Function handle must be a number'); } - var function_mirror = LookupMirror(request.arguments.functionHandle); + var function_mirror = LookupMirror(scope_description.functionHandle); if (!function_mirror) { throw new Error('Failed to find function object by handle'); } @@ -1992,14 +1976,14 @@ DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ = } // Get the frame for which the scopes are requested. - var frame = this.frameForScopeRequest_(request); + var frame = this.resolveFrameFromScopeDescription_(scope_description); return frame; } } DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) { - var scope_holder = this.scopeHolderForScopeRequest_(request); + var scope_holder = this.resolveScopeHolder_(request.arguments); // Fill all scopes for this frame or function. var total_scopes = scope_holder.scopeCount(); @@ -2018,7 +2002,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) { DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) { // Get the frame or function for which the scope is requested. - var scope_holder = this.scopeHolderForScopeRequest_(request); + var scope_holder = this.resolveScopeHolder_(request.arguments); // With no scope argument just return top scope. var scope_index = 0; @@ -2033,6 +2017,77 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) { }; +// Reads value from protocol description. 
Description may be in form of type +// (for singletons), raw value (primitive types supported in JSON), +// string value description plus type (for primitive values) or handle id. +// Returns raw value or throws exception. +DebugCommandProcessor.resolveValue_ = function(value_description) { + if ("handle" in value_description) { + var value_mirror = LookupMirror(value_description.handle); + if (!value_mirror) { + throw new Error("Failed to resolve value by handle, ' #" + + mapping.handle + "# not found"); + } + return value_mirror.value(); + } else if ("stringDescription" in value_description) { + if (value_description.type == BOOLEAN_TYPE) { + return Boolean(value_description.stringDescription); + } else if (value_description.type == NUMBER_TYPE) { + return Number(value_description.stringDescription); + } if (value_description.type == STRING_TYPE) { + return String(value_description.stringDescription); + } else { + throw new Error("Unknown type"); + } + } else if ("value" in value_description) { + return value_description.value; + } else if (value_description.type == UNDEFINED_TYPE) { + return void 0; + } else if (value_description.type == NULL_TYPE) { + return null; + } else { + throw new Error("Failed to parse value description"); + } +}; + + +DebugCommandProcessor.prototype.setVariableValueRequest_ = + function(request, response) { + if (!request.arguments) { + response.failed('Missing arguments'); + return; + } + + if (IS_UNDEFINED(request.arguments.name)) { + response.failed('Missing variable name'); + } + var variable_name = request.arguments.name; + + var scope_description = request.arguments.scope; + + // Get the frame or function for which the scope is requested. + var scope_holder = this.resolveScopeHolder_(scope_description); + + if (IS_UNDEFINED(scope_description.number)) { + response.failed('Missing scope number'); + } + var scope_index = %ToNumber(scope_description.number); + + var scope = scope_holder.scope(scope_index); + + var new_value = + DebugCommandProcessor.resolveValue_(request.arguments.newValue); + + scope.setVariableValue(variable_name, new_value); + + var new_value_mirror = MakeMirror(new_value); + + response.body = { + newValue: new_value_mirror + }; +}; + + DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) { if (!request.arguments) { return response.failed('Missing arguments'); @@ -2063,16 +2118,14 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) { additional_context_object = {}; for (var i = 0; i < additional_context.length; i++) { var mapping = additional_context[i]; - if (!IS_STRING(mapping.name) || !IS_NUMBER(mapping.handle)) { + + if (!IS_STRING(mapping.name)) { return response.failed("Context element #" + i + - " must contain name:string and handle:number"); + " doesn't contain name:string property"); } - var context_value_mirror = LookupMirror(mapping.handle); - if (!context_value_mirror) { - return response.failed("Context object '" + mapping.name + - "' #" + mapping.handle + "# not found"); - } - additional_context_object[mapping.name] = context_value_mirror.value(); + + var raw_value = DebugCommandProcessor.resolveValue_(mapping); + additional_context_object[mapping.name] = raw_value; } } @@ -2113,24 +2166,6 @@ DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) { }; -DebugCommandProcessor.prototype.getobjRequest_ = function(request, response) { - if (!request.arguments) { - return response.failed('Missing arguments'); - } - - // Pull out arguments. 
- var obj_id = request.arguments.obj_id; - - // Check for legal arguments. - if (IS_UNDEFINED(obj_id)) { - return response.failed('Argument "obj_id" missing'); - } - - // Dump the object. - response.body = MakeMirror(%GetLOLObj(obj_id)); -}; - - DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) { if (!request.arguments) { return response.failed('Missing arguments'); @@ -2387,8 +2422,17 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function( var new_source = request.arguments.new_source; - var result_description = Debug.LiveEdit.SetScriptSource(the_script, - new_source, preview_only, change_log); + var result_description; + try { + result_description = Debug.LiveEdit.SetScriptSource(the_script, + new_source, preview_only, change_log); + } catch (e) { + if (e instanceof Debug.LiveEdit.Failure && "details" in e) { + response.failed(e.message, e.details); + return; + } + throw e; + } response.body = {change_log: change_log, result: result_description}; if (!preview_only && !this.running_ && result_description.stack_modified) { @@ -2480,86 +2524,6 @@ DebugCommandProcessor.prototype.gcRequest_ = function(request, response) { }; -DebugCommandProcessor.prototype.lolCaptureRequest_ = - function(request, response) { - response.body = %CaptureLOL(); -}; - - -DebugCommandProcessor.prototype.lolDeleteRequest_ = - function(request, response) { - var id = request.arguments.id; - var result = %DeleteLOL(id); - if (result) { - response.body = { id: id }; - } else { - response.failed('Failed to delete: live object list ' + id + ' not found.'); - } -}; - - -DebugCommandProcessor.prototype.lolDiffRequest_ = function(request, response) { - var id1 = request.arguments.id1; - var id2 = request.arguments.id2; - var verbose = request.arguments.verbose; - var filter = request.arguments.filter; - if (verbose === true) { - var start = request.arguments.start; - var count = request.arguments.count; - response.body = %DumpLOL(id1, id2, start, count, filter); - } else { - response.body = %SummarizeLOL(id1, id2, filter); - } -}; - - -DebugCommandProcessor.prototype.lolGetIdRequest_ = function(request, response) { - var address = request.arguments.address; - response.body = {}; - response.body.id = %GetLOLObjId(address); -}; - - -DebugCommandProcessor.prototype.lolInfoRequest_ = function(request, response) { - var start = request.arguments.start; - var count = request.arguments.count; - response.body = %InfoLOL(start, count); -}; - - -DebugCommandProcessor.prototype.lolResetRequest_ = function(request, response) { - %ResetLOL(); -}; - - -DebugCommandProcessor.prototype.lolRetainersRequest_ = - function(request, response) { - var id = request.arguments.id; - var verbose = request.arguments.verbose; - var start = request.arguments.start; - var count = request.arguments.count; - var filter = request.arguments.filter; - - response.body = %GetLOLObjRetainers(id, Mirror.prototype, verbose, - start, count, filter); -}; - - -DebugCommandProcessor.prototype.lolPathRequest_ = function(request, response) { - var id1 = request.arguments.id1; - var id2 = request.arguments.id2; - response.body = {}; - response.body.path = %GetLOLPath(id1, id2, Mirror.prototype); -}; - - -DebugCommandProcessor.prototype.lolPrintRequest_ = function(request, response) { - var id = request.arguments.id; - response.body = {}; - response.body.dump = %PrintLOLObj(id); -}; - - // Check whether the previously processed command caused the VM to become // running. 
DebugCommandProcessor.prototype.isRunning = function() { @@ -2663,3 +2627,7 @@ function ValueToProtocolValue_(value, mirror_serializer) { } return json; } + +Debug.TestApi = { + CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_ +}; diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc index 48c5519f79..7a876e71fd 100644 --- a/deps/v8/src/debug.cc +++ b/deps/v8/src/debug.cc @@ -261,8 +261,12 @@ void BreakLocationIterator::Reset() { // Create relocation iterators for the two code objects. if (reloc_iterator_ != NULL) delete reloc_iterator_; if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_; - reloc_iterator_ = new RelocIterator(debug_info_->code()); - reloc_iterator_original_ = new RelocIterator(debug_info_->original_code()); + reloc_iterator_ = new RelocIterator( + debug_info_->code(), + ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE)); + reloc_iterator_original_ = new RelocIterator( + debug_info_->original_code(), + ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE)); // Position at the first break point. break_point_ = -1; @@ -385,8 +389,8 @@ void BreakLocationIterator::ClearDebugBreak() { } -void BreakLocationIterator::PrepareStepIn() { - HandleScope scope; +void BreakLocationIterator::PrepareStepIn(Isolate* isolate) { + HandleScope scope(isolate); // Step in can only be prepared if currently positioned on an IC call, // construct call or CallFunction stub call. @@ -613,10 +617,10 @@ void ScriptCache::Add(Handle<Script> script) { Handle<Script> script_ = Handle<Script>::cast( (global_handles->Create(*script))); - global_handles->MakeWeak( - reinterpret_cast<Object**>(script_.location()), - this, - ScriptCache::HandleWeakScript); + global_handles->MakeWeak(reinterpret_cast<Object**>(script_.location()), + this, + NULL, + ScriptCache::HandleWeakScript); entry->value = script_.location(); } @@ -659,7 +663,9 @@ void ScriptCache::Clear() { } -void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) { +void ScriptCache::HandleWeakScript(v8::Isolate* isolate, + v8::Persistent<v8::Value> obj, + void* data) { ScriptCache* script_cache = reinterpret_cast<ScriptCache*>(data); // Find the location of the global handle. Script** location = @@ -672,7 +678,7 @@ void ScriptCache::HandleWeakScript(v8::Persistent<v8::Value> obj, void* data) { script_cache->collected_scripts_.Add(id); // Clear the weak handle. - obj.Dispose(); + obj.Dispose(isolate); obj.Clear(); } @@ -692,8 +698,10 @@ void Debug::SetUp(bool create_heap_objects) { } -void Debug::HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data) { - Debug* debug = Isolate::Current()->debug(); +void Debug::HandleWeakDebugInfo(v8::Isolate* isolate, + v8::Persistent<v8::Value> obj, + void* data) { + Debug* debug = reinterpret_cast<Isolate*>(isolate)->debug(); DebugInfoListNode* node = reinterpret_cast<DebugInfoListNode*>(data); // We need to clear all breakpoints associated with the function to restore // original code and avoid patching the code twice later because @@ -717,10 +725,10 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) { // Globalize the request debug info object and make it weak. 
debug_info_ = Handle<DebugInfo>::cast( (global_handles->Create(debug_info))); - global_handles->MakeWeak( - reinterpret_cast<Object**>(debug_info_.location()), - this, - Debug::HandleWeakDebugInfo); + global_handles->MakeWeak(reinterpret_cast<Object**>(debug_info_.location()), + this, + NULL, + Debug::HandleWeakDebugInfo); } @@ -770,8 +778,11 @@ bool Debug::CompileDebuggerScript(int index) { factory->NewFunctionFromSharedFunctionInfo(function_info, context); Handle<Object> exception = - Execution::TryCall(function, Handle<Object>(context->global_object()), - 0, NULL, &caught_exception); + Execution::TryCall(function, + Handle<Object>(context->global_object(), isolate), + 0, + NULL, + &caught_exception); // Check for caught exceptions. if (caught_exception) { @@ -782,9 +793,11 @@ bool Debug::CompileDebuggerScript(int index) { "error_loading_debugger", &computed_location, Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>()); ASSERT(!isolate->has_pending_exception()); - isolate->set_pending_exception(*exception); - MessageHandler::ReportMessage(Isolate::Current(), NULL, message); - isolate->clear_pending_exception(); + if (!exception.is_null()) { + isolate->set_pending_exception(*exception); + MessageHandler::ReportMessage(Isolate::Current(), NULL, message); + isolate->clear_pending_exception(); + } return false; } @@ -817,7 +830,6 @@ bool Debug::Load() { HandleScope scope(isolate_); Handle<Context> context = isolate_->bootstrapper()->CreateEnvironment( - isolate_, Handle<Object>::null(), v8::Handle<ObjectTemplate>(), NULL); @@ -830,12 +842,16 @@ bool Debug::Load() { isolate_->set_context(*context); // Expose the builtins object in the debugger context. - Handle<String> key = isolate_->factory()->LookupAsciiSymbol("builtins"); + Handle<String> key = isolate_->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("builtins")); Handle<GlobalObject> global = Handle<GlobalObject>(context->global_object()); RETURN_IF_EMPTY_HANDLE_VALUE( isolate_, - JSReceiver::SetProperty(global, key, Handle<Object>(global->builtins()), - NONE, kNonStrictMode), + JSReceiver::SetProperty(global, + key, + Handle<Object>(global->builtins(), isolate_), + NONE, + kNonStrictMode), false); // Compile the JavaScript for the debugger in the debugger context. @@ -940,10 +956,10 @@ Object* Debug::Break(Arguments args) { // If there is one or more real break points check whether any of these are // triggered. - Handle<Object> break_points_hit(heap->undefined_value()); + Handle<Object> break_points_hit(heap->undefined_value(), isolate_); if (break_location_iterator.HasBreakPoint()) { Handle<Object> break_point_objects = - Handle<Object>(break_location_iterator.BreakPointObjects()); + Handle<Object>(break_location_iterator.BreakPointObjects(), isolate_); break_points_hit = CheckBreakPoints(break_point_objects); } @@ -1061,7 +1077,7 @@ Handle<Object> Debug::CheckBreakPoints(Handle<Object> break_point_objects) { Handle<FixedArray> array(FixedArray::cast(*break_point_objects)); break_points_hit = factory->NewFixedArray(array->length()); for (int i = 0; i < array->length(); i++) { - Handle<Object> o(array->get(i)); + Handle<Object> o(array->get(i), isolate_); if (CheckBreakPoint(o)) { break_points_hit->set(break_points_hit_count++, *o); } @@ -1093,12 +1109,13 @@ bool Debug::CheckBreakPoint(Handle<Object> break_point_object) { if (!break_point_object->IsJSObject()) return true; // Get the function IsBreakPointTriggered (defined in debug-debugger.js). 
- Handle<String> is_break_point_triggered_symbol = - factory->LookupAsciiSymbol("IsBreakPointTriggered"); + Handle<String> is_break_point_triggered_string = + factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("IsBreakPointTriggered")); Handle<JSFunction> check_break_point = Handle<JSFunction>(JSFunction::cast( debug_context()->global_object()->GetPropertyNoExceptionThrown( - *is_break_point_triggered_symbol))); + *is_break_point_triggered_string))); // Get the break id as an object. Handle<Object> break_id = factory->NewNumberFromInt(Debug::break_id()); @@ -1283,7 +1300,8 @@ void Debug::FloodWithOneShot(Handle<JSFunction> function) { void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) { Handle<FixedArray> new_bindings(function->function_bindings()); - Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex)); + Handle<Object> bindee(new_bindings->get(JSFunction::kBoundFunctionIndex), + isolate_); if (!bindee.is_null() && bindee->IsJSFunction() && !JSFunction::cast(*bindee)->IsBuiltin()) { @@ -1481,7 +1499,8 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { // from the code object. Handle<Object> obj( isolate_->heap()->code_stubs()->SlowReverseLookup( - *call_function_stub)); + *call_function_stub), + isolate_); ASSERT(!obj.is_null()); ASSERT(!(*obj)->IsUndefined()); ASSERT(obj->IsSmi()); @@ -1535,7 +1554,7 @@ void Debug::PrepareStep(StepAction step_action, int step_count) { } // Step in or Step in min - it.PrepareStepIn(); + it.PrepareStepIn(isolate_); ActivateStepIn(frame); } } @@ -1579,7 +1598,7 @@ bool Debug::StepNextContinue(BreakLocationIterator* break_location_iterator, // object. bool Debug::IsDebugBreak(Address addr) { Code* code = Code::GetCodeFromTargetAddress(addr); - return code->ic_state() == DEBUG_BREAK; + return code->is_debug_break(); } @@ -1654,10 +1673,12 @@ Handle<Object> Debug::GetSourceBreakLocations( Handle<SharedFunctionInfo> shared) { Isolate* isolate = Isolate::Current(); Heap* heap = isolate->heap(); - if (!HasDebugInfo(shared)) return Handle<Object>(heap->undefined_value()); + if (!HasDebugInfo(shared)) { + return Handle<Object>(heap->undefined_value(), isolate); + } Handle<DebugInfo> debug_info = GetDebugInfo(shared); if (debug_info->GetBreakPointCount() == 0) { - return Handle<Object>(heap->undefined_value()); + return Handle<Object>(heap->undefined_value(), isolate); } Handle<FixedArray> locations = isolate->factory()->NewFixedArray(debug_info->GetBreakPointCount()); @@ -1692,9 +1713,10 @@ void Debug::HandleStepIn(Handle<JSFunction> function, Handle<Object> holder, Address fp, bool is_constructor) { + Isolate* isolate = function->GetIsolate(); // If the frame pointer is not supplied by the caller find it. if (fp == 0) { - StackFrameIterator it; + StackFrameIterator it(isolate); it.Advance(); // For constructor functions skip another frame. if (is_constructor) { @@ -1713,9 +1735,9 @@ void Debug::HandleStepIn(Handle<JSFunction> function, } else if (!function->IsBuiltin()) { // Don't allow step into functions in the native context. 
if (function->shared()->code() == - Isolate::Current()->builtins()->builtin(Builtins::kFunctionApply) || + isolate->builtins()->builtin(Builtins::kFunctionApply) || function->shared()->code() == - Isolate::Current()->builtins()->builtin(Builtins::kFunctionCall)) { + isolate->builtins()->builtin(Builtins::kFunctionCall)) { // Handle function.apply and function.call separately to flood the // function to be called and not the code for Builtins::FunctionApply or // Builtins::FunctionCall. The receiver of call/apply is the target @@ -1978,7 +2000,7 @@ void Debug::PrepareForBreakPoints() { // If preparing for the first break point make sure to deoptimize all // functions as debugging does not work with optimized code. if (!has_break_points_) { - Deoptimizer::DeoptimizeAll(); + Deoptimizer::DeoptimizeAll(isolate_); Handle<Code> lazy_compile = Handle<Code>(isolate_->builtins()->builtin(Builtins::kLazyCompile)); @@ -1993,14 +2015,15 @@ void Debug::PrepareForBreakPoints() { { // We are going to iterate heap to find all functions without // debug break slots. - isolate_->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask, - "preparing for breakpoints"); + Heap* heap = isolate_->heap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, + "preparing for breakpoints"); // Ensure no GC in this scope as we are going to use gc_metadata // field in the Code object to mark active functions. AssertNoAllocation no_allocation; - Object* active_code_marker = isolate_->heap()->the_hole_value(); + Object* active_code_marker = heap->the_hole_value(); CollectActiveFunctionsFromThread(isolate_, isolate_->thread_local_top(), @@ -2014,7 +2037,7 @@ void Debug::PrepareForBreakPoints() { // Scan the heap for all non-optimized functions which have no // debug break slots and are not active or inlined into an active // function and mark them for lazy compilation. - HeapIterator iterator; + HeapIterator iterator(heap); HeapObject* obj = NULL; while (((obj = iterator.next()) != NULL)) { if (obj->IsJSFunction()) { @@ -2109,11 +2132,12 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script, int target_start_position = RelocInfo::kNoPosition; Handle<JSFunction> target_function; Handle<SharedFunctionInfo> target; + Heap* heap = isolate_->heap(); while (!done) { { // Extra scope for iterator and no-allocation. - isolate_->heap()->EnsureHeapIsIterable(); + heap->EnsureHeapIsIterable(); AssertNoAllocation no_alloc_during_heap_iteration; - HeapIterator iterator; + HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { bool found_next_candidate = false; @@ -2173,9 +2197,7 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script, } // End for loop. } // End no-allocation scope. - if (target.is_null()) { - return isolate_->heap()->undefined_value(); - } + if (target.is_null()) return heap->undefined_value(); // There will be at least one break point when we are done. has_break_points_ = true; @@ -2419,11 +2441,11 @@ void Debug::ClearMirrorCache() { ASSERT(isolate_->context() == *Debug::debug_context()); // Clear the mirror cache. 
- Handle<String> function_name = - isolate_->factory()->LookupSymbol(CStrVector("ClearMirrorCache")); + Handle<String> function_name = isolate_->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("ClearMirrorCache")); Handle<Object> fun( - Isolate::Current()->global_object()->GetPropertyNoExceptionThrown( - *function_name)); + isolate_->global_object()->GetPropertyNoExceptionThrown(*function_name), + isolate_); ASSERT(fun->IsJSFunction()); bool caught_exception; Execution::TryCall(Handle<JSFunction>::cast(fun), @@ -2449,7 +2471,7 @@ void Debug::CreateScriptCache() { // Scan heap for Script objects. int count = 0; - HeapIterator iterator; + HeapIterator iterator(heap); AssertNoAllocation no_allocation; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { @@ -2548,10 +2570,10 @@ Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name, // Create the execution state object. Handle<String> constructor_str = - isolate_->factory()->LookupSymbol(constructor_name); + isolate_->factory()->InternalizeUtf8String(constructor_name); Handle<Object> constructor( - isolate_->global_object()->GetPropertyNoExceptionThrown( - *constructor_str)); + isolate_->global_object()->GetPropertyNoExceptionThrown(*constructor_str), + isolate_); ASSERT(constructor->IsJSFunction()); if (!constructor->IsJSFunction()) { *caught_exception = true; @@ -2639,7 +2661,7 @@ Handle<Object> Debugger::MakeScriptCollectedEvent(int id, bool* caught_exception) { // Create the script collected event object. Handle<Object> exec_state = MakeExecutionState(caught_exception); - Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id)); + Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_); Handle<Object> argv[] = { exec_state, id_object }; return MakeJSObject(CStrVector("MakeScriptCollectedEvent"), @@ -2778,11 +2800,14 @@ void Debugger::OnAfterCompile(Handle<Script> script, // script. Make sure that these break points are set. // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js). - Handle<String> update_script_break_points_symbol = - isolate_->factory()->LookupAsciiSymbol("UpdateScriptBreakPoints"); + Handle<String> update_script_break_points_string = + isolate_->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("UpdateScriptBreakPoints")); Handle<Object> update_script_break_points = - Handle<Object>(debug->debug_context()->global_object()-> - GetPropertyNoExceptionThrown(*update_script_break_points_symbol)); + Handle<Object>( + debug->debug_context()->global_object()->GetPropertyNoExceptionThrown( + *update_script_break_points_string), + isolate_); if (!update_script_break_points->IsJSFunction()) { return; } @@ -2932,7 +2957,7 @@ void Debugger::CallJSEventCallback(v8::DebugEvent event, Handle<JSFunction> fun(Handle<JSFunction>::cast(event_listener_)); // Invoke the JavaScript debug event listener. 
- Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event)), + Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_), exec_state, event_data, event_listener_data_ }; @@ -3315,7 +3340,8 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun, Handle<Object> argv[] = { exec_state, data }; Handle<Object> result = Execution::Call( fun, - Handle<Object>(isolate_->debug()->debug_context_->global_proxy()), + Handle<Object>(isolate_->debug()->debug_context_->global_proxy(), + isolate_), ARRAY_SIZE(argv), argv, pending_exception); @@ -3541,7 +3567,8 @@ v8::Handle<v8::Object> MessageImpl::GetEventData() const { v8::Handle<v8::String> MessageImpl::GetJSON() const { - v8::HandleScope scope; + v8::HandleScope scope( + reinterpret_cast<v8::Isolate*>(event_data_->GetIsolate())); if (IsEvent()) { // Call toJSONProtocol on the debug event object. @@ -3758,6 +3785,7 @@ void MessageDispatchHelperThread::Schedule() { void MessageDispatchHelperThread::Run() { + Isolate* isolate = Isolate::Current(); while (true) { sem_->Wait(); { @@ -3765,8 +3793,8 @@ void MessageDispatchHelperThread::Run() { already_signalled_ = false; } { - Locker locker; - Isolate::Current()->debugger()->CallMessageDispatchHandler(); + Locker locker(reinterpret_cast<v8::Isolate*>(isolate)); + isolate->debugger()->CallMessageDispatchHandler(); } } } diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h index 150e29e308..c7f06815b9 100644 --- a/deps/v8/src/debug.h +++ b/deps/v8/src/debug.h @@ -97,7 +97,7 @@ class BreakLocationIterator { void ClearBreakPoint(Handle<Object> break_point_object); void SetOneShot(); void ClearOneShot(); - void PrepareStepIn(); + void PrepareStepIn(Isolate* isolate); bool IsExit() const; bool HasBreakPoint(); bool IsDebugBreak(); @@ -189,7 +189,9 @@ class ScriptCache : private HashMap { void Clear(); // Weak handle callback for scripts in the cache. - static void HandleWeakScript(v8::Persistent<v8::Value> obj, void* data); + static void HandleWeakScript(v8::Isolate* isolate, + v8::Persistent<v8::Value> obj, + void* data); // List used during GC to temporarily store id's of collected scripts. List<int> collected_scripts_; @@ -384,7 +386,9 @@ class Debug { static const int kEstimatedNofBreakPointsInFunction = 16; // Passed to MakeWeak. - static void HandleWeakDebugInfo(v8::Persistent<v8::Value> obj, void* data); + static void HandleWeakDebugInfo(v8::Isolate* isolate, + v8::Persistent<v8::Value> obj, + void* data); friend class Debugger; friend Handle<FixedArray> GetDebuggedFunctions(); // In test-debug.cc @@ -875,7 +879,9 @@ class Debugger { void set_loading_debugger(bool v) { is_loading_debugger_ = v; } bool is_loading_debugger() const { return is_loading_debugger_; } void set_live_edit_enabled(bool v) { live_edit_enabled_ = v; } - bool live_edit_enabled() const { return live_edit_enabled_; } + bool live_edit_enabled() const { + return FLAG_enable_liveedit && live_edit_enabled_ ; + } void set_force_debugger_active(bool force_debugger_active) { force_debugger_active_ = force_debugger_active; } diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc index ad893b3b43..f03025cbfb 100644 --- a/deps/v8/src/deoptimizer.cc +++ b/deps/v8/src/deoptimizer.cc @@ -1,4 +1,4 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2013 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -40,28 +40,40 @@ namespace v8 { namespace internal { -DeoptimizerData::DeoptimizerData() { - eager_deoptimization_entry_code_ = NULL; - lazy_deoptimization_entry_code_ = NULL; - current_ = NULL; - deoptimizing_code_list_ = NULL; +static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) { + return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(), + OS::CommitPageSize(), + EXECUTABLE, + NULL); +} + + +DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator) + : allocator_(allocator), + eager_deoptimization_entry_code_entries_(-1), + lazy_deoptimization_entry_code_entries_(-1), + eager_deoptimization_entry_code_(AllocateCodeChunk(allocator)), + lazy_deoptimization_entry_code_(AllocateCodeChunk(allocator)), + current_(NULL), #ifdef ENABLE_DEBUGGER_SUPPORT - deoptimized_frame_info_ = NULL; + deoptimized_frame_info_(NULL), #endif -} + deoptimizing_code_list_(NULL) { } DeoptimizerData::~DeoptimizerData() { - if (eager_deoptimization_entry_code_ != NULL) { - Isolate::Current()->memory_allocator()->Free( - eager_deoptimization_entry_code_); - eager_deoptimization_entry_code_ = NULL; - } - if (lazy_deoptimization_entry_code_ != NULL) { - Isolate::Current()->memory_allocator()->Free( - lazy_deoptimization_entry_code_); - lazy_deoptimization_entry_code_ = NULL; + allocator_->Free(eager_deoptimization_entry_code_); + eager_deoptimization_entry_code_ = NULL; + allocator_->Free(lazy_deoptimization_entry_code_); + lazy_deoptimization_entry_code_ = NULL; + + DeoptimizingCodeListNode* current = deoptimizing_code_list_; + while (current != NULL) { + DeoptimizingCodeListNode* prev = current; + current = current->next(); + delete prev; } + deoptimizing_code_list_ = NULL; } @@ -74,6 +86,36 @@ void DeoptimizerData::Iterate(ObjectVisitor* v) { #endif +Code* DeoptimizerData::FindDeoptimizingCode(Address addr) { + for (DeoptimizingCodeListNode* node = deoptimizing_code_list_; + node != NULL; + node = node->next()) { + if (node->code()->contains(addr)) return *node->code(); + } + return NULL; +} + + +void DeoptimizerData::RemoveDeoptimizingCode(Code* code) { + for (DeoptimizingCodeListNode *prev = NULL, *cur = deoptimizing_code_list_; + cur != NULL; + prev = cur, cur = cur->next()) { + if (*cur->code() == code) { + if (prev == NULL) { + deoptimizing_code_list_ = cur->next(); + } else { + prev->set_next(cur->next()); + } + delete cur; + return; + } + } + // Deoptimizing code is removed through weak callback. Each object is expected + // to be removed once and only once. + UNREACHABLE(); +} + + // We rely on this function not causing a GC. It is called from generated code // without having a real stack frame in place. 
Deoptimizer* Deoptimizer::New(JSFunction* function, @@ -82,7 +124,6 @@ Deoptimizer* Deoptimizer::New(JSFunction* function, Address from, int fp_to_sp_delta, Isolate* isolate) { - ASSERT(isolate == Isolate::Current()); Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, type, @@ -96,8 +137,21 @@ Deoptimizer* Deoptimizer::New(JSFunction* function, } +// No larger than 2K on all platforms +static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB; + + +size_t Deoptimizer::GetMaxDeoptTableSize() { + int entries_size = + Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_; + int commit_page_size = static_cast<int>(OS::CommitPageSize()); + int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) / + commit_page_size) + 1; + return static_cast<size_t>(commit_page_size * page_count); +} + + Deoptimizer* Deoptimizer::Grab(Isolate* isolate) { - ASSERT(isolate == Isolate::Current()); Deoptimizer* result = isolate->deoptimizer_data()->current_; ASSERT(result != NULL); result->DeleteFrameDescriptions(); @@ -127,7 +181,6 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( JavaScriptFrame* frame, int jsframe_index, Isolate* isolate) { - ASSERT(isolate == Isolate::Current()); ASSERT(frame->is_optimized()); ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == NULL); @@ -213,7 +266,6 @@ DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame( void Deoptimizer::DeleteDebuggerInspectableFrame(DeoptimizedFrameInfo* info, Isolate* isolate) { - ASSERT(isolate == Isolate::Current()); ASSERT(isolate->deoptimizer_data()->deoptimized_frame_info_ == info); delete info; isolate->deoptimizer_data()->deoptimized_frame_info_ = NULL; @@ -228,45 +280,6 @@ void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm, } -class DeoptimizingVisitor : public OptimizedFunctionVisitor { - public: - virtual void EnterContext(Context* context) { - if (FLAG_trace_deopt) { - PrintF("[deoptimize context: %" V8PRIxPTR "]\n", - reinterpret_cast<intptr_t>(context)); - } - } - - virtual void VisitFunction(JSFunction* function) { - Deoptimizer::DeoptimizeFunction(function); - } - - virtual void LeaveContext(Context* context) { - context->ClearOptimizedFunctions(); - } -}; - - -void Deoptimizer::DeoptimizeAll() { - AssertNoAllocation no_allocation; - - if (FLAG_trace_deopt) { - PrintF("[deoptimize all contexts]\n"); - } - - DeoptimizingVisitor visitor; - VisitAllOptimizedFunctions(&visitor); -} - - -void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) { - AssertNoAllocation no_allocation; - - DeoptimizingVisitor visitor; - VisitAllOptimizedFunctionsForGlobalObject(object, &visitor); -} - - void Deoptimizer::VisitAllOptimizedFunctionsForContext( Context* context, OptimizedFunctionVisitor* visitor) { Isolate* isolate = context->GetIsolate(); @@ -296,51 +309,171 @@ void Deoptimizer::VisitAllOptimizedFunctionsForContext( } -void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject( - JSObject* object, OptimizedFunctionVisitor* visitor) { +void Deoptimizer::VisitAllOptimizedFunctions( + Isolate* isolate, + OptimizedFunctionVisitor* visitor) { + AssertNoAllocation no_allocation; + + // Run through the list of all native contexts and deoptimize. 
+ Object* context = isolate->heap()->native_contexts_list(); + while (!context->IsUndefined()) { + VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor); + context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); + } +} + + +// Removes the functions selected by the given filter from the optimized +// function list of the given context and partitions the removed functions +// into one or more lists such that all functions in a list share the same +// code. The head of each list is written in the deoptimizing_functions field +// of the corresponding code object. +// The found code objects are returned in the given zone list. +static void PartitionOptimizedFunctions(Context* context, + OptimizedFunctionFilter* filter, + ZoneList<Code*>* partitions, + Zone* zone, + Object* undefined) { + AssertNoAllocation no_allocation; + Object* current = context->get(Context::OPTIMIZED_FUNCTIONS_LIST); + Object* remainder_head = undefined; + Object* remainder_tail = undefined; + ASSERT_EQ(0, partitions->length()); + while (current != undefined) { + JSFunction* function = JSFunction::cast(current); + current = function->next_function_link(); + if (filter->TakeFunction(function)) { + Code* code = function->code(); + if (code->deoptimizing_functions() == undefined) { + partitions->Add(code, zone); + } else { + ASSERT(partitions->Contains(code)); + } + function->set_next_function_link(code->deoptimizing_functions()); + code->set_deoptimizing_functions(function); + } else { + if (remainder_head == undefined) { + remainder_head = function; + } else { + JSFunction::cast(remainder_tail)->set_next_function_link(function); + } + remainder_tail = function; + } + } + if (remainder_tail != undefined) { + JSFunction::cast(remainder_tail)->set_next_function_link(undefined); + } + context->set(Context::OPTIMIZED_FUNCTIONS_LIST, remainder_head); +} + + +class DeoptimizeAllFilter : public OptimizedFunctionFilter { + public: + virtual bool TakeFunction(JSFunction* function) { + return true; + } +}; + + +class DeoptimizeWithMatchingCodeFilter : public OptimizedFunctionFilter { + public: + explicit DeoptimizeWithMatchingCodeFilter(Code* code) : code_(code) {} + virtual bool TakeFunction(JSFunction* function) { + return function->code() == code_; + } + private: + Code* code_; +}; + + +void Deoptimizer::DeoptimizeAll(Isolate* isolate) { AssertNoAllocation no_allocation; + if (FLAG_trace_deopt) { + PrintF("[deoptimize all contexts]\n"); + } + + DeoptimizeAllFilter filter; + DeoptimizeAllFunctionsWith(isolate, &filter); +} + + +void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) { + AssertNoAllocation no_allocation; + DeoptimizeAllFilter filter; if (object->IsJSGlobalProxy()) { Object* proto = object->GetPrototype(); ASSERT(proto->IsJSGlobalObject()); - VisitAllOptimizedFunctionsForContext( - GlobalObject::cast(proto)->native_context(), visitor); + DeoptimizeAllFunctionsForContext( + GlobalObject::cast(proto)->native_context(), &filter); } else if (object->IsGlobalObject()) { - VisitAllOptimizedFunctionsForContext( - GlobalObject::cast(object)->native_context(), visitor); + DeoptimizeAllFunctionsForContext( + GlobalObject::cast(object)->native_context(), &filter); } } -void Deoptimizer::VisitAllOptimizedFunctions( - OptimizedFunctionVisitor* visitor) { +void Deoptimizer::DeoptimizeFunction(JSFunction* function) { + if (!function->IsOptimized()) return; + Code* code = function->code(); + Context* context = function->context()->native_context(); + Isolate* isolate = context->GetIsolate(); + Object* 
undefined = isolate->heap()->undefined_value(); + Zone* zone = isolate->runtime_zone(); + ZoneScope zone_scope(zone, DELETE_ON_EXIT); + ZoneList<Code*> codes(1, zone); + DeoptimizeWithMatchingCodeFilter filter(code); + PartitionOptimizedFunctions(context, &filter, &codes, zone, undefined); + ASSERT_EQ(1, codes.length()); + DeoptimizeFunctionWithPreparedFunctionList( + JSFunction::cast(codes.at(0)->deoptimizing_functions())); + codes.at(0)->set_deoptimizing_functions(undefined); +} + + +void Deoptimizer::DeoptimizeAllFunctionsForContext( + Context* context, OptimizedFunctionFilter* filter) { + ASSERT(context->IsNativeContext()); + Isolate* isolate = context->GetIsolate(); + Object* undefined = isolate->heap()->undefined_value(); + Zone* zone = isolate->runtime_zone(); + ZoneScope zone_scope(zone, DELETE_ON_EXIT); + ZoneList<Code*> codes(1, zone); + PartitionOptimizedFunctions(context, filter, &codes, zone, undefined); + for (int i = 0; i < codes.length(); ++i) { + DeoptimizeFunctionWithPreparedFunctionList( + JSFunction::cast(codes.at(i)->deoptimizing_functions())); + codes.at(i)->set_deoptimizing_functions(undefined); + } +} + + +void Deoptimizer::DeoptimizeAllFunctionsWith(Isolate* isolate, + OptimizedFunctionFilter* filter) { AssertNoAllocation no_allocation; // Run through the list of all native contexts and deoptimize. - Object* context = Isolate::Current()->heap()->native_contexts_list(); + Object* context = isolate->heap()->native_contexts_list(); while (!context->IsUndefined()) { - // GC can happen when the context is not fully initialized, - // so the global field of the context can be undefined. - Object* global = Context::cast(context)->get(Context::GLOBAL_OBJECT_INDEX); - if (!global->IsUndefined()) { - VisitAllOptimizedFunctionsForGlobalObject(JSObject::cast(global), - visitor); - } + DeoptimizeAllFunctionsForContext(Context::cast(context), filter); context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); } } -void Deoptimizer::HandleWeakDeoptimizedCode( - v8::Persistent<v8::Value> obj, void* data) { +void Deoptimizer::HandleWeakDeoptimizedCode(v8::Isolate* isolate, + v8::Persistent<v8::Value> obj, + void* parameter) { DeoptimizingCodeListNode* node = - reinterpret_cast<DeoptimizingCodeListNode*>(data); - RemoveDeoptimizingCode(*node->code()); + reinterpret_cast<DeoptimizingCodeListNode*>(parameter); + DeoptimizerData* data = + reinterpret_cast<Isolate*>(isolate)->deoptimizer_data(); + data->RemoveDeoptimizingCode(*node->code()); #ifdef DEBUG - node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_; - while (node != NULL) { - ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data)); - node = node->next(); + for (DeoptimizingCodeListNode* current = data->deoptimizing_code_list_; + current != NULL; + current = current->next()) { + ASSERT(current != node); } #endif } @@ -351,6 +484,38 @@ void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) { } +bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, + StackFrame::Type frame_type) { + switch (deopt_type) { + case EAGER: + case LAZY: + case DEBUGGER: + return (frame_type == StackFrame::STUB) + ? 
FLAG_trace_stub_failures + : FLAG_trace_deopt; + case OSR: + return FLAG_trace_osr; + } + UNREACHABLE(); + return false; +} + + +const char* Deoptimizer::MessageFor(BailoutType type) { + switch (type) { + case EAGER: + case LAZY: + return "DEOPT"; + case DEBUGGER: + return "DEOPT FOR DEBUGGER"; + case OSR: + return "OSR"; + } + UNREACHABLE(); + return NULL; +} + + Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function, BailoutType type, @@ -371,69 +536,77 @@ Deoptimizer::Deoptimizer(Isolate* isolate, output_(NULL), deferred_arguments_objects_values_(0), deferred_arguments_objects_(0), - deferred_heap_numbers_(0) { - if (FLAG_trace_deopt && type != OSR) { - if (type == DEBUGGER) { - PrintF("**** DEOPT FOR DEBUGGER: "); - } else { - PrintF("**** DEOPT: "); - } - function->PrintName(); - PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n", - bailout_id, - reinterpret_cast<intptr_t>(from), - fp_to_sp_delta - (2 * kPointerSize)); - } else if (FLAG_trace_osr && type == OSR) { - PrintF("**** OSR: "); - function->PrintName(); - PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n", - bailout_id, - reinterpret_cast<intptr_t>(from), - fp_to_sp_delta - (2 * kPointerSize)); - } - function->shared()->increment_deopt_count(); - // Find the optimized code. - if (type == EAGER) { - ASSERT(from == NULL); - optimized_code_ = function_->code(); - if (FLAG_trace_deopt && FLAG_code_comments) { - // Print instruction associated with this bailout. - const char* last_comment = NULL; - int mask = RelocInfo::ModeMask(RelocInfo::COMMENT) - | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); - for (RelocIterator it(optimized_code_, mask); !it.done(); it.next()) { - RelocInfo* info = it.rinfo(); - if (info->rmode() == RelocInfo::COMMENT) { - last_comment = reinterpret_cast<const char*>(info->data()); - } - if (info->rmode() == RelocInfo::RUNTIME_ENTRY) { - unsigned id = Deoptimizer::GetDeoptimizationId( - info->target_address(), Deoptimizer::EAGER); - if (id == bailout_id && last_comment != NULL) { - PrintF(" %s\n", last_comment); - break; - } - } - } - } - } else if (type == LAZY) { - optimized_code_ = FindDeoptimizingCodeFromAddress(from); - ASSERT(optimized_code_ != NULL); - } else if (type == OSR) { - // The function has already been optimized and we're transitioning - // from the unoptimized shared version to the optimized one in the - // function. The return address (from) points to unoptimized code. - optimized_code_ = function_->code(); - ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION); - ASSERT(!optimized_code_->contains(from)); - } else if (type == DEBUGGER) { - optimized_code_ = optimized_code; - ASSERT(optimized_code_->contains(from)); + deferred_heap_numbers_(0), + trace_(false) { + // For COMPILED_STUBs called from builtins, the function pointer is a SMI + // indicating an internal frame. + if (function->IsSmi()) { + function = NULL; + } + if (function != NULL && function->IsOptimized()) { + function->shared()->increment_deopt_count(); } + compiled_code_ = FindOptimizedCode(function, optimized_code); + StackFrame::Type frame_type = function == NULL + ? 
StackFrame::STUB + : StackFrame::JAVA_SCRIPT; + trace_ = TraceEnabledFor(type, frame_type); + if (trace_) Trace(); ASSERT(HEAP->allow_allocation(false)); unsigned size = ComputeInputFrameSize(); input_ = new(size) FrameDescription(size, function); - input_->SetFrameType(StackFrame::JAVA_SCRIPT); + input_->SetFrameType(frame_type); +} + + +Code* Deoptimizer::FindOptimizedCode(JSFunction* function, + Code* optimized_code) { + switch (bailout_type_) { + case Deoptimizer::EAGER: + ASSERT(from_ == NULL); + return function->code(); + case Deoptimizer::LAZY: { + Code* compiled_code = + isolate_->deoptimizer_data()->FindDeoptimizingCode(from_); + return (compiled_code == NULL) + ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_)) + : compiled_code; + } + case Deoptimizer::OSR: { + // The function has already been optimized and we're transitioning + // from the unoptimized shared version to the optimized one in the + // function. The return address (from_) points to unoptimized code. + Code* compiled_code = function->code(); + ASSERT(compiled_code->kind() == Code::OPTIMIZED_FUNCTION); + ASSERT(!compiled_code->contains(from_)); + return compiled_code; + } + case Deoptimizer::DEBUGGER: + ASSERT(optimized_code->contains(from_)); + return optimized_code; + } + UNREACHABLE(); + return NULL; +} + + +void Deoptimizer::Trace() { + PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_)); + PrintFunctionName(); + PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n", + bailout_id_, + reinterpret_cast<intptr_t>(from_), + fp_to_sp_delta_ - (2 * kPointerSize)); + if (bailout_type_ == EAGER) compiled_code_->PrintDeoptLocation(bailout_id_); +} + + +void Deoptimizer::PrintFunctionName() { + if (function_->IsJSFunction()) { + function_->PrintName(); + } else { + PrintF("%s", Code::Kind2String(compiled_code_->kind())); + } } @@ -454,44 +627,41 @@ void Deoptimizer::DeleteFrameDescriptions() { } -Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) { +Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate, + int id, + BailoutType type, + GetEntryMode mode) { ASSERT(id >= 0); - if (id >= kNumberOfEntries) return NULL; - MemoryChunk* base = NULL; - DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); - if (type == EAGER) { - if (data->eager_deoptimization_entry_code_ == NULL) { - data->eager_deoptimization_entry_code_ = CreateCode(type); - } - base = data->eager_deoptimization_entry_code_; - } else { - if (data->lazy_deoptimization_entry_code_ == NULL) { - data->lazy_deoptimization_entry_code_ = CreateCode(type); - } - base = data->lazy_deoptimization_entry_code_; - } - return - static_cast<Address>(base->area_start()) + (id * table_entry_size_); -} - - -int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { - MemoryChunk* base = NULL; - DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); - if (type == EAGER) { - base = data->eager_deoptimization_entry_code_; + if (id >= kMaxNumberOfEntries) return NULL; + if (mode == ENSURE_ENTRY_CODE) { + EnsureCodeForDeoptimizationEntry(isolate, type, id); } else { - base = data->lazy_deoptimization_entry_code_; + ASSERT(mode == CALCULATE_ENTRY_ADDRESS); } + DeoptimizerData* data = isolate->deoptimizer_data(); + MemoryChunk* base = (type == EAGER) + ? 
data->eager_deoptimization_entry_code_ + : data->lazy_deoptimization_entry_code_; + return base->area_start() + (id * table_entry_size_); +} + + +int Deoptimizer::GetDeoptimizationId(Isolate* isolate, + Address addr, + BailoutType type) { + DeoptimizerData* data = isolate->deoptimizer_data(); + MemoryChunk* base = (type == EAGER) + ? data->eager_deoptimization_entry_code_ + : data->lazy_deoptimization_entry_code_; + Address start = base->area_start(); if (base == NULL || - addr < base->area_start() || - addr >= base->area_start() + - (kNumberOfEntries * table_entry_size_)) { + addr < start || + addr >= start + (kMaxNumberOfEntries * table_entry_size_)) { return kNotDeoptimizationEntry; } ASSERT_EQ(0, - static_cast<int>(addr - base->area_start()) % table_entry_size_); - return static_cast<int>(addr - base->area_start()) / table_entry_size_; + static_cast<int>(addr - start) % table_entry_size_); + return static_cast<int>(addr - start) / table_entry_size_; } @@ -515,7 +685,7 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data, shared->SourceCodePrint(&stream, -1); PrintF("[source:\n%s\n]", *stream.ToCString()); - UNREACHABLE(); + FATAL("unable to find pc offset during deoptimization"); return -1; } @@ -542,18 +712,18 @@ void Deoptimizer::DoComputeOutputFrames() { // Print some helpful diagnostic information. int64_t start = OS::Ticks(); - if (FLAG_trace_deopt) { + if (trace_) { PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ", (bailout_type_ == LAZY ? " (lazy)" : ""), reinterpret_cast<intptr_t>(function_)); - function_->PrintName(); + PrintFunctionName(); PrintF(" @%d]\n", bailout_id_); } // Determine basic deoptimization information. The optimized frame is // described by the input data. DeoptimizationInputData* input_data = - DeoptimizationInputData::cast(optimized_code_->deoptimization_data()); + DeoptimizationInputData::cast(compiled_code_->deoptimization_data()); BailoutId node_id = input_data->AstId(bailout_id_); ByteArray* translations = input_data->TranslationByteArray(); unsigned translation_index = @@ -598,6 +768,9 @@ void Deoptimizer::DoComputeOutputFrames() { case Translation::SETTER_STUB_FRAME: DoComputeAccessorStubFrame(&iterator, i, true); break; + case Translation::COMPILED_STUB_FRAME: + DoComputeCompiledStubFrame(&iterator, i); + break; case Translation::BEGIN: case Translation::REGISTER: case Translation::INT32_REGISTER: @@ -610,19 +783,20 @@ void Deoptimizer::DoComputeOutputFrames() { case Translation::LITERAL: case Translation::ARGUMENTS_OBJECT: case Translation::DUPLICATE: + default: UNREACHABLE(); break; } } // Print some helpful diagnostic information. - if (FLAG_trace_deopt) { + if (trace_) { double ms = static_cast<double>(OS::Ticks() - start) / 1000; int index = output_count_ - 1; // Index of the topmost frame. 
JSFunction* function = output_[index]->GetFunction(); PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ", reinterpret_cast<intptr_t>(function)); - function->PrintName(); + if (function != NULL) function->PrintName(); PrintF(" => node=%d, pc=0x%08" V8PRIxPTR ", state=%s, alignment=%s," " took %0.3f ms]\n", node_id.ToInt(), @@ -636,13 +810,548 @@ void Deoptimizer::DoComputeOutputFrames() { } +void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, + int frame_index) { + JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); + unsigned height = iterator->Next(); + unsigned height_in_bytes = height * kPointerSize; + if (trace_) { + PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); + } + + unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. + FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, function); + output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); + + // Arguments adaptor can not be topmost or bottommost. + ASSERT(frame_index > 0 && frame_index < output_count_ - 1); + ASSERT(output_[frame_index] == NULL); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous + // frame's top and this frame's size. + intptr_t top_address; + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + output_frame->SetTop(top_address); + + // Compute the incoming parameter translation. + int parameter_count = height; + unsigned output_offset = output_frame_size; + for (int i = 0; i < parameter_count; ++i) { + output_offset -= kPointerSize; + DoTranslateCommand(iterator, frame_index, output_offset); + } + + // Read caller's PC from the previous frame. + output_offset -= kPointerSize; + intptr_t callers_pc = output_[frame_index - 1]->GetPc(); + output_frame->SetFrameSlot(output_offset, callers_pc); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's pc\n", + top_address + output_offset, output_offset, callers_pc); + } + + // Read caller's FP from the previous frame, and set this frame's FP. + output_offset -= kPointerSize; + intptr_t value = output_[frame_index - 1]->GetFp(); + output_frame->SetFrameSlot(output_offset, value); + intptr_t fp_value = top_address + output_offset; + output_frame->SetFp(fp_value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's fp\n", + fp_value, output_offset, value); + } + + // A marker value is used in place of the context. + output_offset -= kPointerSize; + intptr_t context = reinterpret_cast<intptr_t>( + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); + output_frame->SetFrameSlot(output_offset, context); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; context (adaptor sentinel)\n", + top_address + output_offset, output_offset, context); + } + + // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. + output_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>(function); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; function\n", + top_address + output_offset, output_offset, value); + } + + // Number of incoming arguments. 
+ output_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1)); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; argc (%d)\n", + top_address + output_offset, output_offset, value, height - 1); + } + + ASSERT(0 == output_offset); + + Builtins* builtins = isolate_->builtins(); + Code* adaptor_trampoline = + builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); + intptr_t pc_value = reinterpret_cast<intptr_t>( + adaptor_trampoline->instruction_start() + + isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); + output_frame->SetPc(pc_value); +} + + +void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, + int frame_index) { + Builtins* builtins = isolate_->builtins(); + Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric); + JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); + unsigned height = iterator->Next(); + unsigned height_in_bytes = height * kPointerSize; + if (trace_) { + PrintF(" translating construct stub => height=%d\n", height_in_bytes); + } + + unsigned fixed_frame_size = ConstructFrameConstants::kFrameSize; + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. + FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, function); + output_frame->SetFrameType(StackFrame::CONSTRUCT); + + // Construct stub can not be topmost or bottommost. + ASSERT(frame_index > 0 && frame_index < output_count_ - 1); + ASSERT(output_[frame_index] == NULL); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous + // frame's top and this frame's size. + intptr_t top_address; + top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + output_frame->SetTop(top_address); + + // Compute the incoming parameter translation. + int parameter_count = height; + unsigned output_offset = output_frame_size; + for (int i = 0; i < parameter_count; ++i) { + output_offset -= kPointerSize; + DoTranslateCommand(iterator, frame_index, output_offset); + } + + // Read caller's PC from the previous frame. + output_offset -= kPointerSize; + intptr_t callers_pc = output_[frame_index - 1]->GetPc(); + output_frame->SetFrameSlot(output_offset, callers_pc); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's pc\n", + top_address + output_offset, output_offset, callers_pc); + } + + // Read caller's FP from the previous frame, and set this frame's FP. + output_offset -= kPointerSize; + intptr_t value = output_[frame_index - 1]->GetFp(); + output_frame->SetFrameSlot(output_offset, value); + intptr_t fp_value = top_address + output_offset; + output_frame->SetFp(fp_value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's fp\n", + fp_value, output_offset, value); + } + + // The context can be gotten from the previous frame. + output_offset -= kPointerSize; + value = output_[frame_index - 1]->GetContext(); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; context\n", + top_address + output_offset, output_offset, value); + } + + // A marker value is used in place of the function. 
+ output_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT)); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; function (construct sentinel)\n", + top_address + output_offset, output_offset, value); + } + + // The output frame reflects a JSConstructStubGeneric frame. + output_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>(construct_stub); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; code object\n", + top_address + output_offset, output_offset, value); + } + + // Number of incoming arguments. + output_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1)); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; argc (%d)\n", + top_address + output_offset, output_offset, value, height - 1); + } + + // Constructor function being invoked by the stub (only present on some + // architectures, indicated by kConstructorOffset). + if (ConstructFrameConstants::kConstructorOffset != kMinInt) { + output_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>(function); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; constructor function\n", + top_address + output_offset, output_offset, value); + } + } + + // The newly allocated object was passed as receiver in the artificial + // constructor stub environment created by HEnvironment::CopyForInlining(). + output_offset -= kPointerSize; + value = output_frame->GetFrameSlot(output_frame_size - kPointerSize); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; allocated receiver\n", + top_address + output_offset, output_offset, value); + } + + ASSERT(0 == output_offset); + + intptr_t pc = reinterpret_cast<intptr_t>( + construct_stub->instruction_start() + + isolate_->heap()->construct_stub_deopt_pc_offset()->value()); + output_frame->SetPc(pc); +} + + +void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, + int frame_index, + bool is_setter_stub_frame) { + JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next())); + // The receiver (and the implicit return value, if any) are expected in + // registers by the LoadIC/StoreIC, so they don't belong to the output stack + // frame. This means that we have to use a height of 0. + unsigned height = 0; + unsigned height_in_bytes = height * kPointerSize; + const char* kind = is_setter_stub_frame ? "setter" : "getter"; + if (trace_) { + PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes); + } + + // We need 1 stack entry for the return address + 4 stack entries from + // StackFrame::INTERNAL (FP, context, frame type, code object, see + // MacroAssembler::EnterFrame). For a setter stub frame we need one additional + // entry for the implicit return value, see + // StoreStubCompiler::CompileStoreViaSetter. + unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0); + unsigned fixed_frame_size = fixed_frame_entries * kPointerSize; + unsigned output_frame_size = height_in_bytes + fixed_frame_size; + + // Allocate and store the output frame description. 
+ FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, accessor); + output_frame->SetFrameType(StackFrame::INTERNAL); + + // A frame for an accessor stub can not be the topmost or bottommost one. + ASSERT(frame_index > 0 && frame_index < output_count_ - 1); + ASSERT(output_[frame_index] == NULL); + output_[frame_index] = output_frame; + + // The top address of the frame is computed from the previous frame's top and + // this frame's size. + intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size; + output_frame->SetTop(top_address); + + unsigned output_offset = output_frame_size; + + // Read caller's PC from the previous frame. + output_offset -= kPointerSize; + intptr_t callers_pc = output_[frame_index - 1]->GetPc(); + output_frame->SetFrameSlot(output_offset, callers_pc); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR + " ; caller's pc\n", + top_address + output_offset, output_offset, callers_pc); + } + + // Read caller's FP from the previous frame, and set this frame's FP. + output_offset -= kPointerSize; + intptr_t value = output_[frame_index - 1]->GetFp(); + output_frame->SetFrameSlot(output_offset, value); + intptr_t fp_value = top_address + output_offset; + output_frame->SetFp(fp_value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR + " ; caller's fp\n", + fp_value, output_offset, value); + } + + // The context can be gotten from the previous frame. + output_offset -= kPointerSize; + value = output_[frame_index - 1]->GetContext(); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR + " ; context\n", + top_address + output_offset, output_offset, value); + } + + // A marker value is used in place of the function. + output_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL)); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR + " ; function (%s sentinel)\n", + top_address + output_offset, output_offset, value, kind); + } + + // Get Code object from accessor stub. + output_offset -= kPointerSize; + Builtins::Name name = is_setter_stub_frame ? + Builtins::kStoreIC_Setter_ForDeopt : + Builtins::kLoadIC_Getter_ForDeopt; + Code* accessor_stub = isolate_->builtins()->builtin(name); + value = reinterpret_cast<intptr_t>(accessor_stub); + output_frame->SetFrameSlot(output_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR + " ; code object\n", + top_address + output_offset, output_offset, value); + } + + // Skip receiver. + Translation::Opcode opcode = + static_cast<Translation::Opcode>(iterator->Next()); + iterator->Skip(Translation::NumberOfOperandsFor(opcode)); + + if (is_setter_stub_frame) { + // The implicit return value was part of the artificial setter stub + // environment. + output_offset -= kPointerSize; + DoTranslateCommand(iterator, frame_index, output_offset); + } + + ASSERT(0 == output_offset); + + Smi* offset = is_setter_stub_frame ? + isolate_->heap()->setter_stub_deopt_pc_offset() : + isolate_->heap()->getter_stub_deopt_pc_offset(); + intptr_t pc = reinterpret_cast<intptr_t>( + accessor_stub->instruction_start() + offset->value()); + output_frame->SetPc(pc); +} + + +void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator, + int frame_index) { + // + // FROM TO + // | .... | | .... 
| + // +-------------------------+ +-------------------------+ + // | JSFunction continuation | | JSFunction continuation | + // +-------------------------+ +-------------------------+ + // | | saved frame (FP) | | saved frame (FP) | + // | +=========================+<-fpreg +=========================+<-fpreg + // | | JSFunction context | | JSFunction context | + // v +-------------------------+ +-------------------------| + // | COMPILED_STUB marker | | STUB_FAILURE marker | + // +-------------------------+ +-------------------------+ + // | | | caller args.arguments_ | + // | ... | +-------------------------+ + // | | | caller args.length_ | + // |-------------------------|<-spreg +-------------------------+ + // | caller args pointer | + // +-------------------------+ + // | caller stack param 1 | + // parameters in registers +-------------------------+ + // and spilled to stack | .... | + // +-------------------------+ + // | caller stack param n | + // +-------------------------+<-spreg + // reg = number of parameters + // reg = failure handler address + // reg = saved frame + // reg = JSFunction context + // + + ASSERT(compiled_code_->kind() == Code::COMPILED_STUB); + int major_key = compiled_code_->major_key(); + CodeStubInterfaceDescriptor* descriptor = + isolate_->code_stub_interface_descriptor(major_key); + + // The output frame must have room for all pushed register parameters + // and the standard stack frame slots. Include space for an argument + // object to the callee and optionally the space to pass the argument + // object to the stub failure handler. + int height_in_bytes = kPointerSize * descriptor->register_param_count_ + + sizeof(Arguments) + kPointerSize; + int fixed_frame_size = StandardFrameConstants::kFixedFrameSize; + int input_frame_size = input_->GetFrameSize(); + int output_frame_size = height_in_bytes + fixed_frame_size; + if (trace_) { + PrintF(" translating %s => StubFailureTrampolineStub, height=%d\n", + CodeStub::MajorName(static_cast<CodeStub::Major>(major_key), false), + height_in_bytes); + } + + // The stub failure trampoline is a single frame. + FrameDescription* output_frame = + new(output_frame_size) FrameDescription(output_frame_size, NULL); + output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE); + ASSERT(frame_index == 0); + output_[frame_index] = output_frame; + + // The top address for the output frame can be computed from the input + // frame pointer and the output frame's height. Subtract space for the + // context and function slots. + Register fp_reg = StubFailureTrampolineFrame::fp_register(); + intptr_t top_address = input_->GetRegister(fp_reg.code()) - + (2 * kPointerSize) - height_in_bytes; + output_frame->SetTop(top_address); + + // Read caller's PC (JSFunction continuation) from the input frame. + unsigned input_frame_offset = input_frame_size - kPointerSize; + unsigned output_frame_offset = output_frame_size - kPointerSize; + intptr_t value = input_->GetFrameSlot(input_frame_offset); + output_frame->SetFrameSlot(output_frame_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's pc\n", + top_address + output_frame_offset, output_frame_offset, value); + } + + // Read caller's FP from the input frame, and set this frame's FP. 
+ input_frame_offset -= kPointerSize; + value = input_->GetFrameSlot(input_frame_offset); + output_frame_offset -= kPointerSize; + output_frame->SetFrameSlot(output_frame_offset, value); + intptr_t frame_ptr = input_->GetRegister(fp_reg.code()); + output_frame->SetRegister(fp_reg.code(), frame_ptr); + output_frame->SetFp(frame_ptr); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; caller's fp\n", + top_address + output_frame_offset, output_frame_offset, value); + } + + // The context can be gotten from the input frame. + Register context_reg = StubFailureTrampolineFrame::context_register(); + input_frame_offset -= kPointerSize; + value = input_->GetFrameSlot(input_frame_offset); + output_frame->SetRegister(context_reg.code(), value); + output_frame_offset -= kPointerSize; + output_frame->SetFrameSlot(output_frame_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; context\n", + top_address + output_frame_offset, output_frame_offset, value); + } + + // A marker value is used in place of the function. + output_frame_offset -= kPointerSize; + value = reinterpret_cast<intptr_t>( + Smi::FromInt(StackFrame::STUB_FAILURE_TRAMPOLINE)); + output_frame->SetFrameSlot(output_frame_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; function (stub failure sentinel)\n", + top_address + output_frame_offset, output_frame_offset, value); + } + + intptr_t caller_arg_count = 0; + if (descriptor->stack_parameter_count_ != NULL) { + caller_arg_count = + input_->GetRegister(descriptor->stack_parameter_count_->code()); + } + + // Build the Arguments object for the caller's parameters and a pointer to it. + output_frame_offset -= kPointerSize; + value = frame_ptr + StandardFrameConstants::kCallerSPOffset + + (caller_arg_count - 1) * kPointerSize; + output_frame->SetFrameSlot(output_frame_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; args.arguments\n", + top_address + output_frame_offset, output_frame_offset, value); + } + + output_frame_offset -= kPointerSize; + value = caller_arg_count; + output_frame->SetFrameSlot(output_frame_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; args.length\n", + top_address + output_frame_offset, output_frame_offset, value); + } + + output_frame_offset -= kPointerSize; + value = frame_ptr - (output_frame_size - output_frame_offset) - + StandardFrameConstants::kMarkerOffset + kPointerSize; + output_frame->SetFrameSlot(output_frame_offset, value); + if (trace_) { + PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" + V8PRIxPTR " ; args*\n", + top_address + output_frame_offset, output_frame_offset, value); + } + + // Copy the register parameters to the failure frame. + for (int i = 0; i < descriptor->register_param_count_; ++i) { + output_frame_offset -= kPointerSize; + DoTranslateCommand(iterator, 0, output_frame_offset); + } + + ASSERT(0 == output_frame_offset); + + // Copy the double registers from the input into the output frame. + CopyDoubleRegisters(output_frame); + + // Fill registers containing handler and number of parameters. + SetPlatformCompiledStubRegisters(output_frame, descriptor); + + // Compute this frame's PC, state, and continuation. 
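
An illustrative aside (not part of the patch): the construct-stub, accessor-stub and stub-failure translations in this hunk all fill their output FrameDescription top-down, starting a byte offset at the frame size, subtracting kPointerSize per slot written, and asserting the offset ends at zero. A minimal standalone sketch of that bookkeeping, using a hypothetical SimpleFrame type rather than V8's FrameDescription:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical stand-in for FrameDescription: pointer-sized slots addressed
// by a byte offset, the way the SetFrameSlot calls in this hunk use them.
struct SimpleFrame {
  explicit SimpleFrame(size_t size_in_bytes)
      : slots(size_in_bytes / sizeof(intptr_t), 0) {}
  void SetFrameSlot(size_t byte_offset, intptr_t value) {
    slots[byte_offset / sizeof(intptr_t)] = value;
  }
  std::vector<intptr_t> slots;
};

int main() {
  const size_t kPointerSize = sizeof(intptr_t);
  // Five fixed slots, standing in for caller pc, caller fp, context,
  // frame-type marker and code object.
  const size_t frame_size = 5 * kPointerSize;
  SimpleFrame frame(frame_size);
  size_t output_offset = frame_size;
  for (intptr_t slot_value = 1; slot_value <= 5; ++slot_value) {
    output_offset -= kPointerSize;  // fill from the top of the frame down
    frame.SetFrameSlot(output_offset, slot_value);
  }
  assert(output_offset == 0);  // mirrors ASSERT(0 == output_offset) above
  return 0;
}
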
+ Code* trampoline = NULL; + int extra = descriptor->extra_expression_stack_count_; + StubFailureTrampolineStub(extra).FindCodeInCache(&trampoline, isolate_); + ASSERT(trampoline != NULL); + output_frame->SetPc(reinterpret_cast<intptr_t>( + trampoline->instruction_start())); + output_frame->SetState(Smi::FromInt(FullCodeGenerator::NO_REGISTERS)); + Code* notify_failure = + isolate_->builtins()->builtin(Builtins::kNotifyStubFailure); + output_frame->SetContinuation( + reinterpret_cast<intptr_t>(notify_failure->entry())); +} + + void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) { ASSERT_NE(DEBUGGER, bailout_type_); // Handlify all argument object values before triggering any allocation. List<Handle<Object> > values(deferred_arguments_objects_values_.length()); for (int i = 0; i < deferred_arguments_objects_values_.length(); ++i) { - values.Add(Handle<Object>(deferred_arguments_objects_values_[i])); + values.Add(Handle<Object>(deferred_arguments_objects_values_[i], + isolate_)); } // Play it safe and clear all unhandlified values before we continue. @@ -654,7 +1363,7 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) { for (int i = 0; i < deferred_heap_numbers_.length(); i++) { HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i]; Handle<Object> num = isolate_->factory()->NewNumber(d.value()); - if (FLAG_trace_deopt) { + if (trace_) { PrintF("Materializing a new heap number %p [%e] in slot %p\n", reinterpret_cast<void*>(*num), d.value(), @@ -699,9 +1408,10 @@ void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) { } frame->SetExpression(i, *arguments); ASSERT_EQ(Memory::Object_at(descriptor.slot_address()), *arguments); - if (FLAG_trace_deopt) { - PrintF("Materializing %sarguments object for %p: ", + if (trace_) { + PrintF("Materializing %sarguments object of length %d for %p: ", frame->has_adapted_arguments() ? 
"(adapted) " : "", + arguments->elements()->length(), reinterpret_cast<void*>(descriptor.slot_address())); arguments->ShortPrint(); PrintF("\n"); @@ -734,7 +1444,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( int index = (info->parameters_count() - 1) - static_cast<int>(slot - parameters_top) / kPointerSize; - if (FLAG_trace_deopt) { + if (trace_) { PrintF("Materializing a new heap number %p [%e] in slot %p" "for parameter slot #%d\n", reinterpret_cast<void*>(*num), @@ -750,7 +1460,7 @@ void Deoptimizer::MaterializeHeapNumbersForDebuggerInspectableFrame( int index = info->expression_count() - 1 - static_cast<int>(slot - expressions_top) / kPointerSize; - if (FLAG_trace_deopt) { + if (trace_) { PrintF("Materializing a new heap number %p [%e] in slot %p" "for expression slot #%d\n", reinterpret_cast<void*>(*num), @@ -789,6 +1499,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, case Translation::CONSTRUCT_STUB_FRAME: case Translation::GETTER_STUB_FRAME: case Translation::SETTER_STUB_FRAME: + case Translation::COMPILED_STUB_FRAME: case Translation::DUPLICATE: UNREACHABLE(); return; @@ -796,7 +1507,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, case Translation::REGISTER: { int input_reg = iterator->Next(); intptr_t input_value = input_->GetRegister(input_reg); - if (FLAG_trace_deopt) { + if (trace_) { PrintF( " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s ", output_[frame_index]->GetTop() + output_offset, @@ -814,7 +1525,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, int input_reg = iterator->Next(); intptr_t value = input_->GetRegister(input_reg); bool is_smi = Smi::IsValid(value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF( " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n", output_[frame_index]->GetTop() + output_offset, @@ -841,7 +1552,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, int input_reg = iterator->Next(); uintptr_t value = static_cast<uintptr_t>(input_->GetRegister(input_reg)); bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue)); - if (FLAG_trace_deopt) { + if (trace_) { PrintF( " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIuPTR " ; uint %s (%s)\n", @@ -868,7 +1579,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, case Translation::DOUBLE_REGISTER: { int input_reg = iterator->Next(); double value = input_->GetDoubleRegister(input_reg); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n", output_[frame_index]->GetTop() + output_offset, output_offset, @@ -887,7 +1598,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index); intptr_t input_value = input_->GetFrameSlot(input_offset); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": ", output_[frame_index]->GetTop() + output_offset); PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d] ", @@ -907,7 +1618,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, input_->GetOffsetFromSlotIndex(input_slot_index); intptr_t value = input_->GetFrameSlot(input_offset); bool is_smi = Smi::IsValid(value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": ", output_[frame_index]->GetTop() + output_offset); PrintF("[top + %d] <- %" V8PRIdPTR " ; [sp + %d] (%s)\n", @@ -937,7 +1648,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, uintptr_t value 
= static_cast<uintptr_t>(input_->GetFrameSlot(input_offset)); bool is_smi = (value <= static_cast<uintptr_t>(Smi::kMaxValue)); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": ", output_[frame_index]->GetTop() + output_offset); PrintF("[top + %d] <- %" V8PRIuPTR " ; [sp + %d] (uint32 %s)\n", @@ -965,7 +1676,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, unsigned input_offset = input_->GetOffsetFromSlotIndex(input_slot_index); double value = input_->GetDoubleFrameSlot(input_offset); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [sp + %d]\n", output_[frame_index]->GetTop() + output_offset, output_offset, @@ -981,7 +1692,7 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, case Translation::LITERAL: { Object* literal = ComputeLiteral(iterator->Next()); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ", output_[frame_index]->GetTop() + output_offset, output_offset); @@ -994,14 +1705,15 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, } case Translation::ARGUMENTS_OBJECT: { + bool args_known = iterator->Next(); int args_index = iterator->Next() + 1; // Skip receiver. int args_length = iterator->Next() - 1; // Skip receiver. - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ", output_[frame_index]->GetTop() + output_offset, output_offset); isolate_->heap()->arguments_marker()->ShortPrint(); - PrintF(" ; arguments object\n"); + PrintF(" ; %sarguments object\n", args_known ? "" : "dummy "); } // Use the arguments marker value as a sentinel and fill in the arguments // object after the deoptimized frame is built. @@ -1014,7 +1726,9 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator, // actual arguments object after the deoptimized frame is built. for (int i = 0; i < args_length; i++) { unsigned input_offset = input_->GetOffsetFromSlotIndex(args_index + i); - intptr_t input_value = input_->GetFrameSlot(input_offset); + intptr_t input_value = args_known + ? input_->GetFrameSlot(input_offset) + : reinterpret_cast<intptr_t>(isolate_->heap()->the_hole_value()); AddArgumentsObjectValue(input_value); } return; @@ -1097,6 +1811,7 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator, case Translation::CONSTRUCT_STUB_FRAME: case Translation::GETTER_STUB_FRAME: case Translation::SETTER_STUB_FRAME: + case Translation::COMPILED_STUB_FRAME: case Translation::DUPLICATE: UNREACHABLE(); // Malformed input. return false; @@ -1267,6 +1982,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code, // Iterate over the stack check table and patch every stack check // call to an unconditional call to the replacement code. ASSERT(unoptimized_code->kind() == Code::FUNCTION); + ASSERT(!unoptimized_code->stack_check_patched_for_osr()); Address stack_check_cursor = unoptimized_code->instruction_start() + unoptimized_code->stack_check_table_offset(); uint32_t table_length = Memory::uint32_at(stack_check_cursor); @@ -1280,6 +1996,7 @@ void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code, replacement_code); stack_check_cursor += 2 * kIntSize; } + unoptimized_code->set_stack_check_patched_for_osr(true); } @@ -1289,6 +2006,7 @@ void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code, // Iterate over the stack check table and revert the patched // stack check calls. 
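
An illustrative aside (not part of the patch): the new stack_check_patched_for_osr flag, together with the ASSERTs added to PatchStackCheckCode above and RevertStackCheckCode just below, encodes a simple invariant: patching and reverting the stack check table must strictly alternate. A tiny standalone illustration of that invariant, using a hypothetical FakeCode type:

#include <cassert>

// Hypothetical miniature of the invariant: a code object may only be patched
// when unpatched, and only reverted when patched.
struct FakeCode {
  bool patched_for_osr = false;
};

static void PatchStackChecks(FakeCode* code) {
  assert(!code->patched_for_osr);  // never patch twice in a row
  code->patched_for_osr = true;
}

static void RevertStackChecks(FakeCode* code) {
  assert(code->patched_for_osr);   // only revert previously patched code
  code->patched_for_osr = false;
}

int main() {
  FakeCode code;
  PatchStackChecks(&code);
  RevertStackChecks(&code);
  PatchStackChecks(&code);  // alternating patch/revert sequences are fine
  RevertStackChecks(&code);
  return 0;
}
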
ASSERT(unoptimized_code->kind() == Code::FUNCTION); + ASSERT(unoptimized_code->stack_check_patched_for_osr()); Address stack_check_cursor = unoptimized_code->instruction_start() + unoptimized_code->stack_check_table_offset(); uint32_t table_length = Memory::uint32_at(stack_check_cursor); @@ -1302,6 +2020,7 @@ void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code, replacement_code); stack_check_cursor += 2 * kIntSize; } + unoptimized_code->set_stack_check_patched_for_osr(false); } @@ -1316,8 +2035,8 @@ unsigned Deoptimizer::ComputeInputFrameSize() const { // size matches with the stack height we can compute based on the // environment at the OSR entry. The code for that his built into // the DoComputeOsrOutputFrame function for now. - } else { - unsigned stack_slots = optimized_code_->stack_slots(); + } else if (compiled_code_->kind() != Code::COMPILED_STUB) { + unsigned stack_slots = compiled_code_->stack_slots(); unsigned outgoing_size = ComputeOutgoingArgumentSize(); ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size); } @@ -1337,6 +2056,10 @@ unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const { unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const { // The incoming arguments is the values for formal parameters and // the receiver. Every slot contains a pointer. + if (function->IsSmi()) { + ASSERT(Smi::cast(function) == Smi::FromInt(StackFrame::STUB)); + return 0; + } unsigned arguments = function->shared()->formal_parameter_count() + 1; return arguments * kPointerSize; } @@ -1344,7 +2067,7 @@ unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const { unsigned Deoptimizer::ComputeOutgoingArgumentSize() const { DeoptimizationInputData* data = DeoptimizationInputData::cast( - optimized_code_->deoptimization_data()); + compiled_code_->deoptimization_data()); unsigned height = data->ArgumentsStackHeight(bailout_id_)->value(); return height * kPointerSize; } @@ -1352,7 +2075,7 @@ unsigned Deoptimizer::ComputeOutgoingArgumentSize() const { Object* Deoptimizer::ComputeLiteral(int index) const { DeoptimizationInputData* data = DeoptimizationInputData::cast( - optimized_code_->deoptimization_data()); + compiled_code_->deoptimization_data()); FixedArray* literals = data->LiteralArray(); return literals->get(index); } @@ -1377,110 +2100,52 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) { } -MemoryChunk* Deoptimizer::CreateCode(BailoutType type) { +void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, + BailoutType type, + int max_entry_id) { // We cannot run this if the serializer is enabled because this will // cause us to emit relocation information for the external // references. This is fine because the deoptimizer's code section // isn't meant to be serialized at all. - ASSERT(!Serializer::enabled()); - - MacroAssembler masm(Isolate::Current(), NULL, 16 * KB); + ASSERT(type == EAGER || type == LAZY); + DeoptimizerData* data = isolate->deoptimizer_data(); + int entry_count = (type == EAGER) + ? 
data->eager_deoptimization_entry_code_entries_ + : data->lazy_deoptimization_entry_code_entries_; + if (max_entry_id < entry_count) return; + entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); + while (max_entry_id >= entry_count) entry_count *= 2; + ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); + + MacroAssembler masm(isolate, NULL, 16 * KB); masm.set_emit_debug_code(false); - GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type); + GenerateDeoptimizationEntries(&masm, entry_count, type); CodeDesc desc; masm.GetCode(&desc); - ASSERT(desc.reloc_size == 0); - - MemoryChunk* chunk = - Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size, - EXECUTABLE, - NULL); - ASSERT(chunk->area_size() >= desc.instr_size); - if (chunk == NULL) { - V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table"); - } + ASSERT(!RelocInfo::RequiresRelocation(desc)); + + MemoryChunk* chunk = (type == EAGER) + ? data->eager_deoptimization_entry_code_ + : data->lazy_deoptimization_entry_code_; + ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= + desc.instr_size); + chunk->CommitArea(desc.instr_size); memcpy(chunk->area_start(), desc.buffer, desc.instr_size); CPU::FlushICache(chunk->area_start(), desc.instr_size); - return chunk; -} - -Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) { - DeoptimizingCodeListNode* node = - Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_; - while (node != NULL) { - if (node->code()->contains(addr)) return *node->code(); - node = node->next(); - } - return NULL; -} - - -void Deoptimizer::RemoveDeoptimizingCode(Code* code) { - DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); - ASSERT(data->deoptimizing_code_list_ != NULL); - // Run through the code objects to find this one and remove it. - DeoptimizingCodeListNode* prev = NULL; - DeoptimizingCodeListNode* current = data->deoptimizing_code_list_; - while (current != NULL) { - if (*current->code() == code) { - // Unlink from list. If prev is NULL we are looking at the first element. - if (prev == NULL) { - data->deoptimizing_code_list_ = current->next(); - } else { - prev->set_next(current->next()); - } - delete current; - return; - } - // Move to next in list. - prev = current; - current = current->next(); - } - // Deoptimizing code is removed through weak callback. Each object is expected - // to be removed once and only once. 
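
An illustrative aside (not part of the patch): EnsureCodeForDeoptimizationEntry above sizes the per-type deoptimization entry table geometrically, doubling from the current count (at least kMinNumberOfEntries) until max_entry_id fits. A self-contained sketch of just that sizing rule, with the 64/16384 bounds taken from the deoptimizer.h hunk later in this diff; the helper name and signature are hypothetical:

#include <cassert>

// Sizing rule only: grow the current per-type entry count geometrically
// until the requested entry id fits, never exceeding the maximum.
static int ComputeEntryCount(int current_entries, int max_entry_id) {
  const int kMinNumberOfEntries = 64;     // values from the deoptimizer.h hunk
  const int kMaxNumberOfEntries = 16384;
  if (max_entry_id < current_entries) return current_entries;  // already fits
  int entry_count = current_entries > kMinNumberOfEntries
                        ? current_entries
                        : kMinNumberOfEntries;
  while (max_entry_id >= entry_count) entry_count *= 2;
  assert(entry_count <= kMaxNumberOfEntries);
  return entry_count;
}

int main() {
  assert(ComputeEntryCount(0, 10) == 64);       // minimum table covers small ids
  assert(ComputeEntryCount(64, 63) == 64);      // id 63 already fits, no growth
  assert(ComputeEntryCount(64, 64) == 128);     // first id past the end doubles
  assert(ComputeEntryCount(128, 1000) == 1024); // keep doubling until it fits
  return 0;
}
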
- UNREACHABLE(); -} - - -static Object* CutOutRelatedFunctionsList(Context* context, - Code* code, - Object* undefined) { - Object* result_list_head = undefined; - Object* head; - Object* current; - current = head = context->get(Context::OPTIMIZED_FUNCTIONS_LIST); - JSFunction* prev = NULL; - while (current != undefined) { - JSFunction* func = JSFunction::cast(current); - current = func->next_function_link(); - if (func->code() == code) { - func->set_next_function_link(result_list_head); - result_list_head = func; - if (prev) { - prev->set_next_function_link(current); - } else { - head = current; - } - } else { - prev = func; - } - } - if (head != context->get(Context::OPTIMIZED_FUNCTIONS_LIST)) { - context->set(Context::OPTIMIZED_FUNCTIONS_LIST, head); + if (type == EAGER) { + data->eager_deoptimization_entry_code_entries_ = entry_count; + } else { + data->lazy_deoptimization_entry_code_entries_ = entry_count; } - return result_list_head; } void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code) { - Context* context = function->context()->native_context(); - SharedFunctionInfo* shared = function->shared(); - - Object* undefined = Isolate::Current()->heap()->undefined_value(); - Object* current = CutOutRelatedFunctionsList(context, code, undefined); + Object* undefined = function->GetHeap()->undefined_value(); + Object* current = function; while (current != undefined) { JSFunction* func = JSFunction::cast(current); @@ -1541,6 +2206,8 @@ int FrameDescription::ComputeParametersCount() { // Can't use GetExpression(0) because it would cause infinite recursion. return reinterpret_cast<Smi*>(*GetFrameSlotPointer(0))->value(); } + case StackFrame::STUB: + return -1; // Minus receiver. default: UNREACHABLE(); return 0; @@ -1603,10 +2270,9 @@ int32_t TranslationIterator::Next() { } -Handle<ByteArray> TranslationBuffer::CreateByteArray() { +Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) { int length = contents_.length(); - Handle<ByteArray> result = - Isolate::Current()->factory()->NewByteArray(length, TENURED); + Handle<ByteArray> result = factory->NewByteArray(length, TENURED); memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length); return result; } @@ -1648,6 +2314,11 @@ void Translation::BeginJSFrame(BailoutId node_id, } +void Translation::BeginCompiledStubFrame() { + buffer_->Add(COMPILED_STUB_FRAME, zone()); +} + + void Translation::StoreRegister(Register reg) { buffer_->Add(REGISTER, zone()); buffer_->Add(reg.code(), zone()); @@ -1702,8 +2373,11 @@ void Translation::StoreLiteral(int literal_id) { } -void Translation::StoreArgumentsObject(int args_index, int args_length) { +void Translation::StoreArgumentsObject(bool args_known, + int args_index, + int args_length) { buffer_->Add(ARGUMENTS_OBJECT, zone()); + buffer_->Add(args_known, zone()); buffer_->Add(args_index, zone()); buffer_->Add(args_length, zone()); } @@ -1729,13 +2403,14 @@ int Translation::NumberOfOperandsFor(Opcode opcode) { case UINT32_STACK_SLOT: case DOUBLE_STACK_SLOT: case LITERAL: + case COMPILED_STUB_FRAME: return 1; case BEGIN: case ARGUMENTS_ADAPTOR_FRAME: case CONSTRUCT_STUB_FRAME: - case ARGUMENTS_OBJECT: return 2; case JS_FRAME: + case ARGUMENTS_OBJECT: return 3; } UNREACHABLE(); @@ -1759,6 +2434,8 @@ const char* Translation::StringFor(Opcode opcode) { return "GETTER_STUB_FRAME"; case SETTER_STUB_FRAME: return "SETTER_STUB_FRAME"; + case COMPILED_STUB_FRAME: + return "COMPILED_STUB_FRAME"; case REGISTER: return "REGISTER"; case INT32_REGISTER: @@ 
-1790,17 +2467,18 @@ const char* Translation::StringFor(Opcode opcode) { DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) { - GlobalHandles* global_handles = Isolate::Current()->global_handles(); + GlobalHandles* global_handles = code->GetIsolate()->global_handles(); // Globalize the code object and make it weak. code_ = Handle<Code>::cast(global_handles->Create(code)); global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()), this, + NULL, Deoptimizer::HandleWeakDeoptimizedCode); } DeoptimizingCodeListNode::~DeoptimizingCodeListNode() { - GlobalHandles* global_handles = Isolate::Current()->global_handles(); + GlobalHandles* global_handles = code_->GetIsolate()->global_handles(); global_handles->Destroy(reinterpret_cast<Object**>(code_.location())); } @@ -1864,8 +2542,13 @@ SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator, case Translation::LITERAL: { int literal_index = iterator->Next(); - return SlotRef(data->LiteralArray()->get(literal_index)); + return SlotRef(data->GetIsolate(), + data->LiteralArray()->get(literal_index)); } + + case Translation::COMPILED_STUB_FRAME: + UNREACHABLE(); + break; } UNREACHABLE(); @@ -1961,7 +2644,7 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(Deoptimizer* deoptimizer, expression_stack_ = new Object*[expression_count_]; // Get the source position using the unoptimized code. Address pc = reinterpret_cast<Address>(output_frame->GetPc()); - Code* code = Code::cast(Isolate::Current()->heap()->FindCodeObject(pc)); + Code* code = Code::cast(deoptimizer->isolate()->heap()->FindCodeObject(pc)); source_position_ = code->SourcePosition(pc); for (int i = 0; i < expression_count_; i++) { diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h index f67f986ba1..db0cc0bdeb 100644 --- a/deps/v8/src/deoptimizer.h +++ b/deps/v8/src/deoptimizer.h @@ -87,19 +87,33 @@ class OptimizedFunctionVisitor BASE_EMBEDDED { }; +class OptimizedFunctionFilter BASE_EMBEDDED { + public: + virtual ~OptimizedFunctionFilter() {} + + virtual bool TakeFunction(JSFunction* function) = 0; +}; + + class Deoptimizer; class DeoptimizerData { public: - DeoptimizerData(); + explicit DeoptimizerData(MemoryAllocator* allocator); ~DeoptimizerData(); #ifdef ENABLE_DEBUGGER_SUPPORT void Iterate(ObjectVisitor* v); #endif + Code* FindDeoptimizingCode(Address addr); + void RemoveDeoptimizingCode(Code* code); + private: + MemoryAllocator* allocator_; + int eager_deoptimization_entry_code_entries_; + int lazy_deoptimization_entry_code_entries_; MemoryChunk* eager_deoptimization_entry_code_; MemoryChunk* lazy_deoptimization_entry_code_; Deoptimizer* current_; @@ -131,8 +145,14 @@ class Deoptimizer : public Malloced { DEBUGGER }; + static bool TraceEnabledFor(BailoutType deopt_type, + StackFrame::Type frame_type); + static const char* MessageFor(BailoutType type); + int output_count() const { return output_count_; } + Code::Kind compiled_code_kind() const { return compiled_code_->kind(); } + // Number of created JS frames. Not all created frames are necessarily JS. int jsframe_count() const { return jsframe_count_; } @@ -171,17 +191,21 @@ class Deoptimizer : public Malloced { static void ReplaceCodeForRelatedFunctions(JSFunction* function, Code* code); // Deoptimize all functions in the heap. 
- static void DeoptimizeAll(); + static void DeoptimizeAll(Isolate* isolate); static void DeoptimizeGlobalObject(JSObject* object); + static void DeoptimizeAllFunctionsWith(Isolate* isolate, + OptimizedFunctionFilter* filter); + + static void DeoptimizeAllFunctionsForContext( + Context* context, OptimizedFunctionFilter* filter); + static void VisitAllOptimizedFunctionsForContext( Context* context, OptimizedFunctionVisitor* visitor); - static void VisitAllOptimizedFunctionsForGlobalObject( - JSObject* object, OptimizedFunctionVisitor* visitor); - - static void VisitAllOptimizedFunctions(OptimizedFunctionVisitor* visitor); + static void VisitAllOptimizedFunctions(Isolate* isolate, + OptimizedFunctionVisitor* visitor); // The size in bytes of the code required at a lazy deopt patch site. static int patch_size(); @@ -226,8 +250,21 @@ class Deoptimizer : public Malloced { static void ComputeOutputFrames(Deoptimizer* deoptimizer); - static Address GetDeoptimizationEntry(int id, BailoutType type); - static int GetDeoptimizationId(Address addr, BailoutType type); + + enum GetEntryMode { + CALCULATE_ENTRY_ADDRESS, + ENSURE_ENTRY_CODE + }; + + + static Address GetDeoptimizationEntry( + Isolate* isolate, + int id, + BailoutType type, + GetEntryMode mode = ENSURE_ENTRY_CODE); + static int GetDeoptimizationId(Isolate* isolate, + Address addr, + BailoutType type); static int GetOutputInfo(DeoptimizationOutputData* data, BailoutId node_id, SharedFunctionInfo* shared); @@ -283,8 +320,17 @@ class Deoptimizer : public Malloced { int ConvertJSFrameIndexToFrameIndex(int jsframe_index); + static size_t GetMaxDeoptTableSize(); + + static void EnsureCodeForDeoptimizationEntry(Isolate* isolate, + BailoutType type, + int max_entry_id); + + Isolate* isolate() const { return isolate_; } + private: - static const int kNumberOfEntries = 16384; + static const int kMinNumberOfEntries = 64; + static const int kMaxNumberOfEntries = 16384; Deoptimizer(Isolate* isolate, JSFunction* function, @@ -293,6 +339,9 @@ class Deoptimizer : public Malloced { Address from, int fp_to_sp_delta, Code* optimized_code); + Code* FindOptimizedCode(JSFunction* function, Code* optimized_code); + void Trace(); + void PrintFunctionName(); void DeleteFrameDescriptions(); void DoComputeOutputFrames(); @@ -305,6 +354,8 @@ class Deoptimizer : public Malloced { void DoComputeAccessorStubFrame(TranslationIterator* iterator, int frame_index, bool is_setter_stub_frame); + void DoComputeCompiledStubFrame(TranslationIterator* iterator, + int frame_index); void DoTranslateCommand(TranslationIterator* iterator, int frame_index, unsigned output_offset); @@ -327,24 +378,35 @@ class Deoptimizer : public Malloced { void AddArgumentsObjectValue(intptr_t value); void AddDoubleValue(intptr_t slot_address, double value); - static MemoryChunk* CreateCode(BailoutType type); static void GenerateDeoptimizationEntries( MacroAssembler* masm, int count, BailoutType type); // Weak handle callback for deoptimizing code objects. - static void HandleWeakDeoptimizedCode( - v8::Persistent<v8::Value> obj, void* data); - static Code* FindDeoptimizingCodeFromAddress(Address addr); - static void RemoveDeoptimizingCode(Code* code); + static void HandleWeakDeoptimizedCode(v8::Isolate* isolate, + v8::Persistent<v8::Value> obj, + void* data); + + // Deoptimize function assuming that function->next_function_link() points + // to a list that contains all functions that share the same optimized code. 
+ static void DeoptimizeFunctionWithPreparedFunctionList(JSFunction* function); // Fill the input from from a JavaScript frame. This is used when // the debugger needs to inspect an optimized frame. For normal // deoptimizations the input frame is filled in generated code. void FillInputFrame(Address tos, JavaScriptFrame* frame); + // Fill the given output frame's registers to contain the failure handler + // address and the number of parameters for a stub failure trampoline. + void SetPlatformCompiledStubRegisters(FrameDescription* output_frame, + CodeStubInterfaceDescriptor* desc); + + // Fill the given output frame's double registers with the original values + // from the input frame's double registers. + void CopyDoubleRegisters(FrameDescription* output_frame); + Isolate* isolate_; JSFunction* function_; - Code* optimized_code_; + Code* compiled_code_; unsigned bailout_id_; BailoutType bailout_type_; Address from_; @@ -364,6 +426,8 @@ class Deoptimizer : public Malloced { List<ArgumentsObjectMaterializationDescriptor> deferred_arguments_objects_; List<HeapNumberMaterializationDescriptor> deferred_heap_numbers_; + bool trace_; + static const int table_entry_size_; friend class FrameDescription; @@ -514,16 +578,13 @@ class FrameDescription { uintptr_t frame_size_; // Number of bytes. JSFunction* function_; intptr_t registers_[Register::kNumRegisters]; - double double_registers_[DoubleRegister::kNumAllocatableRegisters]; + double double_registers_[DoubleRegister::kMaxNumRegisters]; intptr_t top_; intptr_t pc_; intptr_t fp_; intptr_t context_; StackFrame::Type type_; Smi* state_; -#ifdef DEBUG - Code::Kind kind_; -#endif // Continuation is the PC where the execution continues after // deoptimizing. @@ -550,7 +611,7 @@ class TranslationBuffer BASE_EMBEDDED { int CurrentIndex() const { return contents_.length(); } void Add(int32_t value, Zone* zone); - Handle<ByteArray> CreateByteArray(); + Handle<ByteArray> CreateByteArray(Factory* factory); private: ZoneList<uint8_t> contents_; @@ -587,6 +648,7 @@ class Translation BASE_EMBEDDED { GETTER_STUB_FRAME, SETTER_STUB_FRAME, ARGUMENTS_ADAPTOR_FRAME, + COMPILED_STUB_FRAME, REGISTER, INT32_REGISTER, UINT32_REGISTER, @@ -617,6 +679,7 @@ class Translation BASE_EMBEDDED { // Commands. 
void BeginJSFrame(BailoutId node_id, int literal_id, unsigned height); + void BeginCompiledStubFrame(); void BeginArgumentsAdaptorFrame(int literal_id, unsigned height); void BeginConstructStubFrame(int literal_id, unsigned height); void BeginGetterStubFrame(int literal_id); @@ -630,7 +693,7 @@ class Translation BASE_EMBEDDED { void StoreUint32StackSlot(int index); void StoreDoubleStackSlot(int index); void StoreLiteral(int literal_id); - void StoreArgumentsObject(int args_index, int args_length); + void StoreArgumentsObject(bool args_known, int args_index, int args_length); void MarkDuplicate(); Zone* zone() const { return zone_; } @@ -688,36 +751,35 @@ class SlotRef BASE_EMBEDDED { SlotRef(Address addr, SlotRepresentation representation) : addr_(addr), representation_(representation) { } - explicit SlotRef(Object* literal) - : literal_(literal), representation_(LITERAL) { } + SlotRef(Isolate* isolate, Object* literal) + : literal_(literal, isolate), representation_(LITERAL) { } - Handle<Object> GetValue() { + Handle<Object> GetValue(Isolate* isolate) { switch (representation_) { case TAGGED: - return Handle<Object>(Memory::Object_at(addr_)); + return Handle<Object>(Memory::Object_at(addr_), isolate); case INT32: { int value = Memory::int32_at(addr_); if (Smi::IsValid(value)) { - return Handle<Object>(Smi::FromInt(value)); + return Handle<Object>(Smi::FromInt(value), isolate); } else { - return Isolate::Current()->factory()->NewNumberFromInt(value); + return isolate->factory()->NewNumberFromInt(value); } } case UINT32: { uint32_t value = Memory::uint32_at(addr_); if (value <= static_cast<uint32_t>(Smi::kMaxValue)) { - return Handle<Object>(Smi::FromInt(static_cast<int>(value))); + return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate); } else { - return Isolate::Current()->factory()->NewNumber( - static_cast<double>(value)); + return isolate->factory()->NewNumber(static_cast<double>(value)); } } case DOUBLE: { double value = Memory::double_at(addr_); - return Isolate::Current()->factory()->NewNumber(value); + return isolate->factory()->NewNumber(value); } case LITERAL: diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc index 9f8b9a820b..f168f84ae7 100644 --- a/deps/v8/src/disassembler.cc +++ b/deps/v8/src/disassembler.cc @@ -111,11 +111,12 @@ static void DumpBuffer(FILE* f, StringBuilder* out) { static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength; static const int kRelocInfoPosition = 57; -static int DecodeIt(FILE* f, +static int DecodeIt(Isolate* isolate, + FILE* f, const V8NameConverter& converter, byte* begin, byte* end) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); AssertNoAllocation no_alloc; ExternalReferenceEncoder ref_encoder; Heap* heap = HEAP; @@ -281,13 +282,22 @@ static int DecodeIt(FILE* f, if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { out.AddFormatted(" (id = %d)", static_cast<int>(relocinfo.data())); } - } else if (rmode == RelocInfo::RUNTIME_ENTRY && - Isolate::Current()->deoptimizer_data() != NULL) { + } else if (RelocInfo::IsRuntimeEntry(rmode) && + isolate->deoptimizer_data() != NULL) { // A runtime entry reloinfo might be a deoptimization bailout. 
Address addr = relocinfo.target_address(); - int id = Deoptimizer::GetDeoptimizationId(addr, Deoptimizer::EAGER); + int id = Deoptimizer::GetDeoptimizationId(isolate, + addr, + Deoptimizer::EAGER); if (id == Deoptimizer::kNotDeoptimizationEntry) { - out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode)); + id = Deoptimizer::GetDeoptimizationId(isolate, + addr, + Deoptimizer::LAZY); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + out.AddFormatted(" ;; %s", RelocInfo::RelocModeName(rmode)); + } else { + out.AddFormatted(" ;; lazy deoptimization bailout %d", id); + } } else { out.AddFormatted(" ;; deoptimization bailout %d", id); } @@ -314,15 +324,17 @@ static int DecodeIt(FILE* f, } -int Disassembler::Decode(FILE* f, byte* begin, byte* end) { +int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) { V8NameConverter defaultConverter(NULL); - return DecodeIt(f, defaultConverter, begin, end); + return DecodeIt(isolate, f, defaultConverter, begin, end); } // Called by Code::CodePrint. void Disassembler::Decode(FILE* f, Code* code) { - int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION) + Isolate* isolate = code->GetIsolate(); + int decode_size = (code->kind() == Code::OPTIMIZED_FUNCTION || + code->kind() == Code::COMPILED_STUB) ? static_cast<int>(code->safepoint_table_offset()) : code->instruction_size(); // If there might be a stack check table, stop before reaching it. @@ -334,13 +346,15 @@ void Disassembler::Decode(FILE* f, Code* code) { byte* begin = code->instruction_start(); byte* end = begin + decode_size; V8NameConverter v8NameConverter(code); - DecodeIt(f, v8NameConverter, begin, end); + DecodeIt(isolate, f, v8NameConverter, begin, end); } #else // ENABLE_DISASSEMBLER void Disassembler::Dump(FILE* f, byte* begin, byte* end) {} -int Disassembler::Decode(FILE* f, byte* begin, byte* end) { return 0; } +int Disassembler::Decode(Isolate* isolate, FILE* f, byte* begin, byte* end) { + return 0; +} void Disassembler::Decode(FILE* f, Code* code) {} #endif // ENABLE_DISASSEMBLER diff --git a/deps/v8/src/disassembler.h b/deps/v8/src/disassembler.h index 4a87dca678..8789150036 100644 --- a/deps/v8/src/disassembler.h +++ b/deps/v8/src/disassembler.h @@ -41,7 +41,7 @@ class Disassembler : public AllStatic { // Decode instructions in the the interval [begin, end) and print the // code into f. Returns the number of bytes disassembled or 1 if no // instruction could be decoded. - static int Decode(FILE* f, byte* begin, byte* end); + static int Decode(Isolate* isolate, FILE* f, byte* begin, byte* end); // Decode instructions in code. 
static void Decode(FILE* f, Code* code); diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc index 655a23bf1e..7b1651a953 100644 --- a/deps/v8/src/elements-kind.cc +++ b/deps/v8/src/elements-kind.cc @@ -35,9 +35,14 @@ namespace v8 { namespace internal { -void PrintElementsKind(FILE* out, ElementsKind kind) { +const char* ElementsKindToString(ElementsKind kind) { ElementsAccessor* accessor = ElementsAccessor::ForKind(kind); - PrintF(out, "%s", accessor->name()); + return accessor->name(); +} + + +void PrintElementsKind(FILE* out, ElementsKind kind) { + PrintF(out, "%s", ElementsKindToString(kind)); } diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h index 3be7711a35..cb3bb9c9e9 100644 --- a/deps/v8/src/elements-kind.h +++ b/deps/v8/src/elements-kind.h @@ -77,6 +77,7 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1; const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND - FIRST_FAST_ELEMENTS_KIND + 1; +const char* ElementsKindToString(ElementsKind kind); void PrintElementsKind(FILE* out, ElementsKind kind); ElementsKind GetInitialFastElementsKind(); @@ -109,6 +110,13 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) { } +inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) { + return IsFastDoubleElementsKind(kind) || + kind == EXTERNAL_DOUBLE_ELEMENTS || + kind == EXTERNAL_FLOAT_ELEMENTS; +} + + inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) { return kind == FAST_SMI_ELEMENTS || kind == FAST_HOLEY_SMI_ELEMENTS || diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc index 6afbcc0ee3..9deef60619 100644 --- a/deps/v8/src/elements.cc +++ b/deps/v8/src/elements.cc @@ -27,10 +27,11 @@ #include "v8.h" +#include "arguments.h" #include "objects.h" #include "elements.h" #include "utils.h" - +#include "v8conversions.h" // Each concrete ElementsAccessor can handle exactly one ElementsKind, // several abstract ElementsAccessor classes are used to allow sharing @@ -146,33 +147,36 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) { } -void CopyObjectToObjectElements(FixedArray* from, - ElementsKind from_kind, - uint32_t from_start, - FixedArray* to, - ElementsKind to_kind, - uint32_t to_start, - int raw_copy_size) { - ASSERT(to->map() != HEAP->fixed_cow_array_map()); +static void CopyObjectToObjectElements(FixedArrayBase* from_base, + ElementsKind from_kind, + uint32_t from_start, + FixedArrayBase* to_base, + ElementsKind to_kind, + uint32_t to_start, + int raw_copy_size) { + ASSERT(to_base->map() != HEAP->fixed_cow_array_map()); + AssertNoAllocation no_allocation; int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); - copy_size = Min(from->length() - from_start, - to->length() - to_start); -#ifdef DEBUG - // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already - // marked with the hole. 
+ copy_size = Min(from_base->length() - from_start, + to_base->length() - to_start); if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - for (int i = to_start + copy_size; i < to->length(); ++i) { - ASSERT(to->get(i)->IsTheHole()); + int start = to_start + copy_size; + int length = to_base->length() - start; + if (length > 0) { + Heap* heap = from_base->GetHeap(); + MemsetPointer(FixedArray::cast(to_base)->data_start() + start, + heap->the_hole_value(), length); } } -#endif } - ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() && - (copy_size + static_cast<int>(from_start)) <= from->length()); + ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; + FixedArray* from = FixedArray::cast(from_base); + FixedArray* to = FixedArray::cast(to_base); ASSERT(IsFastSmiOrObjectElementsKind(from_kind)); ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); Address to_address = to->address() + FixedArray::kHeaderSize; @@ -193,31 +197,34 @@ void CopyObjectToObjectElements(FixedArray* from, } -static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, +static void CopyDictionaryToObjectElements(FixedArrayBase* from_base, uint32_t from_start, - FixedArray* to, + FixedArrayBase* to_base, ElementsKind to_kind, uint32_t to_start, int raw_copy_size) { + SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base); + AssertNoAllocation no_allocation; int copy_size = raw_copy_size; Heap* heap = from->GetHeap(); if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from->max_number_key() + 1 - from_start; -#ifdef DEBUG - // Fast object arrays cannot be uninitialized. Ensure they are already - // marked with the hole. if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - for (int i = to_start + copy_size; i < to->length(); ++i) { - ASSERT(to->get(i)->IsTheHole()); + int start = to_start + copy_size; + int length = to_base->length() - start; + if (length > 0) { + Heap* heap = from->GetHeap(); + MemsetPointer(FixedArray::cast(to_base)->data_start() + start, + heap->the_hole_value(), length); } } -#endif } - ASSERT(to != from); + ASSERT(to_base != from_base); ASSERT(IsFastSmiOrObjectElementsKind(to_kind)); if (copy_size == 0) return; + FixedArray* to = FixedArray::cast(to_base); uint32_t to_length = to->length(); if (to_start + copy_size > to_length) { copy_size = to_length - to_start; @@ -244,9 +251,9 @@ static void CopyDictionaryToObjectElements(SeededNumberDictionary* from, MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( - FixedDoubleArray* from, + FixedArrayBase* from_base, uint32_t from_start, - FixedArray* to, + FixedArrayBase* to_base, ElementsKind to_kind, uint32_t to_start, int raw_copy_size) { @@ -255,21 +262,26 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); - copy_size = Min(from->length() - from_start, - to->length() - to_start); -#ifdef DEBUG - // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already - // marked with the hole. 
+ copy_size = Min(from_base->length() - from_start, + to_base->length() - to_start); if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - for (int i = to_start + copy_size; i < to->length(); ++i) { - ASSERT(to->get(i)->IsTheHole()); + // Also initialize the area that will be copied over since HeapNumber + // allocation below can cause an incremental marking step, requiring all + // existing heap objects to be propertly initialized. + int start = to_start; + int length = to_base->length() - start; + if (length > 0) { + Heap* heap = from_base->GetHeap(); + MemsetPointer(FixedArray::cast(to_base)->data_start() + start, + heap->the_hole_value(), length); } } -#endif } - ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() && - (copy_size + static_cast<int>(from_start)) <= from->length()); - if (copy_size == 0) return from; + ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + (copy_size + static_cast<int>(from_start)) <= from_base->length()); + if (copy_size == 0) return from_base; + FixedDoubleArray* from = FixedDoubleArray::cast(from_base); + FixedArray* to = FixedArray::cast(to_base); for (int i = 0; i < copy_size; ++i) { if (IsFastSmiElementsKind(to_kind)) { UNIMPLEMENTED(); @@ -298,26 +310,28 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements( } -static void CopyDoubleToDoubleElements(FixedDoubleArray* from, +static void CopyDoubleToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - FixedDoubleArray* to, + FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) { int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); - copy_size = Min(from->length() - from_start, - to->length() - to_start); + copy_size = Min(from_base->length() - from_start, + to_base->length() - to_start); if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - for (int i = to_start + copy_size; i < to->length(); ++i) { - to->set_the_hole(i); + for (int i = to_start + copy_size; i < to_base->length(); ++i) { + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() && - (copy_size + static_cast<int>(from_start)) <= from->length()); + ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; + FixedDoubleArray* from = FixedDoubleArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); Address to_address = to->address() + FixedDoubleArray::kHeaderSize; Address from_address = from->address() + FixedDoubleArray::kHeaderSize; to_address += kDoubleSize * to_start; @@ -329,25 +343,27 @@ static void CopyDoubleToDoubleElements(FixedDoubleArray* from, } -static void CopySmiToDoubleElements(FixedArray* from, +static void CopySmiToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - FixedDoubleArray* to, + FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) { int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); - copy_size = from->length() - from_start; + copy_size = from_base->length() - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - for (int i = to_start + copy_size; i < to->length(); ++i) { - to->set_the_hole(i); + for 
(int i = to_start + copy_size; i < to_base->length(); ++i) { + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() && - (copy_size + static_cast<int>(from_start)) <= from->length()); + ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; + FixedArray* from = FixedArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); Object* the_hole = from->GetHeap()->the_hole_value(); for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size); from_start < from_end; from_start++, to_start++) { @@ -361,9 +377,9 @@ static void CopySmiToDoubleElements(FixedArray* from, } -static void CopyPackedSmiToDoubleElements(FixedArray* from, +static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - FixedDoubleArray* to, + FixedArrayBase* to_base, uint32_t to_start, int packed_size, int raw_copy_size) { @@ -372,52 +388,55 @@ static void CopyPackedSmiToDoubleElements(FixedArray* from, if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); - copy_size = from->length() - from_start; + copy_size = packed_size - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - to_end = to->length(); + to_end = to_base->length(); + for (uint32_t i = to_start + copy_size; i < to_end; ++i) { + FixedDoubleArray::cast(to_base)->set_the_hole(i); + } } else { to_end = to_start + static_cast<uint32_t>(copy_size); } } else { to_end = to_start + static_cast<uint32_t>(copy_size); } - ASSERT(static_cast<int>(to_end) <= to->length()); + ASSERT(static_cast<int>(to_end) <= to_base->length()); ASSERT(packed_size >= 0 && packed_size <= copy_size); - ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() && - (copy_size + static_cast<int>(from_start)) <= from->length()); + ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; + FixedArray* from = FixedArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size); from_start < from_end; from_start++, to_start++) { Object* smi = from->get(from_start); ASSERT(!smi->IsTheHole()); to->set(to_start, Smi::cast(smi)->value()); } - - while (to_start < to_end) { - to->set_the_hole(to_start++); - } } -static void CopyObjectToDoubleElements(FixedArray* from, +static void CopyObjectToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - FixedDoubleArray* to, + FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) { int copy_size = raw_copy_size; if (raw_copy_size < 0) { ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd || raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); - copy_size = from->length() - from_start; + copy_size = from_base->length() - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - for (int i = to_start + copy_size; i < to->length(); ++i) { - to->set_the_hole(i); + for (int i = to_start + copy_size; i < to_base->length(); ++i) { + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } - ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() && - (copy_size + static_cast<int>(from_start)) <= from->length()); + 
ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() && + (copy_size + static_cast<int>(from_start)) <= from_base->length()); if (copy_size == 0) return; + FixedArray* from = FixedArray::cast(from_base); + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); Object* the_hole = from->GetHeap()->the_hole_value(); for (uint32_t from_end = from_start + copy_size; from_start < from_end; from_start++, to_start++) { @@ -431,23 +450,25 @@ static void CopyObjectToDoubleElements(FixedArray* from, } -static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from, +static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base, uint32_t from_start, - FixedDoubleArray* to, + FixedArrayBase* to_base, uint32_t to_start, int raw_copy_size) { + SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base); int copy_size = raw_copy_size; if (copy_size < 0) { ASSERT(copy_size == ElementsAccessor::kCopyToEnd || copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole); copy_size = from->max_number_key() + 1 - from_start; if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) { - for (int i = to_start + copy_size; i < to->length(); ++i) { - to->set_the_hole(i); + for (int i = to_start + copy_size; i < to_base->length(); ++i) { + FixedDoubleArray::cast(to_base)->set_the_hole(i); } } } if (copy_size == 0) return; + FixedDoubleArray* to = FixedDoubleArray::cast(to_base); uint32_t to_length = to->length(); if (to_start + copy_size > to_length) { copy_size = to_length - to_start; @@ -463,6 +484,66 @@ static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from, } +static void TraceTopFrame(Isolate* isolate) { + StackFrameIterator it(isolate); + if (it.done()) { + PrintF("unknown location (no JavaScript frames present)"); + return; + } + StackFrame* raw_frame = it.frame(); + if (raw_frame->is_internal()) { + Isolate* isolate = Isolate::Current(); + Code* apply_builtin = isolate->builtins()->builtin( + Builtins::kFunctionApply); + if (raw_frame->unchecked_code() == apply_builtin) { + PrintF("apply from "); + it.Advance(); + raw_frame = it.frame(); + } + } + JavaScriptFrame::PrintTop(isolate, stdout, false, true); +} + + +void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key, + bool allow_appending) { + Object* raw_length = NULL; + const char* elements_type = "array"; + if (obj->IsJSArray()) { + JSArray* array = JSArray::cast(obj); + raw_length = array->length(); + } else { + raw_length = Smi::FromInt(obj->elements()->length()); + elements_type = "object"; + } + + if (raw_length->IsNumber()) { + double n = raw_length->Number(); + if (FastI2D(FastD2UI(n)) == n) { + int32_t int32_length = DoubleToInt32(n); + uint32_t compare_length = static_cast<uint32_t>(int32_length); + if (allow_appending) compare_length++; + if (key >= compare_length) { + PrintF("[OOB %s %s (%s length = %d, element accessed = %d) in ", + elements_type, op, elements_type, + static_cast<int>(int32_length), + static_cast<int>(key)); + TraceTopFrame(obj->GetIsolate()); + PrintF("]\n"); + } + } else { + PrintF("[%s elements length not integer value in ", elements_type); + TraceTopFrame(obj->GetIsolate()); + PrintF("]\n"); + } + } else { + PrintF("[%s elements length not a number in ", elements_type); + TraceTopFrame(obj->GetIsolate()); + PrintF("]\n"); + } +} + + // Base class for element handler implementations. Contains the // the common logic for objects with different ElementsKinds. 
// Subclasses must specialize method for which the element @@ -503,8 +584,8 @@ class ElementsAccessorBase : public ElementsAccessor { Map* map = fixed_array_base->map(); // Arrays that have been shifted in place can't be verified. Heap* heap = holder->GetHeap(); - if (map == heap->raw_unchecked_one_pointer_filler_map() || - map == heap->raw_unchecked_two_pointer_filler_map() || + if (map == heap->one_pointer_filler_map() || + map == heap->two_pointer_filler_map() || map == heap->free_space_map()) { return; } @@ -527,10 +608,9 @@ class ElementsAccessorBase : public ElementsAccessor { static bool HasElementImpl(Object* receiver, JSObject* holder, uint32_t key, - BackingStore* backing_store) { - MaybeObject* element = - ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store); - return !element->IsTheHole(); + FixedArrayBase* backing_store) { + return ElementsAccessorSubclass::GetAttributesImpl( + receiver, holder, key, backing_store) != ABSENT; } virtual bool HasElement(Object* receiver, @@ -541,7 +621,7 @@ class ElementsAccessorBase : public ElementsAccessor { backing_store = holder->elements(); } return ElementsAccessorSubclass::HasElementImpl( - receiver, holder, key, BackingStore::cast(backing_store)); + receiver, holder, key, backing_store); } MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver, @@ -551,29 +631,107 @@ class ElementsAccessorBase : public ElementsAccessor { if (backing_store == NULL) { backing_store = holder->elements(); } + + if (!IsExternalArrayElementsKind(ElementsTraits::Kind) && + FLAG_trace_js_array_abuse) { + CheckArrayAbuse(holder, "elements read", key); + } + + if (IsExternalArrayElementsKind(ElementsTraits::Kind) && + FLAG_trace_external_array_abuse) { + CheckArrayAbuse(holder, "external elements read", key); + } + return ElementsAccessorSubclass::GetImpl( - receiver, holder, key, BackingStore::cast(backing_store)); + receiver, holder, key, backing_store); } MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, JSObject* obj, uint32_t key, - BackingStore* backing_store) { + FixedArrayBase* backing_store) { return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store)) - ? backing_store->get(key) + ? BackingStore::cast(backing_store)->get(key) : backing_store->GetHeap()->the_hole_value(); } + MUST_USE_RESULT virtual PropertyAttributes GetAttributes( + Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store) { + if (backing_store == NULL) { + backing_store = holder->elements(); + } + return ElementsAccessorSubclass::GetAttributesImpl( + receiver, holder, key, backing_store); + } + + MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* backing_store) { + if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) { + return ABSENT; + } + return BackingStore::cast(backing_store)->is_the_hole(key) ? 
ABSENT : NONE; + } + + MUST_USE_RESULT virtual PropertyType GetType( + Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store) { + if (backing_store == NULL) { + backing_store = holder->elements(); + } + return ElementsAccessorSubclass::GetTypeImpl( + receiver, holder, key, backing_store); + } + + MUST_USE_RESULT static PropertyType GetTypeImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* backing_store) { + if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) { + return NONEXISTENT; + } + return BackingStore::cast(backing_store)->is_the_hole(key) + ? NONEXISTENT : FIELD; + } + + MUST_USE_RESULT virtual AccessorPair* GetAccessorPair( + Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store) { + if (backing_store == NULL) { + backing_store = holder->elements(); + } + return ElementsAccessorSubclass::GetAccessorPairImpl( + receiver, holder, key, backing_store); + } + + MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* backing_store) { + return NULL; + } + MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array, Object* length) { return ElementsAccessorSubclass::SetLengthImpl( - array, length, BackingStore::cast(array->elements())); + array, length, array->elements()); } MUST_USE_RESULT static MaybeObject* SetLengthImpl( JSObject* obj, Object* length, - BackingStore* backing_store); + FixedArrayBase* backing_store); MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength( JSArray* array, @@ -600,7 +758,7 @@ class ElementsAccessorBase : public ElementsAccessor { MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, - ElementsKind to_kind, + ElementsKind from_kind, uint32_t to_start, int packed_size, int copy_size) { @@ -610,8 +768,8 @@ class ElementsAccessorBase : public ElementsAccessor { MUST_USE_RESULT virtual MaybeObject* CopyElements(JSObject* from_holder, uint32_t from_start, + ElementsKind from_kind, FixedArrayBase* to, - ElementsKind to_kind, uint32_t to_start, int copy_size, FixedArrayBase* from) { @@ -621,8 +779,7 @@ class ElementsAccessorBase : public ElementsAccessor { } if (from_holder) { - ElementsKind elements_kind = from_holder->GetElementsKind(); - bool is_packed = IsFastPackedElementsKind(elements_kind) && + bool is_packed = IsFastPackedElementsKind(from_kind) && from_holder->IsJSArray(); if (is_packed) { packed_size = Smi::cast(JSArray::cast(from_holder)->length())->value(); @@ -631,11 +788,8 @@ class ElementsAccessorBase : public ElementsAccessor { } } } - if (from->length() == 0) { - return from; - } return ElementsAccessorSubclass::CopyElementsImpl( - from, from_start, to, to_kind, to_start, packed_size, copy_size); + from, from_start, to, from_kind, to_start, packed_size, copy_size); } MUST_USE_RESULT virtual MaybeObject* AddElementsToFixedArray( @@ -654,25 +808,22 @@ class ElementsAccessorBase : public ElementsAccessor { if (from == NULL) { from = holder->elements(); } - BackingStore* backing_store = BackingStore::cast(from); - uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store); // Optimize if 'other' is empty. // We cannot optimize if 'this' is empty, as other may have holes. + uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from); if (len1 == 0) return to; // Compute how many elements are not in other. 
uint32_t extra = 0; for (uint32_t y = 0; y < len1; y++) { - uint32_t key = - ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y); + uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y); if (ElementsAccessorSubclass::HasElementImpl( - receiver, holder, key, backing_store)) { + receiver, holder, key, from)) { MaybeObject* maybe_value = - ElementsAccessorSubclass::GetImpl(receiver, holder, - key, backing_store); + ElementsAccessorSubclass::GetImpl(receiver, holder, key, from); Object* value; - if (!maybe_value->ToObject(&value)) return maybe_value; + if (!maybe_value->To(&value)) return maybe_value; ASSERT(!value->IsTheHole()); if (!HasKey(to, value)) { extra++; @@ -684,9 +835,8 @@ class ElementsAccessorBase : public ElementsAccessor { // Allocate the result FixedArray* result; - MaybeObject* maybe_obj = - backing_store->GetHeap()->AllocateFixedArray(len0 + extra); - if (!maybe_obj->To<FixedArray>(&result)) return maybe_obj; + MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra); + if (!maybe_obj->To(&result)) return maybe_obj; // Fill in the content { @@ -702,14 +852,13 @@ class ElementsAccessorBase : public ElementsAccessor { uint32_t index = 0; for (uint32_t y = 0; y < len1; y++) { uint32_t key = - ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y); + ElementsAccessorSubclass::GetKeyForIndexImpl(from, y); if (ElementsAccessorSubclass::HasElementImpl( - receiver, holder, key, backing_store)) { + receiver, holder, key, from)) { MaybeObject* maybe_value = - ElementsAccessorSubclass::GetImpl(receiver, holder, - key, backing_store); + ElementsAccessorSubclass::GetImpl(receiver, holder, key, from); Object* value; - if (!maybe_value->ToObject(&value)) return maybe_value; + if (!maybe_value->To(&value)) return maybe_value; if (!value->IsTheHole() && !HasKey(to, value)) { result->set(len0 + index, value); index++; @@ -721,24 +870,22 @@ class ElementsAccessorBase : public ElementsAccessor { } protected: - static uint32_t GetCapacityImpl(BackingStore* backing_store) { + static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) { return backing_store->length(); } virtual uint32_t GetCapacity(FixedArrayBase* backing_store) { - return ElementsAccessorSubclass::GetCapacityImpl( - BackingStore::cast(backing_store)); + return ElementsAccessorSubclass::GetCapacityImpl(backing_store); } - static uint32_t GetKeyForIndexImpl(BackingStore* backing_store, + static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store, uint32_t index) { return index; } virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store, uint32_t index) { - return ElementsAccessorSubclass::GetKeyForIndexImpl( - BackingStore::cast(backing_store), index); + return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index); } private: @@ -764,17 +911,17 @@ class FastElementsAccessor // Adjusts the length of the fast backing store or returns the new length or // undefined in case conversion to a slow backing store should be performed. 
- static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store, + static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store, JSArray* array, Object* length_object, uint32_t length) { uint32_t old_capacity = backing_store->length(); Object* old_length = array->length(); - bool same_size = old_length->IsSmi() && - static_cast<uint32_t>(Smi::cast(old_length)->value()) == length; + bool same_or_smaller_size = old_length->IsSmi() && + static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length; ElementsKind kind = array->GetElementsKind(); - if (!same_size && IsFastElementsKind(kind) && + if (!same_or_smaller_size && IsFastElementsKind(kind) && !IsFastHoleyElementsKind(kind)) { kind = GetHoleyElementsKind(kind); MaybeObject* maybe_obj = array->TransitionElementsKind(kind); @@ -802,7 +949,7 @@ class FastElementsAccessor // Otherwise, fill the unused tail with holes. int old_length = FastD2IChecked(array->length()->Number()); for (int i = length; i < old_length; i++) { - backing_store->set_the_hole(i); + BackingStore::cast(backing_store)->set_the_hole(i); } } return length_object; @@ -829,32 +976,38 @@ class FastElementsAccessor ASSERT(obj->HasFastSmiOrObjectElements() || obj->HasFastDoubleElements() || obj->HasFastArgumentsElements()); - typename KindTraits::BackingStore* backing_store = - KindTraits::BackingStore::cast(obj->elements()); Heap* heap = obj->GetHeap(); - if (backing_store->map() == heap->non_strict_arguments_elements_map()) { - backing_store = - KindTraits::BackingStore::cast( - FixedArray::cast(backing_store)->get(1)); - } else { - ElementsKind kind = KindTraits::Kind; - if (IsFastPackedElementsKind(kind)) { - MaybeObject* transitioned = - obj->TransitionElementsKind(GetHoleyElementsKind(kind)); - if (transitioned->IsFailure()) return transitioned; - } - if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) { - Object* writable; - MaybeObject* maybe = obj->EnsureWritableFastElements(); - if (!maybe->ToObject(&writable)) return maybe; - backing_store = KindTraits::BackingStore::cast(writable); - } + Object* elements = obj->elements(); + if (elements == heap->empty_fixed_array()) { + return heap->true_value(); + } + typename KindTraits::BackingStore* backing_store = + KindTraits::BackingStore::cast(elements); + bool is_non_strict_arguments_elements_map = + backing_store->map() == heap->non_strict_arguments_elements_map(); + if (is_non_strict_arguments_elements_map) { + backing_store = KindTraits::BackingStore::cast( + FixedArray::cast(backing_store)->get(1)); } uint32_t length = static_cast<uint32_t>( obj->IsJSArray() ? Smi::cast(JSArray::cast(obj)->length())->value() : backing_store->length()); if (key < length) { + if (!is_non_strict_arguments_elements_map) { + ElementsKind kind = KindTraits::Kind; + if (IsFastPackedElementsKind(kind)) { + MaybeObject* transitioned = + obj->TransitionElementsKind(GetHoleyElementsKind(kind)); + if (transitioned->IsFailure()) return transitioned; + } + if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) { + Object* writable; + MaybeObject* maybe = obj->EnsureWritableFastElements(); + if (!maybe->ToObject(&writable)) return maybe; + backing_store = KindTraits::BackingStore::cast(writable); + } + } backing_store->set_the_hole(key); // If an old space backing store is larger than a certain size and // has too few used values, normalize it. 
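The deletion path in the hunk above transitions a packed elements kind to its holey counterpart before writing the hole, and now does so only when the deleted key actually falls inside the backing store. A minimal, self-contained sketch of that idea, using hypothetical stand-in types rather than the real V8 classes:

#include <cstddef>
#include <vector>

// Stand-ins for the real ElementsKind values and the hole sentinel.
enum class Kind { kPackedSmi, kHoleySmi };
constexpr int kTheHole = -1;

struct FakeBackingStore {
  Kind kind = Kind::kPackedSmi;
  std::vector<int> slots;

  // Mirrors the shape of the deletion hunk: only an in-range key forces a
  // packed -> holey transition before the hole is written.
  void DeleteElement(std::size_t key) {
    if (key >= slots.size()) return;            // out of range: nothing to delete
    if (kind == Kind::kPackedSmi) kind = Kind::kHoleySmi;
    slots[key] = kTheHole;
  }
};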
@@ -890,11 +1043,11 @@ class FastElementsAccessor Object* receiver, JSObject* holder, uint32_t key, - typename KindTraits::BackingStore* backing_store) { + FixedArrayBase* backing_store) { if (key >= static_cast<uint32_t>(backing_store->length())) { return false; } - return !backing_store->is_the_hole(key); + return !BackingStore::cast(backing_store)->is_the_hole(key); } static void ValidateContents(JSObject* holder, int length) { @@ -921,6 +1074,41 @@ class FastElementsAccessor }; +static inline ElementsKind ElementsKindForArray(FixedArrayBase* array) { + switch (array->map()->instance_type()) { + case FIXED_ARRAY_TYPE: + if (array->IsDictionary()) { + return DICTIONARY_ELEMENTS; + } else { + return FAST_HOLEY_ELEMENTS; + } + case FIXED_DOUBLE_ARRAY_TYPE: + return FAST_HOLEY_DOUBLE_ELEMENTS; + case EXTERNAL_BYTE_ARRAY_TYPE: + return EXTERNAL_BYTE_ELEMENTS; + case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE: + return EXTERNAL_UNSIGNED_BYTE_ELEMENTS; + case EXTERNAL_SHORT_ARRAY_TYPE: + return EXTERNAL_SHORT_ELEMENTS; + case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE: + return EXTERNAL_UNSIGNED_SHORT_ELEMENTS; + case EXTERNAL_INT_ARRAY_TYPE: + return EXTERNAL_INT_ELEMENTS; + case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE: + return EXTERNAL_UNSIGNED_INT_ELEMENTS; + case EXTERNAL_FLOAT_ARRAY_TYPE: + return EXTERNAL_FLOAT_ELEMENTS; + case EXTERNAL_DOUBLE_ARRAY_TYPE: + return EXTERNAL_DOUBLE_ELEMENTS; + case EXTERNAL_PIXEL_ARRAY_TYPE: + return EXTERNAL_PIXEL_ELEMENTS; + default: + UNREACHABLE(); + } + return FAST_HOLEY_ELEMENTS; +} + + template<typename FastElementsAccessorSubclass, typename KindTraits> class FastSmiOrObjectElementsAccessor @@ -936,36 +1124,49 @@ class FastSmiOrObjectElementsAccessor static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, - ElementsKind to_kind, + ElementsKind from_kind, uint32_t to_start, int packed_size, int copy_size) { - if (IsFastSmiOrObjectElementsKind(to_kind)) { - CopyObjectToObjectElements( - FixedArray::cast(from), KindTraits::Kind, from_start, - FixedArray::cast(to), to_kind, to_start, copy_size); - } else if (IsFastDoubleElementsKind(to_kind)) { - if (IsFastSmiElementsKind(KindTraits::Kind)) { - if (IsFastPackedElementsKind(KindTraits::Kind) && - packed_size != kPackedSizeNotKnown) { - CopyPackedSmiToDoubleElements( - FixedArray::cast(from), from_start, - FixedDoubleArray::cast(to), to_start, - packed_size, copy_size); - } else { - CopySmiToDoubleElements( - FixedArray::cast(from), from_start, - FixedDoubleArray::cast(to), to_start, copy_size); - } - } else { - CopyObjectToDoubleElements( - FixedArray::cast(from), from_start, - FixedDoubleArray::cast(to), to_start, copy_size); + ElementsKind to_kind = KindTraits::Kind; + switch (from_kind) { + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + CopyObjectToObjectElements( + from, from_kind, from_start, to, to_kind, to_start, copy_size); + return to->GetHeap()->undefined_value(); + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + return CopyDoubleToObjectElements( + from, from_start, to, to_kind, to_start, copy_size); + case DICTIONARY_ELEMENTS: + CopyDictionaryToObjectElements( + from, from_start, to, to_kind, to_start, copy_size); + return to->GetHeap()->undefined_value(); + case NON_STRICT_ARGUMENTS_ELEMENTS: { + // TODO(verwaest): This is a temporary hack to support extending + // NON_STRICT_ARGUMENTS_ELEMENTS in SetFastElementsCapacityAndLength. + // This case should be UNREACHABLE(). 
+ FixedArray* parameter_map = FixedArray::cast(from); + FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1)); + ElementsKind from_kind = ElementsKindForArray(arguments); + return CopyElementsImpl(arguments, from_start, to, from_kind, + to_start, packed_size, copy_size); } - } else { - UNREACHABLE(); + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case EXTERNAL_PIXEL_ELEMENTS: + UNREACHABLE(); } - return to->GetHeap()->undefined_value(); + return NULL; } @@ -1054,25 +1255,40 @@ class FastDoubleElementsAccessor static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, - ElementsKind to_kind, + ElementsKind from_kind, uint32_t to_start, int packed_size, int copy_size) { - switch (to_kind) { + switch (from_kind) { case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: + CopyPackedSmiToDoubleElements( + from, from_start, to, to_start, packed_size, copy_size); + break; case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - return CopyDoubleToObjectElements( - FixedDoubleArray::cast(from), from_start, FixedArray::cast(to), - to_kind, to_start, copy_size); + CopySmiToDoubleElements(from, from_start, to, to_start, copy_size); + break; case FAST_DOUBLE_ELEMENTS: case FAST_HOLEY_DOUBLE_ELEMENTS: - CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start, - FixedDoubleArray::cast(to), - to_start, copy_size); - return from; - default: + CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size); + break; + case FAST_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size); + break; + case DICTIONARY_ELEMENTS: + CopyDictionaryToDoubleElements( + from, from_start, to, to_start, copy_size); + break; + case NON_STRICT_ARGUMENTS_ELEMENTS: + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case EXTERNAL_PIXEL_ELEMENTS: UNREACHABLE(); } return to->GetHeap()->undefined_value(); @@ -1129,17 +1345,37 @@ class ExternalElementsAccessor MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, JSObject* obj, uint32_t key, - BackingStore* backing_store) { + FixedArrayBase* backing_store) { return key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store) - ? backing_store->get(key) + ? BackingStore::cast(backing_store)->get(key) : backing_store->GetHeap()->undefined_value(); } + MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* backing_store) { + return + key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store) + ? NONE : ABSENT; + } + + MUST_USE_RESULT static PropertyType GetTypeImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* backing_store) { + return + key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store) + ? FIELD : NONEXISTENT; + } + MUST_USE_RESULT static MaybeObject* SetLengthImpl( JSObject* obj, Object* length, - BackingStore* backing_store) { + FixedArrayBase* backing_store) { // External arrays do not support changing their length. 
UNREACHABLE(); return obj; @@ -1155,7 +1391,7 @@ class ExternalElementsAccessor static bool HasElementImpl(Object* receiver, JSObject* holder, uint32_t key, - BackingStore* backing_store) { + FixedArrayBase* backing_store) { uint32_t capacity = ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store); return key < capacity; @@ -1264,10 +1500,11 @@ class DictionaryElementsAccessor // Adjusts the length of the dictionary backing store and returns the new // length according to ES5 section 15.4.5.2 behavior. MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize( - SeededNumberDictionary* dict, + FixedArrayBase* store, JSArray* array, Object* length_object, uint32_t length) { + SeededNumberDictionary* dict = SeededNumberDictionary::cast(store); Heap* heap = array->GetHeap(); int capacity = dict->Capacity(); uint32_t new_length = length; @@ -1340,7 +1577,7 @@ class DictionaryElementsAccessor if (mode == JSObject::STRICT_DELETION) { // Deleting a non-configurable property in strict mode. HandleScope scope(isolate); - Handle<Object> holder(obj); + Handle<Object> holder(obj, isolate); Handle<Object> name = isolate->factory()->NewNumberFromUint(key); Handle<Object> args[2] = { name, holder }; Handle<Object> error = @@ -1367,29 +1604,12 @@ class DictionaryElementsAccessor MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, - ElementsKind to_kind, + ElementsKind from_kind, uint32_t to_start, int packed_size, int copy_size) { - switch (to_kind) { - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - CopyDictionaryToObjectElements( - SeededNumberDictionary::cast(from), from_start, - FixedArray::cast(to), to_kind, to_start, copy_size); - return from; - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - CopyDictionaryToDoubleElements( - SeededNumberDictionary::cast(from), from_start, - FixedDoubleArray::cast(to), to_start, copy_size); - return from; - default: - UNREACHABLE(); - } - return to->GetHeap()->undefined_value(); + UNREACHABLE(); + return NULL; } @@ -1407,7 +1627,8 @@ class DictionaryElementsAccessor Object* receiver, JSObject* obj, uint32_t key, - SeededNumberDictionary* backing_store) { + FixedArrayBase* store) { + SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store); int entry = backing_store->FindEntry(key); if (entry != SeededNumberDictionary::kNotFound) { Object* element = backing_store->ValueAt(entry); @@ -1424,16 +1645,59 @@ class DictionaryElementsAccessor return obj->GetHeap()->the_hole_value(); } + MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* backing_store) { + SeededNumberDictionary* dictionary = + SeededNumberDictionary::cast(backing_store); + int entry = dictionary->FindEntry(key); + if (entry != SeededNumberDictionary::kNotFound) { + return dictionary->DetailsAt(entry).attributes(); + } + return ABSENT; + } + + MUST_USE_RESULT static PropertyType GetTypeImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* store) { + SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store); + int entry = backing_store->FindEntry(key); + if (entry != SeededNumberDictionary::kNotFound) { + return backing_store->DetailsAt(entry).type(); + } + return NONEXISTENT; + } + + MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* store) 
{ + SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store); + int entry = backing_store->FindEntry(key); + if (entry != SeededNumberDictionary::kNotFound && + backing_store->DetailsAt(entry).type() == CALLBACKS && + backing_store->ValueAt(entry)->IsAccessorPair()) { + return AccessorPair::cast(backing_store->ValueAt(entry)); + } + return NULL; + } + static bool HasElementImpl(Object* receiver, JSObject* holder, uint32_t key, - SeededNumberDictionary* backing_store) { - return backing_store->FindEntry(key) != + FixedArrayBase* backing_store) { + return SeededNumberDictionary::cast(backing_store)->FindEntry(key) != SeededNumberDictionary::kNotFound; } - static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict, + static uint32_t GetKeyForIndexImpl(FixedArrayBase* store, uint32_t index) { + SeededNumberDictionary* dict = SeededNumberDictionary::cast(store); Object* key = dict->KeyAt(index); return Smi::cast(key)->value(); } @@ -1456,7 +1720,8 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver, JSObject* obj, uint32_t key, - FixedArray* parameter_map) { + FixedArrayBase* parameters) { + FixedArray* parameter_map = FixedArray::cast(parameters); Object* probe = GetParameterMapArg(obj, parameter_map, key); if (!probe->IsTheHole()) { Context* context = Context::cast(parameter_map->get(0)); @@ -1483,10 +1748,61 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< } } + MUST_USE_RESULT static PropertyAttributes GetAttributesImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* backing_store) { + FixedArray* parameter_map = FixedArray::cast(backing_store); + Object* probe = GetParameterMapArg(obj, parameter_map, key); + if (!probe->IsTheHole()) { + return NONE; + } else { + // If not aliased, check the arguments. + FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); + return ElementsAccessor::ForArray(arguments)->GetAttributes( + receiver, obj, key, arguments); + } + } + + MUST_USE_RESULT static PropertyType GetTypeImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* parameters) { + FixedArray* parameter_map = FixedArray::cast(parameters); + Object* probe = GetParameterMapArg(obj, parameter_map, key); + if (!probe->IsTheHole()) { + return FIELD; + } else { + // If not aliased, check the arguments. + FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); + return ElementsAccessor::ForArray(arguments)->GetType( + receiver, obj, key, arguments); + } + } + + MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl( + Object* receiver, + JSObject* obj, + uint32_t key, + FixedArrayBase* parameters) { + FixedArray* parameter_map = FixedArray::cast(parameters); + Object* probe = GetParameterMapArg(obj, parameter_map, key); + if (!probe->IsTheHole()) { + return NULL; + } else { + // If not aliased, check the arguments. + FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); + return ElementsAccessor::ForArray(arguments)->GetAccessorPair( + receiver, obj, key, arguments); + } + } + MUST_USE_RESULT static MaybeObject* SetLengthImpl( JSObject* obj, Object* length, - FixedArray* parameter_map) { + FixedArrayBase* parameter_map) { // TODO(mstarzinger): This was never implemented but will be used once we // correctly implement [[DefineOwnProperty]] on arrays. 
UNIMPLEMENTED(); @@ -1520,24 +1836,22 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< MUST_USE_RESULT static MaybeObject* CopyElementsImpl(FixedArrayBase* from, uint32_t from_start, FixedArrayBase* to, - ElementsKind to_kind, + ElementsKind from_kind, uint32_t to_start, int packed_size, int copy_size) { - FixedArray* parameter_map = FixedArray::cast(from); - FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); - ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments); - return accessor->CopyElements(NULL, from_start, to, to_kind, - to_start, copy_size, arguments); + UNREACHABLE(); + return NULL; } - static uint32_t GetCapacityImpl(FixedArray* parameter_map) { + static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) { + FixedArray* parameter_map = FixedArray::cast(backing_store); FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1)); return Max(static_cast<uint32_t>(parameter_map->length() - 2), ForArray(arguments)->GetCapacity(arguments)); } - static uint32_t GetKeyForIndexImpl(FixedArray* dict, + static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict, uint32_t index) { return index; } @@ -1545,12 +1859,14 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< static bool HasElementImpl(Object* receiver, JSObject* holder, uint32_t key, - FixedArray* parameter_map) { + FixedArrayBase* parameters) { + FixedArray* parameter_map = FixedArray::cast(parameters); Object* probe = GetParameterMapArg(holder, parameter_map, key); if (!probe->IsTheHole()) { return true; } else { - FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1)); + FixedArrayBase* arguments = + FixedArrayBase::cast(FixedArray::cast(parameter_map)->get(1)); ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments); return !accessor->Get(receiver, holder, key, arguments)->IsTheHole(); } @@ -1563,7 +1879,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< uint32_t length = holder->IsJSArray() ? Smi::cast(JSArray::cast(holder)->length())->value() : parameter_map->length(); - return key < (length - 2 ) + return key < (length - 2) ? 
parameter_map->get(key + 2) : parameter_map->GetHeap()->the_hole_value(); } @@ -1571,35 +1887,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase< ElementsAccessor* ElementsAccessor::ForArray(FixedArrayBase* array) { - switch (array->map()->instance_type()) { - case FIXED_ARRAY_TYPE: - if (array->IsDictionary()) { - return elements_accessors_[DICTIONARY_ELEMENTS]; - } else { - return elements_accessors_[FAST_HOLEY_ELEMENTS]; - } - case EXTERNAL_BYTE_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_BYTE_ELEMENTS]; - case EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_UNSIGNED_BYTE_ELEMENTS]; - case EXTERNAL_SHORT_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_SHORT_ELEMENTS]; - case EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_UNSIGNED_SHORT_ELEMENTS]; - case EXTERNAL_INT_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_INT_ELEMENTS]; - case EXTERNAL_UNSIGNED_INT_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_UNSIGNED_INT_ELEMENTS]; - case EXTERNAL_FLOAT_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_FLOAT_ELEMENTS]; - case EXTERNAL_DOUBLE_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_DOUBLE_ELEMENTS]; - case EXTERNAL_PIXEL_ARRAY_TYPE: - return elements_accessors_[EXTERNAL_PIXEL_ELEMENTS]; - default: - UNREACHABLE(); - return NULL; - } + return elements_accessors_[ElementsKindForArray(array)]; } @@ -1630,7 +1918,7 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, ElementsKindTraits>:: SetLengthImpl(JSObject* obj, Object* length, - typename ElementsKindTraits::BackingStore* backing_store) { + FixedArrayBase* backing_store) { JSArray* array = JSArray::cast(obj); // Fast case: The new length fits into a Smi. @@ -1686,4 +1974,100 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass, } +MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements( + JSArray* array, Arguments* args) { + Heap* heap = array->GetIsolate()->heap(); + + // Optimize the case where there is one argument and the argument is a + // small smi. + if (args->length() == 1) { + Object* obj = (*args)[0]; + if (obj->IsSmi()) { + int len = Smi::cast(obj)->value(); + if (len > 0 && len < JSObject::kInitialMaxFastElementArray) { + ElementsKind elements_kind = array->GetElementsKind(); + MaybeObject* maybe_array = array->Initialize(len, len); + if (maybe_array->IsFailure()) return maybe_array; + + if (!IsFastHoleyElementsKind(elements_kind)) { + elements_kind = GetHoleyElementsKind(elements_kind); + maybe_array = array->TransitionElementsKind(elements_kind); + if (maybe_array->IsFailure()) return maybe_array; + } + + return array; + } else if (len == 0) { + return array->Initialize(JSArray::kPreallocatedArrayElements); + } + } + + // Take the argument as the length. + MaybeObject* maybe_obj = array->Initialize(0); + if (!maybe_obj->To(&obj)) return maybe_obj; + + return array->SetElementsLength((*args)[0]); + } + + // Optimize the case where there are no parameters passed. + if (args->length() == 0) { + return array->Initialize(JSArray::kPreallocatedArrayElements); + } + + // Set length and elements on the array. + int number_of_elements = args->length(); + MaybeObject* maybe_object = + array->EnsureCanContainElements(args, 0, number_of_elements, + ALLOW_CONVERTED_DOUBLE_ELEMENTS); + if (maybe_object->IsFailure()) return maybe_object; + + // Allocate an appropriately typed elements array. 
+ MaybeObject* maybe_elms; + ElementsKind elements_kind = array->GetElementsKind(); + if (IsFastDoubleElementsKind(elements_kind)) { + maybe_elms = heap->AllocateUninitializedFixedDoubleArray( + number_of_elements); + } else { + maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements); + } + FixedArrayBase* elms; + if (!maybe_elms->To(&elms)) return maybe_elms; + + // Fill in the content + switch (array->GetElementsKind()) { + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_SMI_ELEMENTS: { + FixedArray* smi_elms = FixedArray::cast(elms); + for (int index = 0; index < number_of_elements; index++) { + smi_elms->set(index, (*args)[index], SKIP_WRITE_BARRIER); + } + break; + } + case FAST_HOLEY_ELEMENTS: + case FAST_ELEMENTS: { + AssertNoAllocation no_gc; + WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc); + FixedArray* object_elms = FixedArray::cast(elms); + for (int index = 0; index < number_of_elements; index++) { + object_elms->set(index, (*args)[index], mode); + } + break; + } + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: { + FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms); + for (int index = 0; index < number_of_elements; index++) { + double_elms->set(index, (*args)[index]->Number()); + } + break; + } + default: + UNREACHABLE(); + break; + } + + array->set_elements(elms); + array->set_length(Smi::FromInt(number_of_elements)); + return array; +} + } } // namespace v8::internal diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h index 822fca50ee..6353aaecf5 100644 --- a/deps/v8/src/elements.h +++ b/deps/v8/src/elements.h @@ -71,6 +71,39 @@ class ElementsAccessor { uint32_t key, FixedArrayBase* backing_store = NULL) = 0; + // Returns an element's attributes, or ABSENT if there is no such + // element. This method doesn't iterate up the prototype chain. The caller + // can optionally pass in the backing store to use for the check, which must + // be compatible with the ElementsKind of the ElementsAccessor. If + // backing_store is NULL, the holder->elements() is used as the backing store. + MUST_USE_RESULT virtual PropertyAttributes GetAttributes( + Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store = NULL) = 0; + + // Returns an element's type, or NONEXISTENT if there is no such + // element. This method doesn't iterate up the prototype chain. The caller + // can optionally pass in the backing store to use for the check, which must + // be compatible with the ElementsKind of the ElementsAccessor. If + // backing_store is NULL, the holder->elements() is used as the backing store. + MUST_USE_RESULT virtual PropertyType GetType( + Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store = NULL) = 0; + + // Returns an element's accessors, or NULL if the element does not exist or + // is plain. This method doesn't iterate up the prototype chain. The caller + // can optionally pass in the backing store to use for the check, which must + // be compatible with the ElementsKind of the ElementsAccessor. If + // backing_store is NULL, the holder->elements() is used as the backing store. + MUST_USE_RESULT virtual AccessorPair* GetAccessorPair( + Object* receiver, + JSObject* holder, + uint32_t key, + FixedArrayBase* backing_store = NULL) = 0; + // Modifies the length data property as specified for JSArrays and resizes the // underlying backing store accordingly. The method honors the semantics of // changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. 
array that @@ -110,17 +143,17 @@ class ElementsAccessor { MUST_USE_RESULT virtual MaybeObject* CopyElements( JSObject* source_holder, uint32_t source_start, + ElementsKind source_kind, FixedArrayBase* destination, - ElementsKind destination_kind, uint32_t destination_start, int copy_size, FixedArrayBase* source = NULL) = 0; MUST_USE_RESULT MaybeObject* CopyElements(JSObject* from_holder, FixedArrayBase* to, - ElementsKind to_kind, + ElementsKind from_kind, FixedArrayBase* from = NULL) { - return CopyElements(from_holder, 0, to, to_kind, 0, + return CopyElements(from_holder, 0, from_kind, to, 0, kCopyToEndAndInitializeToHole, from); } @@ -164,15 +197,11 @@ class ElementsAccessor { DISALLOW_COPY_AND_ASSIGN(ElementsAccessor); }; +void CheckArrayAbuse(JSObject* obj, const char* op, uint32_t key, + bool allow_appending = false); -void CopyObjectToObjectElements(FixedArray* from_obj, - ElementsKind from_kind, - uint32_t from_start, - FixedArray* to_obj, - ElementsKind to_kind, - uint32_t to_start, - int copy_size); - +MUST_USE_RESULT MaybeObject* ArrayConstructInitializeElements( + JSArray* array, Arguments* args); } } // namespace v8::internal diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc index 89091ba429..dee3112682 100644 --- a/deps/v8/src/execution.cc +++ b/deps/v8/src/execution.cc @@ -106,7 +106,7 @@ static Handle<Object> Invoke(bool is_construct, // Save and restore context around invocation and block the // allocation of handles without explicit handle scopes. SaveContext save(isolate); - NoHandleAllocation na; + NoHandleAllocation na(isolate); JSEntryFunction stub_entry = FUNCTION_CAST<JSEntryFunction>(code->entry()); // Call the function through the right JS entry stub. @@ -124,10 +124,10 @@ static Handle<Object> Invoke(bool is_construct, // Update the pending exception flag and return the value. *has_pending_exception = value->IsException(); - ASSERT(*has_pending_exception == Isolate::Current()->has_pending_exception()); + ASSERT(*has_pending_exception == isolate->has_pending_exception()); if (*has_pending_exception) { isolate->ReportPendingMessages(); - if (isolate->pending_exception() == Failure::OutOfMemoryException()) { + if (isolate->pending_exception()->IsOutOfMemory()) { if (!isolate->ignore_out_of_memory()) { V8::FatalProcessOutOfMemory("JS", true); } @@ -169,7 +169,9 @@ Handle<Object> Execution::Call(Handle<Object> callable, // Under some circumstances, 'global' can be the JSBuiltinsObject // In that case, don't rewrite. (FWIW, the same holds for // GetIsolate()->global_object()->global_receiver().) 
- if (!global->IsJSBuiltinsObject()) receiver = Handle<Object>(global); + if (!global->IsJSBuiltinsObject()) { + receiver = Handle<Object>(global, func->GetIsolate()); + } } else { receiver = ToObject(receiver, pending_exception); } @@ -184,7 +186,7 @@ Handle<Object> Execution::New(Handle<JSFunction> func, int argc, Handle<Object> argv[], bool* pending_exception) { - return Invoke(true, func, Isolate::Current()->global_object(), argc, argv, + return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv, pending_exception); } @@ -206,11 +208,14 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func, Handle<Object> result = Invoke(false, func, receiver, argc, args, caught_exception); + Isolate* isolate = func->GetIsolate(); if (*caught_exception) { ASSERT(catcher.HasCaught()); - Isolate* isolate = Isolate::Current(); ASSERT(isolate->has_pending_exception()); ASSERT(isolate->external_caught_exception()); + if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) { + V8::FatalProcessOutOfMemory("OOM during Execution::TryCall"); + } if (isolate->pending_exception() == isolate->heap()->termination_exception()) { result = isolate->factory()->termination_exception(); @@ -220,8 +225,8 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func, isolate->OptionalRescheduleException(true); } - ASSERT(!Isolate::Current()->has_pending_exception()); - ASSERT(!Isolate::Current()->external_caught_exception()); + ASSERT(!isolate->has_pending_exception()); + ASSERT(!isolate->external_caught_exception()); return result; } @@ -239,7 +244,7 @@ Handle<Object> Execution::GetFunctionDelegate(Handle<Object> object) { while (fun->IsJSFunctionProxy()) { fun = JSFunctionProxy::cast(fun)->call_trap(); } - if (fun->IsJSFunction()) return Handle<Object>(fun); + if (fun->IsJSFunction()) return Handle<Object>(fun, isolate); // Objects created through the API can have an instance-call handler // that should be used when calling the object as a function. @@ -263,7 +268,7 @@ Handle<Object> Execution::TryGetFunctionDelegate(Handle<Object> object, while (fun->IsJSFunctionProxy()) { fun = JSFunctionProxy::cast(fun)->call_trap(); } - if (fun->IsJSFunction()) return Handle<Object>(fun); + if (fun->IsJSFunction()) return Handle<Object>(fun, isolate); // Objects created through the API can have an instance-call handler // that should be used when calling the object as a function. @@ -296,7 +301,7 @@ Handle<Object> Execution::GetConstructorDelegate(Handle<Object> object) { while (fun->IsJSFunctionProxy()) { fun = JSFunctionProxy::cast(fun)->call_trap(); } - if (fun->IsJSFunction()) return Handle<Object>(fun); + if (fun->IsJSFunction()) return Handle<Object>(fun, isolate); // Objects created through the API can have an instance-call handler // that should be used when calling the object as a function. @@ -324,7 +329,7 @@ Handle<Object> Execution::TryGetConstructorDelegate( while (fun->IsJSFunctionProxy()) { fun = JSFunctionProxy::cast(fun)->call_trap(); } - if (fun->IsJSFunction()) return Handle<Object>(fun); + if (fun->IsJSFunction()) return Handle<Object>(fun, isolate); // Objects created through the API can have an instance-call handler // that should be used when calling the object as a function. 
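The Execution::GetFunctionDelegate / TryGetFunctionDelegate hunks above all unwrap function proxies the same way: follow call_trap() until a plain JSFunction is reached, then hand back the callable wrapped in a handle tied to the right isolate. A small sketch of that pattern under hypothetical stand-in types (the real code additionally consults instance-call handlers on API objects, omitted here):

// Hypothetical stand-ins; not the real JSFunction/JSFunctionProxy classes.
struct Callable {
  virtual ~Callable() {}
  virtual bool IsProxy() const { return false; }
};

struct FunctionProxy : public Callable {
  Callable* call_trap;                          // target the proxy forwards calls to
  explicit FunctionProxy(Callable* trap) : call_trap(trap) {}
  bool IsProxy() const override { return true; }
};

// Follow the chain of proxies until a plain callable (or null) is found.
Callable* ResolveCallDelegate(Callable* fun) {
  while (fun != nullptr && fun->IsProxy()) {
    fun = static_cast<FunctionProxy*>(fun)->call_trap;
  }
  return fun;
}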
@@ -427,44 +432,6 @@ void StackGuard::TerminateExecution() { } -bool StackGuard::IsRuntimeProfilerTick() { - ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0; -} - - -void StackGuard::RequestRuntimeProfilerTick() { - // Ignore calls if we're not optimizing or if we can't get the lock. - if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) { - thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK; - if (thread_local_.postpone_interrupts_nesting_ == 0) { - thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit; - isolate_->heap()->SetStackLimits(); - } - ExecutionAccess::Unlock(isolate_); - } -} - - -void StackGuard::RequestCodeReadyEvent() { - ASSERT(FLAG_parallel_recompilation); - if (ExecutionAccess::TryLock(isolate_)) { - thread_local_.interrupt_flags_ |= CODE_READY; - if (thread_local_.postpone_interrupts_nesting_ == 0) { - thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit; - isolate_->heap()->SetStackLimits(); - } - ExecutionAccess::Unlock(isolate_); - } -} - - -bool StackGuard::IsCodeReadyEvent() { - ExecutionAccess access(isolate_); - return (thread_local_.interrupt_flags_ & CODE_READY) != 0; -} - - bool StackGuard::IsGCRequest() { ExecutionAccess access(isolate_); return (thread_local_.interrupt_flags_ & GC_REQUEST) != 0; @@ -615,22 +582,6 @@ void StackGuard::InitThread(const ExecutionAccess& lock) { } while (false) -Handle<Object> Execution::ToBoolean(Handle<Object> obj) { - // See the similar code in runtime.js:ToBoolean. - if (obj->IsBoolean()) return obj; - bool result = true; - if (obj->IsString()) { - result = Handle<String>::cast(obj)->length() != 0; - } else if (obj->IsNull() || obj->IsUndefined()) { - result = false; - } else if (obj->IsNumber()) { - double value = obj->Number(); - result = !((value == 0) || isnan(value)); - } - return Handle<Object>(HEAP->ToBoolean(result)); -} - - Handle<Object> Execution::ToNumber(Handle<Object> obj, bool* exc) { RETURN_NATIVE_CALL(to_number, { obj }, exc); } @@ -697,9 +648,8 @@ Handle<Object> Execution::CharAt(Handle<String> string, uint32_t index) { return factory->undefined_value(); } - Handle<Object> char_at = - GetProperty(isolate->js_builtins_object(), - factory->char_at_symbol()); + Handle<Object> char_at = GetProperty( + isolate, isolate->js_builtins_object(), factory->char_at_string()); if (!char_at->IsJSFunction()) { return factory->undefined_value(); } @@ -800,7 +750,7 @@ Handle<String> Execution::GetStackTraceLine(Handle<Object> recv, args, &caught_exception); if (caught_exception || !result->IsString()) { - return isolate->factory()->empty_symbol(); + return isolate->factory()->empty_string(); } return Handle<String>::cast(result); @@ -930,25 +880,10 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) { stack_guard->Continue(GC_REQUEST); } - if (stack_guard->IsCodeReadyEvent()) { - ASSERT(FLAG_parallel_recompilation); - if (FLAG_trace_parallel_recompilation) { - PrintF(" ** CODE_READY event received.\n"); - } - stack_guard->Continue(CODE_READY); - } - if (!stack_guard->IsTerminateExecution()) { - isolate->optimizing_compiler_thread()->InstallOptimizedFunctions(); - } isolate->counters()->stack_interrupts()->Increment(); - // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt. 
- if (FLAG_count_based_interrupts || - stack_guard->IsRuntimeProfilerTick()) { - isolate->counters()->runtime_profiler_ticks()->Increment(); - stack_guard->Continue(RUNTIME_PROFILER_TICK); - isolate->runtime_profiler()->OptimizeNow(); - } + isolate->counters()->runtime_profiler_ticks()->Increment(); + isolate->runtime_profiler()->OptimizeNow(); #ifdef ENABLE_DEBUGGER_SUPPORT if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) { DebugBreakHelper(); diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h index 9f5d9ff2cd..b104180c9e 100644 --- a/deps/v8/src/execution.h +++ b/deps/v8/src/execution.h @@ -41,9 +41,7 @@ enum InterruptFlag { DEBUGCOMMAND = 1 << 2, PREEMPT = 1 << 3, TERMINATE = 1 << 4, - RUNTIME_PROFILER_TICK = 1 << 5, - GC_REQUEST = 1 << 6, - CODE_READY = 1 << 7 + GC_REQUEST = 1 << 5 }; @@ -92,9 +90,6 @@ class Execution : public AllStatic { Handle<Object> argv[], bool* caught_exception); - // ECMA-262 9.2 - static Handle<Object> ToBoolean(Handle<Object> obj); - // ECMA-262 9.3 static Handle<Object> ToNumber(Handle<Object> obj, bool* exc); @@ -194,10 +189,6 @@ class StackGuard { void Interrupt(); bool IsTerminateExecution(); void TerminateExecution(); - bool IsRuntimeProfilerTick(); - void RequestRuntimeProfilerTick(); - bool IsCodeReadyEvent(); - void RequestCodeReadyEvent(); #ifdef ENABLE_DEBUGGER_SUPPORT bool IsDebugBreak(); void DebugBreak(); diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc index 50d876136f..76d20303f8 100644 --- a/deps/v8/src/extensions/externalize-string-extension.cc +++ b/deps/v8/src/extensions/externalize-string-extension.cc @@ -93,13 +93,13 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize( return v8::ThrowException(v8::String::New( "externalizeString() can't externalize twice.")); } - if (string->IsAsciiRepresentation() && !force_two_byte) { - char* data = new char[string->length()]; + if (string->IsOneByteRepresentation() && !force_two_byte) { + uint8_t* data = new uint8_t[string->length()]; String::WriteToFlat(*string, data, 0, string->length()); SimpleAsciiStringResource* resource = new SimpleAsciiStringResource( - data, string->length()); + reinterpret_cast<char*>(data), string->length()); result = string->MakeExternal(resource); - if (result && !string->IsSymbol()) { + if (result && !string->IsInternalizedString()) { HEAP->external_string_table()->AddString(*string); } if (!result) delete resource; @@ -109,7 +109,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize( SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource( data, string->length()); result = string->MakeExternal(resource); - if (result && !string->IsSymbol()) { + if (result && !string->IsInternalizedString()) { HEAP->external_string_table()->AddString(*string); } if (!result) delete resource; @@ -127,7 +127,8 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii( return v8::ThrowException(v8::String::New( "isAsciiString() requires a single string argument.")); } - return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ? + return + Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ? 
v8::True() : v8::False(); } diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc index f921552aaa..813b9219bf 100644 --- a/deps/v8/src/extensions/gc-extension.cc +++ b/deps/v8/src/extensions/gc-extension.cc @@ -40,7 +40,11 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction( v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) { - HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension"); + if (args[0]->BooleanValue()) { + HEAP->CollectGarbage(NEW_SPACE, "gc extension"); + } else { + HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension"); + } return v8::Undefined(); } diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc index a2bb9391ee..fece9a09c9 100644 --- a/deps/v8/src/factory.cc +++ b/deps/v8/src/factory.cc @@ -70,11 +70,12 @@ Handle<FixedDoubleArray> Factory::NewFixedDoubleArray(int size, } -Handle<StringDictionary> Factory::NewStringDictionary(int at_least_space_for) { +Handle<NameDictionary> Factory::NewNameDictionary(int at_least_space_for) { ASSERT(0 <= at_least_space_for); CALL_HEAP_FUNCTION(isolate(), - StringDictionary::Allocate(at_least_space_for), - StringDictionary); + NameDictionary::Allocate(isolate()->heap(), + at_least_space_for), + NameDictionary); } @@ -82,7 +83,8 @@ Handle<SeededNumberDictionary> Factory::NewSeededNumberDictionary( int at_least_space_for) { ASSERT(0 <= at_least_space_for); CALL_HEAP_FUNCTION(isolate(), - SeededNumberDictionary::Allocate(at_least_space_for), + SeededNumberDictionary::Allocate(isolate()->heap(), + at_least_space_for), SeededNumberDictionary); } @@ -91,7 +93,8 @@ Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary( int at_least_space_for) { ASSERT(0 <= at_least_space_for); CALL_HEAP_FUNCTION(isolate(), - UnseededNumberDictionary::Allocate(at_least_space_for), + UnseededNumberDictionary::Allocate(isolate()->heap(), + at_least_space_for), UnseededNumberDictionary); } @@ -99,7 +102,8 @@ Handle<UnseededNumberDictionary> Factory::NewUnseededNumberDictionary( Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) { ASSERT(0 <= at_least_space_for); CALL_HEAP_FUNCTION(isolate(), - ObjectHashSet::Allocate(at_least_space_for), + ObjectHashSet::Allocate(isolate()->heap(), + at_least_space_for), ObjectHashSet); } @@ -107,7 +111,8 @@ Handle<ObjectHashSet> Factory::NewObjectHashSet(int at_least_space_for) { Handle<ObjectHashTable> Factory::NewObjectHashTable(int at_least_space_for) { ASSERT(0 <= at_least_space_for); CALL_HEAP_FUNCTION(isolate(), - ObjectHashTable::Allocate(at_least_space_for), + ObjectHashTable::Allocate(isolate()->heap(), + at_least_space_for), ObjectHashTable); } @@ -157,50 +162,48 @@ Handle<TypeFeedbackInfo> Factory::NewTypeFeedbackInfo() { } -// Symbols are created in the old generation (data space). -Handle<String> Factory::LookupSymbol(Vector<const char> string) { +// Internalized strings are created in the old generation (data space). +Handle<String> Factory::InternalizeUtf8String(Vector<const char> string) { CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->LookupSymbol(string), + isolate()->heap()->InternalizeUtf8String(string), String); } -// Symbols are created in the old generation (data space). -Handle<String> Factory::LookupSymbol(Handle<String> string) { +// Internalized strings are created in the old generation (data space). 
+Handle<String> Factory::InternalizeString(Handle<String> string) { CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->LookupSymbol(*string), + isolate()->heap()->InternalizeString(*string), String); } -Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) { +Handle<String> Factory::InternalizeOneByteString(Vector<const uint8_t> string) { CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->LookupAsciiSymbol(string), + isolate()->heap()->InternalizeOneByteString(string), String); } -Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string, - int from, - int length) { +Handle<String> Factory::InternalizeOneByteString( + Handle<SeqOneByteString> string, int from, int length) { CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->LookupAsciiSymbol(string, - from, - length), + isolate()->heap()->InternalizeOneByteString( + string, from, length), String); } -Handle<String> Factory::LookupTwoByteSymbol(Vector<const uc16> string) { +Handle<String> Factory::InternalizeTwoByteString(Vector<const uc16> string) { CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->LookupTwoByteSymbol(string), + isolate()->heap()->InternalizeTwoByteString(string), String); } -Handle<String> Factory::NewStringFromAscii(Vector<const char> string, - PretenureFlag pretenure) { +Handle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string, + PretenureFlag pretenure) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateStringFromAscii(string, pretenure), + isolate()->heap()->AllocateStringFromOneByte(string, pretenure), String); } @@ -222,12 +225,12 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string, } -Handle<SeqAsciiString> Factory::NewRawAsciiString(int length, +Handle<SeqOneByteString> Factory::NewRawOneByteString(int length, PretenureFlag pretenure) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateRawAsciiString(length, pretenure), - SeqAsciiString); + isolate()->heap()->AllocateRawOneByteString(length, pretenure), + SeqOneByteString); } @@ -285,6 +288,14 @@ Handle<String> Factory::NewExternalStringFromTwoByte( } +Handle<Symbol> Factory::NewSymbol() { + CALL_HEAP_FUNCTION( + isolate(), + isolate()->heap()->AllocateSymbol(), + Symbol); +} + + Handle<Context> Factory::NewNativeContext() { CALL_HEAP_FUNCTION( isolate(), @@ -363,9 +374,25 @@ Handle<Struct> Factory::NewStruct(InstanceType type) { } -Handle<AccessorInfo> Factory::NewAccessorInfo() { - Handle<AccessorInfo> info = - Handle<AccessorInfo>::cast(NewStruct(ACCESSOR_INFO_TYPE)); +Handle<DeclaredAccessorDescriptor> Factory::NewDeclaredAccessorDescriptor() { + return Handle<DeclaredAccessorDescriptor>::cast( + NewStruct(DECLARED_ACCESSOR_DESCRIPTOR_TYPE)); +} + + +Handle<DeclaredAccessorInfo> Factory::NewDeclaredAccessorInfo() { + Handle<DeclaredAccessorInfo> info = + Handle<DeclaredAccessorInfo>::cast( + NewStruct(DECLARED_ACCESSOR_INFO_TYPE)); + info->set_flag(0); // Must clear the flag, it was initialized as undefined. + return info; +} + + +Handle<ExecutableAccessorInfo> Factory::NewExecutableAccessorInfo() { + Handle<ExecutableAccessorInfo> info = + Handle<ExecutableAccessorInfo>::cast( + NewStruct(EXECUTABLE_ACCESSOR_INFO_TYPE)); info->set_flag(0); // Must clear the flag, it was initialized as undefined. 
return info; } @@ -525,6 +552,12 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) { } +Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array, + int new_length) { + CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray); +} + + Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray( Handle<FixedDoubleArray> array) { CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray); @@ -670,9 +703,11 @@ Handle<Object> Factory::NewReferenceError(Handle<String> message) { } -Handle<Object> Factory::NewError(const char* maker, const char* type, - Vector< Handle<Object> > args) { - v8::HandleScope scope; // Instantiate a closeable HandleScope for EscapeFrom. +Handle<Object> Factory::NewError(const char* maker, + const char* type, + Vector< Handle<Object> > args) { + // Instantiate a closeable HandleScope for EscapeFrom. + v8::HandleScope scope(reinterpret_cast<v8::Isolate*>(isolate())); Handle<FixedArray> array = NewFixedArray(args.length()); for (int i = 0; i < args.length(); i++) { array->set(i, *args[i]); @@ -735,16 +770,17 @@ Handle<String> Factory::EmergencyNewError(const char* type, Handle<Object> Factory::NewError(const char* maker, const char* type, Handle<JSArray> args) { - Handle<String> make_str = LookupAsciiSymbol(maker); + Handle<String> make_str = InternalizeUtf8String(maker); Handle<Object> fun_obj( - isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str)); + isolate()->js_builtins_object()->GetPropertyNoExceptionThrown(*make_str), + isolate()); // If the builtins haven't been properly configured yet this error // constructor may not have been defined. Bail out. if (!fun_obj->IsJSFunction()) { return EmergencyNewError(type, args); } Handle<JSFunction> fun = Handle<JSFunction>::cast(fun_obj); - Handle<Object> type_obj = LookupAsciiSymbol(type); + Handle<Object> type_obj = InternalizeUtf8String(type); Handle<Object> argv[] = { type_obj, args }; // Invoke the JavaScript factory method. If an exception is thrown while @@ -766,7 +802,7 @@ Handle<Object> Factory::NewError(Handle<String> message) { Handle<Object> Factory::NewError(const char* constructor, Handle<String> message) { - Handle<String> constr = LookupAsciiSymbol(constructor); + Handle<String> constr = InternalizeUtf8String(constructor); Handle<JSFunction> fun = Handle<JSFunction>( JSFunction::cast(isolate()->js_builtins_object()-> GetPropertyNoExceptionThrown(*constr))); @@ -844,7 +880,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name, // Currently safe because it is only invoked from Genesis. 
CHECK_NOT_EMPTY_HANDLE(isolate(), JSObject::SetLocalPropertyIgnoreAttributes( - prototype, constructor_symbol(), + prototype, constructor_string(), function, DONT_ENUM)); return function; } @@ -870,6 +906,13 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) { } +Handle<JSObject> Factory::NewExternal(void* value) { + CALL_HEAP_FUNCTION(isolate(), + isolate()->heap()->AllocateExternal(value), + JSObject); +} + + Handle<Code> Factory::NewCode(const CodeDesc& desc, Code::Flags flags, Handle<Object> self_ref, @@ -895,9 +938,9 @@ Handle<Code> Factory::CopyCode(Handle<Code> code, Vector<byte> reloc_info) { } -Handle<String> Factory::SymbolFromString(Handle<String> value) { +Handle<String> Factory::InternalizedStringFromString(Handle<String> value) { CALL_HEAP_FUNCTION(isolate(), - isolate()->heap()->LookupSymbol(*value), String); + isolate()->heap()->InternalizeString(*value), String); } @@ -926,10 +969,11 @@ Handle<GlobalObject> Factory::NewGlobalObject( -Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) { +Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map, + PretenureFlag pretenure) { CALL_HEAP_FUNCTION( isolate(), - isolate()->heap()->AllocateJSObjectFromMap(*map, NOT_TENURED), + isolate()->heap()->AllocateJSObjectFromMap(*map, pretenure), JSObject); } @@ -937,6 +981,9 @@ Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) { Handle<JSArray> Factory::NewJSArray(int capacity, ElementsKind elements_kind, PretenureFlag pretenure) { + if (capacity != 0) { + elements_kind = GetHoleyElementsKind(elements_kind); + } CALL_HEAP_FUNCTION(isolate(), isolate()->heap()->AllocateJSArrayAndStorage( elements_kind, @@ -955,6 +1002,7 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements, isolate(), isolate()->heap()->AllocateJSArrayWithElements(*elements, elements_kind, + elements->length(), pretenure), JSArray); } @@ -1238,13 +1286,17 @@ Handle<JSFunction> Factory::CreateApiFunction( ASSERT(type != INVALID_TYPE); Handle<JSFunction> result = - NewFunction(Factory::empty_symbol(), + NewFunction(Factory::empty_string(), type, instance_size, code, true); + + // Set length. + result->shared()->set_length(obj->length()); + // Set class name. 
- Handle<Object> class_name = Handle<Object>(obj->class_name()); + Handle<Object> class_name = Handle<Object>(obj->class_name(), isolate()); if (class_name->IsString()) { result->shared()->set_instance_class_name(*class_name); result->shared()->set_name(*class_name); @@ -1290,7 +1342,7 @@ Handle<JSFunction> Factory::CreateApiFunction( while (true) { Object* props = info->property_accessors(); if (!props->IsUndefined()) { - Handle<Object> props_handle(props); + Handle<Object> props_handle(props, isolate()); NeanderArray props_array(props_handle); max_number_of_additional_properties += props_array.length(); } @@ -1302,11 +1354,12 @@ Handle<JSFunction> Factory::CreateApiFunction( Map::EnsureDescriptorSlack(map, max_number_of_additional_properties); while (true) { - Handle<Object> props = Handle<Object>(obj->property_accessors()); + Handle<Object> props = Handle<Object>(obj->property_accessors(), + isolate()); if (!props->IsUndefined()) { Map::AppendCallbackDescriptors(map, props); } - Handle<Object> parent = Handle<Object>(obj->parent_template()); + Handle<Object> parent = Handle<Object>(obj->parent_template(), isolate()); if (parent->IsUndefined()) break; obj = Handle<FunctionTemplateInfo>::cast(parent); } @@ -1318,7 +1371,9 @@ Handle<JSFunction> Factory::CreateApiFunction( Handle<MapCache> Factory::NewMapCache(int at_least_space_for) { CALL_HEAP_FUNCTION(isolate(), - MapCache::Allocate(at_least_space_for), MapCache); + MapCache::Allocate(isolate()->heap(), + at_least_space_for), + MapCache); } @@ -1353,7 +1408,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context, // Check to see whether there is a matching element in the cache. Handle<MapCache> cache = Handle<MapCache>(MapCache::cast(context->map_cache())); - Handle<Object> result = Handle<Object>(cache->Lookup(*keys)); + Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate()); if (result->IsMap()) return Handle<Map>::cast(result); // Create a new map and add it to the cache. Handle<Map> map = @@ -1405,7 +1460,7 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc, bool* pending_exception) { // Configure the instance by adding the properties specified by the // instance template. - Handle<Object> instance_template = Handle<Object>(desc->instance_template()); + Handle<Object> instance_template(desc->instance_template(), isolate()); if (!instance_template->IsUndefined()) { Execution::ConfigureInstance(instance, instance_template, @@ -1418,17 +1473,15 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc, Handle<Object> Factory::GlobalConstantFor(Handle<String> name) { Heap* h = isolate()->heap(); - if (name->Equals(h->undefined_symbol())) return undefined_value(); - if (name->Equals(h->nan_symbol())) return nan_value(); - if (name->Equals(h->infinity_symbol())) return infinity_value(); + if (name->Equals(h->undefined_string())) return undefined_value(); + if (name->Equals(h->nan_string())) return nan_value(); + if (name->Equals(h->infinity_string())) return infinity_value(); return Handle<Object>::null(); } Handle<Object> Factory::ToBoolean(bool value) { - return Handle<Object>(value - ? isolate()->heap()->true_value() - : isolate()->heap()->false_value()); + return value ? 
true_value() : false_value(); } diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h index 51065aac41..8695bcd520 100644 --- a/deps/v8/src/factory.h +++ b/deps/v8/src/factory.h @@ -60,7 +60,7 @@ class Factory { Handle<UnseededNumberDictionary> NewUnseededNumberDictionary( int at_least_space_for); - Handle<StringDictionary> NewStringDictionary(int at_least_space_for); + Handle<NameDictionary> NewNameDictionary(int at_least_space_for); Handle<ObjectHashSet> NewObjectHashSet(int at_least_space_for); @@ -79,16 +79,16 @@ class Factory { Handle<TypeFeedbackInfo> NewTypeFeedbackInfo(); - Handle<String> LookupSymbol(Vector<const char> str); - Handle<String> LookupSymbol(Handle<String> str); - Handle<String> LookupAsciiSymbol(Vector<const char> str); - Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>, + Handle<String> InternalizeUtf8String(Vector<const char> str); + Handle<String> InternalizeUtf8String(const char* str) { + return InternalizeUtf8String(CStrVector(str)); + } + Handle<String> InternalizeString(Handle<String> str); + Handle<String> InternalizeOneByteString(Vector<const uint8_t> str); + Handle<String> InternalizeOneByteString(Handle<SeqOneByteString>, int from, int length); - Handle<String> LookupTwoByteSymbol(Vector<const uc16> str); - Handle<String> LookupAsciiSymbol(const char* str) { - return LookupSymbol(CStrVector(str)); - } + Handle<String> InternalizeTwoByteString(Vector<const uc16> str); // String creation functions. Most of the string creation functions take @@ -113,9 +113,15 @@ class Factory { // two byte. // // ASCII strings are pretenured when used as keys in the SourceCodeCache. - Handle<String> NewStringFromAscii( - Vector<const char> str, + Handle<String> NewStringFromOneByte( + Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED); + // TODO(dcarney): remove this function. + inline Handle<String> NewStringFromAscii( + Vector<const char> str, + PretenureFlag pretenure = NOT_TENURED) { + return NewStringFromOneByte(Vector<const uint8_t>::cast(str), pretenure); + } // UTF8 strings are pretenured when used for regexp literal patterns and // flags in the parser. @@ -130,7 +136,7 @@ class Factory { // Allocates and partially initializes an ASCII or TwoByte String. The // characters of the string are uninitialized. Currently used in regexp code // only, where they are pretenured. - Handle<SeqAsciiString> NewRawAsciiString( + Handle<SeqOneByteString> NewRawOneByteString( int length, PretenureFlag pretenure = NOT_TENURED); Handle<SeqTwoByteString> NewRawTwoByteString( @@ -160,6 +166,9 @@ class Factory { Handle<String> NewExternalStringFromTwoByte( const ExternalTwoByteString::Resource* resource); + // Create a symbol. + Handle<Symbol> NewSymbol(); + // Create a global (but otherwise uninitialized) context. Handle<Context> NewNativeContext(); @@ -189,14 +198,18 @@ class Factory { Handle<Context> previous, Handle<ScopeInfo> scope_info); - // Return the Symbol matching the passed in string. - Handle<String> SymbolFromString(Handle<String> value); + // Return the internalized version of the passed in string. + Handle<String> InternalizedStringFromString(Handle<String> value); // Allocate a new struct. The struct is pretenured (allocated directly in // the old generation). 
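The factory.h hunk above retires the old "symbol" terminology for interned strings: LookupSymbol/LookupAsciiSymbol become InternalizeString/InternalizeUtf8String (plus one-byte and two-byte variants), while the new Factory::NewSymbol() is reserved for actual harmony Symbols. A hypothetical caller-side sketch of the rename (the helper name is invented; the Factory methods are the ones declared above, and src/factory.h is assumed to be included):

    // Illustration only, not part of the patch.
    static v8::internal::Handle<v8::internal::String> LengthKey(
        v8::internal::Factory* factory) {
      // Before this patch: return factory->LookupAsciiSymbol("length");
      return factory->InternalizeUtf8String("length");
    }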
Handle<Struct> NewStruct(InstanceType type); - Handle<AccessorInfo> NewAccessorInfo(); + Handle<DeclaredAccessorDescriptor> NewDeclaredAccessorDescriptor(); + + Handle<DeclaredAccessorInfo> NewDeclaredAccessorInfo(); + + Handle<ExecutableAccessorInfo> NewExecutableAccessorInfo(); Handle<Script> NewScript(Handle<String> source); @@ -239,6 +252,9 @@ class Factory { Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array); + Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array, + int new_length); + Handle<FixedDoubleArray> CopyFixedDoubleArray( Handle<FixedDoubleArray> array); @@ -267,7 +283,8 @@ class Factory { // JS objects are pretenured when allocated by the bootstrapper and // runtime. - Handle<JSObject> NewJSObjectFromMap(Handle<Map> map); + Handle<JSObject> NewJSObjectFromMap(Handle<Map> map, + PretenureFlag pretenure = NOT_TENURED); // JS modules are pretenured. Handle<JSModule> NewJSModule(Handle<Context> context, @@ -325,6 +342,8 @@ class Factory { Handle<ScopeInfo> NewScopeInfo(int length); + Handle<JSObject> NewExternal(void* value); + Handle<Code> NewCode(const CodeDesc& desc, Code::Flags flags, Handle<Object> self_reference, @@ -420,16 +439,16 @@ class Factory { ROOT_LIST(ROOT_ACCESSOR) #undef ROOT_ACCESSOR_ACCESSOR -#define SYMBOL_ACCESSOR(name, str) \ +#define STRING_ACCESSOR(name, str) \ inline Handle<String> name() { \ return Handle<String>(BitCast<String**>( \ &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \ } - SYMBOL_LIST(SYMBOL_ACCESSOR) -#undef SYMBOL_ACCESSOR + INTERNALIZED_STRING_LIST(STRING_ACCESSOR) +#undef STRING_ACCESSOR - Handle<String> hidden_symbol() { - return Handle<String>(&isolate()->heap()->hidden_symbol_); + Handle<String> hidden_string() { + return Handle<String>(&isolate()->heap()->hidden_string_); } Handle<SharedFunctionInfo> NewSharedFunctionInfo( diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h index 4c7c090f40..c4b560bc8f 100644 --- a/deps/v8/src/flag-definitions.h +++ b/deps/v8/src/flag-definitions.h @@ -141,22 +141,32 @@ DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof") DEFINE_bool(harmony_scoping, false, "enable harmony block scoping") DEFINE_bool(harmony_modules, false, "enable harmony modules (implies block scoping)") +DEFINE_bool(harmony_symbols, false, + "enable harmony symbols (a.k.a. private names)") DEFINE_bool(harmony_proxies, false, "enable harmony proxies") DEFINE_bool(harmony_collections, false, "enable harmony collections (sets, maps, and weak maps)") +DEFINE_bool(harmony_observation, false, + "enable harmony object observation (implies harmony collections") DEFINE_bool(harmony, false, "enable all harmony features (except typeof)") DEFINE_implication(harmony, harmony_scoping) DEFINE_implication(harmony, harmony_modules) +DEFINE_implication(harmony, harmony_symbols) DEFINE_implication(harmony, harmony_proxies) DEFINE_implication(harmony, harmony_collections) +DEFINE_implication(harmony, harmony_observation) DEFINE_implication(harmony_modules, harmony_scoping) +DEFINE_implication(harmony_observation, harmony_collections) // Flags for experimental implementation features. 
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes") DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values") +DEFINE_bool(compiled_transitions, false, "use optimizing compiler to " + "generate array elements transition stubs") DEFINE_bool(clever_optimizations, true, "Optimize object size, Array shift, DOM strings and string +") +DEFINE_bool(pretenure_literals, false, "allocate literals in old space") // Flags for data representation optimizations DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles") @@ -177,6 +187,7 @@ DEFINE_int(max_inlined_nodes, 196, DEFINE_int(max_inlined_nodes_cumulative, 196, "maximum cumulative number of AST nodes considered for inlining") DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion") +DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions") DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true, "crankshaft harvests type feedback from stub cache") @@ -189,6 +200,8 @@ DEFINE_bool(trace_all_uses, false, "trace all use positions") DEFINE_bool(trace_range, false, "trace range analysis") DEFINE_bool(trace_gvn, false, "trace global value numbering") DEFINE_bool(trace_representation, false, "trace representation types") +DEFINE_bool(trace_track_allocation_sites, false, + "trace the tracking of allocation sites") DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction") DEFINE_bool(stress_environments, false, "environment for every instruction") DEFINE_int(deopt_every_n_times, @@ -198,13 +211,20 @@ DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing") DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases") DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining") DEFINE_bool(use_osr, true, "use on-stack replacement") +DEFINE_bool(idefs, false, "use informative definitions") DEFINE_bool(array_bounds_checks_elimination, true, "perform array bounds checks elimination") DEFINE_bool(array_index_dehoisting, true, "perform array index dehoisting") DEFINE_bool(dead_code_elimination, true, "use dead code elimination") +DEFINE_bool(fold_constants, true, "use constant folding") DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination") - +DEFINE_bool(unreachable_code_elimination, false, + "eliminate unreachable code (hidden behind soft deopts)") +DEFINE_bool(track_allocation_sites, true, + "Use allocation site info to reduce transitions") +DEFINE_bool(optimize_constructed_arrays, false, + "Use allocation site info on constructed arrays") DEFINE_bool(trace_osr, false, "trace on-stack replacement") DEFINE_int(stress_runs, 0, "number of stress runs") DEFINE_bool(optimize_closures, true, "optimize closures") @@ -221,14 +241,19 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference") DEFINE_bool(optimize_for_in, true, "optimize functions containing for-in loops") DEFINE_bool(opt_safe_uint32_operations, true, - "allow uint32 values on optimize frames if they are used only in" + "allow uint32 values on optimize frames if they are used only in " "safe operations") DEFINE_bool(parallel_recompilation, false, "optimizing hot functions asynchronously on a separate thread") DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation") -DEFINE_int(parallel_recompilation_queue_length, 2, +DEFINE_int(parallel_recompilation_queue_length, 3, "the length of the parallel compilation queue") +DEFINE_int(parallel_recompilation_delay, 0, + "artificial compilation delay in 
ms") +DEFINE_bool(omit_prototype_checks_for_leaf_maps, true, + "do not emit prototype checks if all prototypes have leaf maps, " + "deoptimize the optimized code if the layout of the maps changes.") // Experimental profiler changes. DEFINE_bool(experimental_profiler, true, "enable all profiler experiments") @@ -239,8 +264,6 @@ DEFINE_bool(self_optimization, false, DEFINE_bool(direct_self_opt, false, "call recompile stub directly when self-optimizing") DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed") -DEFINE_bool(count_based_interrupts, false, - "trigger profiler ticks based on counting instead of timing") DEFINE_bool(interrupt_at_exit, false, "insert an interrupt check at function exit") DEFINE_bool(weighted_back_edges, false, @@ -256,7 +279,6 @@ DEFINE_implication(experimental_profiler, watch_ic_patching) DEFINE_implication(experimental_profiler, self_optimization) // Not implying direct_self_opt here because it seems to be a bad idea. DEFINE_implication(experimental_profiler, retry_self_opt) -DEFINE_implication(experimental_profiler, count_based_interrupts) DEFINE_implication(experimental_profiler, interrupt_at_exit) DEFINE_implication(experimental_profiler, weighted_back_edges) @@ -293,8 +315,12 @@ DEFINE_bool(enable_movw_movt, false, "instruction pairs (ARM only)") DEFINE_bool(enable_unaligned_accesses, true, "enable unaligned accesses for ARMv7 (ARM only)") +DEFINE_bool(enable_32dregs, true, + "enable use of d16-d31 registers on ARM - this requires VFP3") DEFINE_bool(enable_fpu, true, "enable use of MIPS FPU instructions if available (MIPS only)") +DEFINE_bool(enable_vldr_imm, false, + "enable use of constant pools for double immediate (ARM only)") // bootstrapper.cc DEFINE_string(expose_natives_as, NULL, "expose natives in global object") @@ -327,7 +353,9 @@ DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics") DEFINE_bool(opt, true, "use adaptive optimizations") DEFINE_bool(always_opt, false, "always try to optimize functions") DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt") -DEFINE_bool(trace_deopt, false, "trace deoptimization") +DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization") +DEFINE_bool(trace_stub_failures, false, + "trace deoptimization of generated code stubs") // compiler.cc DEFINE_int(min_preparse_length, 1024, @@ -344,6 +372,14 @@ DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions") // debug.cc DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response") +DEFINE_bool(trace_js_array_abuse, false, + "trace out-of-bounds accesses to JS arrays") +DEFINE_bool(trace_external_array_abuse, false, + "trace out-of-bounds-accesses to external arrays") +DEFINE_bool(trace_array_abuse, false, + "trace out-of-bounds accesses to all arrays") +DEFINE_implication(trace_array_abuse, trace_js_array_abuse) +DEFINE_implication(trace_array_abuse, trace_external_array_abuse) DEFINE_bool(debugger_auto_break, true, "automatically set the debug break flag when debugger commands are " "in the queue") @@ -388,14 +424,27 @@ DEFINE_bool(trace_external_memory, false, "it is adjusted.") DEFINE_bool(collect_maps, true, "garbage collect maps from which no objects can be reached") +DEFINE_bool(weak_embedded_maps_in_optimized_code, true, + "make maps embedded in optimized code weak") DEFINE_bool(flush_code, true, - "flush code that we expect not to use again before full gc") + "flush code that we expect not to use again (during full gc)") 
+DEFINE_bool(flush_code_incrementally, true, + "flush code that we expect not to use again (incrementally)") +DEFINE_bool(age_code, true, + "track un-executed functions to age code and flush only " + "old code") DEFINE_bool(incremental_marking, true, "use incremental marking") DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps") DEFINE_bool(trace_incremental_marking, false, "trace progress of the incremental marking") DEFINE_bool(track_gc_object_stats, false, "track object counts and memory usage") +DEFINE_bool(parallel_sweeping, true, "enable parallel sweeping") +DEFINE_bool(concurrent_sweeping, false, "enable concurrent sweeping") +DEFINE_int(sweeper_threads, 0, + "number of parallel and concurrent sweeping threads") +DEFINE_bool(parallel_marking, false, "enable parallel marking") +DEFINE_int(marking_threads, 0, "number of parallel marking threads") #ifdef VERIFY_HEAP DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC") #endif @@ -406,12 +455,6 @@ DEFINE_bool(use_idle_notification, true, // ic.cc DEFINE_bool(use_ic, true, "use inline caching") -#ifdef LIVE_OBJECT_LIST -// liveobjectlist.cc -DEFINE_string(lol_workdir, NULL, "path for lol temp files") -DEFINE_bool(verify_lol, false, "perform debugging verification for lol") -#endif - // macro-assembler-ia32.cc DEFINE_bool(native_code_counters, false, "generate extra code for manipulating stats counters") @@ -429,6 +472,9 @@ DEFINE_bool(incremental_code_compaction, true, DEFINE_bool(cleanup_code_caches_at_gc, true, "Flush inline caches prior to mark compact collection and " "flush code caches in maps during mark compact cycle.") +DEFINE_bool(use_marking_progress_bar, true, + "Use a progress bar to scan large objects in increments when " + "incremental marking is active.") DEFINE_int(random_seed, 0, "Default seed for initializing random generator " "(0, the default, means to use system random).") @@ -642,12 +688,14 @@ DEFINE_bool(prof_lazy, false, DEFINE_bool(prof_browser_mode, true, "Used with --prof, turns on browser-compatible mode for profiling.") DEFINE_bool(log_regexp, false, "Log regular expression execution.") -DEFINE_bool(sliding_state_window, false, - "Update sliding state window counters.") DEFINE_string(logfile, "v8.log", "Specify the name of the log file.") DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.") DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__", "Specify the name of the file for fake gc mmap used in ll_prof") +DEFINE_bool(log_internal_timer_events, false, "Time internal events.") +DEFINE_bool(log_timer_events, false, + "Time events including external callbacks.") +DEFINE_implication(log_timer_events, log_internal_timer_events) // // Disassembler only flags diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc index bca0eff58d..ff725adcf4 100644 --- a/deps/v8/src/flags.cc +++ b/deps/v8/src/flags.cc @@ -542,6 +542,7 @@ void FlagList::PrintHelp() { void FlagList::EnforceFlagImplications() { #define FLAG_MODE_DEFINE_IMPLICATIONS #include "flag-definitions.h" +#undef FLAG_MODE_DEFINE_IMPLICATIONS } } } // namespace v8::internal diff --git a/deps/v8/src/frames-inl.h b/deps/v8/src/frames-inl.h index 27a526cef1..83b37a5fe5 100644 --- a/deps/v8/src/frames-inl.h +++ b/deps/v8/src/frames-inl.h @@ -235,6 +235,11 @@ inline Object* JavaScriptFrame::function() const { } +inline StubFrame::StubFrame(StackFrameIterator* iterator) + : StandardFrame(iterator) { +} + + inline OptimizedFrame::OptimizedFrame(StackFrameIterator* iterator) : JavaScriptFrame(iterator) { } @@ -250,6 
+255,11 @@ inline InternalFrame::InternalFrame(StackFrameIterator* iterator) } +inline StubFailureTrampolineFrame::StubFailureTrampolineFrame( + StackFrameIterator* iterator) : StandardFrame(iterator) { +} + + inline ConstructFrame::ConstructFrame(StackFrameIterator* iterator) : InternalFrame(iterator) { } diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc index 18dc54164a..ed407e796b 100644 --- a/deps/v8/src/frames.cc +++ b/deps/v8/src/frames.cc @@ -88,14 +88,6 @@ class StackHandlerIterator BASE_EMBEDDED { #define INITIALIZE_SINGLETON(type, field) field##_(this), -StackFrameIterator::StackFrameIterator() - : isolate_(Isolate::Current()), - STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON) - frame_(NULL), handler_(NULL), - thread_(isolate_->thread_local_top()), - fp_(NULL), sp_(NULL), advance_(&StackFrameIterator::AdvanceWithHandler) { - Reset(); -} StackFrameIterator::StackFrameIterator(Isolate* isolate) : isolate_(isolate), STACK_FRAME_TYPE_LIST(INITIALIZE_SINGLETON) @@ -210,11 +202,6 @@ StackFrame* StackFrameIterator::SingletonFor(StackFrame::Type type) { // ------------------------------------------------------------------------- -StackTraceFrameIterator::StackTraceFrameIterator() { - if (!done() && !IsValidFrame()) Advance(); -} - - StackTraceFrameIterator::StackTraceFrameIterator(Isolate* isolate) : JavaScriptFrameIterator(isolate) { if (!done() && !IsValidFrame()) Advance(); @@ -484,7 +471,7 @@ Address StackFrame::UnpaddedFP() const { Code* EntryFrame::unchecked_code() const { - return HEAP->raw_unchecked_js_entry_code(); + return HEAP->js_entry_code(); } @@ -507,7 +494,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const { Code* EntryConstructFrame::unchecked_code() const { - return HEAP->raw_unchecked_js_construct_entry_code(); + return HEAP->js_construct_entry_code(); } @@ -617,13 +604,7 @@ bool StandardFrame::IsExpressionInsideHandler(int n) const { } -void OptimizedFrame::Iterate(ObjectVisitor* v) const { -#ifdef DEBUG - // Make sure that optimized frames do not contain any stack handlers. - StackHandlerIterator it(this, top_handler()); - ASSERT(it.done()); -#endif - +void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const { // Make sure that we're not doing "safe" stack frame iteration. We cannot // possibly find pointers in optimized frames in that state. ASSERT(!SafeStackFrameIterator::is_active(isolate())); @@ -649,7 +630,9 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const { // Skip saved double registers. if (safepoint_entry.has_doubles()) { - parameters_base += DoubleRegister::kNumAllocatableRegisters * + // Number of doubles not known at snapshot time. + ASSERT(!Serializer::enabled()); + parameters_base += DoubleRegister::NumAllocatableRegisters() * kDoubleSize / kPointerSize; } @@ -681,14 +664,51 @@ void OptimizedFrame::Iterate(ObjectVisitor* v) const { } } - // Visit the context and the function. + // Visit the return address in the callee and incoming arguments. + IteratePc(v, pc_address(), code); + + // Visit the context in stub frame and JavaScript frame. + // Visit the function in JavaScript frame. Object** fixed_base = &Memory::Object_at( - fp() + JavaScriptFrameConstants::kFunctionOffset); + fp() + StandardFrameConstants::kMarkerOffset); Object** fixed_limit = &Memory::Object_at(fp()); v->VisitPointers(fixed_base, fixed_limit); +} - // Visit the return address in the callee and incoming arguments. 
- IteratePc(v, pc_address(), code); + +void StubFrame::Iterate(ObjectVisitor* v) const { + IterateCompiledFrame(v); +} + + +Code* StubFrame::unchecked_code() const { + return static_cast<Code*>(isolate()->heap()->FindCodeObject(pc())); +} + + +Address StubFrame::GetCallerStackPointer() const { + return fp() + ExitFrameConstants::kCallerSPDisplacement; +} + + +int StubFrame::GetNumberOfIncomingArguments() const { + return 0; +} + + +void OptimizedFrame::Iterate(ObjectVisitor* v) const { +#ifdef DEBUG + // Make sure that optimized frames do not contain any stack handlers. + StackHandlerIterator it(this, top_handler()); + ASSERT(it.done()); +#endif + + IterateCompiledFrame(v); +} + + +void JavaScriptFrame::SetParameterValue(int index, Object* value) const { + Memory::Object_at(GetParameterSlot(index)) = value; } @@ -751,13 +771,14 @@ void JavaScriptFrame::Summarize(List<FrameSummary>* functions) { } -void JavaScriptFrame::PrintTop(FILE* file, +void JavaScriptFrame::PrintTop(Isolate* isolate, + FILE* file, bool print_args, bool print_line_number) { // constructor calls - HandleScope scope; + HandleScope scope(isolate); AssertNoAllocation no_allocation; - JavaScriptFrameIterator it; + JavaScriptFrameIterator it(isolate); while (!it.done()) { if (it.frame()->is_java_script()) { JavaScriptFrame* frame = it.frame(); @@ -1052,7 +1073,7 @@ void StackFrame::PrintIndex(StringStream* accumulator, void JavaScriptFrame::Print(StringStream* accumulator, PrintMode mode, int index) const { - HandleScope scope; + HandleScope scope(isolate()); Object* receiver = this->receiver(); Object* function = this->function(); @@ -1066,7 +1087,7 @@ void JavaScriptFrame::Print(StringStream* accumulator, // doesn't contain scope info, scope_info will return 0 for the number of // parameters, stack local variables, context local variables, stack slots, // or context slots. 
- Handle<ScopeInfo> scope_info(ScopeInfo::Empty()); + Handle<ScopeInfo> scope_info(ScopeInfo::Empty(isolate())); if (function->IsJSFunction()) { Handle<SharedFunctionInfo> shared(JSFunction::cast(function)->shared()); @@ -1271,6 +1292,42 @@ void InternalFrame::Iterate(ObjectVisitor* v) const { } +void StubFailureTrampolineFrame::Iterate(ObjectVisitor* v) const { + Object** base = &Memory::Object_at(sp()); + Object** limit = &Memory::Object_at(fp() + + kFirstRegisterParameterFrameOffset); + v->VisitPointers(base, limit); + base = &Memory::Object_at(fp() + StandardFrameConstants::kMarkerOffset); + const int offset = StandardFrameConstants::kContextOffset; + limit = &Memory::Object_at(fp() + offset) + 1; + v->VisitPointers(base, limit); + IteratePc(v, pc_address(), LookupCode()); +} + + +Address StubFailureTrampolineFrame::GetCallerStackPointer() const { + return fp() + StandardFrameConstants::kCallerSPOffset; +} + + +Code* StubFailureTrampolineFrame::unchecked_code() const { + int i = 0; + for (; i <= StubFailureTrampolineStub::kMaxExtraExpressionStackCount; ++i) { + Code* trampoline; + StubFailureTrampolineStub(i).FindCodeInCache(&trampoline, isolate()); + ASSERT(trampoline != NULL); + Address current_pc = pc(); + Address code_start = trampoline->instruction_start(); + Address code_end = code_start + trampoline->instruction_size(); + if (code_start <= current_pc && current_pc < code_end) { + return trampoline; + } + } + UNREACHABLE(); + return NULL; +} + + // ------------------------------------------------------------------------- @@ -1432,9 +1489,9 @@ static StackFrame* AllocateFrameCopy(StackFrame* frame, Zone* zone) { return NULL; } -Vector<StackFrame*> CreateStackMap(Zone* zone) { +Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone) { ZoneList<StackFrame*> list(10, zone); - for (StackFrameIterator it; !it.done(); it.Advance()) { + for (StackFrameIterator it(isolate); !it.done(); it.Advance()) { StackFrame* frame = AllocateFrameCopy(it.frame(), zone); list.Add(frame, zone); } diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h index 30f7e1f00e..ca0d5bec9b 100644 --- a/deps/v8/src/frames.h +++ b/deps/v8/src/frames.h @@ -130,15 +130,33 @@ class StackHandler BASE_EMBEDDED { }; -#define STACK_FRAME_TYPE_LIST(V) \ - V(ENTRY, EntryFrame) \ - V(ENTRY_CONSTRUCT, EntryConstructFrame) \ - V(EXIT, ExitFrame) \ - V(JAVA_SCRIPT, JavaScriptFrame) \ - V(OPTIMIZED, OptimizedFrame) \ - V(INTERNAL, InternalFrame) \ - V(CONSTRUCT, ConstructFrame) \ - V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) +#define STACK_FRAME_TYPE_LIST(V) \ + V(ENTRY, EntryFrame) \ + V(ENTRY_CONSTRUCT, EntryConstructFrame) \ + V(EXIT, ExitFrame) \ + V(JAVA_SCRIPT, JavaScriptFrame) \ + V(OPTIMIZED, OptimizedFrame) \ + V(STUB, StubFrame) \ + V(STUB_FAILURE_TRAMPOLINE, StubFailureTrampolineFrame) \ + V(INTERNAL, InternalFrame) \ + V(CONSTRUCT, ConstructFrame) \ + V(ARGUMENTS_ADAPTOR, ArgumentsAdaptorFrame) + + +class StandardFrameConstants : public AllStatic { + public: + // Fixed part of the frame consists of return address, caller fp, + // context and function. + // StandardFrame::IterateExpressions assumes that kContextOffset is the last + // object pointer. 
+ static const int kFixedFrameSize = 4 * kPointerSize; + static const int kExpressionsOffset = -3 * kPointerSize; + static const int kMarkerOffset = -2 * kPointerSize; + static const int kContextOffset = -1 * kPointerSize; + static const int kCallerFPOffset = 0 * kPointerSize; + static const int kCallerPCOffset = +1 * kPointerSize; + static const int kCallerSPOffset = +2 * kPointerSize; +}; // Abstract base class for all stack frames. @@ -193,6 +211,9 @@ class StackFrame BASE_EMBEDDED { bool is_optimized() const { return type() == OPTIMIZED; } bool is_arguments_adaptor() const { return type() == ARGUMENTS_ADAPTOR; } bool is_internal() const { return type() == INTERNAL; } + bool is_stub_failure_trampoline() const { + return type() == STUB_FAILURE_TRAMPOLINE; + } bool is_construct() const { return type() == CONSTRUCT; } virtual bool is_standard() const { return false; } @@ -263,12 +284,12 @@ class StackFrame BASE_EMBEDDED { PrintMode mode, int index) const { } + Isolate* isolate() const { return isolate_; } + protected: inline explicit StackFrame(StackFrameIterator* iterator); virtual ~StackFrame() { } - Isolate* isolate() const { return isolate_; } - // Compute the stack pointer for the calling frame. virtual Address GetCallerStackPointer() const = 0; @@ -448,6 +469,9 @@ class StandardFrame: public StackFrame { // construct frame. static inline bool IsConstructFrame(Address fp); + // Used by OptimizedFrames and StubFrames. + void IterateCompiledFrame(ObjectVisitor* v) const; + private: friend class StackFrame; friend class StackFrameIterator; @@ -461,7 +485,7 @@ class FrameSummary BASE_EMBEDDED { Code* code, int offset, bool is_constructor) - : receiver_(receiver), + : receiver_(receiver, function->GetIsolate()), function_(function), code_(code), offset_(offset), @@ -500,6 +524,9 @@ class JavaScriptFrame: public StandardFrame { return GetNumberOfIncomingArguments(); } + // Debugger access. + void SetParameterValue(int index, Object* value) const; + // Check if this frame is a constructor frame invoked through 'new'. bool IsConstructor() const; @@ -534,7 +561,10 @@ class JavaScriptFrame: public StandardFrame { return static_cast<JavaScriptFrame*>(frame); } - static void PrintTop(FILE* file, bool print_args, bool print_line_number); + static void PrintTop(Isolate* isolate, + FILE* file, + bool print_args, + bool print_line_number); protected: inline explicit JavaScriptFrame(StackFrameIterator* iterator); @@ -555,6 +585,27 @@ class JavaScriptFrame: public StandardFrame { }; +class StubFrame : public StandardFrame { + public: + virtual Type type() const { return STUB; } + + // GC support. + virtual void Iterate(ObjectVisitor* v) const; + + // Determine the code for the frame. + virtual Code* unchecked_code() const; + + protected: + inline explicit StubFrame(StackFrameIterator* iterator); + + virtual Address GetCallerStackPointer() const; + + virtual int GetNumberOfIncomingArguments() const; + + friend class StackFrameIterator; +}; + + class OptimizedFrame : public JavaScriptFrame { public: virtual Type type() const { return OPTIMIZED; } @@ -640,6 +691,39 @@ class InternalFrame: public StandardFrame { }; +class StubFailureTrampolineFrame: public StandardFrame { + public: + // sizeof(Arguments) - sizeof(Arguments*) is 3 * kPointerSize), but the + // presubmit script complains about using sizeof() on a type. 
+ static const int kFirstRegisterParameterFrameOffset = + StandardFrameConstants::kMarkerOffset - 3 * kPointerSize; + + static const int kCallerStackParameterCountFrameOffset = + StandardFrameConstants::kMarkerOffset - 2 * kPointerSize; + + virtual Type type() const { return STUB_FAILURE_TRAMPOLINE; } + + // Get the code associated with this frame. + // This method could be called during marking phase of GC. + virtual Code* unchecked_code() const; + + virtual void Iterate(ObjectVisitor* v) const; + + // Architecture-specific register description. + static Register fp_register(); + static Register context_register(); + + protected: + inline explicit StubFailureTrampolineFrame( + StackFrameIterator* iterator); + + virtual Address GetCallerStackPointer() const; + + private: + friend class StackFrameIterator; +}; + + // Construct frames are special trampoline frames introduced to handle // function invocations through 'new'. class ConstructFrame: public InternalFrame { @@ -661,10 +745,6 @@ class ConstructFrame: public InternalFrame { class StackFrameIterator BASE_EMBEDDED { public: - // An iterator that iterates over the current thread's stack, - // and uses current isolate. - StackFrameIterator(); - // An iterator that iterates over the isolate's current thread's stack, explicit StackFrameIterator(Isolate* isolate); @@ -724,8 +804,6 @@ class StackFrameIterator BASE_EMBEDDED { template<typename Iterator> class JavaScriptFrameIteratorTemp BASE_EMBEDDED { public: - JavaScriptFrameIteratorTemp() { if (!done()) Advance(); } - inline explicit JavaScriptFrameIteratorTemp(Isolate* isolate); inline JavaScriptFrameIteratorTemp(Isolate* isolate, ThreadLocalTop* top); @@ -884,6 +962,8 @@ class SafeStackTraceFrameIterator: public SafeJavaScriptFrameIterator { class StackFrameLocator BASE_EMBEDDED { public: + explicit StackFrameLocator(Isolate* isolate) : iterator_(isolate) {} + // Find the nth JavaScript frame on the stack. The caller must // guarantee that such a frame exists. JavaScriptFrame* FindJavaScriptFrame(int n); @@ -895,7 +975,7 @@ class StackFrameLocator BASE_EMBEDDED { // Reads all frames on the current stack and copies them into the current // zone memory. -Vector<StackFrame*> CreateStackMap(Zone* zone); +Vector<StackFrame*> CreateStackMap(Isolate* isolate, Zone* zone); } } // namespace v8::internal diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc index 9592e0afa2..cb6f228a17 100644 --- a/deps/v8/src/full-codegen.cc +++ b/deps/v8/src/full-codegen.cc @@ -86,6 +86,10 @@ void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) { } +void BreakableStatementChecker::VisitModuleStatement(ModuleStatement* stmt) { +} + + void BreakableStatementChecker::VisitBlock(Block* stmt) { } @@ -303,6 +307,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { #ifdef ENABLE_GDB_JIT_INTERFACE masm.positions_recorder()->StartGDBJITLineInfoRecording(); #endif + LOG_CODE_EVENT(isolate, + CodeStartLinePosInfoRecordEvent(masm.positions_recorder())); FullCodeGenerator cgen(&masm, info); cgen.Generate(); @@ -330,6 +336,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { code->set_allow_osr_at_loop_nesting_level(0); code->set_profiler_ticks(0); code->set_stack_check_table_offset(table_offset); + code->set_stack_check_patched_for_osr(false); CodeGenerator::PrintCode(code, info); info->SetCode(code); // May be an empty handle. 
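Stepping back to the frames.{h,cc} hunks above: the parameterless StackFrameIterator, StackTraceFrameIterator and JavaScriptFrameIteratorTemp constructors are removed, so every stack walk now names its Isolate explicitly instead of falling back to Isolate::Current(). A hypothetical helper (not part of the patch; it assumes src/frames.h is included) showing the caller-side effect:

    static int CountJavaScriptFrames(v8::internal::Isolate* isolate) {
      int frames = 0;
      for (v8::internal::JavaScriptFrameIterator it(isolate);
           !it.done(); it.Advance()) {
        frames++;  // it.frame() yields the current JavaScriptFrame* when needed.
      }
      return frames;
    }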
#ifdef ENABLE_GDB_JIT_INTERFACE @@ -340,6 +347,11 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) { GDBJIT(RegisterDetailedLineInfo(*code, lineinfo)); } #endif + if (!code.is_null()) { + void* line_info = + masm.positions_recorder()->DetachJITHandlerData(); + LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info)); + } return !code.is_null(); } @@ -394,6 +406,7 @@ void FullCodeGenerator::Initialize() { !Snapshot::HaveASnapshotToStartFrom(); masm_->set_emit_debug_code(generate_debug_code_); masm_->set_predictable_code_size(true); + InitializeAstVisitor(); } @@ -443,18 +456,8 @@ void FullCodeGenerator::PrepareForBailoutForId(BailoutId id, State state) { StateField::encode(state) | PcField::encode(masm_->pc_offset()); ASSERT(Smi::IsValid(pc_and_state)); BailoutEntry entry = { id, pc_and_state }; -#ifdef DEBUG - if (FLAG_enable_slow_asserts) { - // Assert that we don't have multiple bailout entries for the same node. - for (int i = 0; i < bailout_entries_.length(); i++) { - if (bailout_entries_.at(i).id == entry.id) { - AstPrinter printer; - PrintF("%s", printer.PrintProgram(info_->function())); - UNREACHABLE(); - } - } - } -#endif // DEBUG + ASSERT(!prepared_bailout_ids_.Contains(id.ToInt())); + prepared_bailout_ids_.Add(id.ToInt(), zone()); bailout_entries_.Add(entry, zone()); } @@ -466,9 +469,8 @@ void FullCodeGenerator::RecordTypeFeedbackCell( } -void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) { - // The pc offset does not need to be encoded and packed together with a - // state. +void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) { + // The pc offset does not need to be encoded and packed together with a state. ASSERT(masm_->pc_offset() > 0); BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) }; stack_checks_.Add(entry, zone()); @@ -582,16 +584,137 @@ void FullCodeGenerator::DoTest(const TestContext* context) { } +void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) { + ASSERT(scope_->is_global_scope()); + + for (int i = 0; i < declarations->length(); i++) { + ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration(); + if (declaration != NULL) { + ModuleLiteral* module = declaration->module()->AsModuleLiteral(); + if (module != NULL) { + Comment cmnt(masm_, "[ Link nested modules"); + Scope* scope = module->body()->scope(); + Interface* interface = scope->interface(); + ASSERT(interface->IsModule() && interface->IsFrozen()); + + interface->Allocate(scope->module_var()->index()); + + // Set up module context. + ASSERT(scope->interface()->Index() >= 0); + __ Push(Smi::FromInt(scope->interface()->Index())); + __ Push(scope->GetScopeInfo()); + __ CallRuntime(Runtime::kPushModuleContext, 2); + StoreToFrameField(StandardFrameConstants::kContextOffset, + context_register()); + + AllocateModules(scope->declarations()); + + // Pop module context. + LoadContextField(context_register(), Context::PREVIOUS_INDEX); + // Update local stack frame context field. + StoreToFrameField(StandardFrameConstants::kContextOffset, + context_register()); + } + } + } +} + + +// Modules have their own local scope, represented by their own context. +// Module instance objects have an accessor for every export that forwards +// access to the respective slot from the module's context. (Exports that are +// modules themselves, however, are simple data properties.) +// +// All modules have a _hosting_ scope/context, which (currently) is the +// (innermost) enclosing global scope. 
To deal with recursion, nested modules +// are hosted by the same scope as global ones. +// +// For every (global or nested) module literal, the hosting context has an +// internal slot that points directly to the respective module context. This +// enables quick access to (statically resolved) module members by 2-dimensional +// access through the hosting context. For example, +// +// module A { +// let x; +// module B { let y; } +// } +// module C { let z; } +// +// allocates contexts as follows: +// +// [header| .A | .B | .C | A | C ] (global) +// | | | +// | | +-- [header| z ] (module) +// | | +// | +------- [header| y ] (module) +// | +// +------------ [header| x | B ] (module) +// +// Here, .A, .B, .C are the internal slots pointing to the hosted module +// contexts, whereas A, B, C hold the actual instance objects (note that every +// module context also points to the respective instance object through its +// extension slot in the header). +// +// To deal with arbitrary recursion and aliases between modules, +// they are created and initialized in several stages. Each stage applies to +// all modules in the hosting global scope, including nested ones. +// +// 1. Allocate: for each module _literal_, allocate the module contexts and +// respective instance object and wire them up. This happens in the +// PushModuleContext runtime function, as generated by AllocateModules +// (invoked by VisitDeclarations in the hosting scope). +// +// 2. Bind: for each module _declaration_ (i.e. literals as well as aliases), +// assign the respective instance object to respective local variables. This +// happens in VisitModuleDeclaration, and uses the instance objects created +// in the previous stage. +// For each module _literal_, this phase also constructs a module descriptor +// for the next stage. This happens in VisitModuleLiteral. +// +// 3. Populate: invoke the DeclareModules runtime function to populate each +// _instance_ object with accessors for it exports. This is generated by +// DeclareModules (invoked by VisitDeclarations in the hosting scope again), +// and uses the descriptors generated in the previous stage. +// +// 4. Initialize: execute the module bodies (and other code) in sequence. This +// happens by the separate statements generated for module bodies. To reenter +// the module scopes properly, the parser inserted ModuleStatements. + void FullCodeGenerator::VisitDeclarations( ZoneList<Declaration*>* declarations) { + Handle<FixedArray> saved_modules = modules_; + int saved_module_index = module_index_; ZoneList<Handle<Object> >* saved_globals = globals_; ZoneList<Handle<Object> > inner_globals(10, zone()); globals_ = &inner_globals; + if (scope_->num_modules() != 0) { + // This is a scope hosting modules. Allocate a descriptor array to pass + // to the runtime for initialization. + Comment cmnt(masm_, "[ Allocate modules"); + ASSERT(scope_->is_global_scope()); + modules_ = + isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED); + module_index_ = 0; + + // Generate code for allocating all modules, including nested ones. + // The allocated contexts are stored in internal variables in this scope. + AllocateModules(declarations); + } + AstVisitor::VisitDeclarations(declarations); + + if (scope_->num_modules() != 0) { + // Initialize modules from descriptor array. 
+ ASSERT(module_index_ == modules_->length()); + DeclareModules(modules_); + modules_ = saved_modules; + module_index_ = saved_module_index; + } + if (!globals_->is_empty()) { // Invoke the platform-dependent code generator to do the actual - // declaration the global functions and variables. + // declaration of the global functions and variables. Handle<FixedArray> array = isolate()->factory()->NewFixedArray(globals_->length(), TENURED); for (int i = 0; i < globals_->length(); ++i) @@ -604,19 +727,23 @@ void FullCodeGenerator::VisitDeclarations( void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) { - // Allocate a module context statically. Block* block = module->body(); Scope* saved_scope = scope(); scope_ = block->scope(); - Interface* interface = module->interface(); - Handle<JSModule> instance = interface->Instance(); + Interface* interface = scope_->interface(); Comment cmnt(masm_, "[ ModuleLiteral"); SetStatementPosition(block); + ASSERT(!modules_.is_null()); + ASSERT(module_index_ < modules_->length()); + int index = module_index_++; + // Set up module context. - __ Push(instance); - __ CallRuntime(Runtime::kPushModuleContext, 1); + ASSERT(interface->Index() >= 0); + __ Push(Smi::FromInt(interface->Index())); + __ Push(Smi::FromInt(0)); + __ CallRuntime(Runtime::kPushModuleContext, 2); StoreToFrameField(StandardFrameConstants::kContextOffset, context_register()); { @@ -624,6 +751,11 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) { VisitDeclarations(scope_->declarations()); } + // Populate the module description. + Handle<ModuleInfo> description = + ModuleInfo::Create(isolate(), interface, scope_); + modules_->set(index, *description); + scope_ = saved_scope; // Pop module context. LoadContextField(context_register(), Context::PREVIOUS_INDEX); @@ -644,8 +776,20 @@ void FullCodeGenerator::VisitModulePath(ModulePath* module) { } -void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) { - // TODO(rossberg) +void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) { + // TODO(rossberg): dummy allocation for now. + Scope* scope = module->body()->scope(); + Interface* interface = scope_->interface(); + + ASSERT(interface->IsModule() && interface->IsFrozen()); + ASSERT(!modules_.is_null()); + ASSERT(module_index_ < modules_->length()); + interface->Allocate(scope->module_var()->index()); + int index = module_index_++; + + Handle<ModuleInfo> description = + ModuleInfo::Create(isolate(), interface, scope_); + modules_->set(index, *description); } @@ -904,37 +1048,28 @@ void FullCodeGenerator::VisitBlock(Block* stmt) { // Push a block context when entering a block with block scoped variables. if (stmt->scope() != NULL) { scope_ = stmt->scope(); - if (scope_->is_module_scope()) { - // If this block is a module body, then we have already allocated and - // initialized the declarations earlier. Just push the context. 
- ASSERT(!scope_->interface()->Instance().is_null()); - __ Push(scope_->interface()->Instance()); - __ CallRuntime(Runtime::kPushModuleContext, 1); - StoreToFrameField( - StandardFrameConstants::kContextOffset, context_register()); - } else { - { Comment cmnt(masm_, "[ Extend block context"); - Handle<ScopeInfo> scope_info = scope_->GetScopeInfo(); - int heap_slots = - scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS; - __ Push(scope_info); - PushFunctionArgumentForContextAllocation(); - if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) { - FastNewBlockContextStub stub(heap_slots); - __ CallStub(&stub); - } else { - __ CallRuntime(Runtime::kPushBlockContext, 2); - } - - // Replace the context stored in the frame. - StoreToFrameField(StandardFrameConstants::kContextOffset, - context_register()); - } - { Comment cmnt(masm_, "[ Declarations"); - VisitDeclarations(scope_->declarations()); + ASSERT(!scope_->is_module_scope()); + { Comment cmnt(masm_, "[ Extend block context"); + Handle<ScopeInfo> scope_info = scope_->GetScopeInfo(); + int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS; + __ Push(scope_info); + PushFunctionArgumentForContextAllocation(); + if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) { + FastNewBlockContextStub stub(heap_slots); + __ CallStub(&stub); + } else { + __ CallRuntime(Runtime::kPushBlockContext, 2); } + + // Replace the context stored in the frame. + StoreToFrameField(StandardFrameConstants::kContextOffset, + context_register()); + } + { Comment cmnt(masm_, "[ Declarations"); + VisitDeclarations(scope_->declarations()); } } + PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); VisitStatements(stmt->statements()); scope_ = saved_scope; @@ -951,6 +1086,26 @@ void FullCodeGenerator::VisitBlock(Block* stmt) { } +void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) { + Comment cmnt(masm_, "[ Module context"); + + __ Push(Smi::FromInt(stmt->proxy()->interface()->Index())); + __ Push(Smi::FromInt(0)); + __ CallRuntime(Runtime::kPushModuleContext, 2); + StoreToFrameField( + StandardFrameConstants::kContextOffset, context_register()); + + Scope* saved_scope = scope_; + scope_ = stmt->body()->scope(); + VisitStatements(stmt->body()->statements()); + scope_ = saved_scope; + LoadContextField(context_register(), Context::PREVIOUS_INDEX); + // Update local stack frame context field. + StoreToFrameField(StandardFrameConstants::kContextOffset, + context_register()); +} + + void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) { Comment cmnt(masm_, "[ ExpressionStatement"); SetStatementPosition(stmt); @@ -1111,7 +1266,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) { // Check stack before looping. PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS); __ bind(&stack_check); - EmitStackCheck(stmt, &body); + EmitBackEdgeBookkeeping(stmt, &body); __ jmp(&body); PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); @@ -1140,7 +1295,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) { SetStatementPosition(stmt); // Check stack before looping. - EmitStackCheck(stmt, &body); + EmitBackEdgeBookkeeping(stmt, &body); __ bind(&test); VisitForControl(stmt->cond(), @@ -1186,7 +1341,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) { SetStatementPosition(stmt); // Check stack before looping. 
- EmitStackCheck(stmt, &body); + EmitBackEdgeBookkeeping(stmt, &body); __ bind(&test); if (stmt->cond() != NULL) { diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h index 89b51f9582..c4f3fcc722 100644 --- a/deps/v8/src/full-codegen.h +++ b/deps/v8/src/full-codegen.h @@ -35,6 +35,7 @@ #include "code-stubs.h" #include "codegen.h" #include "compiler.h" +#include "data-flow.h" namespace v8 { namespace internal { @@ -48,7 +49,9 @@ class JumpPatchSite; // debugger to piggybag on. class BreakableStatementChecker: public AstVisitor { public: - BreakableStatementChecker() : is_breakable_(false) {} + BreakableStatementChecker() : is_breakable_(false) { + InitializeAstVisitor(); + } void Check(Statement* stmt); void Check(Expression* stmt); @@ -63,6 +66,7 @@ class BreakableStatementChecker: public AstVisitor { bool is_breakable_; + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); DISALLOW_COPY_AND_ASSIGN(BreakableStatementChecker); }; @@ -396,9 +400,15 @@ class FullCodeGenerator: public AstVisitor { void VisitInDuplicateContext(Expression* expr); void VisitDeclarations(ZoneList<Declaration*>* declarations); + void DeclareModules(Handle<FixedArray> descriptions); void DeclareGlobals(Handle<FixedArray> pairs); int DeclareGlobalsFlags(); + // Generate code to allocate all (including nested) modules and contexts. + // Because of recursive linking and the presence of module alias declarations, + // this has to be a separate pass _before_ populating or executing any module. + void AllocateModules(ZoneList<Declaration*>* declarations); + // Try to perform a comparison as a fast inlined literal compare if // the operands allow it. Returns true if the compare operations // has been matched and all code generated; false otherwise. @@ -442,14 +452,13 @@ class FullCodeGenerator: public AstVisitor { // neither a with nor a catch context. void EmitDebugCheckDeclarationContext(Variable* variable); - // Platform-specific code for checking the stack limit at the back edge of - // a loop. // This is meant to be called at loop back edges, |back_edge_target| is // the jump target of the back edge and is used to approximate the amount // of code inside the loop. - void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target); - // Record the OSR AST id corresponding to a stack check in the code. - void RecordStackCheck(BailoutId osr_ast_id); + void EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target); + // Record the OSR AST id corresponding to a back edge in the code. + void RecordBackEdge(BailoutId osr_ast_id); // Emit a table of stack check ids and pcs into the code stream. Return // the offset of the start of the table. unsigned EmitStackCheckTable(); @@ -804,8 +813,13 @@ class FullCodeGenerator: public AstVisitor { NestedStatement* nesting_stack_; int loop_depth_; ZoneList<Handle<Object> >* globals_; + Handle<FixedArray> modules_; + int module_index_; const ExpressionContext* context_; ZoneList<BailoutEntry> bailout_entries_; + GrowableBitVector prepared_bailout_ids_; + // TODO(svenpanne) Rename this to something like back_edges_ and rename + // related functions accordingly. 
ZoneList<BailoutEntry> stack_checks_; ZoneList<TypeFeedbackCellEntry> type_feedback_cells_; int ic_total_count_; @@ -816,6 +830,7 @@ class FullCodeGenerator: public AstVisitor { friend class NestedStatement; + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); DISALLOW_COPY_AND_ASSIGN(FullCodeGenerator); }; diff --git a/deps/v8/src/func-name-inferrer.cc b/deps/v8/src/func-name-inferrer.cc index 2dd0bbc15d..84d3bf06b8 100644 --- a/deps/v8/src/func-name-inferrer.cc +++ b/deps/v8/src/func-name-inferrer.cc @@ -55,14 +55,14 @@ void FuncNameInferrer::PushEnclosingName(Handle<String> name) { void FuncNameInferrer::PushLiteralName(Handle<String> name) { - if (IsOpen() && !isolate()->heap()->prototype_symbol()->Equals(*name)) { + if (IsOpen() && !isolate()->heap()->prototype_string()->Equals(*name)) { names_stack_.Add(Name(name, kLiteralName), zone()); } } void FuncNameInferrer::PushVariableName(Handle<String> name) { - if (IsOpen() && !isolate()->heap()->result_symbol()->Equals(*name)) { + if (IsOpen() && !isolate()->heap()->result_string()->Equals(*name)) { names_stack_.Add(Name(name, kVariableName), zone()); } } @@ -85,7 +85,7 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos, if (prev->length() > 0) { Factory* factory = isolate()->factory(); Handle<String> curr = factory->NewConsString( - factory->dot_symbol(), names_stack_.at(pos).name); + factory->dot_string(), names_stack_.at(pos).name); return MakeNameFromStackHelper(pos + 1, factory->NewConsString(prev, curr)); } else { diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc index c09ba4b476..14c00f1997 100644 --- a/deps/v8/src/global-handles.cc +++ b/deps/v8/src/global-handles.cc @@ -46,7 +46,7 @@ class GlobalHandles::Node { // State transition diagram: // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE } enum State { - FREE, + FREE = 0, NORMAL, // Normal global handle. WEAK, // Flagged as weak but not yet finalized. PENDING, // Has been recognized as only reachable by weak handles. @@ -59,51 +59,67 @@ class GlobalHandles::Node { return reinterpret_cast<Node*>(location); } - Node() {} + Node() { + ASSERT(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset); + ASSERT(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset); + STATIC_ASSERT(static_cast<int>(NodeState::kMask) == + Internals::kNodeStateMask); + STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue); + STATIC_ASSERT(NEAR_DEATH == Internals::kNodeStateIsNearDeathValue); + STATIC_ASSERT(static_cast<int>(IsIndependent::kShift) == + Internals::kNodeIsIndependentShift); + STATIC_ASSERT(static_cast<int>(IsPartiallyDependent::kShift) == + Internals::kNodeIsPartiallyDependentShift); + } #ifdef DEBUG ~Node() { // TODO(1428): if it's a weak handle we should have invoked its callback. // Zap the values for eager trapping. 
- object_ = NULL; + object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue); class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId; index_ = 0; - independent_ = false; - in_new_space_list_ = false; + set_independent(false); + set_partially_dependent(false); + set_in_new_space_list(false); parameter_or_next_free_.next_free = NULL; - callback_ = NULL; + near_death_callback_ = NULL; } #endif void Initialize(int index, Node** first_free) { index_ = static_cast<uint8_t>(index); ASSERT(static_cast<int>(index_) == index); - state_ = FREE; - in_new_space_list_ = false; + set_state(FREE); + set_in_new_space_list(false); parameter_or_next_free_.next_free = *first_free; *first_free = this; } void Acquire(Object* object, GlobalHandles* global_handles) { - ASSERT(state_ == FREE); + ASSERT(state() == FREE); object_ = object; class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId; - independent_ = false; - state_ = NORMAL; + set_independent(false); + set_partially_dependent(false); + set_state(NORMAL); parameter_or_next_free_.parameter = NULL; - callback_ = NULL; + near_death_callback_ = NULL; IncreaseBlockUses(global_handles); } void Release(GlobalHandles* global_handles) { - ASSERT(state_ != FREE); - if (IsWeakRetainer()) { - global_handles->number_of_weak_handles_--; - if (object_->IsJSGlobalObject()) { - global_handles->number_of_global_object_weak_handles_--; - } - } - state_ = FREE; + ASSERT(state() != FREE); + set_state(FREE); + // TODO(mstarzinger): Put behind debug flag once embedders are stabilized. + object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue); +#ifdef DEBUG + // Zap the values for eager trapping. + class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId; + set_independent(false); + set_partially_dependent(false); + near_death_callback_ = NULL; +#endif parameter_or_next_free_.next_free = global_handles->first_free_; global_handles->first_free_ = this; DecreaseBlockUses(global_handles); @@ -118,106 +134,129 @@ class GlobalHandles::Node { bool has_wrapper_class_id() const { return class_id_ != v8::HeapProfiler::kPersistentHandleNoClassId; } + uint16_t wrapper_class_id() const { return class_id_; } - void set_wrapper_class_id(uint16_t class_id) { - class_id_ = class_id; + + // State and flag accessors. + + State state() const { + return NodeState::decode(flags_); + } + void set_state(State state) { + flags_ = NodeState::update(flags_, state); } - // State accessors. + bool is_independent() { + return IsIndependent::decode(flags_); + } + void set_independent(bool v) { + flags_ = IsIndependent::update(flags_, v); + } + + bool is_partially_dependent() { + return IsPartiallyDependent::decode(flags_); + } + void set_partially_dependent(bool v) { + flags_ = IsPartiallyDependent::update(flags_, v); + } - State state() const { return state_; } + bool is_in_new_space_list() { + return IsInNewSpaceList::decode(flags_); + } + void set_in_new_space_list(bool v) { + flags_ = IsInNewSpaceList::update(flags_, v); + } bool IsNearDeath() const { // Check for PENDING to ensure correct answer when processing callbacks. 
- return state_ == PENDING || state_ == NEAR_DEATH; + return state() == PENDING || state() == NEAR_DEATH; } - bool IsWeak() const { return state_ == WEAK; } + bool IsWeak() const { return state() == WEAK; } - bool IsRetainer() const { return state_ != FREE; } + bool IsRetainer() const { return state() != FREE; } - bool IsStrongRetainer() const { return state_ == NORMAL; } + bool IsStrongRetainer() const { return state() == NORMAL; } bool IsWeakRetainer() const { - return state_ == WEAK || state_ == PENDING || state_ == NEAR_DEATH; + return state() == WEAK || state() == PENDING || state() == NEAR_DEATH; } void MarkPending() { - ASSERT(state_ == WEAK); - state_ = PENDING; + ASSERT(state() == WEAK); + set_state(PENDING); } // Independent flag accessors. void MarkIndependent() { - ASSERT(state_ != FREE); - independent_ = true; + ASSERT(state() != FREE); + set_independent(true); } - bool is_independent() const { return independent_; } - // In-new-space-list flag accessors. - void set_in_new_space_list(bool v) { in_new_space_list_ = v; } - bool is_in_new_space_list() const { return in_new_space_list_; } + void MarkPartiallyDependent(GlobalHandles* global_handles) { + ASSERT(state() != FREE); + if (global_handles->isolate()->heap()->InNewSpace(object_)) { + set_partially_dependent(true); + } + } + void clear_partially_dependent() { set_partially_dependent(false); } // Callback accessor. - WeakReferenceCallback callback() { return callback_; } + // TODO(svenpanne) Re-enable or nuke later. + // WeakReferenceCallback callback() { return callback_; } // Callback parameter accessors. void set_parameter(void* parameter) { - ASSERT(state_ != FREE); + ASSERT(state() != FREE); parameter_or_next_free_.parameter = parameter; } void* parameter() const { - ASSERT(state_ != FREE); + ASSERT(state() != FREE); return parameter_or_next_free_.parameter; } // Accessors for next free node in the free list. 
Node* next_free() { - ASSERT(state_ == FREE); + ASSERT(state() == FREE); return parameter_or_next_free_.next_free; } void set_next_free(Node* value) { - ASSERT(state_ == FREE); + ASSERT(state() == FREE); parameter_or_next_free_.next_free = value; } void MakeWeak(GlobalHandles* global_handles, void* parameter, - WeakReferenceCallback callback) { - ASSERT(state_ != FREE); - if (!IsWeakRetainer()) { - global_handles->number_of_weak_handles_++; - if (object_->IsJSGlobalObject()) { - global_handles->number_of_global_object_weak_handles_++; - } - } - state_ = WEAK; + WeakReferenceCallback weak_reference_callback, + NearDeathCallback near_death_callback) { + ASSERT(state() != FREE); + set_state(WEAK); set_parameter(parameter); - callback_ = callback; + if (weak_reference_callback != NULL) { + flags_ = IsWeakCallback::update(flags_, true); + near_death_callback_ = + reinterpret_cast<NearDeathCallback>(weak_reference_callback); + } else { + flags_ = IsWeakCallback::update(flags_, false); + near_death_callback_ = near_death_callback; + } } void ClearWeakness(GlobalHandles* global_handles) { - ASSERT(state_ != FREE); - if (IsWeakRetainer()) { - global_handles->number_of_weak_handles_--; - if (object_->IsJSGlobalObject()) { - global_handles->number_of_global_object_weak_handles_--; - } - } - state_ = NORMAL; + ASSERT(state() != FREE); + set_state(NORMAL); set_parameter(NULL); } bool PostGarbageCollectionProcessing(Isolate* isolate, GlobalHandles* global_handles) { - if (state_ != Node::PENDING) return false; - WeakReferenceCallback func = callback(); - if (func == NULL) { + if (state() != Node::PENDING) return false; + if (near_death_callback_ == NULL) { Release(global_handles); return false; } void* par = parameter(); - state_ = NEAR_DEATH; + set_state(NEAR_DEATH); set_parameter(NULL); v8::Persistent<v8::Object> object = ToApi<v8::Object>(handle()); @@ -230,11 +269,21 @@ class GlobalHandles::Node { ExternalTwoByteString::cast(object_)->resource() != NULL); // Leaving V8. VMState state(isolate, EXTERNAL); - func(object, par); + if (near_death_callback_ != NULL) { + if (IsWeakCallback::decode(flags_)) { + WeakReferenceCallback callback = + reinterpret_cast<WeakReferenceCallback>(near_death_callback_); + callback(object, par); + } else { + near_death_callback_(reinterpret_cast<v8::Isolate*>(isolate), + object, + par); + } + } } // Absence of explicit cleanup or revival of weak handle // in most of the cases would lead to memory leak. - ASSERT(state_ != NEAR_DEATH); + ASSERT(state() != NEAR_DEATH); return true; } @@ -256,14 +305,18 @@ class GlobalHandles::Node { // Index in the containing handle block. uint8_t index_; - // Need one more bit for MSVC as it treats enums as signed. - State state_ : 4; + // This stores three flags (independent, partially_dependent and + // in_new_space_list) and a State. + class NodeState: public BitField<State, 0, 4> {}; + class IsIndependent: public BitField<bool, 4, 1> {}; + class IsPartiallyDependent: public BitField<bool, 5, 1> {}; + class IsInNewSpaceList: public BitField<bool, 6, 1> {}; + class IsWeakCallback: public BitField<bool, 7, 1> {}; - bool independent_ : 1; - bool in_new_space_list_ : 1; + uint8_t flags_; - // Handle specific callback. - WeakReferenceCallback callback_; + // Handle specific callback - might be a weak reference in disguise. + NearDeathCallback near_death_callback_; // Provided data for callback. In FREE state, this is used for // the free list link. 
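The Node rewrite above replaces the separate bit-field members (state_ : 4, independent_ : 1, in_new_space_list_ : 1) with a single uint8_t flags_ that is read and written through BitField helpers such as NodeState and IsIndependent. A minimal, standalone re-implementation of that encode/decode pattern (illustration only; V8's real template lives in utils.h):

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for V8's BitField template.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t field) {
        return static_cast<T>((field & kMask) >> shift);
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
    };

    enum State { FREE = 0, NORMAL, WEAK, PENDING, NEAR_DEATH };
    typedef BitField<State, 0, 4> NodeState;      // bits 0..3: lifecycle state
    typedef BitField<bool, 4, 1>  IsIndependent;  // bit 4: independent flag

    int main() {
      uint32_t flags = 0;
      flags = NodeState::update(flags, WEAK);
      flags = IsIndependent::update(flags, true);
      std::printf("state=%d independent=%d\n",
                  static_cast<int>(NodeState::decode(flags)),
                  static_cast<int>(IsIndependent::decode(flags)));
      return 0;
    }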
@@ -382,8 +435,6 @@ class GlobalHandles::NodeIterator { GlobalHandles::GlobalHandles(Isolate* isolate) : isolate_(isolate), - number_of_weak_handles_(0), - number_of_global_object_weak_handles_(0), number_of_global_handles_(0), first_block_(NULL), first_used_block_(NULL), @@ -431,10 +482,15 @@ void GlobalHandles::Destroy(Object** location) { } -void GlobalHandles::MakeWeak(Object** location, void* parameter, - WeakReferenceCallback callback) { - ASSERT(callback != NULL); - Node::FromLocation(location)->MakeWeak(this, parameter, callback); +void GlobalHandles::MakeWeak(Object** location, + void* parameter, + WeakReferenceCallback weak_reference_callback, + NearDeathCallback near_death_callback) { + ASSERT(near_death_callback != NULL); + Node::FromLocation(location)->MakeWeak(this, + parameter, + weak_reference_callback, + near_death_callback); } @@ -448,6 +504,11 @@ void GlobalHandles::MarkIndependent(Object** location) { } +void GlobalHandles::MarkPartiallyDependent(Object** location) { + Node::FromLocation(location)->MarkPartiallyDependent(this); +} + + bool GlobalHandles::IsIndependent(Object** location) { return Node::FromLocation(location)->is_independent(); } @@ -463,14 +524,6 @@ bool GlobalHandles::IsWeak(Object** location) { } -void GlobalHandles::SetWrapperClassId(Object** location, uint16_t class_id) { - Node::FromLocation(location)->set_wrapper_class_id(class_id); -} - -uint16_t GlobalHandles::GetWrapperClassId(Object** location) { - return Node::FromLocation(location)->wrapper_class_id(); -} - void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) { for (NodeIterator it(this); !it.done(); it.Advance()) { if (it.node()->IsWeakRetainer()) v->VisitPointer(it.node()->location()); @@ -478,16 +531,6 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) { } -void GlobalHandles::IterateWeakRoots(WeakReferenceGuest f, - WeakReferenceCallback callback) { - for (NodeIterator it(this); !it.done(); it.Advance()) { - if (it.node()->IsWeak() && it.node()->callback() == callback) { - f(it.node()->object(), it.node()->parameter()); - } - } -} - - void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) { for (NodeIterator it(this); !it.done(); it.Advance()) { if (it.node()->IsWeak() && f(it.node()->location())) { @@ -501,8 +544,9 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) { for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; if (node->IsStrongRetainer() || - (node->IsWeakRetainer() && !node->is_independent())) { - v->VisitPointer(node->location()); + (node->IsWeakRetainer() && !node->is_independent() && + !node->is_partially_dependent())) { + v->VisitPointer(node->location()); } } } @@ -513,8 +557,8 @@ void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles( for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; ASSERT(node->is_in_new_space_list()); - if (node->is_independent() && node->IsWeak() && - f(isolate_->heap(), node->location())) { + if ((node->is_independent() || node->is_partially_dependent()) && + node->IsWeak() && f(isolate_->heap(), node->location())) { node->MarkPending(); } } @@ -525,15 +569,61 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) { for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; ASSERT(node->is_in_new_space_list()); - if (node->is_independent() && node->IsWeakRetainer()) { + if ((node->is_independent() || node->is_partially_dependent()) && + node->IsWeakRetainer()) { 
v->VisitPointer(node->location()); } } } +bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v, + WeakSlotCallbackWithHeap can_skip) { + int last = 0; + bool any_group_was_visited = false; + for (int i = 0; i < object_groups_.length(); i++) { + ObjectGroup* entry = object_groups_.at(i); + ASSERT(entry != NULL); + + Object*** objects = entry->objects_; + bool group_should_be_visited = false; + for (size_t j = 0; j < entry->length_; j++) { + Object* object = *objects[j]; + if (object->IsHeapObject()) { + if (!can_skip(isolate_->heap(), &object)) { + group_should_be_visited = true; + break; + } + } + } + + if (!group_should_be_visited) { + object_groups_[last++] = entry; + continue; + } + + // An object in the group requires visiting, so iterate over all + // objects in the group. + for (size_t j = 0; j < entry->length_; ++j) { + Object* object = *objects[j]; + if (object->IsHeapObject()) { + v->VisitPointer(&object); + any_group_was_visited = true; + } + } + + // Once the entire group has been iterated over, set the object + // group to NULL so it won't be processed again. + entry->Dispose(); + object_groups_.at(i) = NULL; + } + object_groups_.Rewind(last); + return any_group_was_visited; +} + + bool GlobalHandles::PostGarbageCollectionProcessing( - GarbageCollector collector) { + GarbageCollector collector, GCTracer* tracer) { // Process weak global handle callbacks. This must be done after the // GC is completely done, because the callbacks may invoke arbitrary // API functions. @@ -547,7 +637,10 @@ bool GlobalHandles::PostGarbageCollectionProcessing( // Skip dependent handles. Their weak callbacks might expect to be // called between two global garbage collection callbacks which // are not called for minor collections. - if (!node->is_independent()) continue; + if (!node->is_independent() && !node->is_partially_dependent()) { + continue; + } + node->clear_partially_dependent(); if (node->PostGarbageCollectionProcessing(isolate_, this)) { if (initial_post_gc_processing_count != post_gc_processing_count_) { // Weak callback triggered another GC and another round of @@ -563,6 +656,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing( } } else { for (NodeIterator it(this); !it.done(); it.Advance()) { + it.node()->clear_partially_dependent(); if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) { if (initial_post_gc_processing_count != post_gc_processing_count_) { // See the comment above. 
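Both IterateObjectGroups above and the new-space node pruning in the following hunk keep the entries that survive by copying them down to index last++ and then calling Rewind(last), so the list is filtered in place without allocating a second list. A minimal standalone sketch of that compaction idiom, using std::vector in place of V8's List purely to keep the example self-contained:

#include <cassert>
#include <vector>

// Copy the elements to keep down to the front of the list, then truncate.
// resize(last) plays the role of List::Rewind(last).
template <class T, class KeepPredicate>
void CompactInPlace(std::vector<T>* list, KeepPredicate keep) {
  size_t last = 0;
  for (size_t i = 0; i < list->size(); ++i) {
    if (keep((*list)[i])) {
      (*list)[last++] = (*list)[i];
    }
  }
  list->resize(last);
}

int main() {
  std::vector<int> nodes = {1, 2, 3, 4, 5, 6};
  // Keep the "still alive" entries, drop the rest, preserve order.
  CompactInPlace(&nodes, [](int n) { return n % 2 == 0; });
  assert((nodes == std::vector<int>{2, 4, 6}));
  return 0;
}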
@@ -579,10 +673,17 @@ bool GlobalHandles::PostGarbageCollectionProcessing( for (int i = 0; i < new_space_nodes_.length(); ++i) { Node* node = new_space_nodes_[i]; ASSERT(node->is_in_new_space_list()); - if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) { - new_space_nodes_[last++] = node; + if (node->IsRetainer()) { + if (isolate_->heap()->InNewSpace(node->object())) { + new_space_nodes_[last++] = node; + tracer->increment_nodes_copied_in_new_space(); + } else { + node->set_in_new_space_list(false); + tracer->increment_nodes_promoted(); + } } else { node->set_in_new_space_list(false); + tracer->increment_nodes_died_in_new_space(); } } new_space_nodes_.Rewind(last); @@ -610,7 +711,7 @@ void GlobalHandles::IterateAllRoots(ObjectVisitor* v) { void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) { for (NodeIterator it(this); !it.done(); it.Advance()) { - if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) { + if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) { v->VisitEmbedderReference(it.node()->location(), it.node()->wrapper_class_id()); } @@ -618,6 +719,40 @@ void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) { } +void GlobalHandles::IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v) { + for (int i = 0; i < new_space_nodes_.length(); ++i) { + Node* node = new_space_nodes_[i]; + if (node->IsRetainer() && node->has_wrapper_class_id()) { + v->VisitEmbedderReference(node->location(), + node->wrapper_class_id()); + } + } +} + + +int GlobalHandles::NumberOfWeakHandles() { + int count = 0; + for (NodeIterator it(this); !it.done(); it.Advance()) { + if (it.node()->IsWeakRetainer()) { + count++; + } + } + return count; +} + + +int GlobalHandles::NumberOfGlobalObjectWeakHandles() { + int count = 0; + for (NodeIterator it(this); !it.done(); it.Advance()) { + if (it.node()->IsWeakRetainer() && + it.node()->object()->IsJSGlobalObject()) { + count++; + } + } + return count; +} + + void GlobalHandles::RecordStats(HeapStats* stats) { *stats->global_handle_count = 0; *stats->weak_global_handle_count = 0; diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h index 866317ee13..990014467c 100644 --- a/deps/v8/src/global-handles.h +++ b/deps/v8/src/global-handles.h @@ -108,8 +108,6 @@ class ImplicitRefGroup { }; -typedef void (*WeakReferenceGuest)(Object* object, void* parameter); - class GlobalHandles { public: ~GlobalHandles(); @@ -128,21 +126,17 @@ class GlobalHandles { // reason is that Smi::FromInt(0) does not change during garage collection. void MakeWeak(Object** location, void* parameter, - WeakReferenceCallback callback); + WeakReferenceCallback weak_reference_callback, + NearDeathCallback near_death_callback); - static void SetWrapperClassId(Object** location, uint16_t class_id); - static uint16_t GetWrapperClassId(Object** location); + void RecordStats(HeapStats* stats); // Returns the current number of weak handles. - int NumberOfWeakHandles() { return number_of_weak_handles_; } - - void RecordStats(HeapStats* stats); + int NumberOfWeakHandles(); // Returns the current number of weak handles to global objects. // These handles are also included in NumberOfWeakHandles(). - int NumberOfGlobalObjectWeakHandles() { - return number_of_global_object_weak_handles_; - } + int NumberOfGlobalObjectWeakHandles(); // Returns the current number of handles to global objects. int NumberOfGlobalHandles() { @@ -155,6 +149,9 @@ class GlobalHandles { // Clear the weakness of a global handle. 
void MarkIndependent(Object** location); + // Mark the reference to this object externaly unreachable. + void MarkPartiallyDependent(Object** location); + static bool IsIndependent(Object** location); // Tells whether global handle is near death. @@ -165,7 +162,8 @@ class GlobalHandles { // Process pending weak handles. // Returns true if next major GC is likely to collect more garbage. - bool PostGarbageCollectionProcessing(GarbageCollector collector); + bool PostGarbageCollectionProcessing(GarbageCollector collector, + GCTracer* tracer); // Iterates over all strong handles. void IterateStrongRoots(ObjectVisitor* v); @@ -176,13 +174,13 @@ class GlobalHandles { // Iterates over all handles that have embedder-assigned class ID. void IterateAllRootsWithClassIds(ObjectVisitor* v); + // Iterates over all handles in the new space that have embedder-assigned + // class ID. + void IterateAllRootsInNewSpaceWithClassIds(ObjectVisitor* v); + // Iterates over all weak roots in heap. void IterateWeakRoots(ObjectVisitor* v); - // Iterates over weak roots that are bound to a given callback. - void IterateWeakRoots(WeakReferenceGuest f, - WeakReferenceCallback callback); - // Find all weak handles satisfying the callback predicate, mark // them as pending. void IdentifyWeakHandles(WeakSlotCallback f); @@ -195,16 +193,22 @@ class GlobalHandles { // Iterates over strong and dependent handles. See the node above. void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v); - // Finds weak independent handles satisfying the callback predicate - // and marks them as pending. See the note above. + // Finds weak independent or partially independent handles satisfying + // the callback predicate and marks them as pending. See the note above. void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f); - // Iterates over weak independent handles. See the note above. + // Iterates over weak independent or partially independent handles. + // See the note above. void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v); + // Iterate over objects in object groups that have at least one object + // which requires visiting. The callback has to return true if objects + // can be skipped and false otherwise. + bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip); + // Add an object group. // Should be only used in GC callback function before a collection. - // All groups are destroyed after a mark-compact collection. + // All groups are destroyed after a garbage collection. void AddObjectGroup(Object*** handles, size_t length, v8::RetainedObjectInfo* info); @@ -248,14 +252,6 @@ class GlobalHandles { Isolate* isolate_; - // Field always containing the number of weak and near-death handles. - int number_of_weak_handles_; - - // Field always containing the number of weak and near-death handles - // to global objects. These objects are also included in - // number_of_weak_handles_. - int number_of_global_object_weak_handles_; - // Field always containing the number of handles to global objects. int number_of_global_handles_; diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h index babffbf659..1606996d20 100644 --- a/deps/v8/src/globals.h +++ b/deps/v8/src/globals.h @@ -253,15 +253,13 @@ const int kBinary32ExponentShift = 23; // other bits set. const uint64_t kQuietNaNMask = static_cast<uint64_t>(0xfff) << 51; -// ASCII/UTF-16 constants +// Latin1/UTF-16 constants // Code-point values in Unicode 4.0 are 21 bits wide. // Code units in UTF-16 are 16 bits wide. 
typedef uint16_t uc16; typedef int32_t uc32; -const int kASCIISize = kCharSize; +const int kOneByteSize = kCharSize; const int kUC16Size = sizeof(uc16); // NOLINT -const uc32 kMaxAsciiCharCode = 0x7f; -const uint32_t kMaxAsciiCharCodeU = 0x7fu; // The expression OFFSET_OF(type, field) computes the byte-offset diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h index 130798647b..fd60a350d7 100644 --- a/deps/v8/src/handles-inl.h +++ b/deps/v8/src/handles-inl.h @@ -37,25 +37,33 @@ namespace v8 { namespace internal { -inline Isolate* GetIsolateForHandle(Object* obj) { - return Isolate::Current(); -} - -inline Isolate* GetIsolateForHandle(HeapObject* obj) { - return obj->GetIsolate(); -} - template<typename T> Handle<T>::Handle(T* obj) { ASSERT(!obj->IsFailure()); - location_ = HandleScope::CreateHandle(obj, GetIsolateForHandle(obj)); + location_ = HandleScope::CreateHandle(obj->GetIsolate(), obj); } template<typename T> Handle<T>::Handle(T* obj, Isolate* isolate) { ASSERT(!obj->IsFailure()); - location_ = HandleScope::CreateHandle(obj, isolate); + location_ = HandleScope::CreateHandle(isolate, obj); +} + + +template <typename T> +inline bool Handle<T>::is_identical_to(const Handle<T> other) const { + ASSERT(location_ == NULL || + reinterpret_cast<Address>(*location_) != kZapValue); +#ifdef DEBUG + if (FLAG_enable_slow_asserts) { + Isolate* isolate = Isolate::Current(); + CHECK(isolate->AllowHandleDereference() || + Heap::RelocationLock::IsLocked(isolate->heap()) || + !isolate->optimizing_compiler_thread()->IsOptimizerThread()); + } +#endif // DEBUG + return *location_ == *other.location_; } @@ -63,23 +71,20 @@ template <typename T> inline T* Handle<T>::operator*() const { ASSERT(location_ != NULL); ASSERT(reinterpret_cast<Address>(*location_) != kHandleZapValue); + SLOW_ASSERT(Isolate::Current()->AllowHandleDereference()); return *BitCast<T**>(location_); } - -HandleScope::HandleScope() { - Isolate* isolate = Isolate::Current(); - v8::ImplementationUtilities::HandleScopeData* current = - isolate->handle_scope_data(); - isolate_ = isolate; - prev_next_ = current->next; - prev_limit_ = current->limit; - current->level++; +template <typename T> +inline T** Handle<T>::location() const { + ASSERT(location_ == NULL || + reinterpret_cast<Address>(*location_) != kZapValue); + SLOW_ASSERT(Isolate::Current()->AllowHandleDereference()); + return location_; } HandleScope::HandleScope(Isolate* isolate) { - ASSERT(isolate == Isolate::Current()); v8::ImplementationUtilities::HandleScopeData* current = isolate->handle_scope_data(); isolate_ = isolate; @@ -94,7 +99,6 @@ HandleScope::~HandleScope() { } void HandleScope::CloseScope() { - ASSERT(isolate_ == Isolate::Current()); v8::ImplementationUtilities::HandleScopeData* current = isolate_->handle_scope_data(); current->next = prev_next_; @@ -118,7 +122,7 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) { isolate_->handle_scope_data(); // Allocate one handle in the parent scope. ASSERT(current->level > 0); - Handle<T> result(CreateHandle<T>(value, isolate_)); + Handle<T> result(CreateHandle<T>(isolate_, value)); // Reinitialize the current scope (so that it's ready // to be used or closed again). 
prev_next_ = current->next; @@ -129,13 +133,12 @@ Handle<T> HandleScope::CloseAndEscape(Handle<T> handle_value) { template <typename T> -T** HandleScope::CreateHandle(T* value, Isolate* isolate) { - ASSERT(isolate == Isolate::Current()); +T** HandleScope::CreateHandle(Isolate* isolate, T* value) { v8::ImplementationUtilities::HandleScopeData* current = isolate->handle_scope_data(); internal::Object** cur = current->next; - if (cur == current->limit) cur = Extend(); + if (cur == current->limit) cur = Extend(isolate); // Update the current next field, set the value in the created // handle, and return the result. ASSERT(cur < current->limit); @@ -148,10 +151,10 @@ T** HandleScope::CreateHandle(T* value, Isolate* isolate) { #ifdef DEBUG -inline NoHandleAllocation::NoHandleAllocation() { - Isolate* isolate = Isolate::Current(); +inline NoHandleAllocation::NoHandleAllocation(Isolate* isolate) + : isolate_(isolate) { v8::ImplementationUtilities::HandleScopeData* current = - isolate->handle_scope_data(); + isolate_->handle_scope_data(); active_ = !isolate->optimizing_compiler_thread()->IsOptimizerThread(); if (active_) { @@ -170,14 +173,26 @@ inline NoHandleAllocation::~NoHandleAllocation() { // Restore state in current handle scope to re-enable handle // allocations. v8::ImplementationUtilities::HandleScopeData* data = - Isolate::Current()->handle_scope_data(); + isolate_->handle_scope_data(); ASSERT_EQ(0, data->level); data->level = level_; } } -#endif +HandleDereferenceGuard::HandleDereferenceGuard(Isolate* isolate, State state) + : isolate_(isolate) { + old_state_ = isolate_->AllowHandleDereference(); + isolate_->SetAllowHandleDereference(state == ALLOW); +} + + +HandleDereferenceGuard::~HandleDereferenceGuard() { + isolate_->SetAllowHandleDereference(old_state_); +} + +#endif + } } // namespace v8::internal #endif // V8_HANDLES_INL_H_ diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc index 46399d65ea..2958d2cc0a 100644 --- a/deps/v8/src/handles.cc +++ b/deps/v8/src/handles.cc @@ -45,8 +45,7 @@ namespace v8 { namespace internal { -int HandleScope::NumberOfHandles() { - Isolate* isolate = Isolate::Current(); +int HandleScope::NumberOfHandles(Isolate* isolate) { HandleScopeImplementer* impl = isolate->handle_scope_implementer(); int n = impl->blocks()->length(); if (n == 0) return 0; @@ -55,8 +54,7 @@ int HandleScope::NumberOfHandles() { } -Object** HandleScope::Extend() { - Isolate* isolate = Isolate::Current(); +Object** HandleScope::Extend(Isolate* isolate) { v8::ImplementationUtilities::HandleScopeData* current = isolate->handle_scope_data(); @@ -97,7 +95,6 @@ Object** HandleScope::Extend() { void HandleScope::DeleteExtensions(Isolate* isolate) { - ASSERT(isolate == Isolate::Current()); v8::ImplementationUtilities::HandleScopeData* current = isolate->handle_scope_data(); isolate->handle_scope_implementer()->DeleteExtensions(current->limit); @@ -112,21 +109,18 @@ void HandleScope::ZapRange(Object** start, Object** end) { } -Address HandleScope::current_level_address() { - return reinterpret_cast<Address>( - &Isolate::Current()->handle_scope_data()->level); +Address HandleScope::current_level_address(Isolate* isolate) { + return reinterpret_cast<Address>(&isolate->handle_scope_data()->level); } -Address HandleScope::current_next_address() { - return reinterpret_cast<Address>( - &Isolate::Current()->handle_scope_data()->next); +Address HandleScope::current_next_address(Isolate* isolate) { + return reinterpret_cast<Address>(&isolate->handle_scope_data()->next); } -Address 
HandleScope::current_limit_address() { - return reinterpret_cast<Address>( - &Isolate::Current()->handle_scope_data()->limit); +Address HandleScope::current_limit_address(Isolate* isolate) { + return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit); } @@ -229,12 +223,12 @@ Handle<Object> SetPrototype(Handle<JSFunction> function, } -Handle<Object> SetProperty(Handle<Object> object, +Handle<Object> SetProperty(Isolate* isolate, + Handle<Object> object, Handle<Object> key, Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode) { - Isolate* isolate = Isolate::Current(); CALL_HEAP_FUNCTION( isolate, Runtime::SetObjectProperty( @@ -266,7 +260,7 @@ Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object, - Handle<String> key, + Handle<Name> key, Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode) { @@ -282,14 +276,14 @@ Handle<Object> SetPropertyWithInterceptor(Handle<JSObject> object, Handle<Object> GetProperty(Handle<JSReceiver> obj, const char* name) { Isolate* isolate = obj->GetIsolate(); - Handle<String> str = isolate->factory()->LookupAsciiSymbol(name); + Handle<String> str = isolate->factory()->InternalizeUtf8String(name); CALL_HEAP_FUNCTION(isolate, obj->GetProperty(*str), Object); } -Handle<Object> GetProperty(Handle<Object> obj, +Handle<Object> GetProperty(Isolate* isolate, + Handle<Object> obj, Handle<Object> key) { - Isolate* isolate = Isolate::Current(); CALL_HEAP_FUNCTION(isolate, Runtime::GetObjectProperty(isolate, obj, key), Object); } @@ -297,7 +291,7 @@ Handle<Object> GetProperty(Handle<Object> obj, Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, PropertyAttributes* attributes) { Isolate* isolate = receiver->GetIsolate(); CALL_HEAP_FUNCTION(isolate, @@ -315,8 +309,8 @@ Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value) { } -Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index) { - Isolate* isolate = Isolate::Current(); +Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate, + uint32_t index) { CALL_HEAP_FUNCTION( isolate, isolate->heap()->LookupSingleCharacterStringFromCode(index), Object); @@ -350,14 +344,16 @@ Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) { // collector will call the weak callback on the global handle // associated with the wrapper and get rid of both the wrapper and the // handle. -static void ClearWrapperCache(Persistent<v8::Value> handle, void*) { +static void ClearWrapperCache(v8::Isolate* v8_isolate, + Persistent<v8::Value> handle, + void*) { Handle<Object> cache = Utils::OpenHandle(*handle); JSValue* wrapper = JSValue::cast(*cache); Foreign* foreign = Script::cast(wrapper->value())->wrapper(); ASSERT(foreign->foreign_address() == reinterpret_cast<Address>(cache.location())); foreign->set_foreign_address(0); - Isolate* isolate = Isolate::Current(); + Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate); isolate->global_handles()->Destroy(cache.location()); isolate->counters()->script_wrappers()->Decrement(); } @@ -369,19 +365,30 @@ Handle<JSValue> GetScriptWrapper(Handle<Script> script) { return Handle<JSValue>( reinterpret_cast<JSValue**>(script->wrapper()->foreign_address())); } - Isolate* isolate = Isolate::Current(); + Isolate* isolate = script->GetIsolate(); // Construct a new script wrapper. 
isolate->counters()->script_wrappers()->Increment(); Handle<JSFunction> constructor = isolate->script_function(); Handle<JSValue> result = Handle<JSValue>::cast(isolate->factory()->NewJSObject(constructor)); + + // The allocation might have triggered a GC, which could have called this + // function recursively, and a wrapper has already been created and cached. + // In that case, simply return the cached wrapper. + if (script->wrapper()->foreign_address() != NULL) { + return Handle<JSValue>( + reinterpret_cast<JSValue**>(script->wrapper()->foreign_address())); + } + result->set_value(*script); // Create a new weak global handle and use it to cache the wrapper // for future use. The cache will automatically be cleared by the // garbage collector when it is not used anymore. Handle<Object> handle = isolate->global_handles()->Create(*result); - isolate->global_handles()->MakeWeak(handle.location(), NULL, + isolate->global_handles()->MakeWeak(handle.location(), + NULL, + NULL, &ClearWrapperCache); script->wrapper()->set_foreign_address( reinterpret_cast<Address>(handle.location())); @@ -423,7 +430,7 @@ static void CalculateLineEnds(Isolate* isolate, Vector<const SourceChar> src, bool with_last_line) { const int src_len = src.length(); - StringSearch<char, SourceChar> search(isolate, CStrVector("\n")); + StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n")); // Find and record line ends. int position = 0; @@ -457,7 +464,7 @@ Handle<FixedArray> CalculateLineEnds(Handle<String> src, if (content.IsAscii()) { CalculateLineEnds(isolate, &line_ends, - content.ToAsciiVector(), + content.ToOneByteVector(), with_last_line); } else { CalculateLineEnds(isolate, @@ -544,6 +551,7 @@ void CustomArguments::IterateInstance(ObjectVisitor* v) { // Compute the property keys from the interceptor. +// TODO(rossberg): support symbols in API, and filter here if needed. v8::Handle<v8::Array> GetKeysForNamedInterceptor(Handle<JSReceiver> receiver, Handle<JSObject> object) { Isolate* isolate = receiver->GetIsolate(); @@ -593,6 +601,27 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver, } +Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) { + Isolate* isolate = script->GetIsolate(); + Handle<String> name_or_source_url_key = + isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("nameOrSourceURL")); + Handle<JSValue> script_wrapper = GetScriptWrapper(script); + Handle<Object> property = GetProperty(isolate, + script_wrapper, + name_or_source_url_key); + ASSERT(property->IsJSFunction()); + Handle<JSFunction> method = Handle<JSFunction>::cast(property); + bool caught_exception; + Handle<Object> result = Execution::TryCall(method, script_wrapper, 0, + NULL, &caught_exception); + if (caught_exception) { + result = isolate->factory()->undefined_value(); + } + return result; +} + + static bool ContainsOnlyValidKeys(Handle<FixedArray> array) { int len = array->length(); for (int i = 0; i < len; i++) { @@ -619,7 +648,7 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSReceiver> object, // Only collect keys if access is permitted. 
for (Handle<Object> p = object; *p != isolate->heap()->null_value(); - p = Handle<Object>(p->GetPrototype(), isolate)) { + p = Handle<Object>(p->GetPrototype(isolate), isolate)) { if (p->IsJSProxy()) { Handle<JSProxy> proxy(JSProxy::cast(*p), isolate); Handle<Object> args[] = { proxy }; @@ -726,10 +755,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, // to kInvalidEnumCache, this means that the map itself has never used the // present enum cache. The first step to using the cache is to set the // enum length of the map by counting the number of own descriptors that - // are not DONT_ENUM. + // are not DONT_ENUM or SYMBOLIC. if (own_property_count == Map::kInvalidEnumCache) { own_property_count = object->map()->NumberOfDescribedProperties( - OWN_DESCRIPTORS, DONT_ENUM); + OWN_DESCRIPTORS, DONT_SHOW); if (cache_result) object->map()->SetEnumLength(own_property_count); } @@ -756,7 +785,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, } isolate->counters()->enum_cache_misses()->Increment(); - int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_ENUM); + int num_enum = map->NumberOfDescribedProperties(ALL_DESCRIPTORS, DONT_SHOW); Handle<FixedArray> storage = isolate->factory()->NewFixedArray(num_enum); Handle<FixedArray> indices = isolate->factory()->NewFixedArray(num_enum); @@ -770,9 +799,10 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, for (int i = 0; i < descs->number_of_descriptors(); i++) { PropertyDetails details = descs->GetDetails(i); - if (!details.IsDontEnum()) { + Object* key = descs->GetKey(i); + if (!(details.IsDontEnum() || key->IsSymbol())) { if (i < real_size) ++enum_size; - storage->set(index, descs->GetKey(i)); + storage->set(index, key); if (!indices.is_null()) { if (details.type() != FIELD) { indices = Handle<FixedArray>(); @@ -803,7 +833,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, return ReduceFixedArrayTo(storage, enum_size); } else { - Handle<StringDictionary> dictionary(object->property_dictionary()); + Handle<NameDictionary> dictionary(object->property_dictionary()); int length = dictionary->NumberOfElements(); if (length == 0) { @@ -824,7 +854,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, // many properties were added but subsequently deleted. int next_enumeration = dictionary->NextEnumerationIndex(); if (!object->IsGlobalObject() && next_enumeration > (length * 3) / 2) { - StringDictionary::DoGenerateNewEnumerationIndices(dictionary); + NameDictionary::DoGenerateNewEnumerationIndices(dictionary); next_enumeration = dictionary->NextEnumerationIndex(); } @@ -832,7 +862,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object, isolate->factory()->NewFixedArray(next_enumeration); storage = Handle<FixedArray>(dictionary->CopyEnumKeysTo(*storage)); - ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_ENUM)); + ASSERT(storage->length() == object->NumberOfLocalProperties(DONT_SHOW)); return storage; } } @@ -863,168 +893,8 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table, } -// This method determines the type of string involved and then gets the UTF8 -// length of the string. It doesn't flatten the string and has log(n) recursion -// for a string of length n. If the failure flag gets set, then we have to -// flatten the string and retry. Failures are caused by surrogate pairs in deep -// cons strings. 
- -// Single surrogate characters that are encountered in the UTF-16 character -// sequence of the input string get counted as 3 UTF-8 bytes, because that -// is the way that WriteUtf8 will encode them. Surrogate pairs are counted and -// encoded as one 4-byte UTF-8 sequence. - -// This function conceptually uses recursion on the two halves of cons strings. -// However, in order to avoid the recursion going too deep it recurses on the -// second string of the cons, but iterates on the first substring (by manually -// eliminating it as a tail recursion). This means it counts the UTF-8 length -// from the end to the start, which makes no difference to the total. - -// Surrogate pairs are recognized even if they are split across two sides of a -// cons, which complicates the implementation somewhat. Therefore, too deep -// recursion cannot always be avoided. This case is detected, and the failure -// flag is set, a signal to the caller that the string should be flattened and -// the operation retried. -int Utf8LengthHelper(String* input, - int from, - int to, - bool followed_by_surrogate, - int max_recursion, - bool* failure, - bool* starts_with_surrogate) { - if (from == to) return 0; - int total = 0; - bool dummy; - while (true) { - if (input->IsAsciiRepresentation()) { - *starts_with_surrogate = false; - return total + to - from; - } - switch (StringShape(input).representation_tag()) { - case kConsStringTag: { - ConsString* str = ConsString::cast(input); - String* first = str->first(); - String* second = str->second(); - int first_length = first->length(); - if (first_length - from > to - first_length) { - if (first_length < to) { - // Right hand side is shorter. No need to check the recursion depth - // since this can only happen log(n) times. - bool right_starts_with_surrogate = false; - total += Utf8LengthHelper(second, - 0, - to - first_length, - followed_by_surrogate, - max_recursion - 1, - failure, - &right_starts_with_surrogate); - if (*failure) return 0; - followed_by_surrogate = right_starts_with_surrogate; - input = first; - to = first_length; - } else { - // We only need the left hand side. - input = first; - } - } else { - if (first_length > from) { - // Left hand side is shorter. - if (first->IsAsciiRepresentation()) { - total += first_length - from; - *starts_with_surrogate = false; - starts_with_surrogate = &dummy; - input = second; - from = 0; - to -= first_length; - } else if (second->IsAsciiRepresentation()) { - followed_by_surrogate = false; - total += to - first_length; - input = first; - to = first_length; - } else if (max_recursion > 0) { - bool right_starts_with_surrogate = false; - // Recursing on the long one. This may fail. - total += Utf8LengthHelper(second, - 0, - to - first_length, - followed_by_surrogate, - max_recursion - 1, - failure, - &right_starts_with_surrogate); - if (*failure) return 0; - input = first; - to = first_length; - followed_by_surrogate = right_starts_with_surrogate; - } else { - *failure = true; - return 0; - } - } else { - // We only need the right hand side. 
- input = second; - from = 0; - to -= first_length; - } - } - continue; - } - case kExternalStringTag: - case kSeqStringTag: { - Vector<const uc16> vector = input->GetFlatContent().ToUC16Vector(); - const uc16* p = vector.start(); - int previous = unibrow::Utf16::kNoPreviousCharacter; - for (int i = from; i < to; i++) { - uc16 c = p[i]; - total += unibrow::Utf8::Length(c, previous); - previous = c; - } - if (to - from > 0) { - if (unibrow::Utf16::IsLeadSurrogate(previous) && - followed_by_surrogate) { - total -= unibrow::Utf8::kBytesSavedByCombiningSurrogates; - } - if (unibrow::Utf16::IsTrailSurrogate(p[from])) { - *starts_with_surrogate = true; - } - } - return total; - } - case kSlicedStringTag: { - SlicedString* str = SlicedString::cast(input); - int offset = str->offset(); - input = str->parent(); - from += offset; - to += offset; - continue; - } - default: - break; - } - UNREACHABLE(); - return 0; - } - return 0; -} - - -int Utf8Length(Handle<String> str) { - bool dummy; - bool failure; - int len; - const int kRecursionBudget = 100; - do { - failure = false; - len = Utf8LengthHelper( - *str, 0, str->length(), false, kRecursionBudget, &failure, &dummy); - if (failure) FlattenString(str); - } while (failure); - return len; -} - - DeferredHandleScope::DeferredHandleScope(Isolate* isolate) : impl_(isolate->handle_scope_implementer()) { - ASSERT(impl_->isolate() == Isolate::Current()); impl_->BeginDeferredScope(); v8::ImplementationUtilities::HandleScopeData* data = impl_->isolate()->handle_scope_data(); diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h index a1d88c2f8f..3e408b73f2 100644 --- a/deps/v8/src/handles.h +++ b/deps/v8/src/handles.h @@ -58,25 +58,19 @@ class Handle { a = b; // Fake assignment to enforce type checks. USE(a); #endif - location_ = reinterpret_cast<T**>(handle.location()); + location_ = reinterpret_cast<T**>(handle.location_); } INLINE(T* operator ->() const) { return operator*(); } // Check if this handle refers to the exact same object as the other handle. - bool is_identical_to(const Handle<T> other) const { - return operator*() == *other; - } + INLINE(bool is_identical_to(const Handle<T> other) const); // Provides the C++ dereference operator. INLINE(T* operator*() const); // Returns the address to where the raw pointer is stored. - T** location() const { - ASSERT(location_ == NULL || - reinterpret_cast<Address>(*location_) != kZapValue); - return location_; - } + INLINE(T** location() const); template <class S> static Handle<T> cast(Handle<S> that) { T::cast(*that); @@ -92,9 +86,19 @@ class Handle { private: T** location_; + + // Handles of different classes are allowed to access each other's location_. + template<class S> friend class Handle; }; +// Convenience wrapper. +template<class T> +inline Handle<T> handle(T* t, Isolate* isolate) { + return Handle<T>(t, isolate); +} + + class DeferredHandles; class HandleScopeImplementer; @@ -113,24 +117,23 @@ class HandleScopeImplementer; // for which the handle scope has been deleted is undefined. class HandleScope { public: - inline HandleScope(); explicit inline HandleScope(Isolate* isolate); inline ~HandleScope(); // Counts the number of allocated handles. - static int NumberOfHandles(); + static int NumberOfHandles(Isolate* isolate); // Creates a new handle with the given value. template <typename T> - static inline T** CreateHandle(T* value, Isolate* isolate); + static inline T** CreateHandle(Isolate* isolate, T* value); // Deallocates any extensions used by the current scope. 
static void DeleteExtensions(Isolate* isolate); - static Address current_next_address(); - static Address current_limit_address(); - static Address current_level_address(); + static Address current_next_address(Isolate* isolate); + static Address current_limit_address(Isolate* isolate); + static Address current_level_address(Isolate* isolate); // Closes the HandleScope (invalidating all handles // created in the scope of the HandleScope) and returns @@ -155,7 +158,7 @@ class HandleScope { Object** prev_limit_; // Extend the handle scope making room for more handles. - static internal::Object** Extend(); + static internal::Object** Extend(Isolate* isolate); // Zaps the handles in the half-open interval [start, end). static void ZapRange(internal::Object** start, internal::Object** end); @@ -207,9 +210,8 @@ void FlattenString(Handle<String> str); // string. Handle<String> FlattenGetString(Handle<String> str); -int Utf8Length(Handle<String> str); - -Handle<Object> SetProperty(Handle<Object> object, +Handle<Object> SetProperty(Isolate* isolate, + Handle<Object> object, Handle<Object> key, Handle<Object> value, PropertyAttributes attributes, @@ -226,7 +228,8 @@ Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> GetProperty(Handle<JSReceiver> obj, const char* name); -Handle<Object> GetProperty(Handle<Object> obj, +Handle<Object> GetProperty(Isolate* isolate, + Handle<Object> obj, Handle<Object> key); Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver, @@ -236,7 +239,8 @@ Handle<Object> GetPropertyWithInterceptor(Handle<JSObject> receiver, Handle<Object> SetPrototype(Handle<JSObject> obj, Handle<Object> value); -Handle<Object> LookupSingleCharacterStringFromCode(uint32_t index); +Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate, + uint32_t index); Handle<JSObject> Copy(Handle<JSObject> obj); @@ -260,6 +264,7 @@ int GetScriptLineNumber(Handle<Script> script, int code_position); // The safe version does not make heap allocations but may work much slower. int GetScriptLineNumberSafe(Handle<Script> script, int code_position); int GetScriptColumnNumber(Handle<Script> script, int code_position); +Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script); // Computes the enumerable keys from interceptors. Used for debug mirrors and // by GetKeysInFixedArrayFor below. 
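The HandleScope interface above, CreateHandle(isolate, value) together with Extend(isolate) and the per-isolate next/limit/level data exposed through current_next_address() and friends, amounts to a bump allocator whose cursor is saved when a scope opens and restored when it closes, which is what invalidates every handle created inside the scope. A toy version of that shape, with a single fixed global block standing in for the isolate's handle_scope_data() (an assumption made only so the sketch is self-contained):

#include <cassert>

// Per-"isolate" allocation cursor: next free slot, end of block, nesting level.
struct HandleScopeData {
  void** next;
  void** limit;
  int level;
};

static void* g_block[64];
static HandleScopeData g_data = { g_block, g_block + 64, 0 };

// Saves the cursor on entry and restores it on exit; handles created inside
// the scope become invalid once the destructor rewinds the cursor.
struct ToyHandleScope {
  void** prev_next_;
  void** prev_limit_;
  ToyHandleScope() : prev_next_(g_data.next), prev_limit_(g_data.limit) {
    g_data.level++;
  }
  ~ToyHandleScope() {
    g_data.next = prev_next_;
    g_data.limit = prev_limit_;
    g_data.level--;
  }
};

void** CreateHandle(void* value) {
  void** cur = g_data.next;
  assert(cur < g_data.limit);  // The real code calls Extend() here instead.
  g_data.next = cur + 1;       // Bump the cursor by one slot.
  *cur = value;
  return cur;
}

int main() {
  int object = 42;
  {
    ToyHandleScope scope;
    void** handle = CreateHandle(&object);
    assert(*handle == &object);
  }
  assert(g_data.next == g_block);  // Scope exit rewound the allocation.
  return 0;
}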
@@ -321,17 +326,34 @@ Handle<ObjectHashTable> PutIntoObjectHashTable(Handle<ObjectHashTable> table, class NoHandleAllocation BASE_EMBEDDED { public: #ifndef DEBUG - NoHandleAllocation() {} + explicit NoHandleAllocation(Isolate* isolate) {} ~NoHandleAllocation() {} #else - inline NoHandleAllocation(); + explicit inline NoHandleAllocation(Isolate* isolate); inline ~NoHandleAllocation(); private: + Isolate* isolate_; int level_; bool active_; #endif }; + +class HandleDereferenceGuard BASE_EMBEDDED { + public: + enum State { ALLOW, DISALLOW }; +#ifndef DEBUG + HandleDereferenceGuard(Isolate* isolate, State state) { } + ~HandleDereferenceGuard() { } +#else + inline HandleDereferenceGuard(Isolate* isolate, State state); + inline ~HandleDereferenceGuard(); + private: + Isolate* isolate_; + bool old_state_; +#endif +}; + } } // namespace v8::internal #endif // V8_HANDLES_H_ diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h index bace902d4d..9ed65d8107 100644 --- a/deps/v8/src/heap-inl.h +++ b/deps/v8/src/heap-inl.h @@ -91,30 +91,55 @@ MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str, if (non_ascii_start >= length) { // If the string is ASCII, we do not need to convert the characters // since UTF8 is backwards compatible with ASCII. - return AllocateStringFromAscii(str, pretenure); + return AllocateStringFromOneByte(str, pretenure); } // Non-ASCII and we need to decode. return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure); } -MaybeObject* Heap::AllocateSymbol(Vector<const char> str, - int chars, - uint32_t hash_field) { - unibrow::Utf8InputBuffer<> buffer(str.start(), - static_cast<unsigned>(str.length())); - return AllocateInternalSymbol(&buffer, chars, hash_field); +template<> +bool inline Heap::IsOneByte(Vector<const char> str, int chars) { + // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported? + // ASCII only check. + return chars == str.length(); } -MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str, - uint32_t hash_field) { - if (str.length() > SeqAsciiString::kMaxLength) { - return Failure::OutOfMemoryException(); +template<> +bool inline Heap::IsOneByte(String* str, int chars) { + return str->IsOneByteRepresentation(); +} + + +MaybeObject* Heap::AllocateInternalizedStringFromUtf8( + Vector<const char> str, int chars, uint32_t hash_field) { + if (IsOneByte(str, chars)) { + return AllocateOneByteInternalizedString( + Vector<const uint8_t>::cast(str), hash_field); + } + return AllocateInternalizedStringImpl<false>(str, chars, hash_field); +} + + +template<typename T> +MaybeObject* Heap::AllocateInternalizedStringImpl( + T t, int chars, uint32_t hash_field) { + if (IsOneByte(t, chars)) { + return AllocateInternalizedStringImpl<true>(t, chars, hash_field); + } + return AllocateInternalizedStringImpl<false>(t, chars, hash_field); +} + + +MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str, + uint32_t hash_field) { + if (str.length() > SeqOneByteString::kMaxLength) { + return Failure::OutOfMemoryException(0x2); } // Compute map and object size. - Map* map = ascii_symbol_map(); - int size = SeqAsciiString::SizeFor(str.length()); + Map* map = ascii_internalized_string_map(); + int size = SeqOneByteString::SizeFor(str.length()); // Allocate string. Object* result; @@ -134,20 +159,20 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str, ASSERT_EQ(size, answer->Size()); // Fill in the characters. 
- memcpy(answer->address() + SeqAsciiString::kHeaderSize, + memcpy(answer->address() + SeqOneByteString::kHeaderSize, str.start(), str.length()); return answer; } -MaybeObject* Heap::AllocateTwoByteSymbol(Vector<const uc16> str, - uint32_t hash_field) { +MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str, + uint32_t hash_field) { if (str.length() > SeqTwoByteString::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x3); } // Compute map and object size. - Map* map = symbol_map(); + Map* map = internalized_string_map(); int size = SeqTwoByteString::SizeFor(str.length()); // Allocate string. @@ -239,8 +264,9 @@ MaybeObject* Heap::NumberFromInt32( MaybeObject* Heap::NumberFromUint32( uint32_t value, PretenureFlag pretenure) { - if ((int32_t)value >= 0 && Smi::IsValid((int32_t)value)) { - return Smi::FromInt((int32_t)value); + if (static_cast<int32_t>(value) >= 0 && + Smi::IsValid(static_cast<int32_t>(value))) { + return Smi::FromInt(static_cast<int32_t>(value)); } // Bypass NumberFromDouble to avoid various redundant checks. return AllocateHeapNumber(FastUI2D(value), pretenure); @@ -294,8 +320,8 @@ bool Heap::InNewSpace(Object* object) { } -bool Heap::InNewSpace(Address addr) { - return new_space_.Contains(addr); +bool Heap::InNewSpace(Address address) { + return new_space_.Contains(address); } @@ -309,6 +335,16 @@ bool Heap::InToSpace(Object* object) { } +bool Heap::InOldPointerSpace(Address address) { + return old_pointer_space_->Contains(address); +} + + +bool Heap::InOldPointerSpace(Object* object) { + return InOldPointerSpace(reinterpret_cast<Address>(object)); +} + + bool Heap::OldGenerationAllocationLimitReached() { if (!incremental_marking()->IsStopped()) return false; return OldGenerationSpaceAvailable() < 0; @@ -430,6 +466,15 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) { } +MaybeObject* Heap::AllocateEmptyJSArrayWithAllocationSite( + ElementsKind elements_kind, + Handle<Object> allocation_site_payload) { + return AllocateJSArrayAndStorageWithAllocationSite(elements_kind, 0, 0, + allocation_site_payload, + DONT_INITIALIZE_ARRAY_ELEMENTS); +} + + bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason) { const char* collector_reason = NULL; GarbageCollector collector = SelectGarbageCollector(space, &collector_reason); @@ -460,7 +505,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory( intptr_t change_in_bytes) { ASSERT(HasBeenSetUp()); intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes; - if (change_in_bytes >= 0) { + if (change_in_bytes > 0) { // Avoid overflow. if (amount > amount_of_external_allocated_memory_) { amount_of_external_allocated_memory_ = amount; @@ -607,21 +652,13 @@ void ExternalStringTable::Verify() { Object* obj = Object::cast(new_space_strings_[i]); // TODO(yangguo): check that the object is indeed an external string. ASSERT(heap_->InNewSpace(obj)); - ASSERT(obj != HEAP->raw_unchecked_the_hole_value()); - if (obj->IsExternalAsciiString()) { - ExternalAsciiString* string = ExternalAsciiString::cast(obj); - ASSERT(String::IsAscii(string->GetChars(), string->length())); - } + ASSERT(obj != HEAP->the_hole_value()); } for (int i = 0; i < old_space_strings_.length(); ++i) { Object* obj = Object::cast(old_space_strings_[i]); // TODO(yangguo): check that the object is indeed an external string. 
ASSERT(!heap_->InNewSpace(obj)); - ASSERT(obj != HEAP->raw_unchecked_the_hole_value()); - if (obj->IsExternalAsciiString()) { - ExternalAsciiString* string = ExternalAsciiString::cast(obj); - ASSERT(String::IsAscii(string->GetChars(), string->length())); - } + ASSERT(obj != HEAP->the_hole_value()); } #endif } @@ -644,6 +681,19 @@ void ExternalStringTable::ShrinkNewStrings(int position) { } +void ErrorObjectList::Add(JSObject* object) { + list_.Add(object); +} + + +void ErrorObjectList::Iterate(ObjectVisitor* v) { + if (!list_.is_empty()) { + Object** start = &list_[0]; + v->VisitPointers(start, start + list_.length()); + } +} + + void Heap::ClearInstanceofCache() { set_instanceof_cache_function(the_hole_value()); } @@ -739,6 +789,18 @@ AlwaysAllocateScope::~AlwaysAllocateScope() { } +#ifdef VERIFY_HEAP +NoWeakEmbeddedMapsVerificationScope::NoWeakEmbeddedMapsVerificationScope() { + HEAP->no_weak_embedded_maps_verification_scope_depth_++; +} + + +NoWeakEmbeddedMapsVerificationScope::~NoWeakEmbeddedMapsVerificationScope() { + HEAP->no_weak_embedded_maps_verification_scope_depth_--; +} +#endif + + void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) { for (Object** current = start; current < end; current++) { if ((*current)->IsHeapObject()) { diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc index 301b09993e..c9f1d501da 100644 --- a/deps/v8/src/heap-profiler.cc +++ b/deps/v8/src/heap-profiler.cc @@ -28,13 +28,13 @@ #include "v8.h" #include "heap-profiler.h" -#include "profile-generator.h" +#include "heap-snapshot-generator-inl.h" namespace v8 { namespace internal { -HeapProfiler::HeapProfiler() - : snapshots_(new HeapSnapshotsCollection()), +HeapProfiler::HeapProfiler(Heap* heap) + : snapshots_(new HeapSnapshotsCollection(heap)), next_snapshot_uid_(1) { } @@ -45,15 +45,16 @@ HeapProfiler::~HeapProfiler() { void HeapProfiler::ResetSnapshots() { + Heap* the_heap = heap(); delete snapshots_; - snapshots_ = new HeapSnapshotsCollection(); + snapshots_ = new HeapSnapshotsCollection(the_heap); } void HeapProfiler::SetUp() { Isolate* isolate = Isolate::Current(); if (isolate->heap_profiler() == NULL) { - isolate->set_heap_profiler(new HeapProfiler()); + isolate->set_heap_profiler(new HeapProfiler(isolate->heap())); } } @@ -65,23 +66,29 @@ void HeapProfiler::TearDown() { } -HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name, - int type, - v8::ActivityControl* control) { +HeapSnapshot* HeapProfiler::TakeSnapshot( + const char* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver) { ASSERT(Isolate::Current()->heap_profiler() != NULL); return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name, type, - control); + control, + resolver); } -HeapSnapshot* HeapProfiler::TakeSnapshot(String* name, - int type, - v8::ActivityControl* control) { +HeapSnapshot* HeapProfiler::TakeSnapshot( + String* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver) { ASSERT(Isolate::Current()->heap_profiler() != NULL); return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name, type, - control); + control, + resolver); } @@ -122,16 +129,18 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback( } -HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, - int type, - v8::ActivityControl* control) { +HeapSnapshot* HeapProfiler::TakeSnapshotImpl( + const char* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver) 
{ HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type); HeapSnapshot* result = snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++); bool generation_completed = true; switch (s_type) { case HeapSnapshot::kFull: { - HeapSnapshotGenerator generator(result, control); + HeapSnapshotGenerator generator(result, control, resolver, heap()); generation_completed = generator.GenerateSnapshot(); break; } @@ -147,10 +156,13 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name, } -HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name, - int type, - v8::ActivityControl* control) { - return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control); +HeapSnapshot* HeapProfiler::TakeSnapshotImpl( + String* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver) { + return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control, + resolver); } void HeapProfiler::StartHeapObjectsTrackingImpl() { diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h index 346177b8ba..c8c94f58d6 100644 --- a/deps/v8/src/heap-profiler.h +++ b/deps/v8/src/heap-profiler.h @@ -51,12 +51,16 @@ class HeapProfiler { static size_t GetMemorySizeUsedByProfiler(); - static HeapSnapshot* TakeSnapshot(const char* name, - int type, - v8::ActivityControl* control); - static HeapSnapshot* TakeSnapshot(String* name, - int type, - v8::ActivityControl* control); + static HeapSnapshot* TakeSnapshot( + const char* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver); + static HeapSnapshot* TakeSnapshot( + String* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver); static void StartHeapObjectsTracking(); static void StopHeapObjectsTracking(); @@ -79,20 +83,26 @@ class HeapProfiler { } private: - HeapProfiler(); + explicit HeapProfiler(Heap* heap); ~HeapProfiler(); - HeapSnapshot* TakeSnapshotImpl(const char* name, - int type, - v8::ActivityControl* control); - HeapSnapshot* TakeSnapshotImpl(String* name, - int type, - v8::ActivityControl* control); + HeapSnapshot* TakeSnapshotImpl( + const char* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver); + HeapSnapshot* TakeSnapshotImpl( + String* name, + int type, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver); void ResetSnapshots(); void StartHeapObjectsTrackingImpl(); void StopHeapObjectsTrackingImpl(); SnapshotObjectId PushHeapObjectsStatsImpl(OutputStream* stream); + Heap* heap() const { return snapshots_->heap(); } + HeapSnapshotsCollection* snapshots_; unsigned next_snapshot_uid_; List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_; diff --git a/deps/v8/src/heap-snapshot-generator-inl.h b/deps/v8/src/heap-snapshot-generator-inl.h new file mode 100644 index 0000000000..43002d2d2b --- /dev/null +++ b/deps/v8/src/heap-snapshot-generator-inl.h @@ -0,0 +1,87 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_HEAP_SNAPSHOT_GENERATOR_INL_H_ +#define V8_HEAP_SNAPSHOT_GENERATOR_INL_H_ + +#include "heap-snapshot-generator.h" + +namespace v8 { +namespace internal { + + +HeapEntry* HeapGraphEdge::from() const { + return &snapshot()->entries()[from_index_]; +} + + +HeapSnapshot* HeapGraphEdge::snapshot() const { + return to_entry_->snapshot(); +} + + +int HeapEntry::index() const { + return static_cast<int>(this - &snapshot_->entries().first()); +} + + +int HeapEntry::set_children_index(int index) { + children_index_ = index; + int next_index = index + children_count_; + children_count_ = 0; + return next_index; +} + + +HeapGraphEdge** HeapEntry::children_arr() { + ASSERT(children_index_ >= 0); + return &snapshot_->children()[children_index_]; +} + + +SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) { + return kGcRootsFirstSubrootId + delta * kObjectIdStep; +} + + +HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) { + return reinterpret_cast<HeapObject*>( + reinterpret_cast<char*>(kFirstGcSubrootObject) + + delta * HeapObjectsMap::kObjectIdStep); +} + + +int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) { + return static_cast<int>( + (reinterpret_cast<char*>(subroot) - + reinterpret_cast<char*>(kFirstGcSubrootObject)) / + HeapObjectsMap::kObjectIdStep); +} + +} } // namespace v8::internal + +#endif // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_ diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc new file mode 100644 index 0000000000..824e50793d --- /dev/null +++ b/deps/v8/src/heap-snapshot-generator.cc @@ -0,0 +1,2707 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "v8.h" + +#include "heap-snapshot-generator-inl.h" + +#include "heap-profiler.h" +#include "debug.h" + +namespace v8 { +namespace internal { + + +HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to) + : type_(type), + from_index_(from), + to_index_(to), + name_(name) { + ASSERT(type == kContextVariable + || type == kProperty + || type == kInternal + || type == kShortcut); +} + + +HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to) + : type_(type), + from_index_(from), + to_index_(to), + index_(index) { + ASSERT(type == kElement || type == kHidden || type == kWeak); +} + + +void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) { + to_entry_ = &snapshot->entries()[to_index_]; +} + + +const int HeapEntry::kNoEntry = -1; + +HeapEntry::HeapEntry(HeapSnapshot* snapshot, + Type type, + const char* name, + SnapshotObjectId id, + int self_size) + : type_(type), + children_count_(0), + children_index_(-1), + self_size_(self_size), + id_(id), + snapshot_(snapshot), + name_(name) { } + + +void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, + const char* name, + HeapEntry* entry) { + HeapGraphEdge edge(type, name, this->index(), entry->index()); + snapshot_->edges().Add(edge); + ++children_count_; +} + + +void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type, + int index, + HeapEntry* entry) { + HeapGraphEdge edge(type, index, this->index(), entry->index()); + snapshot_->edges().Add(edge); + ++children_count_; +} + + +Handle<HeapObject> HeapEntry::GetHeapObject() { + return snapshot_->collection()->FindHeapObjectById(id()); +} + + +void HeapEntry::Print( + const char* prefix, const char* edge_name, int max_depth, int indent) { + STATIC_CHECK(sizeof(unsigned) == sizeof(id())); + OS::Print("%6d @%6u %*c %s%s: ", + self_size(), id(), indent, ' ', prefix, edge_name); + if (type() != kString) { + OS::Print("%s %.40s\n", TypeAsString(), name_); + } else { + OS::Print("\""); + const char* c = name_; + while (*c && (c - name_) <= 40) { + if (*c != '\n') + OS::Print("%c", *c); + else + OS::Print("\\n"); + ++c; + } + OS::Print("\"\n"); + } + if (--max_depth == 0) return; + Vector<HeapGraphEdge*> ch = children(); + for (int i = 0; i < ch.length(); ++i) { + HeapGraphEdge& edge = *ch[i]; + const char* edge_prefix = ""; + EmbeddedVector<char, 64> index; + const char* edge_name = index.start(); + switch (edge.type()) { + case HeapGraphEdge::kContextVariable: + edge_prefix = "#"; + edge_name = edge.name(); + break; + case HeapGraphEdge::kElement: + OS::SNPrintF(index, "%d", edge.index()); + break; + 
case HeapGraphEdge::kInternal: + edge_prefix = "$"; + edge_name = edge.name(); + break; + case HeapGraphEdge::kProperty: + edge_name = edge.name(); + break; + case HeapGraphEdge::kHidden: + edge_prefix = "$"; + OS::SNPrintF(index, "%d", edge.index()); + break; + case HeapGraphEdge::kShortcut: + edge_prefix = "^"; + edge_name = edge.name(); + break; + case HeapGraphEdge::kWeak: + edge_prefix = "w"; + OS::SNPrintF(index, "%d", edge.index()); + break; + default: + OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type()); + } + edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2); + } +} + + +const char* HeapEntry::TypeAsString() { + switch (type()) { + case kHidden: return "/hidden/"; + case kObject: return "/object/"; + case kClosure: return "/closure/"; + case kString: return "/string/"; + case kCode: return "/code/"; + case kArray: return "/array/"; + case kRegExp: return "/regexp/"; + case kHeapNumber: return "/number/"; + case kNative: return "/native/"; + case kSynthetic: return "/synthetic/"; + default: return "???"; + } +} + + +// It is very important to keep objects that form a heap snapshot +// as small as possible. +namespace { // Avoid littering the global namespace. + +template <size_t ptr_size> struct SnapshotSizeConstants; + +template <> struct SnapshotSizeConstants<4> { + static const int kExpectedHeapGraphEdgeSize = 12; + static const int kExpectedHeapEntrySize = 24; + static const int kExpectedHeapSnapshotsCollectionSize = 100; + static const int kExpectedHeapSnapshotSize = 136; + static const size_t kMaxSerializableSnapshotRawSize = 256 * MB; +}; + +template <> struct SnapshotSizeConstants<8> { + static const int kExpectedHeapGraphEdgeSize = 24; + static const int kExpectedHeapEntrySize = 32; + static const int kExpectedHeapSnapshotsCollectionSize = 152; + static const int kExpectedHeapSnapshotSize = 168; + static const uint64_t kMaxSerializableSnapshotRawSize = + static_cast<uint64_t>(6000) * MB; +}; + +} // namespace + +HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection, + HeapSnapshot::Type type, + const char* title, + unsigned uid) + : collection_(collection), + type_(type), + title_(title), + uid_(uid), + root_index_(HeapEntry::kNoEntry), + gc_roots_index_(HeapEntry::kNoEntry), + natives_root_index_(HeapEntry::kNoEntry), + max_snapshot_js_object_id_(0) { + STATIC_CHECK( + sizeof(HeapGraphEdge) == + SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize); + STATIC_CHECK( + sizeof(HeapEntry) == + SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize); + for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) { + gc_subroot_indexes_[i] = HeapEntry::kNoEntry; + } +} + + +void HeapSnapshot::Delete() { + collection_->RemoveSnapshot(this); + delete this; +} + + +void HeapSnapshot::RememberLastJSObjectId() { + max_snapshot_js_object_id_ = collection_->last_assigned_id(); +} + + +HeapEntry* HeapSnapshot::AddRootEntry() { + ASSERT(root_index_ == HeapEntry::kNoEntry); + ASSERT(entries_.is_empty()); // Root entry must be the first one. 
+ HeapEntry* entry = AddEntry(HeapEntry::kObject, + "", + HeapObjectsMap::kInternalRootObjectId, + 0); + root_index_ = entry->index(); + ASSERT(root_index_ == 0); + return entry; +} + + +HeapEntry* HeapSnapshot::AddGcRootsEntry() { + ASSERT(gc_roots_index_ == HeapEntry::kNoEntry); + HeapEntry* entry = AddEntry(HeapEntry::kObject, + "(GC roots)", + HeapObjectsMap::kGcRootsObjectId, + 0); + gc_roots_index_ = entry->index(); + return entry; +} + + +HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) { + ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry); + ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags); + HeapEntry* entry = AddEntry( + HeapEntry::kObject, + VisitorSynchronization::kTagNames[tag], + HeapObjectsMap::GetNthGcSubrootId(tag), + 0); + gc_subroot_indexes_[tag] = entry->index(); + return entry; +} + + +HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type, + const char* name, + SnapshotObjectId id, + int size) { + HeapEntry entry(this, type, name, id, size); + entries_.Add(entry); + return &entries_.last(); +} + + +void HeapSnapshot::FillChildren() { + ASSERT(children().is_empty()); + children().Allocate(edges().length()); + int children_index = 0; + for (int i = 0; i < entries().length(); ++i) { + HeapEntry* entry = &entries()[i]; + children_index = entry->set_children_index(children_index); + } + ASSERT(edges().length() == children_index); + for (int i = 0; i < edges().length(); ++i) { + HeapGraphEdge* edge = &edges()[i]; + edge->ReplaceToIndexWithEntry(this); + edge->from()->add_child(edge); + } +} + + +class FindEntryById { + public: + explicit FindEntryById(SnapshotObjectId id) : id_(id) { } + int operator()(HeapEntry* const* entry) { + if ((*entry)->id() == id_) return 0; + return (*entry)->id() < id_ ? -1 : 1; + } + private: + SnapshotObjectId id_; +}; + + +HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) { + List<HeapEntry*>* entries_by_id = GetSortedEntriesList(); + // Perform a binary search by id. + int index = SortedListBSearch(*entries_by_id, FindEntryById(id)); + if (index == -1) + return NULL; + return entries_by_id->at(index); +} + + +template<class T> +static int SortByIds(const T* entry1_ptr, + const T* entry2_ptr) { + if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0; + return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1; +} + + +List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() { + if (sorted_entries_.is_empty()) { + sorted_entries_.Allocate(entries_.length()); + for (int i = 0; i < entries_.length(); ++i) { + sorted_entries_[i] = &entries_[i]; + } + sorted_entries_.Sort(SortByIds); + } + return &sorted_entries_; +} + + +void HeapSnapshot::Print(int max_depth) { + root()->Print("", "", max_depth, 0); +} + + +template<typename T, class P> +static size_t GetMemoryUsedByList(const List<T, P>& list) { + return list.length() * sizeof(T) + sizeof(list); +} + + +size_t HeapSnapshot::RawSnapshotSize() const { + STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize == + sizeof(HeapSnapshot)); // NOLINT + return + sizeof(*this) + + GetMemoryUsedByList(entries_) + + GetMemoryUsedByList(edges_) + + GetMemoryUsedByList(children_) + + GetMemoryUsedByList(sorted_entries_); +} + + +// We split IDs on evens for embedder objects (see +// HeapObjectsMap::GenerateId) and odds for native objects. 
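// To make the numbering concrete (kObjectIdStep itself is defined in the
// header, not in this file; it is assumed here to be 2): the root entry gets
// id 1, the GC roots entry id 3, the GC subroots ids 5, 7, ... (one per
// VisitorSynchronization sync tag), and ordinary heap objects count upward
// from kFirstAvailableObjectId in steps of kObjectIdStep, so they stay odd.
// HeapObjectsMap::GenerateId() below shifts its hash left by one bit, so ids
// for embedder-supplied objects are always even and the two id spaces never
// collide.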
+const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1; +const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId = + HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep; +const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId = + HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep; +const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId = + HeapObjectsMap::kGcRootsFirstSubrootId + + VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep; + +HeapObjectsMap::HeapObjectsMap(Heap* heap) + : next_id_(kFirstAvailableObjectId), + entries_map_(AddressesMatch), + heap_(heap) { + // This dummy element solves a problem with entries_map_. + // When we do a lookup in the HashMap we see no difference between two cases: + // it has an entry with NULL as the value or it has created + // a new entry on the fly with NULL as the default value. + // With such a dummy element we have a guarantee that all entries_map_ entries + // will have the value field greater than 0. + // This fact is used in the MoveObject method. + entries_.Add(EntryInfo(0, NULL, 0)); +} + + +void HeapObjectsMap::SnapshotGenerationFinished() { + RemoveDeadEntries(); +} + + +void HeapObjectsMap::MoveObject(Address from, Address to) { + ASSERT(to != NULL); + ASSERT(from != NULL); + if (from == to) return; + void* from_value = entries_map_.Remove(from, AddressHash(from)); + if (from_value == NULL) { + // It may occur that some untracked object moves to an address X and there + // is a tracked object at that address. In this case we should remove the + // entry as we know that the object has died. + void* to_value = entries_map_.Remove(to, AddressHash(to)); + if (to_value != NULL) { + int to_entry_info_index = + static_cast<int>(reinterpret_cast<intptr_t>(to_value)); + entries_.at(to_entry_info_index).addr = NULL; + } + } else { + HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true); + if (to_entry->value != NULL) { + // We found an existing entry with the 'to' address for an old object. + // Without this operation we would have two EntryInfo's with the same + // value in the addr field. This is bad because later, in RemoveDeadEntries, + // one of these entries would be removed together with the corresponding + // entries_map_ entry.
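// Detach the stale EntryInfo by clearing its address; RemoveDeadEntries will
// drop it on the next sweep, and the map slot for 'to' is reused below for
// the object that has just moved here.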
+ int to_entry_info_index = + static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value)); + entries_.at(to_entry_info_index).addr = NULL; + } + int from_entry_info_index = + static_cast<int>(reinterpret_cast<intptr_t>(from_value)); + entries_.at(from_entry_info_index).addr = to; + to_entry->value = from_value; + } +} + + +SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) { + HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false); + if (entry == NULL) return 0; + int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); + EntryInfo& entry_info = entries_.at(entry_index); + ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); + return entry_info.id; +} + + +SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr, + unsigned int size) { + ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); + HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true); + if (entry->value != NULL) { + int entry_index = + static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); + EntryInfo& entry_info = entries_.at(entry_index); + entry_info.accessed = true; + entry_info.size = size; + return entry_info.id; + } + entry->value = reinterpret_cast<void*>(entries_.length()); + SnapshotObjectId id = next_id_; + next_id_ += kObjectIdStep; + entries_.Add(EntryInfo(id, addr, size)); + ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); + return id; +} + + +void HeapObjectsMap::StopHeapObjectsTracking() { + time_intervals_.Clear(); +} + +void HeapObjectsMap::UpdateHeapObjectsMap() { + HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, + "HeapSnapshotsCollection::UpdateHeapObjectsMap"); + HeapIterator iterator(heap_); + for (HeapObject* obj = iterator.next(); + obj != NULL; + obj = iterator.next()) { + FindOrAddEntry(obj->address(), obj->Size()); + } + RemoveDeadEntries(); +} + + +SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) { + UpdateHeapObjectsMap(); + time_intervals_.Add(TimeInterval(next_id_)); + int prefered_chunk_size = stream->GetChunkSize(); + List<v8::HeapStatsUpdate> stats_buffer; + ASSERT(!entries_.is_empty()); + EntryInfo* entry_info = &entries_.first(); + EntryInfo* end_entry_info = &entries_.last() + 1; + for (int time_interval_index = 0; + time_interval_index < time_intervals_.length(); + ++time_interval_index) { + TimeInterval& time_interval = time_intervals_[time_interval_index]; + SnapshotObjectId time_interval_id = time_interval.id; + uint32_t entries_size = 0; + EntryInfo* start_entry_info = entry_info; + while (entry_info < end_entry_info && entry_info->id < time_interval_id) { + entries_size += entry_info->size; + ++entry_info; + } + uint32_t entries_count = + static_cast<uint32_t>(entry_info - start_entry_info); + if (time_interval.count != entries_count || + time_interval.size != entries_size) { + stats_buffer.Add(v8::HeapStatsUpdate( + time_interval_index, + time_interval.count = entries_count, + time_interval.size = entries_size)); + if (stats_buffer.length() >= prefered_chunk_size) { + OutputStream::WriteResult result = stream->WriteHeapStatsChunk( + &stats_buffer.first(), stats_buffer.length()); + if (result == OutputStream::kAbort) return last_assigned_id(); + stats_buffer.Clear(); + } + } + } + ASSERT(entry_info == end_entry_info); + if (!stats_buffer.is_empty()) { + OutputStream::WriteResult result = stream->WriteHeapStatsChunk( + &stats_buffer.first(), stats_buffer.length()); + if (result == OutputStream::kAbort) 
return last_assigned_id(); + } + stream->EndOfStream(); + return last_assigned_id(); +} + + +void HeapObjectsMap::RemoveDeadEntries() { + ASSERT(entries_.length() > 0 && + entries_.at(0).id == 0 && + entries_.at(0).addr == NULL); + int first_free_entry = 1; + for (int i = 1; i < entries_.length(); ++i) { + EntryInfo& entry_info = entries_.at(i); + if (entry_info.accessed) { + if (first_free_entry != i) { + entries_.at(first_free_entry) = entry_info; + } + entries_.at(first_free_entry).accessed = false; + HashMap::Entry* entry = entries_map_.Lookup( + entry_info.addr, AddressHash(entry_info.addr), false); + ASSERT(entry); + entry->value = reinterpret_cast<void*>(first_free_entry); + ++first_free_entry; + } else { + if (entry_info.addr) { + entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr)); + } + } + } + entries_.Rewind(first_free_entry); + ASSERT(static_cast<uint32_t>(entries_.length()) - 1 == + entries_map_.occupancy()); +} + + +SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) { + SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash()); + const char* label = info->GetLabel(); + id ^= StringHasher::HashSequentialString(label, + static_cast<int>(strlen(label)), + HEAP->HashSeed()); + intptr_t element_count = info->GetElementCount(); + if (element_count != -1) + id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count), + v8::internal::kZeroHashSeed); + return id << 1; +} + + +size_t HeapObjectsMap::GetUsedMemorySize() const { + return + sizeof(*this) + + sizeof(HashMap::Entry) * entries_map_.capacity() + + GetMemoryUsedByList(entries_) + + GetMemoryUsedByList(time_intervals_); +} + + +HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap) + : is_tracking_objects_(false), + snapshots_uids_(HeapSnapshotsMatch), + token_enumerator_(new TokenEnumerator()), + ids_(heap) { +} + + +static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) { + delete *snapshot_ptr; +} + + +HeapSnapshotsCollection::~HeapSnapshotsCollection() { + delete token_enumerator_; + snapshots_.Iterate(DeleteHeapSnapshot); +} + + +HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type, + const char* name, + unsigned uid) { + is_tracking_objects_ = true; // Start watching for heap objects moves. + return new HeapSnapshot(this, type, name, uid); +} + + +void HeapSnapshotsCollection::SnapshotGenerationFinished( + HeapSnapshot* snapshot) { + ids_.SnapshotGenerationFinished(); + if (snapshot != NULL) { + snapshots_.Add(snapshot); + HashMap::Entry* entry = + snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()), + static_cast<uint32_t>(snapshot->uid()), + true); + ASSERT(entry->value == NULL); + entry->value = snapshot; + } +} + + +HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) { + HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid), + static_cast<uint32_t>(uid), + false); + return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL; +} + + +void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) { + snapshots_.RemoveElement(snapshot); + unsigned uid = snapshot->uid(); + snapshots_uids_.Remove(reinterpret_cast<void*>(uid), + static_cast<uint32_t>(uid)); +} + + +Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById( + SnapshotObjectId id) { + // First perform a full GC in order to avoid dead objects. 
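// kMakeHeapIterableMask is also what allows the HeapIterator walk below to
// visit every object; combined with kFilterUnreachable it should guarantee
// that only a still-live object can be returned.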
+ HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, + "HeapSnapshotsCollection::FindHeapObjectById"); + AssertNoAllocation no_allocation; + HeapObject* object = NULL; + HeapIterator iterator(heap(), HeapIterator::kFilterUnreachable); + // Make sure that object with the given id is still reachable. + for (HeapObject* obj = iterator.next(); + obj != NULL; + obj = iterator.next()) { + if (ids_.FindEntry(obj->address()) == id) { + ASSERT(object == NULL); + object = obj; + // Can't break -- kFilterUnreachable requires full heap traversal. + } + } + return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>(); +} + + +size_t HeapSnapshotsCollection::GetUsedMemorySize() const { + STATIC_CHECK(SnapshotSizeConstants<kPointerSize>:: + kExpectedHeapSnapshotsCollectionSize == + sizeof(HeapSnapshotsCollection)); // NOLINT + size_t size = sizeof(*this); + size += names_.GetUsedMemorySize(); + size += ids_.GetUsedMemorySize(); + size += sizeof(HashMap::Entry) * snapshots_uids_.capacity(); + size += GetMemoryUsedByList(snapshots_); + for (int i = 0; i < snapshots_.length(); ++i) { + size += snapshots_[i]->RawSnapshotSize(); + } + return size; +} + + +HeapEntriesMap::HeapEntriesMap() + : entries_(HeapThingsMatch) { +} + + +int HeapEntriesMap::Map(HeapThing thing) { + HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false); + if (cache_entry == NULL) return HeapEntry::kNoEntry; + return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value)); +} + + +void HeapEntriesMap::Pair(HeapThing thing, int entry) { + HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true); + ASSERT(cache_entry->value == NULL); + cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry)); +} + + +HeapObjectsSet::HeapObjectsSet() + : entries_(HeapEntriesMap::HeapThingsMatch) { +} + + +void HeapObjectsSet::Clear() { + entries_.Clear(); +} + + +bool HeapObjectsSet::Contains(Object* obj) { + if (!obj->IsHeapObject()) return false; + HeapObject* object = HeapObject::cast(obj); + return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL; +} + + +void HeapObjectsSet::Insert(Object* obj) { + if (!obj->IsHeapObject()) return; + HeapObject* object = HeapObject::cast(obj); + entries_.Lookup(object, HeapEntriesMap::Hash(object), true); +} + + +const char* HeapObjectsSet::GetTag(Object* obj) { + HeapObject* object = HeapObject::cast(obj); + HashMap::Entry* cache_entry = + entries_.Lookup(object, HeapEntriesMap::Hash(object), false); + return cache_entry != NULL + ? 
reinterpret_cast<const char*>(cache_entry->value) + : NULL; +} + + +void HeapObjectsSet::SetTag(Object* obj, const char* tag) { + if (!obj->IsHeapObject()) return; + HeapObject* object = HeapObject::cast(obj); + HashMap::Entry* cache_entry = + entries_.Lookup(object, HeapEntriesMap::Hash(object), true); + cache_entry->value = const_cast<char*>(tag); +} + + +HeapObject* const V8HeapExplorer::kInternalRootObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId)); +HeapObject* const V8HeapExplorer::kGcRootsObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId)); +HeapObject* const V8HeapExplorer::kFirstGcSubrootObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId)); +HeapObject* const V8HeapExplorer::kLastGcSubrootObject = + reinterpret_cast<HeapObject*>( + static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId)); + + +V8HeapExplorer::V8HeapExplorer( + HeapSnapshot* snapshot, + SnapshottingProgressReportingInterface* progress, + v8::HeapProfiler::ObjectNameResolver* resolver) + : heap_(Isolate::Current()->heap()), + snapshot_(snapshot), + collection_(snapshot_->collection()), + progress_(progress), + filler_(NULL), + global_object_name_resolver_(resolver) { +} + + +V8HeapExplorer::~V8HeapExplorer() { +} + + +HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) { + return AddEntry(reinterpret_cast<HeapObject*>(ptr)); +} + + +HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) { + if (object == kInternalRootObject) { + snapshot_->AddRootEntry(); + return snapshot_->root(); + } else if (object == kGcRootsObject) { + HeapEntry* entry = snapshot_->AddGcRootsEntry(); + return entry; + } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) { + HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object)); + return entry; + } else if (object->IsJSFunction()) { + JSFunction* func = JSFunction::cast(object); + SharedFunctionInfo* shared = func->shared(); + const char* name = shared->bound() ? "native_bind" : + collection_->names()->GetName(String::cast(shared->name())); + return AddEntry(object, HeapEntry::kClosure, name); + } else if (object->IsJSRegExp()) { + JSRegExp* re = JSRegExp::cast(object); + return AddEntry(object, + HeapEntry::kRegExp, + collection_->names()->GetName(re->Pattern())); + } else if (object->IsJSObject()) { + const char* name = collection_->names()->GetName( + GetConstructorName(JSObject::cast(object))); + if (object->IsJSGlobalObject()) { + const char* tag = objects_tags_.GetTag(object); + if (tag != NULL) { + name = collection_->names()->GetFormatted("%s / %s", name, tag); + } + } + return AddEntry(object, HeapEntry::kObject, name); + } else if (object->IsString()) { + return AddEntry(object, + HeapEntry::kString, + collection_->names()->GetName(String::cast(object))); + } else if (object->IsCode()) { + return AddEntry(object, HeapEntry::kCode, ""); + } else if (object->IsSharedFunctionInfo()) { + String* name = String::cast(SharedFunctionInfo::cast(object)->name()); + return AddEntry(object, + HeapEntry::kCode, + collection_->names()->GetName(name)); + } else if (object->IsScript()) { + Object* name = Script::cast(object)->name(); + return AddEntry(object, + HeapEntry::kCode, + name->IsString() + ? 
collection_->names()->GetName(String::cast(name)) + : ""); + } else if (object->IsNativeContext()) { + return AddEntry(object, HeapEntry::kHidden, "system / NativeContext"); + } else if (object->IsContext()) { + return AddEntry(object, HeapEntry::kObject, "system / Context"); + } else if (object->IsFixedArray() || + object->IsFixedDoubleArray() || + object->IsByteArray() || + object->IsExternalArray()) { + return AddEntry(object, HeapEntry::kArray, ""); + } else if (object->IsHeapNumber()) { + return AddEntry(object, HeapEntry::kHeapNumber, "number"); + } + return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object)); +} + + +HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object, + HeapEntry::Type type, + const char* name) { + int object_size = object->Size(); + SnapshotObjectId object_id = + collection_->GetObjectId(object->address(), object_size); + return snapshot_->AddEntry(type, name, object_id, object_size); +} + + +class GcSubrootsEnumerator : public ObjectVisitor { + public: + GcSubrootsEnumerator( + SnapshotFillerInterface* filler, V8HeapExplorer* explorer) + : filler_(filler), + explorer_(explorer), + previous_object_count_(0), + object_count_(0) { + } + void VisitPointers(Object** start, Object** end) { + object_count_ += end - start; + } + void Synchronize(VisitorSynchronization::SyncTag tag) { + // Skip empty subroots. + if (previous_object_count_ != object_count_) { + previous_object_count_ = object_count_; + filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_); + } + } + private: + SnapshotFillerInterface* filler_; + V8HeapExplorer* explorer_; + intptr_t previous_object_count_; + intptr_t object_count_; +}; + + +void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) { + filler->AddEntry(kInternalRootObject, this); + filler->AddEntry(kGcRootsObject, this); + GcSubrootsEnumerator enumerator(filler, this); + heap_->IterateRoots(&enumerator, VISIT_ALL); +} + + +const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) { + switch (object->map()->instance_type()) { + case MAP_TYPE: + switch (Map::cast(object)->instance_type()) { +#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \ + case instance_type: return "system / Map (" #Name ")"; + STRING_TYPE_LIST(MAKE_STRING_MAP_CASE) +#undef MAKE_STRING_MAP_CASE + default: return "system / Map"; + } + case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell"; + case FOREIGN_TYPE: return "system / Foreign"; + case ODDBALL_TYPE: return "system / Oddball"; +#define MAKE_STRUCT_CASE(NAME, Name, name) \ + case NAME##_TYPE: return "system / "#Name; + STRUCT_LIST(MAKE_STRUCT_CASE) +#undef MAKE_STRUCT_CASE + default: return "system"; + } +} + + +int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) { + int objects_count = 0; + for (HeapObject* obj = iterator->next(); + obj != NULL; + obj = iterator->next()) { + objects_count++; + } + return objects_count; +} + + +class IndexedReferencesExtractor : public ObjectVisitor { + public: + IndexedReferencesExtractor(V8HeapExplorer* generator, + HeapObject* parent_obj, + int parent) + : generator_(generator), + parent_obj_(parent_obj), + parent_(parent), + next_index_(1) { + } + void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) { + if (CheckVisitedAndUnmark(p)) continue; + generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p); + } + } + static void MarkVisitedField(HeapObject* obj, int offset) { + if (offset < 0) return; + Address field = obj->address() 
+ offset; + ASSERT(!Memory::Object_at(field)->IsFailure()); + ASSERT(Memory::Object_at(field)->IsHeapObject()); + *field |= kFailureTag; + } + + private: + bool CheckVisitedAndUnmark(Object** field) { + if ((*field)->IsFailure()) { + intptr_t untagged = reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask; + *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag); + ASSERT((*field)->IsHeapObject()); + return true; + } + return false; + } + V8HeapExplorer* generator_; + HeapObject* parent_obj_; + int parent_; + int next_index_; +}; + + +void V8HeapExplorer::ExtractReferences(HeapObject* obj) { + HeapEntry* heap_entry = GetEntry(obj); + if (heap_entry == NULL) return; // No interest in this object. + int entry = heap_entry->index(); + + bool extract_indexed_refs = true; + if (obj->IsJSGlobalProxy()) { + ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj)); + } else if (obj->IsJSObject()) { + ExtractJSObjectReferences(entry, JSObject::cast(obj)); + } else if (obj->IsString()) { + ExtractStringReferences(entry, String::cast(obj)); + } else if (obj->IsContext()) { + ExtractContextReferences(entry, Context::cast(obj)); + } else if (obj->IsMap()) { + ExtractMapReferences(entry, Map::cast(obj)); + } else if (obj->IsSharedFunctionInfo()) { + ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj)); + } else if (obj->IsScript()) { + ExtractScriptReferences(entry, Script::cast(obj)); + } else if (obj->IsCodeCache()) { + ExtractCodeCacheReferences(entry, CodeCache::cast(obj)); + } else if (obj->IsCode()) { + ExtractCodeReferences(entry, Code::cast(obj)); + } else if (obj->IsJSGlobalPropertyCell()) { + ExtractJSGlobalPropertyCellReferences( + entry, JSGlobalPropertyCell::cast(obj)); + extract_indexed_refs = false; + } + if (extract_indexed_refs) { + SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset); + IndexedReferencesExtractor refs_extractor(this, obj, entry); + obj->Iterate(&refs_extractor); + } +} + + +void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) { + // We need to reference JS global objects from snapshot's root. + // We use JSGlobalProxy because this is what embedder (e.g. browser) + // uses for the global object. + Object* object = proxy->map()->prototype(); + bool is_debug_object = false; +#ifdef ENABLE_DEBUGGER_SUPPORT + is_debug_object = object->IsGlobalObject() && + Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object)); +#endif + if (!is_debug_object) { + SetUserGlobalReference(object); + } +} + + +void V8HeapExplorer::ExtractJSObjectReferences( + int entry, JSObject* js_obj) { + HeapObject* obj = js_obj; + ExtractClosureReferences(js_obj, entry); + ExtractPropertyReferences(js_obj, entry); + ExtractElementReferences(js_obj, entry); + ExtractInternalReferences(js_obj, entry); + SetPropertyReference( + obj, entry, heap_->proto_string(), js_obj->GetPrototype()); + if (obj->IsJSFunction()) { + JSFunction* js_fun = JSFunction::cast(js_obj); + Object* proto_or_map = js_fun->prototype_or_initial_map(); + if (!proto_or_map->IsTheHole()) { + if (!proto_or_map->IsMap()) { + SetPropertyReference( + obj, entry, + heap_->prototype_string(), proto_or_map, + NULL, + JSFunction::kPrototypeOrInitialMapOffset); + } else { + SetPropertyReference( + obj, entry, + heap_->prototype_string(), js_fun->prototype()); + } + } + SharedFunctionInfo* shared_info = js_fun->shared(); + // JSFunction has either bindings or literals and never both. 
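// The bound() flag read below decides how the single literals_or_bindings
// slot is tagged and named in the snapshot.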
+ bool bound = shared_info->bound(); + TagObject(js_fun->literals_or_bindings(), + bound ? "(function bindings)" : "(function literals)"); + SetInternalReference(js_fun, entry, + bound ? "bindings" : "literals", + js_fun->literals_or_bindings(), + JSFunction::kLiteralsOffset); + TagObject(shared_info, "(shared function info)"); + SetInternalReference(js_fun, entry, + "shared", shared_info, + JSFunction::kSharedFunctionInfoOffset); + TagObject(js_fun->unchecked_context(), "(context)"); + SetInternalReference(js_fun, entry, + "context", js_fun->unchecked_context(), + JSFunction::kContextOffset); + for (int i = JSFunction::kNonWeakFieldsEndOffset; + i < JSFunction::kSize; + i += kPointerSize) { + SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i); + } + } else if (obj->IsGlobalObject()) { + GlobalObject* global_obj = GlobalObject::cast(obj); + SetInternalReference(global_obj, entry, + "builtins", global_obj->builtins(), + GlobalObject::kBuiltinsOffset); + SetInternalReference(global_obj, entry, + "native_context", global_obj->native_context(), + GlobalObject::kNativeContextOffset); + SetInternalReference(global_obj, entry, + "global_receiver", global_obj->global_receiver(), + GlobalObject::kGlobalReceiverOffset); + } + TagObject(js_obj->properties(), "(object properties)"); + SetInternalReference(obj, entry, + "properties", js_obj->properties(), + JSObject::kPropertiesOffset); + TagObject(js_obj->elements(), "(object elements)"); + SetInternalReference(obj, entry, + "elements", js_obj->elements(), + JSObject::kElementsOffset); +} + + +void V8HeapExplorer::ExtractStringReferences(int entry, String* string) { + if (string->IsConsString()) { + ConsString* cs = ConsString::cast(string); + SetInternalReference(cs, entry, "first", cs->first(), + ConsString::kFirstOffset); + SetInternalReference(cs, entry, "second", cs->second(), + ConsString::kSecondOffset); + } else if (string->IsSlicedString()) { + SlicedString* ss = SlicedString::cast(string); + SetInternalReference(ss, entry, "parent", ss->parent(), + SlicedString::kParentOffset); + } +} + + +void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) { + if (context == context->declaration_context()) { + ScopeInfo* scope_info = context->closure()->shared()->scope_info(); + // Add context allocated locals. + int context_locals = scope_info->ContextLocalCount(); + for (int i = 0; i < context_locals; ++i) { + String* local_name = scope_info->ContextLocalName(i); + int idx = Context::MIN_CONTEXT_SLOTS + i; + SetContextReference(context, entry, local_name, context->get(idx), + Context::OffsetOfElementAt(idx)); + } + if (scope_info->HasFunctionName()) { + String* name = scope_info->FunctionName(); + VariableMode mode; + int idx = scope_info->FunctionContextSlotIndex(name, &mode); + if (idx >= 0) { + SetContextReference(context, entry, name, context->get(idx), + Context::OffsetOfElementAt(idx)); + } + } + } + +#define EXTRACT_CONTEXT_FIELD(index, type, name) \ + SetInternalReference(context, entry, #name, context->get(Context::index), \ + FixedArray::OffsetOfElementAt(Context::index)); + EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure); + EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous); + EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension); + EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global); + if (context->IsNativeContext()) { + TagObject(context->jsfunction_result_caches(), + "(context func. result caches)"); + TagObject(context->normalized_map_cache(), "(context norm. 
map cache)"); + TagObject(context->runtime_context(), "(runtime context)"); + TagObject(context->embedder_data(), "(context data)"); + NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD); +#undef EXTRACT_CONTEXT_FIELD + for (int i = Context::FIRST_WEAK_SLOT; + i < Context::NATIVE_CONTEXT_SLOTS; + ++i) { + SetWeakReference(context, entry, i, context->get(i), + FixedArray::OffsetOfElementAt(i)); + } + } +} + + +void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) { + SetInternalReference(map, entry, + "prototype", map->prototype(), Map::kPrototypeOffset); + SetInternalReference(map, entry, + "constructor", map->constructor(), + Map::kConstructorOffset); + if (map->HasTransitionArray()) { + TransitionArray* transitions = map->transitions(); + + Object* back_pointer = transitions->back_pointer_storage(); + TagObject(transitions->back_pointer_storage(), "(back pointer)"); + SetInternalReference(transitions, entry, + "backpointer", back_pointer, + TransitionArray::kBackPointerStorageOffset); + IndexedReferencesExtractor transitions_refs(this, transitions, entry); + transitions->Iterate(&transitions_refs); + + TagObject(transitions, "(transition array)"); + SetInternalReference(map, entry, + "transitions", transitions, + Map::kTransitionsOrBackPointerOffset); + } else { + Object* back_pointer = map->GetBackPointer(); + TagObject(back_pointer, "(back pointer)"); + SetInternalReference(map, entry, + "backpointer", back_pointer, + Map::kTransitionsOrBackPointerOffset); + } + DescriptorArray* descriptors = map->instance_descriptors(); + TagObject(descriptors, "(map descriptors)"); + SetInternalReference(map, entry, + "descriptors", descriptors, + Map::kDescriptorsOffset); + + SetInternalReference(map, entry, + "code_cache", map->code_cache(), + Map::kCodeCacheOffset); +} + + +void V8HeapExplorer::ExtractSharedFunctionInfoReferences( + int entry, SharedFunctionInfo* shared) { + HeapObject* obj = shared; + SetInternalReference(obj, entry, + "name", shared->name(), + SharedFunctionInfo::kNameOffset); + TagObject(shared->code(), "(code)"); + SetInternalReference(obj, entry, + "code", shared->code(), + SharedFunctionInfo::kCodeOffset); + TagObject(shared->scope_info(), "(function scope info)"); + SetInternalReference(obj, entry, + "scope_info", shared->scope_info(), + SharedFunctionInfo::kScopeInfoOffset); + SetInternalReference(obj, entry, + "instance_class_name", shared->instance_class_name(), + SharedFunctionInfo::kInstanceClassNameOffset); + SetInternalReference(obj, entry, + "script", shared->script(), + SharedFunctionInfo::kScriptOffset); + TagObject(shared->construct_stub(), "(code)"); + SetInternalReference(obj, entry, + "construct_stub", shared->construct_stub(), + SharedFunctionInfo::kConstructStubOffset); + SetInternalReference(obj, entry, + "function_data", shared->function_data(), + SharedFunctionInfo::kFunctionDataOffset); + SetInternalReference(obj, entry, + "debug_info", shared->debug_info(), + SharedFunctionInfo::kDebugInfoOffset); + SetInternalReference(obj, entry, + "inferred_name", shared->inferred_name(), + SharedFunctionInfo::kInferredNameOffset); + SetInternalReference(obj, entry, + "this_property_assignments", + shared->this_property_assignments(), + SharedFunctionInfo::kThisPropertyAssignmentsOffset); + SetWeakReference(obj, entry, + 1, shared->initial_map(), + SharedFunctionInfo::kInitialMapOffset); +} + + +void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) { + HeapObject* obj = script; + SetInternalReference(obj, entry, + "source", script->source(), + 
Script::kSourceOffset); + SetInternalReference(obj, entry, + "name", script->name(), + Script::kNameOffset); + SetInternalReference(obj, entry, + "data", script->data(), + Script::kDataOffset); + SetInternalReference(obj, entry, + "context_data", script->context_data(), + Script::kContextOffset); + TagObject(script->line_ends(), "(script line ends)"); + SetInternalReference(obj, entry, + "line_ends", script->line_ends(), + Script::kLineEndsOffset); +} + + +void V8HeapExplorer::ExtractCodeCacheReferences( + int entry, CodeCache* code_cache) { + TagObject(code_cache->default_cache(), "(default code cache)"); + SetInternalReference(code_cache, entry, + "default_cache", code_cache->default_cache(), + CodeCache::kDefaultCacheOffset); + TagObject(code_cache->normal_type_cache(), "(code type cache)"); + SetInternalReference(code_cache, entry, + "type_cache", code_cache->normal_type_cache(), + CodeCache::kNormalTypeCacheOffset); +} + + +void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) { + TagObject(code->relocation_info(), "(code relocation info)"); + SetInternalReference(code, entry, + "relocation_info", code->relocation_info(), + Code::kRelocationInfoOffset); + SetInternalReference(code, entry, + "handler_table", code->handler_table(), + Code::kHandlerTableOffset); + TagObject(code->deoptimization_data(), "(code deopt data)"); + SetInternalReference(code, entry, + "deoptimization_data", code->deoptimization_data(), + Code::kDeoptimizationDataOffset); + if (code->kind() == Code::FUNCTION) { + SetInternalReference(code, entry, + "type_feedback_info", code->type_feedback_info(), + Code::kTypeFeedbackInfoOffset); + } + SetInternalReference(code, entry, + "gc_metadata", code->gc_metadata(), + Code::kGCMetadataOffset); +} + + +void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences( + int entry, JSGlobalPropertyCell* cell) { + SetInternalReference(cell, entry, "value", cell->value()); +} + + +void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) { + if (!js_obj->IsJSFunction()) return; + + JSFunction* func = JSFunction::cast(js_obj); + if (func->shared()->bound()) { + FixedArray* bindings = func->function_bindings(); + SetNativeBindReference(js_obj, entry, "bound_this", + bindings->get(JSFunction::kBoundThisIndex)); + SetNativeBindReference(js_obj, entry, "bound_function", + bindings->get(JSFunction::kBoundFunctionIndex)); + for (int i = JSFunction::kBoundArgumentsStartIndex; + i < bindings->length(); i++) { + const char* reference_name = collection_->names()->GetFormatted( + "bound_argument_%d", + i - JSFunction::kBoundArgumentsStartIndex); + SetNativeBindReference(js_obj, entry, reference_name, + bindings->get(i)); + } + } +} + + +void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) { + if (js_obj->HasFastProperties()) { + DescriptorArray* descs = js_obj->map()->instance_descriptors(); + int real_size = js_obj->map()->NumberOfOwnDescriptors(); + for (int i = 0; i < descs->number_of_descriptors(); i++) { + if (descs->GetDetails(i).descriptor_index() > real_size) continue; + switch (descs->GetType(i)) { + case FIELD: { + int index = descs->GetFieldIndex(i); + + Name* k = descs->GetKey(i); + if (index < js_obj->map()->inobject_properties()) { + Object* value = js_obj->InObjectPropertyAt(index); + if (k != heap_->hidden_string()) { + SetPropertyReference( + js_obj, entry, + k, value, + NULL, + js_obj->GetInObjectPropertyOffset(index)); + } else { + TagObject(value, "(hidden properties)"); + SetInternalReference( + js_obj, entry, + 
"hidden_properties", value, + js_obj->GetInObjectPropertyOffset(index)); + } + } else { + Object* value = js_obj->FastPropertyAt(index); + if (k != heap_->hidden_string()) { + SetPropertyReference(js_obj, entry, k, value); + } else { + TagObject(value, "(hidden properties)"); + SetInternalReference(js_obj, entry, "hidden_properties", value); + } + } + break; + } + case CONSTANT_FUNCTION: + SetPropertyReference( + js_obj, entry, + descs->GetKey(i), descs->GetConstantFunction(i)); + break; + case CALLBACKS: { + Object* callback_obj = descs->GetValue(i); + if (callback_obj->IsAccessorPair()) { + AccessorPair* accessors = AccessorPair::cast(callback_obj); + if (Object* getter = accessors->getter()) { + SetPropertyReference(js_obj, entry, descs->GetKey(i), + getter, "get-%s"); + } + if (Object* setter = accessors->setter()) { + SetPropertyReference(js_obj, entry, descs->GetKey(i), + setter, "set-%s"); + } + } + break; + } + case NORMAL: // only in slow mode + case HANDLER: // only in lookup results, not in descriptors + case INTERCEPTOR: // only in lookup results, not in descriptors + break; + case TRANSITION: + case NONEXISTENT: + UNREACHABLE(); + break; + } + } + } else { + NameDictionary* dictionary = js_obj->property_dictionary(); + int length = dictionary->Capacity(); + for (int i = 0; i < length; ++i) { + Object* k = dictionary->KeyAt(i); + if (dictionary->IsKey(k)) { + Object* target = dictionary->ValueAt(i); + // We assume that global objects can only have slow properties. + Object* value = target->IsJSGlobalPropertyCell() + ? JSGlobalPropertyCell::cast(target)->value() + : target; + if (k != heap_->hidden_string()) { + SetPropertyReference(js_obj, entry, String::cast(k), value); + } else { + TagObject(value, "(hidden properties)"); + SetInternalReference(js_obj, entry, "hidden_properties", value); + } + } + } + } +} + + +void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) { + if (js_obj->HasFastObjectElements()) { + FixedArray* elements = FixedArray::cast(js_obj->elements()); + int length = js_obj->IsJSArray() ? + Smi::cast(JSArray::cast(js_obj)->length())->value() : + elements->length(); + for (int i = 0; i < length; ++i) { + if (!elements->get(i)->IsTheHole()) { + SetElementReference(js_obj, entry, i, elements->get(i)); + } + } + } else if (js_obj->HasDictionaryElements()) { + SeededNumberDictionary* dictionary = js_obj->element_dictionary(); + int length = dictionary->Capacity(); + for (int i = 0; i < length; ++i) { + Object* k = dictionary->KeyAt(i); + if (dictionary->IsKey(k)) { + ASSERT(k->IsNumber()); + uint32_t index = static_cast<uint32_t>(k->Number()); + SetElementReference(js_obj, entry, index, dictionary->ValueAt(i)); + } + } + } +} + + +void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) { + int length = js_obj->GetInternalFieldCount(); + for (int i = 0; i < length; ++i) { + Object* o = js_obj->GetInternalField(i); + SetInternalReference( + js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i)); + } +} + + +String* V8HeapExplorer::GetConstructorName(JSObject* object) { + Heap* heap = object->GetHeap(); + if (object->IsJSFunction()) return heap->closure_string(); + String* constructor_name = object->constructor_name(); + if (constructor_name == heap->Object_string()) { + // Look up an immediate "constructor" property, if it is a function, + // return its name. This is for instances of binding objects, which + // have prototype constructor type "Object". 
+ Object* constructor_prop = NULL; + LookupResult result(heap->isolate()); + object->LocalLookupRealNamedProperty(heap->constructor_string(), &result); + if (!result.IsFound()) return object->constructor_name(); + + constructor_prop = result.GetLazyValue(); + if (constructor_prop->IsJSFunction()) { + Object* maybe_name = + JSFunction::cast(constructor_prop)->shared()->name(); + if (maybe_name->IsString()) { + String* name = String::cast(maybe_name); + if (name->length() > 0) return name; + } + } + } + return object->constructor_name(); +} + + +HeapEntry* V8HeapExplorer::GetEntry(Object* obj) { + if (!obj->IsHeapObject()) return NULL; + return filler_->FindOrAddEntry(obj, this); +} + + +class RootsReferencesExtractor : public ObjectVisitor { + private: + struct IndexTag { + IndexTag(int index, VisitorSynchronization::SyncTag tag) + : index(index), tag(tag) { } + int index; + VisitorSynchronization::SyncTag tag; + }; + + public: + RootsReferencesExtractor() + : collecting_all_references_(false), + previous_reference_count_(0) { + } + + void VisitPointers(Object** start, Object** end) { + if (collecting_all_references_) { + for (Object** p = start; p < end; p++) all_references_.Add(*p); + } else { + for (Object** p = start; p < end; p++) strong_references_.Add(*p); + } + } + + void SetCollectingAllReferences() { collecting_all_references_ = true; } + + void FillReferences(V8HeapExplorer* explorer) { + ASSERT(strong_references_.length() <= all_references_.length()); + for (int i = 0; i < reference_tags_.length(); ++i) { + explorer->SetGcRootsReference(reference_tags_[i].tag); + } + int strong_index = 0, all_index = 0, tags_index = 0; + while (all_index < all_references_.length()) { + if (strong_index < strong_references_.length() && + strong_references_[strong_index] == all_references_[all_index]) { + explorer->SetGcSubrootReference(reference_tags_[tags_index].tag, + false, + all_references_[all_index++]); + ++strong_index; + } else { + explorer->SetGcSubrootReference(reference_tags_[tags_index].tag, + true, + all_references_[all_index++]); + } + if (reference_tags_[tags_index].index == all_index) ++tags_index; + } + } + + void Synchronize(VisitorSynchronization::SyncTag tag) { + if (collecting_all_references_ && + previous_reference_count_ != all_references_.length()) { + previous_reference_count_ = all_references_.length(); + reference_tags_.Add(IndexTag(previous_reference_count_, tag)); + } + } + + private: + bool collecting_all_references_; + List<Object*> strong_references_; + List<Object*> all_references_; + int previous_reference_count_; + List<IndexTag> reference_tags_; +}; + + +bool V8HeapExplorer::IterateAndExtractReferences( + SnapshotFillerInterface* filler) { + HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable); + + filler_ = filler; + bool interrupted = false; + + // Heap iteration with filtering must be finished in any case. 
+ for (HeapObject* obj = iterator.next(); + obj != NULL; + obj = iterator.next(), progress_->ProgressStep()) { + if (!interrupted) { + ExtractReferences(obj); + if (!progress_->ProgressReport(false)) interrupted = true; + } + } + if (interrupted) { + filler_ = NULL; + return false; + } + + SetRootGcRootsReference(); + RootsReferencesExtractor extractor; + heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG); + extractor.SetCollectingAllReferences(); + heap_->IterateRoots(&extractor, VISIT_ALL); + extractor.FillReferences(this); + filler_ = NULL; + return progress_->ProgressReport(true); +} + + +bool V8HeapExplorer::IsEssentialObject(Object* object) { + return object->IsHeapObject() + && !object->IsOddball() + && object != heap_->empty_byte_array() + && object != heap_->empty_fixed_array() + && object != heap_->empty_descriptor_array() + && object != heap_->fixed_array_map() + && object != heap_->global_property_cell_map() + && object != heap_->shared_function_info_map() + && object != heap_->free_space_map() + && object != heap_->one_pointer_filler_map() + && object != heap_->two_pointer_filler_map(); +} + + +void V8HeapExplorer::SetContextReference(HeapObject* parent_obj, + int parent_entry, + String* reference_name, + Object* child_obj, + int field_offset) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL) { + filler_->SetNamedReference(HeapGraphEdge::kContextVariable, + parent_entry, + collection_->names()->GetName(reference_name), + child_entry); + IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); + } +} + + +void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj, + int parent_entry, + const char* reference_name, + Object* child_obj) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL) { + filler_->SetNamedReference(HeapGraphEdge::kShortcut, + parent_entry, + reference_name, + child_entry); + } +} + + +void V8HeapExplorer::SetElementReference(HeapObject* parent_obj, + int parent_entry, + int index, + Object* child_obj) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL) { + filler_->SetIndexedReference(HeapGraphEdge::kElement, + parent_entry, + index, + child_entry); + } +} + + +void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj, + int parent_entry, + const char* reference_name, + Object* child_obj, + int field_offset) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry == NULL) return; + if (IsEssentialObject(child_obj)) { + filler_->SetNamedReference(HeapGraphEdge::kInternal, + parent_entry, + reference_name, + child_entry); + } + IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); +} + + +void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj, + int parent_entry, + int index, + Object* child_obj, + int field_offset) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry == NULL) return; + if (IsEssentialObject(child_obj)) { + filler_->SetNamedReference(HeapGraphEdge::kInternal, + parent_entry, + collection_->names()->GetName(index), + child_entry); + } + IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); +} + + +void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj, + int parent_entry, + int index, + Object* child_obj) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL && IsEssentialObject(child_obj)) { + filler_->SetIndexedReference(HeapGraphEdge::kHidden, + parent_entry, + index, + child_entry); + } +} + + +void 
V8HeapExplorer::SetWeakReference(HeapObject* parent_obj, + int parent_entry, + int index, + Object* child_obj, + int field_offset) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL) { + filler_->SetIndexedReference(HeapGraphEdge::kWeak, + parent_entry, + index, + child_entry); + IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); + } +} + + +void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj, + int parent_entry, + Name* reference_name, + Object* child_obj, + const char* name_format_string, + int field_offset) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL) { + HeapGraphEdge::Type type = + reference_name->IsSymbol() || String::cast(reference_name)->length() > 0 + ? HeapGraphEdge::kProperty : HeapGraphEdge::kInternal; + const char* name = name_format_string != NULL && reference_name->IsString() + ? collection_->names()->GetFormatted( + name_format_string, + *String::cast(reference_name)->ToCString( + DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL)) : + collection_->names()->GetName(reference_name); + + filler_->SetNamedReference(type, + parent_entry, + name, + child_entry); + IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); + } +} + + +void V8HeapExplorer::SetRootGcRootsReference() { + filler_->SetIndexedAutoIndexReference( + HeapGraphEdge::kElement, + snapshot_->root()->index(), + snapshot_->gc_roots()); +} + + +void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) { + HeapEntry* child_entry = GetEntry(child_obj); + ASSERT(child_entry != NULL); + filler_->SetNamedAutoIndexReference( + HeapGraphEdge::kShortcut, + snapshot_->root()->index(), + child_entry); +} + + +void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) { + filler_->SetIndexedAutoIndexReference( + HeapGraphEdge::kElement, + snapshot_->gc_roots()->index(), + snapshot_->gc_subroot(tag)); +} + + +void V8HeapExplorer::SetGcSubrootReference( + VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) { + HeapEntry* child_entry = GetEntry(child_obj); + if (child_entry != NULL) { + const char* name = GetStrongGcSubrootName(child_obj); + if (name != NULL) { + filler_->SetNamedReference( + HeapGraphEdge::kInternal, + snapshot_->gc_subroot(tag)->index(), + name, + child_entry); + } else { + filler_->SetIndexedAutoIndexReference( + is_weak ? 
HeapGraphEdge::kWeak : HeapGraphEdge::kElement, + snapshot_->gc_subroot(tag)->index(), + child_entry); + } + } +} + + +const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) { + if (strong_gc_subroot_names_.is_empty()) { +#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name); +#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name) + STRONG_ROOT_LIST(ROOT_NAME) +#undef ROOT_NAME +#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map) + STRUCT_LIST(STRUCT_MAP_NAME) +#undef STRUCT_MAP_NAME +#define STRING_NAME(name, str) NAME_ENTRY(name) + INTERNALIZED_STRING_LIST(STRING_NAME) +#undef STRING_NAME +#undef NAME_ENTRY + CHECK(!strong_gc_subroot_names_.is_empty()); + } + return strong_gc_subroot_names_.GetTag(object); +} + + +void V8HeapExplorer::TagObject(Object* obj, const char* tag) { + if (IsEssentialObject(obj)) { + HeapEntry* entry = GetEntry(obj); + if (entry->name()[0] == '\0') { + entry->set_name(tag); + } + } +} + + +class GlobalObjectsEnumerator : public ObjectVisitor { + public: + virtual void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) { + if ((*p)->IsNativeContext()) { + Context* context = Context::cast(*p); + JSObject* proxy = context->global_proxy(); + if (proxy->IsJSGlobalProxy()) { + Object* global = proxy->map()->prototype(); + if (global->IsJSGlobalObject()) { + objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global))); + } + } + } + } + } + int count() { return objects_.length(); } + Handle<JSGlobalObject>& at(int i) { return objects_[i]; } + + private: + List<Handle<JSGlobalObject> > objects_; +}; + + +// Modifies heap. Must not be run during heap traversal. +void V8HeapExplorer::TagGlobalObjects() { + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); + GlobalObjectsEnumerator enumerator; + isolate->global_handles()->IterateAllRoots(&enumerator); + const char** urls = NewArray<const char*>(enumerator.count()); + for (int i = 0, l = enumerator.count(); i < l; ++i) { + if (global_object_name_resolver_) { + HandleScope scope(isolate); + Handle<JSGlobalObject> global_obj = enumerator.at(i); + urls[i] = global_object_name_resolver_->GetName( + Utils::ToLocal(Handle<JSObject>::cast(global_obj))); + } else { + urls[i] = NULL; + } + } + + AssertNoAllocation no_allocation; + for (int i = 0, l = enumerator.count(); i < l; ++i) { + objects_tags_.SetTag(*enumerator.at(i), urls[i]); + } + + DeleteArray(urls); +} + + +class GlobalHandlesExtractor : public ObjectVisitor { + public: + explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer) + : explorer_(explorer) {} + virtual ~GlobalHandlesExtractor() {} + virtual void VisitPointers(Object** start, Object** end) { + UNREACHABLE(); + } + virtual void VisitEmbedderReference(Object** p, uint16_t class_id) { + explorer_->VisitSubtreeWrapper(p, class_id); + } + private: + NativeObjectsExplorer* explorer_; +}; + + +class BasicHeapEntriesAllocator : public HeapEntriesAllocator { + public: + BasicHeapEntriesAllocator( + HeapSnapshot* snapshot, + HeapEntry::Type entries_type) + : snapshot_(snapshot), + collection_(snapshot_->collection()), + entries_type_(entries_type) { + } + virtual HeapEntry* AllocateEntry(HeapThing ptr); + private: + HeapSnapshot* snapshot_; + HeapSnapshotsCollection* collection_; + HeapEntry::Type entries_type_; +}; + + +HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) { + v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr); + intptr_t elements = 
info->GetElementCount(); + intptr_t size = info->GetSizeInBytes(); + const char* name = elements != -1 + ? collection_->names()->GetFormatted( + "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements) + : collection_->names()->GetCopy(info->GetLabel()); + return snapshot_->AddEntry( + entries_type_, + name, + HeapObjectsMap::GenerateId(info), + size != -1 ? static_cast<int>(size) : 0); +} + + +NativeObjectsExplorer::NativeObjectsExplorer( + HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress) + : snapshot_(snapshot), + collection_(snapshot_->collection()), + progress_(progress), + embedder_queried_(false), + objects_by_info_(RetainedInfosMatch), + native_groups_(StringsMatch), + filler_(NULL) { + synthetic_entries_allocator_ = + new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic); + native_entries_allocator_ = + new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative); +} + + +NativeObjectsExplorer::~NativeObjectsExplorer() { + for (HashMap::Entry* p = objects_by_info_.Start(); + p != NULL; + p = objects_by_info_.Next(p)) { + v8::RetainedObjectInfo* info = + reinterpret_cast<v8::RetainedObjectInfo*>(p->key); + info->Dispose(); + List<HeapObject*>* objects = + reinterpret_cast<List<HeapObject*>* >(p->value); + delete objects; + } + for (HashMap::Entry* p = native_groups_.Start(); + p != NULL; + p = native_groups_.Next(p)) { + v8::RetainedObjectInfo* info = + reinterpret_cast<v8::RetainedObjectInfo*>(p->value); + info->Dispose(); + } + delete synthetic_entries_allocator_; + delete native_entries_allocator_; +} + + +int NativeObjectsExplorer::EstimateObjectsCount() { + FillRetainedObjects(); + return objects_by_info_.occupancy(); +} + + +void NativeObjectsExplorer::FillRetainedObjects() { + if (embedder_queried_) return; + Isolate* isolate = Isolate::Current(); + const GCType major_gc_type = kGCTypeMarkSweepCompact; + // Record objects that are joined into ObjectGroups. + isolate->heap()->CallGCPrologueCallbacks(major_gc_type); + List<ObjectGroup*>* groups = isolate->global_handles()->object_groups(); + for (int i = 0; i < groups->length(); ++i) { + ObjectGroup* group = groups->at(i); + if (group->info_ == NULL) continue; + List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_); + for (size_t j = 0; j < group->length_; ++j) { + HeapObject* obj = HeapObject::cast(*group->objects_[j]); + list->Add(obj); + in_groups_.Insert(obj); + } + group->info_ = NULL; // Acquire info object ownership. + } + isolate->global_handles()->RemoveObjectGroups(); + isolate->heap()->CallGCEpilogueCallbacks(major_gc_type); + // Record objects that are not in ObjectGroups, but have class ID. 
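// Such objects arrive through GlobalHandlesExtractor::VisitEmbedderReference
// above, keyed by the wrapper class id the embedder assigned to the handle.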
+ GlobalHandlesExtractor extractor(this); + isolate->global_handles()->IterateAllRootsWithClassIds(&extractor); + embedder_queried_ = true; +} + +void NativeObjectsExplorer::FillImplicitReferences() { + Isolate* isolate = Isolate::Current(); + List<ImplicitRefGroup*>* groups = + isolate->global_handles()->implicit_ref_groups(); + for (int i = 0; i < groups->length(); ++i) { + ImplicitRefGroup* group = groups->at(i); + HeapObject* parent = *group->parent_; + int parent_entry = + filler_->FindOrAddEntry(parent, native_entries_allocator_)->index(); + ASSERT(parent_entry != HeapEntry::kNoEntry); + Object*** children = group->children_; + for (size_t j = 0; j < group->length_; ++j) { + Object* child = *children[j]; + HeapEntry* child_entry = + filler_->FindOrAddEntry(child, native_entries_allocator_); + filler_->SetNamedReference( + HeapGraphEdge::kInternal, + parent_entry, + "native", + child_entry); + } + } + isolate->global_handles()->RemoveImplicitRefGroups(); +} + +List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo( + v8::RetainedObjectInfo* info) { + HashMap::Entry* entry = + objects_by_info_.Lookup(info, InfoHash(info), true); + if (entry->value != NULL) { + info->Dispose(); + } else { + entry->value = new List<HeapObject*>(4); + } + return reinterpret_cast<List<HeapObject*>* >(entry->value); +} + + +bool NativeObjectsExplorer::IterateAndExtractReferences( + SnapshotFillerInterface* filler) { + filler_ = filler; + FillRetainedObjects(); + FillImplicitReferences(); + if (EstimateObjectsCount() > 0) { + for (HashMap::Entry* p = objects_by_info_.Start(); + p != NULL; + p = objects_by_info_.Next(p)) { + v8::RetainedObjectInfo* info = + reinterpret_cast<v8::RetainedObjectInfo*>(p->key); + SetNativeRootReference(info); + List<HeapObject*>* objects = + reinterpret_cast<List<HeapObject*>* >(p->value); + for (int i = 0; i < objects->length(); ++i) { + SetWrapperNativeReferences(objects->at(i), info); + } + } + SetRootNativeRootsReference(); + } + filler_ = NULL; + return true; +} + + +class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo { + public: + explicit NativeGroupRetainedObjectInfo(const char* label) + : disposed_(false), + hash_(reinterpret_cast<intptr_t>(label)), + label_(label) { + } + + virtual ~NativeGroupRetainedObjectInfo() {} + virtual void Dispose() { + CHECK(!disposed_); + disposed_ = true; + delete this; + } + virtual bool IsEquivalent(RetainedObjectInfo* other) { + return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel()); + } + virtual intptr_t GetHash() { return hash_; } + virtual const char* GetLabel() { return label_; } + + private: + bool disposed_; + intptr_t hash_; + const char* label_; +}; + + +NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo( + const char* label) { + const char* label_copy = collection_->names()->GetCopy(label); + uint32_t hash = StringHasher::HashSequentialString( + label_copy, + static_cast<int>(strlen(label_copy)), + HEAP->HashSeed()); + HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy), + hash, true); + if (entry->value == NULL) { + entry->value = new NativeGroupRetainedObjectInfo(label); + } + return static_cast<NativeGroupRetainedObjectInfo*>(entry->value); +} + + +void NativeObjectsExplorer::SetNativeRootReference( + v8::RetainedObjectInfo* info) { + HeapEntry* child_entry = + filler_->FindOrAddEntry(info, native_entries_allocator_); + ASSERT(child_entry != NULL); + NativeGroupRetainedObjectInfo* group_info = + FindOrAddGroupInfo(info->GetGroupLabel()); 
+ HeapEntry* group_entry = + filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_); + filler_->SetNamedAutoIndexReference( + HeapGraphEdge::kInternal, + group_entry->index(), + child_entry); +} + + +void NativeObjectsExplorer::SetWrapperNativeReferences( + HeapObject* wrapper, v8::RetainedObjectInfo* info) { + HeapEntry* wrapper_entry = filler_->FindEntry(wrapper); + ASSERT(wrapper_entry != NULL); + HeapEntry* info_entry = + filler_->FindOrAddEntry(info, native_entries_allocator_); + ASSERT(info_entry != NULL); + filler_->SetNamedReference(HeapGraphEdge::kInternal, + wrapper_entry->index(), + "native", + info_entry); + filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement, + info_entry->index(), + wrapper_entry); +} + + +void NativeObjectsExplorer::SetRootNativeRootsReference() { + for (HashMap::Entry* entry = native_groups_.Start(); + entry; + entry = native_groups_.Next(entry)) { + NativeGroupRetainedObjectInfo* group_info = + static_cast<NativeGroupRetainedObjectInfo*>(entry->value); + HeapEntry* group_entry = + filler_->FindOrAddEntry(group_info, native_entries_allocator_); + ASSERT(group_entry != NULL); + filler_->SetIndexedAutoIndexReference( + HeapGraphEdge::kElement, + snapshot_->root()->index(), + group_entry); + } +} + + +void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) { + if (in_groups_.Contains(*p)) return; + Isolate* isolate = Isolate::Current(); + v8::RetainedObjectInfo* info = + isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p); + if (info == NULL) return; + GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p)); +} + + +class SnapshotFiller : public SnapshotFillerInterface { + public: + explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries) + : snapshot_(snapshot), + collection_(snapshot->collection()), + entries_(entries) { } + HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) { + HeapEntry* entry = allocator->AllocateEntry(ptr); + entries_->Pair(ptr, entry->index()); + return entry; + } + HeapEntry* FindEntry(HeapThing ptr) { + int index = entries_->Map(ptr); + return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL; + } + HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) { + HeapEntry* entry = FindEntry(ptr); + return entry != NULL ? 
entry : AddEntry(ptr, allocator); + } + void SetIndexedReference(HeapGraphEdge::Type type, + int parent, + int index, + HeapEntry* child_entry) { + HeapEntry* parent_entry = &snapshot_->entries()[parent]; + parent_entry->SetIndexedReference(type, index, child_entry); + } + void SetIndexedAutoIndexReference(HeapGraphEdge::Type type, + int parent, + HeapEntry* child_entry) { + HeapEntry* parent_entry = &snapshot_->entries()[parent]; + int index = parent_entry->children_count() + 1; + parent_entry->SetIndexedReference(type, index, child_entry); + } + void SetNamedReference(HeapGraphEdge::Type type, + int parent, + const char* reference_name, + HeapEntry* child_entry) { + HeapEntry* parent_entry = &snapshot_->entries()[parent]; + parent_entry->SetNamedReference(type, reference_name, child_entry); + } + void SetNamedAutoIndexReference(HeapGraphEdge::Type type, + int parent, + HeapEntry* child_entry) { + HeapEntry* parent_entry = &snapshot_->entries()[parent]; + int index = parent_entry->children_count() + 1; + parent_entry->SetNamedReference( + type, + collection_->names()->GetName(index), + child_entry); + } + + private: + HeapSnapshot* snapshot_; + HeapSnapshotsCollection* collection_; + HeapEntriesMap* entries_; +}; + + +HeapSnapshotGenerator::HeapSnapshotGenerator( + HeapSnapshot* snapshot, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver, + Heap* heap) + : snapshot_(snapshot), + control_(control), + v8_heap_explorer_(snapshot_, this, resolver), + dom_explorer_(snapshot_, this), + heap_(heap) { +} + + +bool HeapSnapshotGenerator::GenerateSnapshot() { + v8_heap_explorer_.TagGlobalObjects(); + + // TODO(1562) Profiler assumes that any object that is in the heap after + // full GC is reachable from the root when computing dominators. + // This is not true for weakly reachable objects. + // As a temporary solution we call GC twice. + Isolate::Current()->heap()->CollectAllGarbage( + Heap::kMakeHeapIterableMask, + "HeapSnapshotGenerator::GenerateSnapshot"); + Isolate::Current()->heap()->CollectAllGarbage( + Heap::kMakeHeapIterableMask, + "HeapSnapshotGenerator::GenerateSnapshot"); + +#ifdef VERIFY_HEAP + Heap* debug_heap = Isolate::Current()->heap(); + CHECK(!debug_heap->old_data_space()->was_swept_conservatively()); + CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively()); + CHECK(!debug_heap->code_space()->was_swept_conservatively()); + CHECK(!debug_heap->cell_space()->was_swept_conservatively()); + CHECK(!debug_heap->map_space()->was_swept_conservatively()); +#endif + + // The following code uses heap iterators, so we want the heap to be + // stable. It should follow TagGlobalObjects as that can allocate. + AssertNoAllocation no_alloc; + +#ifdef VERIFY_HEAP + debug_heap->Verify(); +#endif + + SetProgressTotal(1); // 1 pass. 
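The control_ member consulted by ProgressReport() below is the embedder-supplied v8::ActivityControl from v8-profiler.h. A minimal sketch of such a listener is shown here; the class name and logging are illustrative, and the exact public HeapProfiler::TakeSnapshot() signature varies between V8 revisions, so treat the usage comment as an assumption rather than the method used by this change.

    #include <cstdio>
    #include "v8.h"
    #include "v8-profiler.h"

    // Receives periodic (done, total) progress values while the snapshot is
    // being generated; returning kAbort makes GenerateSnapshot() bail out.
    class LoggingActivityControl : public v8::ActivityControl {
     public:
      virtual ControlOption ReportProgressValue(int done, int total) {
        std::fprintf(stderr, "heap snapshot: %d / %d\n", done, total);
        return kContinue;
      }
    };

    // Hypothetical usage from an embedder with an entered isolate and context:
    //   LoggingActivityControl control;
    //   const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
    //       v8::String::New("dump"), v8::HeapSnapshot::kFull, &control);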
+ +#ifdef VERIFY_HEAP + debug_heap->Verify(); +#endif + + if (!FillReferences()) return false; + + snapshot_->FillChildren(); + snapshot_->RememberLastJSObjectId(); + + progress_counter_ = progress_total_; + if (!ProgressReport(true)) return false; + return true; +} + + +void HeapSnapshotGenerator::ProgressStep() { + ++progress_counter_; +} + + +bool HeapSnapshotGenerator::ProgressReport(bool force) { + const int kProgressReportGranularity = 10000; + if (control_ != NULL + && (force || progress_counter_ % kProgressReportGranularity == 0)) { + return + control_->ReportProgressValue(progress_counter_, progress_total_) == + v8::ActivityControl::kContinue; + } + return true; +} + + +void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) { + if (control_ == NULL) return; + HeapIterator iterator(heap_, HeapIterator::kFilterUnreachable); + progress_total_ = iterations_count * ( + v8_heap_explorer_.EstimateObjectsCount(&iterator) + + dom_explorer_.EstimateObjectsCount()); + progress_counter_ = 0; +} + + +bool HeapSnapshotGenerator::FillReferences() { + SnapshotFiller filler(snapshot_, &entries_); + v8_heap_explorer_.AddRootEntries(&filler); + return v8_heap_explorer_.IterateAndExtractReferences(&filler) + && dom_explorer_.IterateAndExtractReferences(&filler); +} + + +template<int bytes> struct MaxDecimalDigitsIn; +template<> struct MaxDecimalDigitsIn<4> { + static const int kSigned = 11; + static const int kUnsigned = 10; +}; +template<> struct MaxDecimalDigitsIn<8> { + static const int kSigned = 20; + static const int kUnsigned = 20; +}; + + +class OutputStreamWriter { + public: + explicit OutputStreamWriter(v8::OutputStream* stream) + : stream_(stream), + chunk_size_(stream->GetChunkSize()), + chunk_(chunk_size_), + chunk_pos_(0), + aborted_(false) { + ASSERT(chunk_size_ > 0); + } + bool aborted() { return aborted_; } + void AddCharacter(char c) { + ASSERT(c != '\0'); + ASSERT(chunk_pos_ < chunk_size_); + chunk_[chunk_pos_++] = c; + MaybeWriteChunk(); + } + void AddString(const char* s) { + AddSubstring(s, StrLength(s)); + } + void AddSubstring(const char* s, int n) { + if (n <= 0) return; + ASSERT(static_cast<size_t>(n) <= strlen(s)); + const char* s_end = s + n; + while (s < s_end) { + int s_chunk_size = Min( + chunk_size_ - chunk_pos_, static_cast<int>(s_end - s)); + ASSERT(s_chunk_size > 0); + memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size); + s += s_chunk_size; + chunk_pos_ += s_chunk_size; + MaybeWriteChunk(); + } + } + void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); } + void Finalize() { + if (aborted_) return; + ASSERT(chunk_pos_ < chunk_size_); + if (chunk_pos_ != 0) { + WriteChunk(); + } + stream_->EndOfStream(); + } + + private: + template<typename T> + void AddNumberImpl(T n, const char* format) { + // Buffer for the longest value plus trailing \0 + static const int kMaxNumberSize = + MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1; + if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) { + int result = OS::SNPrintF( + chunk_.SubVector(chunk_pos_, chunk_size_), format, n); + ASSERT(result != -1); + chunk_pos_ += result; + MaybeWriteChunk(); + } else { + EmbeddedVector<char, kMaxNumberSize> buffer; + int result = OS::SNPrintF(buffer, format, n); + USE(result); + ASSERT(result != -1); + AddString(buffer.start()); + } + } + void MaybeWriteChunk() { + ASSERT(chunk_pos_ <= chunk_size_); + if (chunk_pos_ == chunk_size_) { + WriteChunk(); + } + } + void WriteChunk() { + if (aborted_) return; + if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) == + 
v8::OutputStream::kAbort) aborted_ = true; + chunk_pos_ = 0; + } + + v8::OutputStream* stream_; + int chunk_size_; + ScopedVector<char> chunk_; + int chunk_pos_; + bool aborted_; +}; + + +// type, name|index, to_node. +const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; +// type, name, id, self_size, children_index. +const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; + +void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { + ASSERT(writer_ == NULL); + writer_ = new OutputStreamWriter(stream); + + HeapSnapshot* original_snapshot = NULL; + if (snapshot_->RawSnapshotSize() >= + SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) { + // The snapshot is too big. Serialize a fake snapshot. + original_snapshot = snapshot_; + snapshot_ = CreateFakeSnapshot(); + } + + SerializeImpl(); + + delete writer_; + writer_ = NULL; + + if (original_snapshot != NULL) { + delete snapshot_; + snapshot_ = original_snapshot; + } +} + + +HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() { + HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(), + HeapSnapshot::kFull, + snapshot_->title(), + snapshot_->uid()); + result->AddRootEntry(); + const char* text = snapshot_->collection()->names()->GetFormatted( + "The snapshot is too big. " + "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. " + "Actual snapshot size is %" V8_PTR_PREFIX "u MB.", + SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB, + (snapshot_->RawSnapshotSize() + MB - 1) / MB); + HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4); + result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message); + result->FillChildren(); + return result; +} + + +void HeapSnapshotJSONSerializer::SerializeImpl() { + ASSERT(0 == snapshot_->root()->index()); + writer_->AddCharacter('{'); + writer_->AddString("\"snapshot\":{"); + SerializeSnapshot(); + if (writer_->aborted()) return; + writer_->AddString("},\n"); + writer_->AddString("\"nodes\":["); + SerializeNodes(); + if (writer_->aborted()) return; + writer_->AddString("],\n"); + writer_->AddString("\"edges\":["); + SerializeEdges(); + if (writer_->aborted()) return; + writer_->AddString("],\n"); + writer_->AddString("\"strings\":["); + SerializeStrings(); + if (writer_->aborted()) return; + writer_->AddCharacter(']'); + writer_->AddCharacter('}'); + writer_->Finalize(); +} + + +int HeapSnapshotJSONSerializer::GetStringId(const char* s) { + HashMap::Entry* cache_entry = strings_.Lookup( + const_cast<char*>(s), ObjectHash(s), true); + if (cache_entry->value == NULL) { + cache_entry->value = reinterpret_cast<void*>(next_string_id_++); + } + return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value)); +} + + +static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) { + int number_of_digits = 0; + unsigned t = value; + do { + ++number_of_digits; + } while (t /= 10); + + buffer_pos += number_of_digits; + int result = buffer_pos; + do { + int last_digit = value % 10; + buffer[--buffer_pos] = '0' + last_digit; + value /= 10; + } while (value); + return result; +} + + +void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge, + bool first_edge) { + // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0 + static const int kBufferSize = + MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT + EmbeddedVector<char, kBufferSize> buffer; + int edge_name_or_index = edge->type() == HeapGraphEdge::kElement + || edge->type() == 
HeapGraphEdge::kHidden + || edge->type() == HeapGraphEdge::kWeak + ? edge->index() : GetStringId(edge->name()); + int buffer_pos = 0; + if (!first_edge) { + buffer[buffer_pos++] = ','; + } + buffer_pos = utoa(edge->type(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos); + buffer[buffer_pos++] = '\n'; + buffer[buffer_pos++] = '\0'; + writer_->AddString(buffer.start()); +} + + +void HeapSnapshotJSONSerializer::SerializeEdges() { + List<HeapGraphEdge*>& edges = snapshot_->children(); + for (int i = 0; i < edges.length(); ++i) { + ASSERT(i == 0 || + edges[i - 1]->from()->index() <= edges[i]->from()->index()); + SerializeEdge(edges[i], i == 0); + if (writer_->aborted()) return; + } +} + + +void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) { + // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0 + static const int kBufferSize = + 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT + + 5 + 1 + 1; + EmbeddedVector<char, kBufferSize> buffer; + int buffer_pos = 0; + if (entry_index(entry) != 0) { + buffer[buffer_pos++] = ','; + } + buffer_pos = utoa(entry->type(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(entry->id(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(entry->self_size(), buffer, buffer_pos); + buffer[buffer_pos++] = ','; + buffer_pos = utoa(entry->children_count(), buffer, buffer_pos); + buffer[buffer_pos++] = '\n'; + buffer[buffer_pos++] = '\0'; + writer_->AddString(buffer.start()); +} + + +void HeapSnapshotJSONSerializer::SerializeNodes() { + List<HeapEntry>& entries = snapshot_->entries(); + for (int i = 0; i < entries.length(); ++i) { + SerializeNode(&entries[i]); + if (writer_->aborted()) return; + } +} + + +void HeapSnapshotJSONSerializer::SerializeSnapshot() { + writer_->AddString("\"title\":\""); + writer_->AddString(snapshot_->title()); + writer_->AddString("\""); + writer_->AddString(",\"uid\":"); + writer_->AddNumber(snapshot_->uid()); + writer_->AddString(",\"meta\":"); + // The object describing node serialization layout. + // We use a set of macros to improve readability. 
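Taken together, SerializeNode() and SerializeEdge() above, plus SerializeSnapshot() and SerializeStrings() below, produce a flat encoding whose overall shape is roughly the following (values elided; five fields per node and three per edge in this layout):

    { "snapshot": { "title": ..., "uid": ..., "meta": { ... },
                    "node_count": N, "edge_count": E },
      "nodes":   [ type, name, id, self_size, edge_count, ... ],
      "edges":   [ type, name_or_index, to_node, ... ],
      "strings": [ "<dummy>", ... ] }

An edge's to_node field is the target node's offset into the flat "nodes" array (node index times kNodeFieldsCount, per entry_index()), and name_or_index is either a plain index for element/hidden/weak edges or an id into the "strings" table obtained from GetStringId().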
+#define JSON_A(s) "[" s "]" +#define JSON_O(s) "{" s "}" +#define JSON_S(s) "\"" s "\"" + writer_->AddString(JSON_O( + JSON_S("node_fields") ":" JSON_A( + JSON_S("type") "," + JSON_S("name") "," + JSON_S("id") "," + JSON_S("self_size") "," + JSON_S("edge_count")) "," + JSON_S("node_types") ":" JSON_A( + JSON_A( + JSON_S("hidden") "," + JSON_S("array") "," + JSON_S("string") "," + JSON_S("object") "," + JSON_S("code") "," + JSON_S("closure") "," + JSON_S("regexp") "," + JSON_S("number") "," + JSON_S("native") "," + JSON_S("synthetic")) "," + JSON_S("string") "," + JSON_S("number") "," + JSON_S("number") "," + JSON_S("number") "," + JSON_S("number") "," + JSON_S("number")) "," + JSON_S("edge_fields") ":" JSON_A( + JSON_S("type") "," + JSON_S("name_or_index") "," + JSON_S("to_node")) "," + JSON_S("edge_types") ":" JSON_A( + JSON_A( + JSON_S("context") "," + JSON_S("element") "," + JSON_S("property") "," + JSON_S("internal") "," + JSON_S("hidden") "," + JSON_S("shortcut") "," + JSON_S("weak")) "," + JSON_S("string_or_number") "," + JSON_S("node")))); +#undef JSON_S +#undef JSON_O +#undef JSON_A + writer_->AddString(",\"node_count\":"); + writer_->AddNumber(snapshot_->entries().length()); + writer_->AddString(",\"edge_count\":"); + writer_->AddNumber(snapshot_->edges().length()); +} + + +static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) { + static const char hex_chars[] = "0123456789ABCDEF"; + w->AddString("\\u"); + w->AddCharacter(hex_chars[(u >> 12) & 0xf]); + w->AddCharacter(hex_chars[(u >> 8) & 0xf]); + w->AddCharacter(hex_chars[(u >> 4) & 0xf]); + w->AddCharacter(hex_chars[u & 0xf]); +} + +void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) { + writer_->AddCharacter('\n'); + writer_->AddCharacter('\"'); + for ( ; *s != '\0'; ++s) { + switch (*s) { + case '\b': + writer_->AddString("\\b"); + continue; + case '\f': + writer_->AddString("\\f"); + continue; + case '\n': + writer_->AddString("\\n"); + continue; + case '\r': + writer_->AddString("\\r"); + continue; + case '\t': + writer_->AddString("\\t"); + continue; + case '\"': + case '\\': + writer_->AddCharacter('\\'); + writer_->AddCharacter(*s); + continue; + default: + if (*s > 31 && *s < 128) { + writer_->AddCharacter(*s); + } else if (*s <= 31) { + // Special character with no dedicated literal. + WriteUChar(writer_, *s); + } else { + // Convert UTF-8 into \u UTF-16 literal. 
+ unsigned length = 1, cursor = 0; + for ( ; length <= 4 && *(s + length) != '\0'; ++length) { } + unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor); + if (c != unibrow::Utf8::kBadChar) { + WriteUChar(writer_, c); + ASSERT(cursor != 0); + s += cursor - 1; + } else { + writer_->AddCharacter('?'); + } + } + } + } + writer_->AddCharacter('\"'); +} + + +void HeapSnapshotJSONSerializer::SerializeStrings() { + List<HashMap::Entry*> sorted_strings; + SortHashMap(&strings_, &sorted_strings); + writer_->AddString("\"<dummy>\""); + for (int i = 0; i < sorted_strings.length(); ++i) { + writer_->AddCharacter(','); + SerializeString( + reinterpret_cast<const unsigned char*>(sorted_strings[i]->key)); + if (writer_->aborted()) return; + } +} + + +template<typename T> +inline static int SortUsingEntryValue(const T* x, const T* y) { + uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value); + uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value); + if (x_uint > y_uint) { + return 1; + } else if (x_uint == y_uint) { + return 0; + } else { + return -1; + } +} + + +void HeapSnapshotJSONSerializer::SortHashMap( + HashMap* map, List<HashMap::Entry*>* sorted_entries) { + for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) + sorted_entries->Add(p); + sorted_entries->Sort(SortUsingEntryValue); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/heap-snapshot-generator.h b/deps/v8/src/heap-snapshot-generator.h new file mode 100644 index 0000000000..7331b5f0b6 --- /dev/null +++ b/deps/v8/src/heap-snapshot-generator.h @@ -0,0 +1,697 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
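The serializer above writes through the embedder's v8::OutputStream in GetChunkSize()-sized chunks via OutputStreamWriter. A minimal consumer might look like the sketch below, assuming the public OutputStream / HeapSnapshot::Serialize() API from v8-profiler.h; the file name and buffer size are illustrative only.

    #include <cstdio>
    #include "v8.h"
    #include "v8-profiler.h"

    // Streams the JSON snapshot to a FILE*; returning kAbort from
    // WriteAsciiChunk() makes OutputStreamWriter stop producing output.
    class FileOutputStream : public v8::OutputStream {
     public:
      explicit FileOutputStream(FILE* out) : out_(out) {}
      virtual int GetChunkSize() { return 64 * 1024; }
      virtual WriteResult WriteAsciiChunk(char* data, int size) {
        size_t written = std::fwrite(data, 1, static_cast<size_t>(size), out_);
        return written == static_cast<size_t>(size) ? kContinue : kAbort;
      }
      virtual void EndOfStream() { std::fflush(out_); }
     private:
      FILE* out_;
    };

    // Hypothetical usage once a snapshot has been taken:
    //   FILE* f = std::fopen("dump.heapsnapshot", "w");
    //   FileOutputStream stream(f);
    //   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
    //   std::fclose(f);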
+ +#ifndef V8_HEAP_SNAPSHOT_GENERATOR_H_ +#define V8_HEAP_SNAPSHOT_GENERATOR_H_ + +namespace v8 { +namespace internal { + +class HeapEntry; +class HeapSnapshot; + +class HeapGraphEdge BASE_EMBEDDED { + public: + enum Type { + kContextVariable = v8::HeapGraphEdge::kContextVariable, + kElement = v8::HeapGraphEdge::kElement, + kProperty = v8::HeapGraphEdge::kProperty, + kInternal = v8::HeapGraphEdge::kInternal, + kHidden = v8::HeapGraphEdge::kHidden, + kShortcut = v8::HeapGraphEdge::kShortcut, + kWeak = v8::HeapGraphEdge::kWeak + }; + + HeapGraphEdge() { } + HeapGraphEdge(Type type, const char* name, int from, int to); + HeapGraphEdge(Type type, int index, int from, int to); + void ReplaceToIndexWithEntry(HeapSnapshot* snapshot); + + Type type() const { return static_cast<Type>(type_); } + int index() const { + ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak); + return index_; + } + const char* name() const { + ASSERT(type_ == kContextVariable + || type_ == kProperty + || type_ == kInternal + || type_ == kShortcut); + return name_; + } + INLINE(HeapEntry* from() const); + HeapEntry* to() const { return to_entry_; } + + private: + INLINE(HeapSnapshot* snapshot() const); + + unsigned type_ : 3; + int from_index_ : 29; + union { + // During entries population |to_index_| is used for storing the index, + // afterwards it is replaced with a pointer to the entry. + int to_index_; + HeapEntry* to_entry_; + }; + union { + int index_; + const char* name_; + }; +}; + + +// HeapEntry instances represent an entity from the heap (or a special +// virtual node, e.g. root). +class HeapEntry BASE_EMBEDDED { + public: + enum Type { + kHidden = v8::HeapGraphNode::kHidden, + kArray = v8::HeapGraphNode::kArray, + kString = v8::HeapGraphNode::kString, + kObject = v8::HeapGraphNode::kObject, + kCode = v8::HeapGraphNode::kCode, + kClosure = v8::HeapGraphNode::kClosure, + kRegExp = v8::HeapGraphNode::kRegExp, + kHeapNumber = v8::HeapGraphNode::kHeapNumber, + kNative = v8::HeapGraphNode::kNative, + kSynthetic = v8::HeapGraphNode::kSynthetic + }; + static const int kNoEntry; + + HeapEntry() { } + HeapEntry(HeapSnapshot* snapshot, + Type type, + const char* name, + SnapshotObjectId id, + int self_size); + + HeapSnapshot* snapshot() { return snapshot_; } + Type type() { return static_cast<Type>(type_); } + const char* name() { return name_; } + void set_name(const char* name) { name_ = name; } + inline SnapshotObjectId id() { return id_; } + int self_size() { return self_size_; } + INLINE(int index() const); + int children_count() const { return children_count_; } + INLINE(int set_children_index(int index)); + void add_child(HeapGraphEdge* edge) { + children_arr()[children_count_++] = edge; + } + Vector<HeapGraphEdge*> children() { + return Vector<HeapGraphEdge*>(children_arr(), children_count_); } + + void SetIndexedReference( + HeapGraphEdge::Type type, int index, HeapEntry* entry); + void SetNamedReference( + HeapGraphEdge::Type type, const char* name, HeapEntry* entry); + + void Print( + const char* prefix, const char* edge_name, int max_depth, int indent); + + Handle<HeapObject> GetHeapObject(); + + private: + INLINE(HeapGraphEdge** children_arr()); + const char* TypeAsString(); + + unsigned type_: 4; + int children_count_: 28; + int children_index_; + int self_size_; + SnapshotObjectId id_; + HeapSnapshot* snapshot_; + const char* name_; +}; + + +class HeapSnapshotsCollection; + +// HeapSnapshot represents a single heap snapshot. 
It is stored in +// HeapSnapshotsCollection, which is also a factory for +// HeapSnapshots. All HeapSnapshots share strings copied from JS heap +// to be able to return them even if they were collected. +// HeapSnapshotGenerator fills in a HeapSnapshot. +class HeapSnapshot { + public: + enum Type { + kFull = v8::HeapSnapshot::kFull + }; + + HeapSnapshot(HeapSnapshotsCollection* collection, + Type type, + const char* title, + unsigned uid); + void Delete(); + + HeapSnapshotsCollection* collection() { return collection_; } + Type type() { return type_; } + const char* title() { return title_; } + unsigned uid() { return uid_; } + size_t RawSnapshotSize() const; + HeapEntry* root() { return &entries_[root_index_]; } + HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; } + HeapEntry* natives_root() { return &entries_[natives_root_index_]; } + HeapEntry* gc_subroot(int index) { + return &entries_[gc_subroot_indexes_[index]]; + } + List<HeapEntry>& entries() { return entries_; } + List<HeapGraphEdge>& edges() { return edges_; } + List<HeapGraphEdge*>& children() { return children_; } + void RememberLastJSObjectId(); + SnapshotObjectId max_snapshot_js_object_id() const { + return max_snapshot_js_object_id_; + } + + HeapEntry* AddEntry(HeapEntry::Type type, + const char* name, + SnapshotObjectId id, + int size); + HeapEntry* AddRootEntry(); + HeapEntry* AddGcRootsEntry(); + HeapEntry* AddGcSubrootEntry(int tag); + HeapEntry* AddNativesRootEntry(); + HeapEntry* GetEntryById(SnapshotObjectId id); + List<HeapEntry*>* GetSortedEntriesList(); + void FillChildren(); + + void Print(int max_depth); + void PrintEntriesSize(); + + private: + HeapSnapshotsCollection* collection_; + Type type_; + const char* title_; + unsigned uid_; + int root_index_; + int gc_roots_index_; + int natives_root_index_; + int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags]; + List<HeapEntry> entries_; + List<HeapGraphEdge> edges_; + List<HeapGraphEdge*> children_; + List<HeapEntry*> sorted_entries_; + SnapshotObjectId max_snapshot_js_object_id_; + + friend class HeapSnapshotTester; + + DISALLOW_COPY_AND_ASSIGN(HeapSnapshot); +}; + + +class HeapObjectsMap { + public: + explicit HeapObjectsMap(Heap* heap); + + Heap* heap() const { return heap_; } + + void SnapshotGenerationFinished(); + SnapshotObjectId FindEntry(Address addr); + SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size); + void MoveObject(Address from, Address to); + SnapshotObjectId last_assigned_id() const { + return next_id_ - kObjectIdStep; + } + + void StopHeapObjectsTracking(); + SnapshotObjectId PushHeapObjectsStats(OutputStream* stream); + size_t GetUsedMemorySize() const; + + static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info); + static inline SnapshotObjectId GetNthGcSubrootId(int delta); + + static const int kObjectIdStep = 2; + static const SnapshotObjectId kInternalRootObjectId; + static const SnapshotObjectId kGcRootsObjectId; + static const SnapshotObjectId kNativesRootObjectId; + static const SnapshotObjectId kGcRootsFirstSubrootId; + static const SnapshotObjectId kFirstAvailableObjectId; + + private: + struct EntryInfo { + EntryInfo(SnapshotObjectId id, Address addr, unsigned int size) + : id(id), addr(addr), size(size), accessed(true) { } + EntryInfo(SnapshotObjectId id, Address addr, unsigned int size, bool accessed) + : id(id), addr(addr), size(size), accessed(accessed) { } + SnapshotObjectId id; + Address addr; + unsigned int size; + bool accessed; + }; + struct TimeInterval { + explicit 
TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { } + SnapshotObjectId id; + uint32_t size; + uint32_t count; + }; + + void UpdateHeapObjectsMap(); + void RemoveDeadEntries(); + + static bool AddressesMatch(void* key1, void* key2) { + return key1 == key2; + } + + static uint32_t AddressHash(Address addr) { + return ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)), + v8::internal::kZeroHashSeed); + } + + SnapshotObjectId next_id_; + HashMap entries_map_; + List<EntryInfo> entries_; + List<TimeInterval> time_intervals_; + Heap* heap_; + + DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap); +}; + + +class HeapSnapshotsCollection { + public: + explicit HeapSnapshotsCollection(Heap* heap); + ~HeapSnapshotsCollection(); + + Heap* heap() const { return ids_.heap(); } + + bool is_tracking_objects() { return is_tracking_objects_; } + SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) { + return ids_.PushHeapObjectsStats(stream); + } + void StartHeapObjectsTracking() { is_tracking_objects_ = true; } + void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); } + + HeapSnapshot* NewSnapshot( + HeapSnapshot::Type type, const char* name, unsigned uid); + void SnapshotGenerationFinished(HeapSnapshot* snapshot); + List<HeapSnapshot*>* snapshots() { return &snapshots_; } + HeapSnapshot* GetSnapshot(unsigned uid); + void RemoveSnapshot(HeapSnapshot* snapshot); + + StringsStorage* names() { return &names_; } + TokenEnumerator* token_enumerator() { return token_enumerator_; } + + SnapshotObjectId FindObjectId(Address object_addr) { + return ids_.FindEntry(object_addr); + } + SnapshotObjectId GetObjectId(Address object_addr, int object_size) { + return ids_.FindOrAddEntry(object_addr, object_size); + } + Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id); + void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); } + SnapshotObjectId last_assigned_id() const { + return ids_.last_assigned_id(); + } + size_t GetUsedMemorySize() const; + + private: + INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) { + return key1 == key2; + } + + bool is_tracking_objects_; // Whether tracking object moves is needed. + List<HeapSnapshot*> snapshots_; + // Mapping from snapshots' uids to HeapSnapshot* pointers. + HashMap snapshots_uids_; + StringsStorage names_; + TokenEnumerator* token_enumerator_; + // Mapping from HeapObject addresses to objects' uids. + HeapObjectsMap ids_; + + DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection); +}; + + +// A typedef for referencing anything that can be snapshotted living +// in any kind of heap memory. +typedef void* HeapThing; + + +// An interface that creates HeapEntries by HeapThings. +class HeapEntriesAllocator { + public: + virtual ~HeapEntriesAllocator() { } + virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0; +}; + + +// The HeapEntriesMap instance is used to track a mapping between +// real heap objects and their representations in heap snapshots. 
+class HeapEntriesMap { + public: + HeapEntriesMap(); + + int Map(HeapThing thing); + void Pair(HeapThing thing, int entry); + + private: + static uint32_t Hash(HeapThing thing) { + return ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)), + v8::internal::kZeroHashSeed); + } + static bool HeapThingsMatch(HeapThing key1, HeapThing key2) { + return key1 == key2; + } + + HashMap entries_; + + friend class HeapObjectsSet; + + DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap); +}; + + +class HeapObjectsSet { + public: + HeapObjectsSet(); + void Clear(); + bool Contains(Object* object); + void Insert(Object* obj); + const char* GetTag(Object* obj); + void SetTag(Object* obj, const char* tag); + bool is_empty() const { return entries_.occupancy() == 0; } + + private: + HashMap entries_; + + DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet); +}; + + +// An interface used to populate a snapshot with nodes and edges. +class SnapshotFillerInterface { + public: + virtual ~SnapshotFillerInterface() { } + virtual HeapEntry* AddEntry(HeapThing ptr, + HeapEntriesAllocator* allocator) = 0; + virtual HeapEntry* FindEntry(HeapThing ptr) = 0; + virtual HeapEntry* FindOrAddEntry(HeapThing ptr, + HeapEntriesAllocator* allocator) = 0; + virtual void SetIndexedReference(HeapGraphEdge::Type type, + int parent_entry, + int index, + HeapEntry* child_entry) = 0; + virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type, + int parent_entry, + HeapEntry* child_entry) = 0; + virtual void SetNamedReference(HeapGraphEdge::Type type, + int parent_entry, + const char* reference_name, + HeapEntry* child_entry) = 0; + virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type, + int parent_entry, + HeapEntry* child_entry) = 0; +}; + + +class SnapshottingProgressReportingInterface { + public: + virtual ~SnapshottingProgressReportingInterface() { } + virtual void ProgressStep() = 0; + virtual bool ProgressReport(bool force) = 0; +}; + + +// An implementation of V8 heap graph extractor. 
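The V8HeapExplorer declared next receives the v8::HeapProfiler::ObjectNameResolver that TagGlobalObjects() in the .cc above invokes for each global object. A sketch of an embedder-side resolver follows; the URL lookup helper is a hypothetical placeholder, not part of this change.

    #include "v8.h"
    #include "v8-profiler.h"

    // Maps a global object back to a human-readable name (Chrome, for
    // example, labels globals with their document URL). Returning NULL
    // leaves the snapshot entry untagged, matching the no-resolver path
    // in TagGlobalObjects().
    class UrlObjectNameResolver : public v8::HeapProfiler::ObjectNameResolver {
     public:
      virtual const char* GetName(v8::Handle<v8::Object> global) {
        return LookupUrlForGlobal(global);  // hypothetical embedder bookkeeping
      }
     private:
      const char* LookupUrlForGlobal(v8::Handle<v8::Object> global);
    };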
+class V8HeapExplorer : public HeapEntriesAllocator { + public: + V8HeapExplorer(HeapSnapshot* snapshot, + SnapshottingProgressReportingInterface* progress, + v8::HeapProfiler::ObjectNameResolver* resolver); + virtual ~V8HeapExplorer(); + virtual HeapEntry* AllocateEntry(HeapThing ptr); + void AddRootEntries(SnapshotFillerInterface* filler); + int EstimateObjectsCount(HeapIterator* iterator); + bool IterateAndExtractReferences(SnapshotFillerInterface* filler); + void TagGlobalObjects(); + + static String* GetConstructorName(JSObject* object); + + static HeapObject* const kInternalRootObject; + + private: + HeapEntry* AddEntry(HeapObject* object); + HeapEntry* AddEntry(HeapObject* object, + HeapEntry::Type type, + const char* name); + const char* GetSystemEntryName(HeapObject* object); + + void ExtractReferences(HeapObject* obj); + void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy); + void ExtractJSObjectReferences(int entry, JSObject* js_obj); + void ExtractStringReferences(int entry, String* obj); + void ExtractContextReferences(int entry, Context* context); + void ExtractMapReferences(int entry, Map* map); + void ExtractSharedFunctionInfoReferences(int entry, + SharedFunctionInfo* shared); + void ExtractScriptReferences(int entry, Script* script); + void ExtractCodeCacheReferences(int entry, CodeCache* code_cache); + void ExtractCodeReferences(int entry, Code* code); + void ExtractJSGlobalPropertyCellReferences(int entry, + JSGlobalPropertyCell* cell); + void ExtractClosureReferences(JSObject* js_obj, int entry); + void ExtractPropertyReferences(JSObject* js_obj, int entry); + void ExtractElementReferences(JSObject* js_obj, int entry); + void ExtractInternalReferences(JSObject* js_obj, int entry); + bool IsEssentialObject(Object* object); + void SetContextReference(HeapObject* parent_obj, + int parent, + String* reference_name, + Object* child, + int field_offset); + void SetNativeBindReference(HeapObject* parent_obj, + int parent, + const char* reference_name, + Object* child); + void SetElementReference(HeapObject* parent_obj, + int parent, + int index, + Object* child); + void SetInternalReference(HeapObject* parent_obj, + int parent, + const char* reference_name, + Object* child, + int field_offset = -1); + void SetInternalReference(HeapObject* parent_obj, + int parent, + int index, + Object* child, + int field_offset = -1); + void SetHiddenReference(HeapObject* parent_obj, + int parent, + int index, + Object* child); + void SetWeakReference(HeapObject* parent_obj, + int parent, + int index, + Object* child_obj, + int field_offset); + void SetPropertyReference(HeapObject* parent_obj, + int parent, + Name* reference_name, + Object* child, + const char* name_format_string = NULL, + int field_offset = -1); + void SetUserGlobalReference(Object* user_global); + void SetRootGcRootsReference(); + void SetGcRootsReference(VisitorSynchronization::SyncTag tag); + void SetGcSubrootReference( + VisitorSynchronization::SyncTag tag, bool is_weak, Object* child); + const char* GetStrongGcSubrootName(Object* object); + void TagObject(Object* obj, const char* tag); + + HeapEntry* GetEntry(Object* obj); + + static inline HeapObject* GetNthGcSubrootObject(int delta); + static inline int GetGcSubrootOrder(HeapObject* subroot); + + Heap* heap_; + HeapSnapshot* snapshot_; + HeapSnapshotsCollection* collection_; + SnapshottingProgressReportingInterface* progress_; + SnapshotFillerInterface* filler_; + HeapObjectsSet objects_tags_; + HeapObjectsSet strong_gc_subroot_names_; + 
v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_; + + static HeapObject* const kGcRootsObject; + static HeapObject* const kFirstGcSubrootObject; + static HeapObject* const kLastGcSubrootObject; + + friend class IndexedReferencesExtractor; + friend class GcSubrootsEnumerator; + friend class RootsReferencesExtractor; + + DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer); +}; + + +class NativeGroupRetainedObjectInfo; + + +// An implementation of retained native objects extractor. +class NativeObjectsExplorer { + public: + NativeObjectsExplorer(HeapSnapshot* snapshot, + SnapshottingProgressReportingInterface* progress); + virtual ~NativeObjectsExplorer(); + void AddRootEntries(SnapshotFillerInterface* filler); + int EstimateObjectsCount(); + bool IterateAndExtractReferences(SnapshotFillerInterface* filler); + + private: + void FillRetainedObjects(); + void FillImplicitReferences(); + List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info); + void SetNativeRootReference(v8::RetainedObjectInfo* info); + void SetRootNativeRootsReference(); + void SetWrapperNativeReferences(HeapObject* wrapper, + v8::RetainedObjectInfo* info); + void VisitSubtreeWrapper(Object** p, uint16_t class_id); + + static uint32_t InfoHash(v8::RetainedObjectInfo* info) { + return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()), + v8::internal::kZeroHashSeed); + } + static bool RetainedInfosMatch(void* key1, void* key2) { + return key1 == key2 || + (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent( + reinterpret_cast<v8::RetainedObjectInfo*>(key2)); + } + INLINE(static bool StringsMatch(void* key1, void* key2)) { + return strcmp(reinterpret_cast<char*>(key1), + reinterpret_cast<char*>(key2)) == 0; + } + + NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label); + + HeapSnapshot* snapshot_; + HeapSnapshotsCollection* collection_; + SnapshottingProgressReportingInterface* progress_; + bool embedder_queried_; + HeapObjectsSet in_groups_; + // RetainedObjectInfo* -> List<HeapObject*>* + HashMap objects_by_info_; + HashMap native_groups_; + HeapEntriesAllocator* synthetic_entries_allocator_; + HeapEntriesAllocator* native_entries_allocator_; + // Used during references extraction. + SnapshotFillerInterface* filler_; + + static HeapThing const kNativesRootObject; + + friend class GlobalHandlesExtractor; + + DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer); +}; + + +class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface { + public: + HeapSnapshotGenerator(HeapSnapshot* snapshot, + v8::ActivityControl* control, + v8::HeapProfiler::ObjectNameResolver* resolver, + Heap* heap); + bool GenerateSnapshot(); + + private: + bool FillReferences(); + void ProgressStep(); + bool ProgressReport(bool force = false); + void SetProgressTotal(int iterations_count); + + HeapSnapshot* snapshot_; + v8::ActivityControl* control_; + V8HeapExplorer v8_heap_explorer_; + NativeObjectsExplorer dom_explorer_; + // Mapping from HeapThing pointers to HeapEntry* pointers. + HeapEntriesMap entries_; + // Used during snapshot generation. 
+ int progress_counter_; + int progress_total_; + Heap* heap_; + + DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator); +}; + +class OutputStreamWriter; + +class HeapSnapshotJSONSerializer { + public: + explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot) + : snapshot_(snapshot), + strings_(ObjectsMatch), + next_node_id_(1), + next_string_id_(1), + writer_(NULL) { + } + void Serialize(v8::OutputStream* stream); + + private: + INLINE(static bool ObjectsMatch(void* key1, void* key2)) { + return key1 == key2; + } + + INLINE(static uint32_t ObjectHash(const void* key)) { + return ComputeIntegerHash( + static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)), + v8::internal::kZeroHashSeed); + } + + HeapSnapshot* CreateFakeSnapshot(); + int GetStringId(const char* s); + int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; } + void SerializeEdge(HeapGraphEdge* edge, bool first_edge); + void SerializeEdges(); + void SerializeImpl(); + void SerializeNode(HeapEntry* entry); + void SerializeNodes(); + void SerializeSnapshot(); + void SerializeString(const unsigned char* s); + void SerializeStrings(); + void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries); + + static const int kEdgeFieldsCount; + static const int kNodeFieldsCount; + + HeapSnapshot* snapshot_; + HashMap strings_; + int next_node_id_; + int next_string_id_; + OutputStreamWriter* writer_; + + friend class HeapSnapshotJSONSerializerEnumerator; + friend class HeapSnapshotJSONSerializerIterator; + + DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer); +}; + + +} } // namespace v8::internal + +#endif // V8_HEAP_SNAPSHOT_GENERATOR_H_ diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc index e3fcb93a75..3cf23d0367 100644 --- a/deps/v8/src/heap.cc +++ b/deps/v8/src/heap.cc @@ -37,7 +37,6 @@ #include "global-handles.h" #include "heap-profiler.h" #include "incremental-marking.h" -#include "liveobjectlist-inl.h" #include "mark-compact.h" #include "natives.h" #include "objects-visiting.h" @@ -117,7 +116,6 @@ Heap::Heap() allocation_allowed_(true), allocation_timeout_(0), disallow_allocation_failure_(false), - debug_utils_(NULL), #endif // DEBUG new_space_high_promotion_mode_active_(false), old_gen_promotion_limit_(kMinimumPromotionLimit), @@ -129,7 +127,7 @@ Heap::Heap() amount_of_external_allocated_memory_at_last_global_gc_(0), old_gen_exhausted_(false), store_buffer_rebuilder_(store_buffer()), - hidden_symbol_(NULL), + hidden_string_(NULL), global_gc_prologue_callback_(NULL), global_gc_epilogue_callback_(NULL), gc_safe_size_of_old_object_(NULL), @@ -137,15 +135,18 @@ Heap::Heap() tracer_(NULL), young_survivors_after_last_gc_(0), high_survival_rate_period_length_(0), + low_survival_rate_period_length_(0), survival_rate_(0), previous_survival_rate_trend_(Heap::STABLE), survival_rate_trend_(Heap::STABLE), - max_gc_pause_(0), - total_gc_time_ms_(0), + max_gc_pause_(0.0), + total_gc_time_ms_(0.0), max_alive_after_gc_(0), min_in_mutator_(kMaxInt), alive_after_last_gc_(0), last_gc_end_timestamp_(0.0), + marking_time_(0.0), + sweeping_time_(0.0), store_buffer_(this), marking_(this), incremental_marking_(this), @@ -156,6 +157,9 @@ Heap::Heap() ms_count_at_last_idle_notification_(0), gc_count_at_last_idle_gc_(0), scavenges_since_last_idle_round_(kIdleScavengeThreshold), +#ifdef VERIFY_HEAP + no_weak_embedded_maps_verification_scope_depth_(0), +#endif promotion_queue_(this), configured_(false), chunks_queued_for_free_(NULL), @@ -212,6 +216,20 @@ intptr_t Heap::CommittedMemory() { lo_space_->Size(); } + +size_t 
Heap::CommittedPhysicalMemory() { + if (!HasBeenSetUp()) return 0; + + return new_space_.CommittedPhysicalMemory() + + old_pointer_space_->CommittedPhysicalMemory() + + old_data_space_->CommittedPhysicalMemory() + + code_space_->CommittedPhysicalMemory() + + map_space_->CommittedPhysicalMemory() + + cell_space_->CommittedPhysicalMemory() + + lo_space_->CommittedPhysicalMemory(); +} + + intptr_t Heap::CommittedMemoryExecutable() { if (!HasBeenSetUp()) return 0; @@ -378,7 +396,7 @@ void Heap::PrintShortHeapStatistics() { this->SizeOfObjects() / KB, this->Available() / KB, this->CommittedMemory() / KB); - PrintPID("Total time spent in GC : %d ms\n", total_gc_time_ms_); + PrintPID("Total time spent in GC : %.1f ms\n", total_gc_time_ms_); } @@ -406,6 +424,10 @@ void Heap::GarbageCollectionPrologue() { gc_count_++; unflattened_strings_length_ = 0; + if (FLAG_flush_code && FLAG_flush_code_incrementally) { + mark_compact_collector()->EnableCodeFlushing(true); + } + #ifdef VERIFY_HEAP if (FLAG_verify_heap) { Verify(); @@ -421,14 +443,13 @@ void Heap::GarbageCollectionPrologue() { ReportStatisticsBeforeGC(); #endif // DEBUG - LiveObjectList::GCPrologue(); store_buffer()->GCPrologue(); } intptr_t Heap::SizeOfObjects() { intptr_t total = 0; - AllSpaces spaces; + AllSpaces spaces(this); for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { total += space->SizeOfObjects(); } @@ -437,7 +458,7 @@ intptr_t Heap::SizeOfObjects() { void Heap::RepairFreeListsAfterBoot() { - PagedSpaces spaces; + PagedSpaces spaces(this); for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) { @@ -448,7 +469,6 @@ void Heap::RepairFreeListsAfterBoot() { void Heap::GarbageCollectionEpilogue() { store_buffer()->GCEpilogue(); - LiveObjectList::GCEpilogue(); // In release mode, we only zap the from space under heap verification. if (Heap::ShouldZapGarbage()) { @@ -472,10 +492,10 @@ void Heap::GarbageCollectionEpilogue() { isolate_->counters()->alive_after_last_gc()->Set( static_cast<int>(SizeOfObjects())); - isolate_->counters()->symbol_table_capacity()->Set( - symbol_table()->Capacity()); + isolate_->counters()->string_table_capacity()->Set( + string_table()->Capacity()); isolate_->counters()->number_of_symbols()->Set( - symbol_table()->NumberOfElements()); + string_table()->NumberOfElements()); if (CommittedMemory() > 0) { isolate_->counters()->external_fragmentation_total()->AddSample( @@ -532,6 +552,8 @@ void Heap::GarbageCollectionEpilogue() { #ifdef ENABLE_DEBUGGER_SUPPORT isolate_->debug()->AfterGarbageCollection(); #endif // ENABLE_DEBUGGER_SUPPORT + + error_object_list_.DeferredFormatStackTrace(isolate()); } @@ -569,7 +591,6 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) { mark_compact_collector()->SetFlags(kNoGCFlags); new_space_.Shrink(); UncommitFromSpace(); - Shrink(); incremental_marking()->UncommitMarkingDeque(); } @@ -597,7 +618,7 @@ bool Heap::CollectGarbage(AllocationSpace space, } if (collector == MARK_COMPACTOR && - !mark_compact_collector()->abort_incremental_marking_ && + !mark_compact_collector()->abort_incremental_marking() && !incremental_marking()->IsStopped() && !incremental_marking()->should_hurry() && FLAG_incremental_marking_steps) { @@ -625,24 +646,24 @@ bool Heap::CollectGarbage(AllocationSpace space, // Tell the tracer which collector we've selected. tracer.set_collector(collector); - HistogramTimer* rate = (collector == SCAVENGER) - ? 
isolate_->counters()->gc_scavenger() - : isolate_->counters()->gc_compactor(); - rate->Start(); - next_gc_likely_to_collect_more = - PerformGarbageCollection(collector, &tracer); - rate->Stop(); - - ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped()); + { + HistogramTimerScope histogram_timer_scope( + (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger() + : isolate_->counters()->gc_compactor()); + next_gc_likely_to_collect_more = + PerformGarbageCollection(collector, &tracer); + } - // This can do debug callbacks and restart incremental marking. GarbageCollectionEpilogue(); } - if (incremental_marking()->IsStopped()) { - if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) { - incremental_marking()->Start(); - } + // Start incremental marking for the next cycle. The heap snapshot + // generator needs incremental marking to stay off after it aborted. + if (!mark_compact_collector()->abort_incremental_marking() && + incremental_marking()->IsStopped() && + incremental_marking()->WorthActivating() && + NextGCIsLikelyToBeFull()) { + incremental_marking()->Start(); } return next_gc_likely_to_collect_more; @@ -659,25 +680,49 @@ void Heap::PerformScavenge() { } +void Heap::MoveElements(FixedArray* array, + int dst_index, + int src_index, + int len) { + if (len == 0) return; + + ASSERT(array->map() != HEAP->fixed_cow_array_map()); + Object** dst_objects = array->data_start() + dst_index; + memmove(dst_objects, + array->data_start() + src_index, + len * kPointerSize); + if (!InNewSpace(array)) { + for (int i = 0; i < len; i++) { + // TODO(hpayer): check store buffer for entries + if (InNewSpace(dst_objects[i])) { + RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i)); + } + } + } + incremental_marking()->RecordWrites(array); +} + + #ifdef VERIFY_HEAP -// Helper class for verifying the symbol table. -class SymbolTableVerifier : public ObjectVisitor { +// Helper class for verifying the string table. +class StringTableVerifier : public ObjectVisitor { public: void VisitPointers(Object** start, Object** end) { // Visit all HeapObject pointers in [start, end). for (Object** p = start; p < end; p++) { if ((*p)->IsHeapObject()) { - // Check that the symbol is actually a symbol. - CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || (*p)->IsSymbol()); + // Check that the string is actually internalized. + CHECK((*p)->IsTheHole() || (*p)->IsUndefined() || + (*p)->IsInternalizedString()); } } } }; -static void VerifySymbolTable() { - SymbolTableVerifier verifier; - HEAP->symbol_table()->IterateElements(&verifier); +static void VerifyStringTable() { + StringTableVerifier verifier; + HEAP->string_table()->IterateElements(&verifier); } #endif // VERIFY_HEAP @@ -744,11 +789,6 @@ void Heap::EnsureFromSpaceIsCommitted() { if (new_space_.CommitFromSpaceIfNeeded()) return; // Committing memory to from space failed. - // Try shrinking and try again. - Shrink(); - if (new_space_.CommitFromSpaceIfNeeded()) return; - - // Committing memory to from space failed again. // Memory is exhausted and we will die. 
V8::FatalProcessOutOfMemory("Committing semi space failed."); } @@ -777,7 +817,6 @@ void Heap::ClearJSFunctionResultCaches() { } - void Heap::ClearNormalizedMapCaches() { if (isolate_->bootstrapper()->IsActive() && !incremental_marking()->IsMarking()) { @@ -838,23 +877,17 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, #ifdef VERIFY_HEAP if (FLAG_verify_heap) { - VerifySymbolTable(); + VerifyStringTable(); } #endif - if (collector == MARK_COMPACTOR && global_gc_prologue_callback_) { - ASSERT(!allocation_allowed_); - GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - global_gc_prologue_callback_(); - } - GCType gc_type = collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge; - for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { - if (gc_type & gc_prologue_callbacks_[i].gc_type) { - gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); - } + { + GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); + VMState state(isolate_, EXTERNAL); + CallGCPrologueCallbacks(gc_type); } EnsureFromSpaceIsCommitted(); @@ -939,11 +972,16 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, isolate_->counters()->objs_since_last_young()->Set(0); + // Callbacks that fire after this point might trigger nested GCs and + // restart incremental marking, the assertion can't be moved down. + ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped()); + gc_post_processing_depth_++; { DisableAssertNoAllocation allow_allocation; GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); next_gc_likely_to_collect_more = - isolate_->global_handles()->PostGarbageCollectionProcessing(collector); + isolate_->global_handles()->PostGarbageCollectionProcessing( + collector, tracer); } gc_post_processing_depth_--; @@ -956,22 +994,15 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, amount_of_external_allocated_memory_; } - GCCallbackFlags callback_flags = kNoGCCallbackFlags; - for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { - if (gc_type & gc_epilogue_callbacks_[i].gc_type) { - gc_epilogue_callbacks_[i].callback(gc_type, callback_flags); - } - } - - if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) { - ASSERT(!allocation_allowed_); + { GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); - global_gc_epilogue_callback_(); + VMState state(isolate_, EXTERNAL); + CallGCEpilogueCallbacks(gc_type); } #ifdef VERIFY_HEAP if (FLAG_verify_heap) { - VerifySymbolTable(); + VerifyStringTable(); } #endif @@ -979,6 +1010,30 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector, } +void Heap::CallGCPrologueCallbacks(GCType gc_type) { + if (gc_type == kGCTypeMarkSweepCompact && global_gc_prologue_callback_) { + global_gc_prologue_callback_(); + } + for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { + if (gc_type & gc_prologue_callbacks_[i].gc_type) { + gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); + } + } +} + + +void Heap::CallGCEpilogueCallbacks(GCType gc_type) { + for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { + if (gc_type & gc_epilogue_callbacks_[i].gc_type) { + gc_epilogue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); + } + } + if (gc_type == kGCTypeMarkSweepCompact && global_gc_epilogue_callback_) { + global_gc_epilogue_callback_(); + } +} + + void Heap::MarkCompact(GCTracer* tracer) { gc_state_ = MARK_COMPACT; LOG(isolate_, ResourceEvent("markcompact", "begin")); @@ -1250,7 +1305,8 @@ void Heap::Scavenge() { 
incremental_marking()->PrepareForScavenge(); - AdvanceSweepers(static_cast<int>(new_space_.Size())); + paged_space(OLD_DATA_SPACE)->EnsureSweeperProgress(new_space_.Size()); + paged_space(OLD_POINTER_SPACE)->EnsureSweeperProgress(new_space_.Size()); // Flip the semispaces. After flipping, to space is empty, from space has // live objects. @@ -1305,10 +1361,24 @@ void Heap::Scavenge() { } } + // Copy objects reachable from the code flushing candidates list. + MarkCompactCollector* collector = mark_compact_collector(); + if (collector->is_code_flushing_enabled()) { + collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor); + } + // Scavenge object reachable from the native contexts list directly. scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_)); new_space_front = DoScavenge(&scavenge_visitor, new_space_front); + + while (isolate()->global_handles()->IterateObjectGroups( + &scavenge_visitor, &IsUnscavengedHeapObject)) { + new_space_front = DoScavenge(&scavenge_visitor, new_space_front); + } + isolate()->global_handles()->RemoveObjectGroups(); + isolate()->global_handles()->RemoveImplicitRefGroups(); + isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles( &IsUnscavengedHeapObject); isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots( @@ -1318,9 +1388,10 @@ void Heap::Scavenge() { UpdateNewSpaceReferencesInExternalStringTable( &UpdateNewSpaceReferenceInExternalStringTableEntry); + error_object_list_.UpdateReferencesInNewSpace(this); + promotion_queue_.Destroy(); - LiveObjectList::UpdateReferencesForScavengeGC(); if (!FLAG_watch_ic_patching) { isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); } @@ -1548,13 +1619,41 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { AssertNoAllocation no_allocation; - class VisitorAdapter : public ObjectVisitor { + // Both the external string table and the string table may contain + // external strings, but neither lists them exhaustively, nor is the + // intersection set empty. Therefore we iterate over the external string + // table first, ignoring internalized strings, and then over the + // internalized string table. + + class ExternalStringTableVisitorAdapter : public ObjectVisitor { + public: + explicit ExternalStringTableVisitorAdapter( + v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} + virtual void VisitPointers(Object** start, Object** end) { + for (Object** p = start; p < end; p++) { + // Visit non-internalized external strings, + // since internalized strings are listed in the string table. 
+ if (!(*p)->IsInternalizedString()) { + ASSERT((*p)->IsExternalString()); + visitor_->VisitExternalString(Utils::ToLocal( + Handle<String>(String::cast(*p)))); + } + } + } + private: + v8::ExternalResourceVisitor* visitor_; + } external_string_table_visitor(visitor); + + external_string_table_.Iterate(&external_string_table_visitor); + + class StringTableVisitorAdapter : public ObjectVisitor { public: - explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor) - : visitor_(visitor) {} + explicit StringTableVisitorAdapter( + v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {} virtual void VisitPointers(Object** start, Object** end) { for (Object** p = start; p < end; p++) { if ((*p)->IsExternalString()) { + ASSERT((*p)->IsInternalizedString()); visitor_->VisitExternalString(Utils::ToLocal( Handle<String>(String::cast(*p)))); } @@ -1562,8 +1661,9 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) { } private: v8::ExternalResourceVisitor* visitor_; - } visitor_adapter(visitor); - external_string_table_.Iterate(&visitor_adapter); + } string_table_visitor(visitor); + + string_table()->IterateElements(&string_table_visitor); } @@ -1660,7 +1760,7 @@ template<MarksHandling marks_handling, class ScavengingVisitor : public StaticVisitorBase { public: static void Initialize() { - table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString); + table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString); table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); table_.Register(kVisitByteArray, &EvacuateByteArray); @@ -1754,7 +1854,7 @@ class ScavengingVisitor : public StaticVisitorBase { HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address())); Isolate* isolate = heap->isolate(); if (isolate->logger()->is_logging_code_events() || - CpuProfiler::is_profiling(isolate)) { + isolate->cpu_profiler()->is_profiling()) { if (target->IsSharedFunctionInfo()) { PROFILE(isolate, SharedFunctionInfoMoveEvent( source->address(), target->address())); @@ -1904,11 +2004,11 @@ class ScavengingVisitor : public StaticVisitorBase { } - static inline void EvacuateSeqAsciiString(Map* map, + static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot, HeapObject* object) { - int object_size = SeqAsciiString::cast(object)-> - SeqAsciiStringSize(map->instance_type()); + int object_size = SeqOneByteString::cast(object)-> + SeqOneByteStringSize(map->instance_type()); EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>( map, slot, object, object_size); } @@ -2010,7 +2110,7 @@ static void InitializeScavengingVisitorsTables() { void Heap::SelectScavengingVisitorsTable() { bool logging_and_profiling = isolate()->logger()->is_logging() || - CpuProfiler::is_profiling(isolate()) || + isolate()->cpu_profiler()->is_profiling() || (isolate()->heap_profiler() != NULL && isolate()->heap_profiler()->is_profiling()); @@ -2100,6 +2200,8 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type, map->set_inobject_properties(0); map->set_pre_allocated_property_fields(0); map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER); + map->set_dependent_code(DependentCode::cast(empty_fixed_array()), + SKIP_WRITE_BARRIER); map->init_back_pointer(undefined_value()); map->set_unused_property_fields(0); map->set_instance_descriptors(empty_descriptor_array()); @@ -2171,11 +2273,11 @@ const Heap::StringTypeTable Heap::string_type_table[] = { }; -const Heap::ConstantSymbolTable 
Heap::constant_symbol_table[] = { -#define CONSTANT_SYMBOL_ELEMENT(name, contents) \ +const Heap::ConstantStringTable Heap::constant_string_table[] = { +#define CONSTANT_STRING_ELEMENT(name, contents) \ {contents, k##name##RootIndex}, - SYMBOL_LIST(CONSTANT_SYMBOL_ELEMENT) -#undef CONSTANT_SYMBOL_ELEMENT + INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT) +#undef CONSTANT_STRING_ELEMENT }; @@ -2235,14 +2337,18 @@ bool Heap::CreateInitialMaps() { // Fix the instance_descriptors for the existing maps. meta_map()->set_code_cache(empty_fixed_array()); + meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); meta_map()->init_back_pointer(undefined_value()); meta_map()->set_instance_descriptors(empty_descriptor_array()); fixed_array_map()->set_code_cache(empty_fixed_array()); + fixed_array_map()->set_dependent_code( + DependentCode::cast(empty_fixed_array())); fixed_array_map()->init_back_pointer(undefined_value()); fixed_array_map()->set_instance_descriptors(empty_descriptor_array()); oddball_map()->set_code_cache(empty_fixed_array()); + oddball_map()->set_dependent_code(DependentCode::cast(empty_fixed_array())); oddball_map()->init_back_pointer(undefined_value()); oddball_map()->set_instance_descriptors(empty_descriptor_array()); @@ -2274,6 +2380,11 @@ bool Heap::CreateInitialMaps() { } set_heap_number_map(Map::cast(obj)); + { MaybeObject* maybe_obj = AllocateMap(SYMBOL_TYPE, Symbol::kSize); + if (!maybe_obj->ToObject(&obj)) return false; + } + set_symbol_map(Map::cast(obj)); + { MaybeObject* maybe_obj = AllocateMap(FOREIGN_TYPE, Foreign::kSize); if (!maybe_obj->ToObject(&obj)) return false; } @@ -2475,6 +2586,14 @@ bool Heap::CreateInitialMaps() { } set_message_object_map(Map::cast(obj)); + Map* external_map; + { MaybeObject* maybe_obj = + AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize); + if (!maybe_obj->To(&external_map)) return false; + } + external_map->set_is_extensible(false); + set_external_map(external_map); + ASSERT(!InNewSpace(empty_fixed_array())); return true; } @@ -2570,13 +2689,13 @@ bool Heap::CreateApiObjects() { void Heap::CreateJSEntryStub() { JSEntryStub stub; - set_js_entry_code(*stub.GetCode()); + set_js_entry_code(*stub.GetCode(isolate())); } void Heap::CreateJSConstructEntryStub() { JSConstructEntryStub stub; - set_js_construct_entry_code(*stub.GetCode()); + set_js_construct_entry_code(*stub.GetCode(isolate())); } @@ -2585,7 +2704,7 @@ void Heap::CreateFixedStubs() { // for cooking and uncooking (check out frames.cc). // The eliminates the need for doing dictionary lookup in the // stub cache for these stubs. - HandleScope scope; + HandleScope scope(isolate()); // gcc-4.4 has problem generating correct code of following snippet: // { JSEntryStub stub; // js_entry_code_ = *stub.GetCode(); @@ -2601,7 +2720,7 @@ void Heap::CreateFixedStubs() { // create them if we need them during the creation of another stub. // Stub creation mixes raw pointers and handles in an unsafe manner so // we cannot create stubs while we are creating stubs. - CodeStub::GenerateStubsAheadOfTime(); + CodeStub::GenerateStubsAheadOfTime(isolate()); } @@ -2626,17 +2745,18 @@ bool Heap::CreateInitialObjects() { set_infinity_value(HeapNumber::cast(obj)); // The hole has not been created yet, but we want to put something - // predictable in the gaps in the symbol table, so lets make that Smi zero. + // predictable in the gaps in the string table, so lets make that Smi zero. 
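constant_string_table above is generated from INTERNALIZED_STRING_LIST with an X-macro, so the root indices and the bootstrap table cannot drift apart. A self-contained sketch of the same pattern with an invented two-entry list (the list name, entries, and root names below differ from V8's real ones):

// X-macro sketch: one list expands into both the enum and the table.
#include <cstdio>

#define STRING_LIST(V)                  \
  V(ConstructorString, "constructor")   \
  V(LengthString, "length")

enum RootListIndex {
#define DECLARE_INDEX(name, contents) k##name##RootIndex,
  STRING_LIST(DECLARE_INDEX)
#undef DECLARE_INDEX
  kRootListLength
};

struct ConstantStringTableEntry {
  const char* contents;
  RootListIndex index;
};

static const ConstantStringTableEntry constant_string_table[] = {
#define CONSTANT_STRING_ELEMENT(name, contents) {contents, k##name##RootIndex},
  STRING_LIST(CONSTANT_STRING_ELEMENT)
#undef CONSTANT_STRING_ELEMENT
};

int main() {
  for (const auto& entry : constant_string_table) {
    std::printf("%s -> root %d\n", entry.contents, entry.index);
  }
  return 0;
}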
set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0))); - // Allocate initial symbol table. - { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize); + // Allocate initial string table. + { MaybeObject* maybe_obj = + StringTable::Allocate(this, kInitialStringTableSize); if (!maybe_obj->ToObject(&obj)) return false; } - // Don't use set_symbol_table() due to asserts. - roots_[kSymbolTableRootIndex] = obj; + // Don't use set_string_table() due to asserts. + roots_[kStringTableRootIndex] = obj; - // Finish initializing oddballs after creating symboltable. + // Finish initializing oddballs after creating the string table. { MaybeObject* maybe_obj = undefined_value()->Initialize("undefined", nan_value(), @@ -2692,31 +2812,25 @@ bool Heap::CreateInitialObjects() { } set_termination_exception(obj); - // Allocate the empty string. - { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED); - if (!maybe_obj->ToObject(&obj)) return false; - } - set_empty_string(String::cast(obj)); - - for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) { + for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) { { MaybeObject* maybe_obj = - LookupAsciiSymbol(constant_symbol_table[i].contents); + InternalizeUtf8String(constant_string_table[i].contents); if (!maybe_obj->ToObject(&obj)) return false; } - roots_[constant_symbol_table[i].index] = String::cast(obj); + roots_[constant_string_table[i].index] = String::cast(obj); } - // Allocate the hidden symbol which is used to identify the hidden properties + // Allocate the hidden string which is used to identify the hidden properties // in JSObjects. The hash code has a special value so that it will not match // the empty string when searching for the property. It cannot be part of the // loop above because it needs to be allocated manually with the special - // hash code in place. The hash code for the hidden_symbol is zero to ensure + // hash code in place. The hash code for the hidden_string is zero to ensure // that it will always be at the first entry in property descriptors. - { MaybeObject* maybe_obj = - AllocateSymbol(CStrVector(""), 0, String::kEmptyStringHash); + { MaybeObject* maybe_obj = AllocateOneByteInternalizedString( + OneByteVector("", 0), String::kEmptyStringHash); if (!maybe_obj->ToObject(&obj)) return false; } - hidden_symbol_ = String::cast(obj); + hidden_string_ = String::cast(obj); // Allocate the foreign for __proto__. { MaybeObject* maybe_obj = @@ -2727,7 +2841,7 @@ bool Heap::CreateInitialObjects() { // Allocate the code_stubs dictionary. The initial size is set to avoid // expanding the dictionary during bootstrapping. - { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(128); + { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128); if (!maybe_obj->ToObject(&obj)) return false; } set_code_stubs(UnseededNumberDictionary::cast(obj)); @@ -2735,7 +2849,7 @@ bool Heap::CreateInitialObjects() { // Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size // is set to avoid expanding the dictionary during bootstrapping. - { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(64); + { MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 64); if (!maybe_obj->ToObject(&obj)) return false; } set_non_monomorphic_cache(UnseededNumberDictionary::cast(obj)); @@ -2752,23 +2866,24 @@ bool Heap::CreateInitialObjects() { CreateFixedStubs(); // Allocate the dictionary of intrinsic function names. 
- { MaybeObject* maybe_obj = StringDictionary::Allocate(Runtime::kNumFunctions); + { MaybeObject* maybe_obj = + NameDictionary::Allocate(this, Runtime::kNumFunctions); if (!maybe_obj->ToObject(&obj)) return false; } { MaybeObject* maybe_obj = Runtime::InitializeIntrinsicFunctionNames(this, obj); if (!maybe_obj->ToObject(&obj)) return false; } - set_intrinsic_function_names(StringDictionary::cast(obj)); + set_intrinsic_function_names(NameDictionary::cast(obj)); { MaybeObject* maybe_obj = AllocateInitialNumberStringCache(); if (!maybe_obj->ToObject(&obj)) return false; } set_number_string_cache(FixedArray::cast(obj)); - // Allocate cache for single character ASCII strings. + // Allocate cache for single character one byte strings. { MaybeObject* maybe_obj = - AllocateFixedArray(String::kMaxAsciiCharCode + 1, TENURED); + AllocateFixedArray(String::kMaxOneByteCharCode + 1, TENURED); if (!maybe_obj->ToObject(&obj)) return false; } set_single_character_string_cache(FixedArray::cast(obj)); @@ -2792,6 +2907,15 @@ bool Heap::CreateInitialObjects() { } set_natives_source_cache(FixedArray::cast(obj)); + // Allocate object to hold object observation state. + { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); + if (!maybe_obj->ToObject(&obj)) return false; + } + { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj)); + if (!maybe_obj->ToObject(&obj)) return false; + } + set_observation_state(JSObject::cast(obj)); + // Handling of script id generation is in FACTORY->NewScript. set_last_script_id(undefined_value()); @@ -2811,15 +2935,44 @@ bool Heap::CreateInitialObjects() { } +bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) { + RootListIndex writable_roots[] = { + kStoreBufferTopRootIndex, + kStackLimitRootIndex, + kNumberStringCacheRootIndex, + kInstanceofCacheFunctionRootIndex, + kInstanceofCacheMapRootIndex, + kInstanceofCacheAnswerRootIndex, + kCodeStubsRootIndex, + kNonMonomorphicCacheRootIndex, + kPolymorphicCodeCacheRootIndex, + kLastScriptIdRootIndex, + kEmptyScriptRootIndex, + kRealStackLimitRootIndex, + kArgumentsAdaptorDeoptPCOffsetRootIndex, + kConstructStubDeoptPCOffsetRootIndex, + kGetterStubDeoptPCOffsetRootIndex, + kSetterStubDeoptPCOffsetRootIndex, + kStringTableRootIndex, + }; + + for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) { + if (root_index == writable_roots[i]) + return true; + } + return false; +} + + Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string, Object* key_pattern, ResultsCacheType type) { FixedArray* cache; - if (!key_string->IsSymbol()) return Smi::FromInt(0); + if (!key_string->IsInternalizedString()) return Smi::FromInt(0); if (type == STRING_SPLIT_SUBSTRINGS) { ASSERT(key_pattern->IsString()); - if (!key_pattern->IsSymbol()) return Smi::FromInt(0); + if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0); cache = heap->string_split_cache(); } else { ASSERT(type == REGEXP_MULTIPLE_INDICES); @@ -2850,10 +3003,10 @@ void RegExpResultsCache::Enter(Heap* heap, FixedArray* value_array, ResultsCacheType type) { FixedArray* cache; - if (!key_string->IsSymbol()) return; + if (!key_string->IsInternalizedString()) return; if (type == STRING_SPLIT_SUBSTRINGS) { ASSERT(key_pattern->IsString()); - if (!key_pattern->IsSymbol()) return; + if (!key_pattern->IsInternalizedString()) return; cache = heap->string_split_cache(); } else { ASSERT(type == REGEXP_MULTIPLE_INDICES); @@ -2885,14 +3038,14 @@ void RegExpResultsCache::Enter(Heap* heap, } } // If the array is a reasonably 
short list of substrings, convert it into a - // list of symbols. + // list of internalized strings. if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) { for (int i = 0; i < value_array->length(); i++) { String* str = String::cast(value_array->get(i)); - Object* symbol; - MaybeObject* maybe_symbol = heap->LookupSymbol(str); - if (maybe_symbol->ToObject(&symbol)) { - value_array->set(i, symbol); + Object* internalized_str; + MaybeObject* maybe_string = heap->InternalizeString(str); + if (maybe_string->ToObject(&internalized_str)) { + value_array->set(i, internalized_str); } } } @@ -3028,7 +3181,7 @@ MaybeObject* Heap::NumberToString(Object* number, } Object* js_string; - MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str)); + MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str)); if (maybe_js_string->ToObject(&js_string)) { SetNumberStringCache(number, String::cast(js_string)); } @@ -3121,11 +3274,11 @@ MaybeObject* Heap::AllocateSharedFunctionInfo(Object* name) { Code* illegal = isolate_->builtins()->builtin(Builtins::kIllegal); share->set_code(illegal); share->ClearOptimizedCodeMap(); - share->set_scope_info(ScopeInfo::Empty()); + share->set_scope_info(ScopeInfo::Empty(isolate_)); Code* construct_stub = isolate_->builtins()->builtin(Builtins::kJSConstructStubGeneric); share->set_construct_stub(construct_stub); - share->set_instance_class_name(Object_symbol()); + share->set_instance_class_name(Object_string()); share->set_function_data(undefined_value(), SKIP_WRITE_BARRIER); share->set_script(undefined_value(), SKIP_WRITE_BARRIER); share->set_debug_info(undefined_value(), SKIP_WRITE_BARRIER); @@ -3189,25 +3342,26 @@ static inline bool Between(uint32_t character, uint32_t from, uint32_t to) { MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString( Heap* heap, - uint32_t c1, - uint32_t c2) { - String* symbol; + uint16_t c1, + uint16_t c2) { + String* result; // Numeric strings have a different hash algorithm not known by - // LookupTwoCharsSymbolIfExists, so we skip this step for such strings. + // LookupTwoCharsStringIfExists, so we skip this step for such strings. if ((!Between(c1, '0', '9') || !Between(c2, '0', '9')) && - heap->symbol_table()->LookupTwoCharsSymbolIfExists(c1, c2, &symbol)) { - return symbol; + heap->string_table()->LookupTwoCharsStringIfExists(c1, c2, &result)) { + return result; // Now we know the length is 2, we might as well make use of that fact // when building the new string. - } else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this - ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this. + } else if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) { + // We can do this. + ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1)); // because of this. Object* result; - { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2); + { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2); if (!maybe_result->ToObject(&result)) return maybe_result; } - char* dest = SeqAsciiString::cast(result)->GetChars(); - dest[0] = c1; - dest[1] = c2; + uint8_t* dest = SeqOneByteString::cast(result)->GetChars(); + dest[0] = static_cast<uint8_t>(c1); + dest[1] = static_cast<uint8_t>(c2); return result; } else { Object* result; @@ -3236,27 +3390,26 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) { int length = first_length + second_length; // Optimization for 2-byte strings often used as keys in a decompression - // dictionary. 
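MakeOrFindTwoCharacterString first probes the string table for an existing two-character string and only builds a fresh one on a miss, choosing the one-byte representation when both code units fit in a byte. A standalone sketch under stated assumptions (std::map plays the role of the string table, TwoCharString the role of the allocated string; the numeric-string special case is omitted):

// Two-character string fast path (stand-in types, not V8's string table).
#include <cstdint>
#include <map>
#include <string>

static const uint16_t kMaxOneByteCharCode = 0xFF;

struct TwoCharString {
  std::u16string chars;
  bool one_byte;  // which representation the allocator would pick
};

static TwoCharString MakeOrFindTwoCharacterString(
    std::map<uint32_t, TwoCharString>* string_table,
    uint16_t c1, uint16_t c2) {
  uint32_t key = (static_cast<uint32_t>(c1) << 16) | c2;
  auto it = string_table->find(key);
  if (it != string_table->end()) return it->second;  // reuse existing string

  TwoCharString result;
  result.chars = std::u16string{static_cast<char16_t>(c1),
                                static_cast<char16_t>(c2)};
  // Mirrors the (c1 | c2) <= kMaxOneByteCharCodeU test: use the compact
  // one-byte representation only when both characters fit in a byte.
  result.one_byte = (c1 | c2) <= kMaxOneByteCharCode;
  (*string_table)[key] = result;
  return result;
}

int main() {
  std::map<uint32_t, TwoCharString> string_table;
  TwoCharString ab = MakeOrFindTwoCharacterString(&string_table, 'a', 'b');
  return ab.one_byte ? 0 : 1;
}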
Check whether we already have the string in the symbol + // dictionary. Check whether we already have the string in the string // table to prevent creation of many unneccesary strings. if (length == 2) { - unsigned c1 = first->Get(0); - unsigned c2 = second->Get(0); + uint16_t c1 = first->Get(0); + uint16_t c2 = second->Get(0); return MakeOrFindTwoCharacterString(this, c1, c2); } - bool first_is_ascii = first->IsAsciiRepresentation(); - bool second_is_ascii = second->IsAsciiRepresentation(); - bool is_ascii = first_is_ascii && second_is_ascii; - + bool first_is_one_byte = first->IsOneByteRepresentation(); + bool second_is_one_byte = second->IsOneByteRepresentation(); + bool is_one_byte = first_is_one_byte && second_is_one_byte; // Make sure that an out of memory exception is thrown if the length // of the new cons string is too large. if (length > String::kMaxLength || length < 0) { isolate()->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x4); } bool is_ascii_data_in_two_byte_string = false; - if (!is_ascii) { + if (!is_one_byte) { // At least one of the strings uses two-byte representation so we // can't use the fast case code for short ASCII strings below, but // we can try to save memory if all chars actually fit in ASCII. @@ -3273,37 +3426,37 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) { STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength); ASSERT(first->IsFlat()); ASSERT(second->IsFlat()); - if (is_ascii) { + if (is_one_byte) { Object* result; - { MaybeObject* maybe_result = AllocateRawAsciiString(length); + { MaybeObject* maybe_result = AllocateRawOneByteString(length); if (!maybe_result->ToObject(&result)) return maybe_result; } // Copy the characters into the new object. - char* dest = SeqAsciiString::cast(result)->GetChars(); + uint8_t* dest = SeqOneByteString::cast(result)->GetChars(); // Copy first part. - const char* src; + const uint8_t* src; if (first->IsExternalString()) { src = ExternalAsciiString::cast(first)->GetChars(); } else { - src = SeqAsciiString::cast(first)->GetChars(); + src = SeqOneByteString::cast(first)->GetChars(); } for (int i = 0; i < first_length; i++) *dest++ = src[i]; // Copy second part. if (second->IsExternalString()) { src = ExternalAsciiString::cast(second)->GetChars(); } else { - src = SeqAsciiString::cast(second)->GetChars(); + src = SeqOneByteString::cast(second)->GetChars(); } for (int i = 0; i < second_length; i++) *dest++ = src[i]; return result; } else { if (is_ascii_data_in_two_byte_string) { Object* result; - { MaybeObject* maybe_result = AllocateRawAsciiString(length); + { MaybeObject* maybe_result = AllocateRawOneByteString(length); if (!maybe_result->ToObject(&result)) return maybe_result; } // Copy the characters into the new object. - char* dest = SeqAsciiString::cast(result)->GetChars(); + uint8_t* dest = SeqOneByteString::cast(result)->GetChars(); String::WriteToFlat(first, dest, 0, first_length); String::WriteToFlat(second, dest + first_length, 0, second_length); isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment(); @@ -3322,7 +3475,7 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) { } } - Map* map = (is_ascii || is_ascii_data_in_two_byte_string) ? + Map* map = (is_one_byte || is_ascii_data_in_two_byte_string) ? 
cons_ascii_string_map() : cons_string_map(); Object* result; @@ -3352,10 +3505,10 @@ MaybeObject* Heap::AllocateSubString(String* buffer, return LookupSingleCharacterStringFromCode(buffer->Get(start)); } else if (length == 2) { // Optimization for 2-byte strings often used as keys in a decompression - // dictionary. Check whether we already have the string in the symbol - // table to prevent creation of many unneccesary strings. - unsigned c1 = buffer->Get(start); - unsigned c2 = buffer->Get(start + 1); + // dictionary. Check whether we already have the string in the string + // table to prevent creation of many unnecessary strings. + uint16_t c1 = buffer->Get(start); + uint16_t c2 = buffer->Get(start + 1); return MakeOrFindTwoCharacterString(this, c1, c2); } @@ -3370,17 +3523,17 @@ MaybeObject* Heap::AllocateSubString(String* buffer, // WriteToFlat takes care of the case when an indirect string has a // different encoding from its underlying string. These encodings may // differ because of externalization. - bool is_ascii = buffer->IsAsciiRepresentation(); - { MaybeObject* maybe_result = is_ascii - ? AllocateRawAsciiString(length, pretenure) + bool is_one_byte = buffer->IsOneByteRepresentation(); + { MaybeObject* maybe_result = is_one_byte + ? AllocateRawOneByteString(length, pretenure) : AllocateRawTwoByteString(length, pretenure); if (!maybe_result->ToObject(&result)) return maybe_result; } String* string_result = String::cast(result); // Copy the characters into the new object. - if (is_ascii) { - ASSERT(string_result->IsAsciiRepresentation()); - char* dest = SeqAsciiString::cast(string_result)->GetChars(); + if (is_one_byte) { + ASSERT(string_result->IsOneByteRepresentation()); + uint8_t* dest = SeqOneByteString::cast(string_result)->GetChars(); String::WriteToFlat(buffer, dest, start, end); } else { ASSERT(string_result->IsTwoByteRepresentation()); @@ -3404,7 +3557,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer, // indirect ASCII string is pointing to a two-byte string, the two-byte char // codes of the underlying string must still fit into ASCII (because // externalization must not change char codes). - { Map* map = buffer->IsAsciiRepresentation() + { Map* map = buffer->IsOneByteRepresentation() ? sliced_ascii_string_map() : sliced_string_map(); MaybeObject* maybe_result = Allocate(map, NEW_SPACE); @@ -3440,11 +3593,9 @@ MaybeObject* Heap::AllocateExternalStringFromAscii( size_t length = resource->length(); if (length > static_cast<size_t>(String::kMaxLength)) { isolate()->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x5); } - ASSERT(String::IsAscii(resource->data(), static_cast<int>(length))); - Map* map = external_ascii_string_map(); Object* result; { MaybeObject* maybe_result = Allocate(map, NEW_SPACE); @@ -3465,15 +3616,15 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte( size_t length = resource->length(); if (length > static_cast<size_t>(String::kMaxLength)) { isolate()->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x6); } // For small strings we check whether the resource contains only - // ASCII characters. If yes, we use a different string map. + // one byte characters. If yes, we use a different string map. static const size_t kAsciiCheckLengthLimit = 32; - bool is_ascii = length <= kAsciiCheckLengthLimit && - String::IsAscii(resource->data(), static_cast<int>(length)); - Map* map = is_ascii ? 
+ bool is_one_byte = length <= kAsciiCheckLengthLimit && + String::IsOneByte(resource->data(), static_cast<int>(length)); + Map* map = is_one_byte ? external_string_with_ascii_data_map() : external_string_map(); Object* result; { MaybeObject* maybe_result = Allocate(map, NEW_SPACE); @@ -3490,14 +3641,15 @@ MaybeObject* Heap::AllocateExternalStringFromTwoByte( MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) { - if (code <= String::kMaxAsciiCharCode) { + if (code <= String::kMaxOneByteCharCode) { Object* value = single_character_string_cache()->get(code); if (value != undefined_value()) return value; - char buffer[1]; - buffer[0] = static_cast<char>(code); + uint8_t buffer[1]; + buffer[0] = static_cast<uint8_t>(code); Object* result; - MaybeObject* maybe_result = LookupSymbol(Vector<const char>(buffer, 1)); + MaybeObject* maybe_result = + InternalizeOneByteString(Vector<const uint8_t>(buffer, 1)); if (!maybe_result->ToObject(&result)) return maybe_result; single_character_string_cache()->set(code, result); @@ -3516,7 +3668,7 @@ MaybeObject* Heap::LookupSingleCharacterStringFromCode(uint16_t code) { MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) { if (length < 0 || length > ByteArray::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x7); } if (pretenure == NOT_TENURED) { return AllocateByteArray(length); @@ -3538,7 +3690,7 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) { MaybeObject* Heap::AllocateByteArray(int length) { if (length < 0 || length > ByteArray::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x8); } int size = ByteArray::SizeFor(length); AllocationSpace space = @@ -3633,18 +3785,28 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, ASSERT(!isolate_->code_range()->exists() || isolate_->code_range()->contains(code->address())); code->set_instruction_size(desc.instr_size); + // TODO(mstarzinger): Remove once we found the bug. + CHECK(reloc_info->IsByteArray()); code->set_relocation_info(reloc_info); + // TODO(mstarzinger): Remove once we found the bug. + CHECK(code->relocation_info()->IsByteArray()); code->set_flags(flags); if (code->is_call_stub() || code->is_keyed_call_stub()) { code->set_check_type(RECEIVER_MAP_CHECK); } code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER); - code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER); + code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value()); code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER); code->set_gc_metadata(Smi::FromInt(0)); code->set_ic_age(global_ic_age_); + code->set_prologue_offset(kPrologueOffsetNotSet); + if (code->kind() == Code::OPTIMIZED_FUNCTION) { + code->set_marked_for_deoptimization(false); + } // Allow self references to created code object by patching the handle to // point to the newly allocated Code object. + CHECK(code->IsCode()); + CHECK(code->relocation_info()->IsByteArray()); if (!self_reference.is_null()) { *(self_reference.location()) = code; } @@ -3653,6 +3815,8 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc, // that are dereferenced during the copy to point directly to the actual heap // objects. These pointers can include references to the code object itself, // through the self_reference parameter. 
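LookupSingleCharacterStringFromCode keeps a cache of size kMaxOneByteCharCode + 1 indexed by character code and fills entries lazily, so each one-byte character is interned at most once. A minimal sketch with hypothetical types (the fallback path for codes above the one-byte range is omitted):

// Lazily filled single-character string cache (stand-in types).
#include <cstdint>
#include <string>
#include <vector>

static const uint16_t kMaxOneByteCharCode = 0xFF;

struct SingleCharacterStringCache {
  std::vector<std::string> entries =
      std::vector<std::string>(kMaxOneByteCharCode + 1);

  const std::string& Lookup(uint16_t code) {
    std::string& slot = entries[code];
    if (slot.empty()) {
      // Cache miss: build the one-character string and remember it.
      slot = std::string(1, static_cast<char>(code));
    }
    return slot;
  }
};

int main() {
  SingleCharacterStringCache cache;
  const std::string& a1 = cache.Lookup('a');
  const std::string& a2 = cache.Lookup('a');
  return (&a1 == &a2) ? 0 : 1;  // second lookup reuses the cached entry
}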
+ CHECK(code->IsCode()); + CHECK(code->relocation_info()->IsByteArray()); code->CopyFrom(desc); #ifdef VERIFY_HEAP @@ -3746,6 +3910,28 @@ MaybeObject* Heap::CopyCode(Code* code, Vector<byte> reloc_info) { } +MaybeObject* Heap::AllocateWithAllocationSite(Map* map, AllocationSpace space, + Handle<Object> allocation_site_info_payload) { + ASSERT(gc_state_ == NOT_IN_GC); + ASSERT(map->instance_type() != MAP_TYPE); + // If allocation failures are disallowed, we may allocate in a different + // space when new space is full and the object is not a large object. + AllocationSpace retry_space = + (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); + int size = map->instance_size() + AllocationSiteInfo::kSize; + Object* result; + MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); + if (!maybe_result->ToObject(&result)) return maybe_result; + // No need for write barrier since object is white and map is in old space. + HeapObject::cast(result)->set_map_no_write_barrier(map); + AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>( + reinterpret_cast<Address>(result) + map->instance_size()); + alloc_info->set_map_no_write_barrier(allocation_site_info_map()); + alloc_info->set_payload(*allocation_site_info_payload, SKIP_WRITE_BARRIER); + return result; +} + + MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) { ASSERT(gc_state_ == NOT_IN_GC); ASSERT(map->instance_type() != MAP_TYPE); @@ -3753,11 +3939,10 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) { // space when new space is full and the object is not a large object. AllocationSpace retry_space = (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type()); + int size = map->instance_size(); Object* result; - { MaybeObject* maybe_result = - AllocateRaw(map->instance_size(), space, retry_space); - if (!maybe_result->ToObject(&result)) return maybe_result; - } + MaybeObject* maybe_result = AllocateRaw(size, space, retry_space); + if (!maybe_result->ToObject(&result)) return maybe_result; // No need for write barrier since object is white and map is in old space. HeapObject::cast(result)->set_map_no_write_barrier(map); return result; @@ -3802,7 +3987,7 @@ MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) { // constructor to the function. 
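The new AllocateWithAllocationSite reserves map->instance_size() plus AllocationSiteInfo::kSize in a single allocation and writes the site-info record directly behind the object. A standalone layout sketch with plain C++ structs (FakeObject and AllocationSiteInfo below are stand-ins, not V8's tagged heap objects):

// Object with a trailing allocation-site record in one contiguous block.
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <new>

struct AllocationSiteInfo {
  const void* payload;  // in V8 this points back at the allocation site
};

struct FakeObject {
  int field_a;
  int field_b;
};

int main() {
  const size_t instance_size = sizeof(FakeObject);
  const size_t total = instance_size + sizeof(AllocationSiteInfo);

  // One allocation: the object first, the site info immediately after it.
  uint8_t* base = static_cast<uint8_t*>(std::malloc(total));
  FakeObject* object = new (base) FakeObject{1, 2};
  AllocationSiteInfo* info =
      new (base + instance_size) AllocationSiteInfo{/*payload=*/object};

  bool trailing = reinterpret_cast<uint8_t*>(info) ==
                  reinterpret_cast<uint8_t*>(object) + instance_size;
  std::free(base);
  return trailing ? 0 : 1;
}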
MaybeObject* maybe_failure = JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes( - constructor_symbol(), function, DONT_ENUM); + constructor_string(), function, DONT_ENUM); if (maybe_failure->IsFailure()) return maybe_failure; return prototype; @@ -3887,9 +4072,9 @@ MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) { static bool HasDuplicates(DescriptorArray* descriptors) { int count = descriptors->number_of_descriptors(); if (count > 1) { - String* prev_key = descriptors->GetKey(0); + Name* prev_key = descriptors->GetKey(0); for (int i = 1; i != count; i++) { - String* current_key = descriptors->GetKey(i); + Name* current_key = descriptors->GetKey(i); if (prev_key == current_key) return true; prev_key = current_key; } @@ -3941,7 +4126,7 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) { DescriptorArray::WhitenessWitness witness(descriptors); for (int i = 0; i < count; i++) { String* name = fun->shared()->GetThisPropertyAssignmentName(i); - ASSERT(name->IsSymbol()); + ASSERT(name->IsInternalizedString()); FieldDescriptor field(name, i, NONE, i + 1); descriptors->Set(i, &field, witness); } @@ -4025,15 +4210,53 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) { (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE; Object* obj; - { MaybeObject* maybe_obj = Allocate(map, space); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; + MaybeObject* maybe_obj = Allocate(map, space); + if (!maybe_obj->To(&obj)) return maybe_obj; + + // Initialize the JSObject. + InitializeJSObjectFromMap(JSObject::cast(obj), + FixedArray::cast(properties), + map); + ASSERT(JSObject::cast(obj)->HasFastElements()); + return obj; +} + + +MaybeObject* Heap::AllocateJSObjectFromMapWithAllocationSite(Map* map, + Handle<Object> allocation_site_info_payload) { + // JSFunctions should be allocated using AllocateFunction to be + // properly initialized. + ASSERT(map->instance_type() != JS_FUNCTION_TYPE); + + // Both types of global objects should be allocated using + // AllocateGlobalObject to be properly initialized. + ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE); + ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE); + + // Allocate the backing storage for the properties. + int prop_size = + map->pre_allocated_property_fields() + + map->unused_property_fields() - + map->inobject_properties(); + ASSERT(prop_size >= 0); + Object* properties; + { MaybeObject* maybe_properties = AllocateFixedArray(prop_size); + if (!maybe_properties->ToObject(&properties)) return maybe_properties; } + // Allocate the JSObject. + AllocationSpace space = NEW_SPACE; + if (map->instance_size() > Page::kMaxNonCodeHeapObjectSize) space = LO_SPACE; + Object* obj; + MaybeObject* maybe_obj = AllocateWithAllocationSite(map, space, + allocation_site_info_payload); + if (!maybe_obj->To(&obj)) return maybe_obj; + // Initialize the JSObject. InitializeJSObjectFromMap(JSObject::cast(obj), FixedArray::cast(properties), map); - ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements()); + ASSERT(JSObject::cast(obj)->HasFastElements()); return obj; } @@ -4061,6 +4284,51 @@ MaybeObject* Heap::AllocateJSObject(JSFunction* constructor, } +MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor, + Handle<Object> allocation_site_info_payload) { + // Allocate the initial map if absent. 
+ if (!constructor->has_initial_map()) { + Object* initial_map; + { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor); + if (!maybe_initial_map->ToObject(&initial_map)) return maybe_initial_map; + } + constructor->set_initial_map(Map::cast(initial_map)); + Map::cast(initial_map)->set_constructor(constructor); + } + // Allocate the object based on the constructors initial map, or the payload + // advice + Map* initial_map = constructor->initial_map(); + + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast( + *allocation_site_info_payload); + Smi* smi = Smi::cast(cell->value()); + ElementsKind to_kind = static_cast<ElementsKind>(smi->value()); + AllocationSiteMode mode = TRACK_ALLOCATION_SITE; + if (to_kind != initial_map->elements_kind()) { + MaybeObject* maybe_new_map = constructor->GetElementsTransitionMap( + isolate(), to_kind); + if (!maybe_new_map->To(&initial_map)) return maybe_new_map; + // Possibly alter the mode, since we found an updated elements kind + // in the type info cell. + mode = AllocationSiteInfo::GetMode(to_kind); + } + + MaybeObject* result; + if (mode == TRACK_ALLOCATION_SITE) { + result = AllocateJSObjectFromMapWithAllocationSite(initial_map, + allocation_site_info_payload); + } else { + result = AllocateJSObjectFromMap(initial_map, NOT_TENURED); + } +#ifdef DEBUG + // Make sure result is NOT a global object if valid. + Object* non_failure; + ASSERT(!result->ToObject(&non_failure) || !non_failure->IsGlobalObject()); +#endif + return result; +} + + MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) { // Allocate a fresh map. Modules do not have a prototype. Map* map; @@ -4082,13 +4350,66 @@ MaybeObject* Heap::AllocateJSArrayAndStorage( int capacity, ArrayStorageAllocationMode mode, PretenureFlag pretenure) { + MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure); + JSArray* array; + if (!maybe_array->To(&array)) return maybe_array; + + // TODO(mvstanton): this body of code is duplicate with AllocateJSArrayStorage + // for performance reasons. 
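AllocateJSObjectWithAllocationSite reads the elements kind recorded in the type-feedback cell, switches to the transitioned map when that kind differs from the constructor's initial map, and re-asks AllocationSiteInfo::GetMode whether tracking is still worthwhile. A minimal sketch of that decision with invented enums; the GetMode policy shown is an assumption for illustration, not V8's exact rule:

// Elements-kind decision driven by allocation-site feedback (hypothetical).
#include <cstdio>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

// Assumed policy: tracking only pays off while the kind can still widen.
static AllocationSiteMode GetMode(ElementsKind kind) {
  return kind == FAST_ELEMENTS ? DONT_TRACK_ALLOCATION_SITE
                               : TRACK_ALLOCATION_SITE;
}

struct AllocationDecision {
  ElementsKind kind;        // elements kind the new object starts with
  AllocationSiteMode mode;  // whether to append AllocationSiteInfo
};

static AllocationDecision Decide(ElementsKind initial_kind,
                                 ElementsKind feedback_kind) {
  AllocationDecision decision = {initial_kind, TRACK_ALLOCATION_SITE};
  if (feedback_kind != initial_kind) {
    // The feedback cell recorded a different kind: allocate with the
    // transitioned map and re-check whether tracking is still worthwhile.
    decision.kind = feedback_kind;
    decision.mode = GetMode(feedback_kind);
  }
  return decision;
}

int main() {
  AllocationDecision d = Decide(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  std::printf("kind=%d track=%d\n", static_cast<int>(d.kind),
              d.mode == TRACK_ALLOCATION_SITE);
  return 0;
}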
ASSERT(capacity >= length); - if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) { - elements_kind = GetHoleyElementsKind(elements_kind); + + if (capacity == 0) { + array->set_length(Smi::FromInt(0)); + array->set_elements(empty_fixed_array()); + return array; } - MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure); + + FixedArrayBase* elms; + MaybeObject* maybe_elms = NULL; + if (IsFastDoubleElementsKind(elements_kind)) { + if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { + maybe_elms = AllocateUninitializedFixedDoubleArray(capacity); + } else { + ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); + maybe_elms = AllocateFixedDoubleArrayWithHoles(capacity); + } + } else { + ASSERT(IsFastSmiOrObjectElementsKind(elements_kind)); + if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { + maybe_elms = AllocateUninitializedFixedArray(capacity); + } else { + ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); + maybe_elms = AllocateFixedArrayWithHoles(capacity); + } + } + if (!maybe_elms->To(&elms)) return maybe_elms; + + array->set_elements(elms); + array->set_length(Smi::FromInt(length)); + return array; +} + + +MaybeObject* Heap::AllocateJSArrayAndStorageWithAllocationSite( + ElementsKind elements_kind, + int length, + int capacity, + Handle<Object> allocation_site_payload, + ArrayStorageAllocationMode mode) { + MaybeObject* maybe_array = AllocateJSArrayWithAllocationSite(elements_kind, + allocation_site_payload); JSArray* array; if (!maybe_array->To(&array)) return maybe_array; + return AllocateJSArrayStorage(array, length, capacity, mode); +} + + +MaybeObject* Heap::AllocateJSArrayStorage( + JSArray* array, + int length, + int capacity, + ArrayStorageAllocationMode mode) { + ASSERT(capacity >= length); if (capacity == 0) { array->set_length(Smi::FromInt(0)); @@ -4098,7 +4419,8 @@ MaybeObject* Heap::AllocateJSArrayAndStorage( FixedArrayBase* elms; MaybeObject* maybe_elms = NULL; - if (elements_kind == FAST_DOUBLE_ELEMENTS) { + ElementsKind elements_kind = array->GetElementsKind(); + if (IsFastDoubleElementsKind(elements_kind)) { if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) { maybe_elms = AllocateUninitializedFixedDoubleArray(capacity); } else { @@ -4125,13 +4447,14 @@ MaybeObject* Heap::AllocateJSArrayAndStorage( MaybeObject* Heap::AllocateJSArrayWithElements( FixedArrayBase* elements, ElementsKind elements_kind, + int length, PretenureFlag pretenure) { MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure); JSArray* array; if (!maybe_array->To(&array)) return maybe_array; array->set_elements(elements); - array->set_length(Smi::FromInt(elements->length())); + array->set_length(Smi::FromInt(length)); array->ValidateElements(); return array; } @@ -4204,9 +4527,10 @@ MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) { int initial_size = map->instance_type() == JS_GLOBAL_OBJECT_TYPE ? 64 : 512; // Allocate a dictionary object for backing storage. 
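The extracted AllocateJSArrayStorage picks the backing store from the array's elements kind: zero capacity shares the empty fixed array, double kinds get a FixedDoubleArray, and the allocation mode decides whether slots are pre-filled with the hole. A standalone sketch (std::vector stands in for the fixed-array types; nullptr and NaN stand in for the hole marker):

// Backing-store selection for array storage (stand-in types).
#include <cmath>
#include <cstdio>
#include <vector>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS };
enum ArrayStorageAllocationMode {
  DONT_INITIALIZE_ARRAY_ELEMENTS,
  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
};

// Exactly one of the two vectors is used, mirroring FixedArray vs.
// FixedDoubleArray.
struct ArrayStorage {
  std::vector<const void*> object_elements;
  std::vector<double> double_elements;
  bool is_double;
};

static ArrayStorage AllocateJSArrayStorage(ElementsKind kind, int capacity,
                                           ArrayStorageAllocationMode mode) {
  ArrayStorage storage;
  storage.is_double = (kind == FAST_DOUBLE_ELEMENTS);
  if (capacity == 0) return storage;  // V8 reuses the shared empty array here

  if (storage.is_double) {
    storage.double_elements.resize(capacity);
    if (mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
      for (double& slot : storage.double_elements) slot = std::nan("");
    }
  } else {
    storage.object_elements.resize(capacity);
    if (mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
      for (const void*& slot : storage.object_elements) slot = nullptr;
    }
  }
  return storage;
}

int main() {
  ArrayStorage s = AllocateJSArrayStorage(FAST_DOUBLE_ELEMENTS, 4,
                                          INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
  std::printf("double store with %zu slots\n", s.double_elements.size());
  return 0;
}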
- StringDictionary* dictionary; + NameDictionary* dictionary; MaybeObject* maybe_dictionary = - StringDictionary::Allocate( + NameDictionary::Allocate( + this, map->NumberOfOwnDescriptors() * 2 + initial_size); if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; @@ -4280,6 +4604,7 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) { (object_size - JSObject::kHeaderSize) / kPointerSize); } else { wb_mode = SKIP_WRITE_BARRIER; + { MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size); if (!maybe_clone->ToObject(&clone)) return maybe_clone; } @@ -4323,6 +4648,113 @@ MaybeObject* Heap::CopyJSObject(JSObject* source) { } +MaybeObject* Heap::CopyJSObjectWithAllocationSite(JSObject* source) { + // Never used to copy functions. If functions need to be copied we + // have to be careful to clear the literals array. + SLOW_ASSERT(!source->IsJSFunction()); + + // Make the clone. + Map* map = source->map(); + int object_size = map->instance_size(); + Object* clone; + + ASSERT(map->CanTrackAllocationSite()); + ASSERT(map->instance_type() == JS_ARRAY_TYPE); + WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER; + + // If we're forced to always allocate, we use the general allocation + // functions which may leave us with an object in old space. + int adjusted_object_size = object_size; + if (always_allocate()) { + // We'll only track origin if we are certain to allocate in new space + const int kMinFreeNewSpaceAfterGC = InitialSemiSpaceSize() * 3/4; + if ((object_size + AllocationSiteInfo::kSize) < kMinFreeNewSpaceAfterGC) { + adjusted_object_size += AllocationSiteInfo::kSize; + } + + { MaybeObject* maybe_clone = + AllocateRaw(adjusted_object_size, NEW_SPACE, OLD_POINTER_SPACE); + if (!maybe_clone->ToObject(&clone)) return maybe_clone; + } + Address clone_address = HeapObject::cast(clone)->address(); + CopyBlock(clone_address, + source->address(), + object_size); + // Update write barrier for all fields that lie beyond the header. + int write_barrier_offset = adjusted_object_size > object_size + ? JSArray::kSize + AllocationSiteInfo::kSize + : JSObject::kHeaderSize; + if (((object_size - write_barrier_offset) / kPointerSize) > 0) { + RecordWrites(clone_address, + write_barrier_offset, + (object_size - write_barrier_offset) / kPointerSize); + } + + // Track allocation site information, if we failed to allocate it inline. + if (InNewSpace(clone) && + adjusted_object_size == object_size) { + MaybeObject* maybe_alloc_info = + AllocateStruct(ALLOCATION_SITE_INFO_TYPE); + AllocationSiteInfo* alloc_info; + if (maybe_alloc_info->To(&alloc_info)) { + alloc_info->set_map_no_write_barrier(allocation_site_info_map()); + alloc_info->set_payload(source, SKIP_WRITE_BARRIER); + } + } + } else { + wb_mode = SKIP_WRITE_BARRIER; + adjusted_object_size += AllocationSiteInfo::kSize; + + { MaybeObject* maybe_clone = new_space_.AllocateRaw(adjusted_object_size); + if (!maybe_clone->ToObject(&clone)) return maybe_clone; + } + SLOW_ASSERT(InNewSpace(clone)); + // Since we know the clone is allocated in new space, we can copy + // the contents without worrying about updating the write barrier. 
+ CopyBlock(HeapObject::cast(clone)->address(), + source->address(), + object_size); + } + + if (adjusted_object_size > object_size) { + AllocationSiteInfo* alloc_info = reinterpret_cast<AllocationSiteInfo*>( + reinterpret_cast<Address>(clone) + object_size); + alloc_info->set_map_no_write_barrier(allocation_site_info_map()); + alloc_info->set_payload(source, SKIP_WRITE_BARRIER); + } + + SLOW_ASSERT( + JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind()); + FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); + FixedArray* properties = FixedArray::cast(source->properties()); + // Update elements if necessary. + if (elements->length() > 0) { + Object* elem; + { MaybeObject* maybe_elem; + if (elements->map() == fixed_cow_array_map()) { + maybe_elem = FixedArray::cast(elements); + } else if (source->HasFastDoubleElements()) { + maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements)); + } else { + maybe_elem = CopyFixedArray(FixedArray::cast(elements)); + } + if (!maybe_elem->ToObject(&elem)) return maybe_elem; + } + JSObject::cast(clone)->set_elements(FixedArrayBase::cast(elem), wb_mode); + } + // Update properties if necessary. + if (properties->length() > 0) { + Object* prop; + { MaybeObject* maybe_prop = CopyFixedArray(properties); + if (!maybe_prop->ToObject(&prop)) return maybe_prop; + } + JSObject::cast(clone)->set_properties(FixedArray::cast(prop), wb_mode); + } + // Return the new clone. + return clone; +} + + MaybeObject* Heap::ReinitializeJSReceiver( JSReceiver* object, InstanceType type, int size) { ASSERT(type >= FIRST_JS_OBJECT_TYPE); @@ -4349,7 +4781,8 @@ MaybeObject* Heap::ReinitializeJSReceiver( SharedFunctionInfo* shared = NULL; if (type == JS_FUNCTION_TYPE) { String* name; - maybe = LookupAsciiSymbol("<freezing call trap>"); + maybe = + InternalizeOneByteString(STATIC_ASCII_VECTOR("<freezing call trap>")); if (!maybe->To<String>(&name)) return maybe; maybe = AllocateSharedFunctionInfo(name); if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe; @@ -4409,7 +4842,7 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor, } -MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string, +MaybeObject* Heap::AllocateStringFromOneByte(Vector<const uint8_t> string, PretenureFlag pretenure) { int length = string.length(); if (length == 1) { @@ -4417,12 +4850,14 @@ MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string, } Object* result; { MaybeObject* maybe_result = - AllocateRawAsciiString(string.length(), pretenure); + AllocateRawOneByteString(string.length(), pretenure); if (!maybe_result->ToObject(&result)) return maybe_result; } // Copy the characters into the new object. - CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length); + CopyChars(SeqOneByteString::cast(result)->GetChars(), + string.start(), + length); return result; } @@ -4432,37 +4867,31 @@ MaybeObject* Heap::AllocateStringFromUtf8Slow(Vector<const char> string, PretenureFlag pretenure) { // Continue counting the number of characters in the UTF-8 string, starting // from the first non-ascii character or word. 
- int chars = non_ascii_start; Access<UnicodeCache::Utf8Decoder> decoder(isolate_->unicode_cache()->utf8_decoder()); - decoder->Reset(string.start() + non_ascii_start, string.length() - chars); - while (decoder->has_more()) { - uint32_t r = decoder->GetNext(); - if (r <= unibrow::Utf16::kMaxNonSurrogateCharCode) { - chars++; - } else { - chars += 2; - } - } - + decoder->Reset(string.start() + non_ascii_start, + string.length() - non_ascii_start); + int utf16_length = decoder->Utf16Length(); + ASSERT(utf16_length > 0); + // Allocate string. Object* result; - { MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure); + { + int chars = non_ascii_start + utf16_length; + MaybeObject* maybe_result = AllocateRawTwoByteString(chars, pretenure); if (!maybe_result->ToObject(&result)) return maybe_result; } - // Convert and copy the characters into the new object. SeqTwoByteString* twobyte = SeqTwoByteString::cast(result); - decoder->Reset(string.start(), string.length()); - int i = 0; - while (i < chars) { - uint32_t r = decoder->GetNext(); - if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) { - twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::LeadSurrogate(r)); - twobyte->SeqTwoByteStringSet(i++, unibrow::Utf16::TrailSurrogate(r)); - } else { - twobyte->SeqTwoByteStringSet(i++, r); + // Copy ascii portion. + uint16_t* data = twobyte->GetChars(); + if (non_ascii_start != 0) { + const char* ascii_data = string.start(); + for (int i = 0; i < non_ascii_start; i++) { + *data++ = *ascii_data++; } } + // Now write the remainder. + decoder->WriteUtf16(data, utf16_length); return result; } @@ -4474,11 +4903,11 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string, int length = string.length(); const uc16* start = string.start(); - if (String::IsAscii(start, length)) { - MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure); + if (String::IsOneByte(start, length)) { + MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure); if (!maybe_result->ToObject(&result)) return maybe_result; - CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length); - } else { // It's not an ASCII string. + CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length); + } else { // It's not a one byte string. MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure); if (!maybe_result->ToObject(&result)) return maybe_result; CopyChars(SeqTwoByteString::cast(result)->GetChars(), start, length); @@ -4487,61 +4916,98 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string, } -Map* Heap::SymbolMapForString(String* string) { - // If the string is in new space it cannot be used as a symbol. +Map* Heap::InternalizedStringMapForString(String* string) { + // If the string is in new space it cannot be used as internalized. if (InNewSpace(string)) return NULL; - // Find the corresponding symbol map for strings. + // Find the corresponding internalized string map for strings. 
switch (string->map()->instance_type()) { - case STRING_TYPE: return symbol_map(); - case ASCII_STRING_TYPE: return ascii_symbol_map(); - case CONS_STRING_TYPE: return cons_symbol_map(); - case CONS_ASCII_STRING_TYPE: return cons_ascii_symbol_map(); - case EXTERNAL_STRING_TYPE: return external_symbol_map(); - case EXTERNAL_ASCII_STRING_TYPE: return external_ascii_symbol_map(); + case STRING_TYPE: return internalized_string_map(); + case ASCII_STRING_TYPE: return ascii_internalized_string_map(); + case CONS_STRING_TYPE: return cons_internalized_string_map(); + case CONS_ASCII_STRING_TYPE: return cons_ascii_internalized_string_map(); + case EXTERNAL_STRING_TYPE: return external_internalized_string_map(); + case EXTERNAL_ASCII_STRING_TYPE: + return external_ascii_internalized_string_map(); case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: - return external_symbol_with_ascii_data_map(); - case SHORT_EXTERNAL_STRING_TYPE: return short_external_symbol_map(); + return external_internalized_string_with_ascii_data_map(); + case SHORT_EXTERNAL_STRING_TYPE: + return short_external_internalized_string_map(); case SHORT_EXTERNAL_ASCII_STRING_TYPE: - return short_external_ascii_symbol_map(); + return short_external_ascii_internalized_string_map(); case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: - return short_external_symbol_with_ascii_data_map(); + return short_external_internalized_string_with_ascii_data_map(); default: return NULL; // No match found. } } -MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer, - int chars, - uint32_t hash_field) { - ASSERT(chars >= 0); - // Ensure the chars matches the number of characters in the buffer. - ASSERT(static_cast<unsigned>(chars) == buffer->Utf16Length()); - // Determine whether the string is ASCII. - bool is_ascii = true; - while (buffer->has_more()) { - if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) { - is_ascii = false; - break; +static inline void WriteOneByteData(Vector<const char> vector, + uint8_t* chars, + int len) { + // Only works for ascii. + ASSERT(vector.length() == len); + memcpy(chars, vector.start(), len); +} + +static inline void WriteTwoByteData(Vector<const char> vector, + uint16_t* chars, + int len) { + const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start()); + unsigned stream_length = vector.length(); + while (stream_length != 0) { + unsigned consumed = 0; + uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed); + ASSERT(c != unibrow::Utf8::kBadChar); + ASSERT(consumed <= stream_length); + stream_length -= consumed; + stream += consumed; + if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { + len -= 2; + if (len < 0) break; + *chars++ = unibrow::Utf16::LeadSurrogate(c); + *chars++ = unibrow::Utf16::TrailSurrogate(c); + } else { + len -= 1; + if (len < 0) break; + *chars++ = c; } } - buffer->Rewind(); + ASSERT(stream_length == 0); + ASSERT(len == 0); +} + + +static inline void WriteOneByteData(String* s, uint8_t* chars, int len) { + ASSERT(s->length() == len); + String::WriteToFlat(s, chars, 0, len); +} + +static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) { + ASSERT(s->length() == len); + String::WriteToFlat(s, chars, 0, len); +} + +template<bool is_one_byte, typename T> +MaybeObject* Heap::AllocateInternalizedStringImpl( + T t, int chars, uint32_t hash_field) { + ASSERT(chars >= 0); // Compute map and object size. 
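WriteTwoByteData and the rewritten AllocateStringFromUtf8Slow emit one UTF-16 unit for BMP code points and a lead/trail surrogate pair for anything above U+FFFF. A self-contained sketch of that writer; the surrogate helpers are written out here, whereas the real code uses unibrow::Utf16:

// Writing UTF-16 units with surrogate pairs for supplementary code points.
#include <cstdint>
#include <cstdio>
#include <vector>

static const uint32_t kMaxNonSurrogateCharCode = 0xFFFF;

static uint16_t LeadSurrogate(uint32_t c) {
  return 0xD800 + (((c - 0x10000) >> 10) & 0x3FF);
}
static uint16_t TrailSurrogate(uint32_t c) {
  return 0xDC00 + ((c - 0x10000) & 0x3FF);
}

static void WriteUtf16(const std::vector<uint32_t>& code_points,
                       std::vector<uint16_t>* out) {
  for (uint32_t c : code_points) {
    if (c > kMaxNonSurrogateCharCode) {
      out->push_back(LeadSurrogate(c));   // two units for non-BMP characters
      out->push_back(TrailSurrogate(c));
    } else {
      out->push_back(static_cast<uint16_t>(c));
    }
  }
}

int main() {
  std::vector<uint16_t> units;
  WriteUtf16({0x41, 0x1F600}, &units);  // 'A' plus a code point outside the BMP
  std::printf("utf16 length = %zu\n", units.size());  // prints 3
  return 0;
}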
int size; Map* map; - if (is_ascii) { - if (chars > SeqAsciiString::kMaxLength) { - return Failure::OutOfMemoryException(); + if (is_one_byte) { + if (chars > SeqOneByteString::kMaxLength) { + return Failure::OutOfMemoryException(0x9); } - map = ascii_symbol_map(); - size = SeqAsciiString::SizeFor(chars); + map = ascii_internalized_string_map(); + size = SeqOneByteString::SizeFor(chars); } else { if (chars > SeqTwoByteString::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0xa); } - map = symbol_map(); + map = internalized_string_map(); size = SeqTwoByteString::SizeFor(chars); } @@ -4561,28 +5027,34 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer, ASSERT_EQ(size, answer->Size()); - // Fill in the characters. - int i = 0; - while (i < chars) { - uint32_t character = buffer->GetNext(); - if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) { - answer->Set(i++, unibrow::Utf16::LeadSurrogate(character)); - answer->Set(i++, unibrow::Utf16::TrailSurrogate(character)); - } else { - answer->Set(i++, character); - } + if (is_one_byte) { + WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars); + } else { + WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars); } return answer; } -MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) { - if (length < 0 || length > SeqAsciiString::kMaxLength) { - return Failure::OutOfMemoryException(); +// Need explicit instantiations. +template +MaybeObject* Heap::AllocateInternalizedStringImpl<true>(String*, int, uint32_t); +template +MaybeObject* Heap::AllocateInternalizedStringImpl<false>( + String*, int, uint32_t); +template +MaybeObject* Heap::AllocateInternalizedStringImpl<false>( + Vector<const char>, int, uint32_t); + + +MaybeObject* Heap::AllocateRawOneByteString(int length, + PretenureFlag pretenure) { + if (length < 0 || length > SeqOneByteString::kMaxLength) { + return Failure::OutOfMemoryException(0xb); } - int size = SeqAsciiString::SizeFor(length); - ASSERT(size <= SeqAsciiString::kMaxSize); + int size = SeqOneByteString::SizeFor(length); + ASSERT(size <= SeqOneByteString::kMaxSize); AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; AllocationSpace retry_space = OLD_DATA_SPACE; @@ -4610,15 +5082,6 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) { String::cast(result)->set_hash_field(String::kEmptyHashField); ASSERT_EQ(size, HeapObject::cast(result)->Size()); -#ifdef VERIFY_HEAP - if (FLAG_verify_heap) { - // Initialize string's content to ensure ASCII-ness (character range 0-127) - // as required when verifying the heap. 
- char* dest = SeqAsciiString::cast(result)->GetChars(); - memset(dest, 0x0F, length * kCharSize); - } -#endif - return result; } @@ -4626,7 +5089,7 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) { MaybeObject* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) { if (length < 0 || length > SeqTwoByteString::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0xc); } int size = SeqTwoByteString::SizeFor(length); ASSERT(size <= SeqTwoByteString::kMaxSize); @@ -4678,6 +5141,25 @@ MaybeObject* Heap::AllocateJSArray( } +MaybeObject* Heap::AllocateJSArrayWithAllocationSite( + ElementsKind elements_kind, + Handle<Object> allocation_site_info_payload) { + Context* native_context = isolate()->context()->native_context(); + JSFunction* array_function = native_context->array_function(); + Map* map = array_function->initial_map(); + Object* maybe_map_array = native_context->js_array_maps(); + if (!maybe_map_array->IsUndefined()) { + Object* maybe_transitioned_map = + FixedArray::cast(maybe_map_array)->get(elements_kind); + if (!maybe_transitioned_map->IsUndefined()) { + map = Map::cast(maybe_transitioned_map); + } + } + return AllocateJSObjectFromMapWithAllocationSite(map, + allocation_site_info_payload); +} + + MaybeObject* Heap::AllocateEmptyFixedArray() { int size = FixedArray::SizeFor(0); Object* result; @@ -4695,7 +5177,7 @@ MaybeObject* Heap::AllocateEmptyFixedArray() { MaybeObject* Heap::AllocateRawFixedArray(int length) { if (length < 0 || length > FixedArray::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0xd); } ASSERT(length > 0); // Use the general function if we're forced to always allocate. @@ -4771,7 +5253,7 @@ MaybeObject* Heap::AllocateFixedArray(int length) { MaybeObject* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) { if (length < 0 || length > FixedArray::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0xe); } AllocationSpace space = @@ -4904,7 +5386,7 @@ MaybeObject* Heap::AllocateFixedDoubleArrayWithHoles( MaybeObject* Heap::AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure) { if (length < 0 || length > FixedDoubleArray::kMaxLength) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0xf); } AllocationSpace space = @@ -4948,6 +5430,34 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) { } +MaybeObject* Heap::AllocateSymbol(PretenureFlag pretenure) { + // Statically ensure that it is safe to allocate symbols in paged spaces. + STATIC_ASSERT(Symbol::kSize <= Page::kNonCodeObjectAreaSize); + AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; + + Object* result; + MaybeObject* maybe = AllocateRaw(Symbol::kSize, space, OLD_DATA_SPACE); + if (!maybe->ToObject(&result)) return maybe; + + HeapObject::cast(result)->set_map_no_write_barrier(symbol_map()); + + // Generate a random hash value. 
+ int hash; + int attempts = 0; + do { + hash = V8::RandomPrivate(isolate()) & Name::kHashBitMask; + attempts++; + } while (hash == 0 && attempts < 30); + if (hash == 0) hash = 1; // never return 0 + + Symbol::cast(result)->set_hash_field( + Name::kIsNotArrayIndexMask | (hash << Name::kHashShift)); + + ASSERT(result->IsSymbol()); + return result; +} + + MaybeObject* Heap::AllocateNativeContext() { Object* result; { MaybeObject* maybe_result = @@ -4990,7 +5500,7 @@ MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) { } Context* context = reinterpret_cast<Context*>(result); context->set_map_no_write_barrier(module_context_map()); - // Context links will be set later. + // Instance link will be set later. context->set_extension(Smi::FromInt(0)); return context; } @@ -5077,6 +5587,20 @@ MaybeObject* Heap::AllocateScopeInfo(int length) { } +MaybeObject* Heap::AllocateExternal(void* value) { + Foreign* foreign; + { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value)); + if (!maybe_result->To(&foreign)) return maybe_result; + } + JSObject* external; + { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map()); + if (!maybe_result->To(&external)) return maybe_result; + } + external->SetInternalField(0, foreign); + return external; +} + + MaybeObject* Heap::AllocateStruct(InstanceType type) { Map* map; switch (type) { @@ -5166,10 +5690,6 @@ bool Heap::IdleNotification(int hint) { AdvanceIdleIncrementalMarking(step_size); contexts_disposed_ = 0; } - // Make sure that we have no pending context disposals. - // Take into account that we might have decided to delay full collection - // because incremental marking is in progress. - ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped()); // After context disposal there is likely a lot of garbage remaining, reset // the idle notification counters in order to trigger more incremental GCs // on subsequent idle notifications. @@ -5190,9 +5710,9 @@ bool Heap::IdleNotification(int hint) { // 3. many lazy sweep steps. // Use mark-sweep-compact events to count incremental GCs in a round. 
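The "Generate a random hash value" comment above covers a small but important detail: a hash field of zero means "not yet computed", so the freshly allocated Symbol retries the random draw a bounded number of times and forces 1 if it keeps getting zero. A standalone sketch of that scheme, not V8 code:

#include <cstdint>
#include <random>

uint32_t NewSymbolHash(std::mt19937& rng, uint32_t hash_bit_mask) {
  uint32_t hash = 0;
  // Retry a bounded number of times; zero is reserved for "no hash yet".
  for (int attempts = 0; hash == 0 && attempts < 30; ++attempts) {
    hash = rng() & hash_bit_mask;
  }
  return hash == 0 ? 1u : hash;
}

The real code then folds the value into the hash field together with the "is not an array index" bit before storing it on the Symbol.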
- if (incremental_marking()->IsStopped()) { - if (!IsSweepingComplete() && + if (!mark_compact_collector()->AreSweeperThreadsActivated() && + !IsSweepingComplete() && !AdvanceSweepers(static_cast<int>(step_size))) { return false; } @@ -5303,9 +5823,10 @@ bool Heap::IdleGlobalGC() { void Heap::Print() { if (!HasBeenSetUp()) return; isolate()->PrintStack(); - AllSpaces spaces; - for (Space* space = spaces.next(); space != NULL; space = spaces.next()) + AllSpaces spaces(this); + for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { space->Print(); + } } @@ -5334,7 +5855,7 @@ void Heap::ReportHeapStatistics(const char* title) { PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_); PrintF("\n"); - PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles()); + PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_)); isolate_->global_handles()->PrintStats(); PrintF("\n"); @@ -5431,95 +5952,96 @@ void Heap::Verify() { #endif -MaybeObject* Heap::LookupSymbol(Vector<const char> string) { - Object* symbol = NULL; +MaybeObject* Heap::InternalizeUtf8String(Vector<const char> string) { + Object* result = NULL; Object* new_table; { MaybeObject* maybe_new_table = - symbol_table()->LookupSymbol(string, &symbol); + string_table()->LookupUtf8String(string, &result); if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; } - // Can't use set_symbol_table because SymbolTable::cast knows that - // SymbolTable is a singleton and checks for identity. - roots_[kSymbolTableRootIndex] = new_table; - ASSERT(symbol != NULL); - return symbol; + // Can't use set_string_table because StringTable::cast knows that + // StringTable is a singleton and checks for identity. + roots_[kStringTableRootIndex] = new_table; + ASSERT(result != NULL); + return result; } -MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) { - Object* symbol = NULL; +MaybeObject* Heap::InternalizeOneByteString(Vector<const uint8_t> string) { + Object* result = NULL; Object* new_table; { MaybeObject* maybe_new_table = - symbol_table()->LookupAsciiSymbol(string, &symbol); + string_table()->LookupOneByteString(string, &result); if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; } - // Can't use set_symbol_table because SymbolTable::cast knows that - // SymbolTable is a singleton and checks for identity. - roots_[kSymbolTableRootIndex] = new_table; - ASSERT(symbol != NULL); - return symbol; + // Can't use set_string_table because StringTable::cast knows that + // StringTable is a singleton and checks for identity. + roots_[kStringTableRootIndex] = new_table; + ASSERT(result != NULL); + return result; } -MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string, +MaybeObject* Heap::InternalizeOneByteString(Handle<SeqOneByteString> string, int from, int length) { - Object* symbol = NULL; + Object* result = NULL; Object* new_table; { MaybeObject* maybe_new_table = - symbol_table()->LookupSubStringAsciiSymbol(string, + string_table()->LookupSubStringOneByteString(string, from, length, - &symbol); + &result); if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; } - // Can't use set_symbol_table because SymbolTable::cast knows that - // SymbolTable is a singleton and checks for identity. - roots_[kSymbolTableRootIndex] = new_table; - ASSERT(symbol != NULL); - return symbol; + // Can't use set_string_table because StringTable::cast knows that + // StringTable is a singleton and checks for identity. 
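The renamed Internalize* entry points above all follow the same contract: look the string up in the string table, inserting it if absent, and hand back the canonical copy; because the insertion may grow and therefore replace the table, the heap writes the (possibly new) table straight into roots_[kStringTableRootIndex] instead of going through a checked setter. A generic sketch of the lookup-or-insert contract, with std::unordered_set standing in for the string table:

#include <string>
#include <unordered_set>

// Returns a pointer to the canonical copy; inserting is a no-op if the
// string is already present.
const std::string* Internalize(std::unordered_set<std::string>& table,
                               const std::string& s) {
  return &*table.insert(s).first;
}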
+ roots_[kStringTableRootIndex] = new_table; + ASSERT(result != NULL); + return result; } -MaybeObject* Heap::LookupTwoByteSymbol(Vector<const uc16> string) { - Object* symbol = NULL; +MaybeObject* Heap::InternalizeTwoByteString(Vector<const uc16> string) { + Object* result = NULL; Object* new_table; { MaybeObject* maybe_new_table = - symbol_table()->LookupTwoByteSymbol(string, &symbol); + string_table()->LookupTwoByteString(string, &result); if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; } - // Can't use set_symbol_table because SymbolTable::cast knows that - // SymbolTable is a singleton and checks for identity. - roots_[kSymbolTableRootIndex] = new_table; - ASSERT(symbol != NULL); - return symbol; + // Can't use set_string_table because StringTable::cast knows that + // StringTable is a singleton and checks for identity. + roots_[kStringTableRootIndex] = new_table; + ASSERT(result != NULL); + return result; } -MaybeObject* Heap::LookupSymbol(String* string) { - if (string->IsSymbol()) return string; - Object* symbol = NULL; +MaybeObject* Heap::InternalizeString(String* string) { + if (string->IsInternalizedString()) return string; + Object* result = NULL; Object* new_table; { MaybeObject* maybe_new_table = - symbol_table()->LookupString(string, &symbol); + string_table()->LookupString(string, &result); if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; } - // Can't use set_symbol_table because SymbolTable::cast knows that - // SymbolTable is a singleton and checks for identity. - roots_[kSymbolTableRootIndex] = new_table; - ASSERT(symbol != NULL); - return symbol; + // Can't use set_string_table because StringTable::cast knows that + // StringTable is a singleton and checks for identity. + roots_[kStringTableRootIndex] = new_table; + ASSERT(result != NULL); + return result; } -bool Heap::LookupSymbolIfExists(String* string, String** symbol) { - if (string->IsSymbol()) { - *symbol = string; +bool Heap::InternalizeStringIfExists(String* string, String** result) { + if (string->IsInternalizedString()) { + *result = string; return true; } - return symbol_table()->LookupSymbolIfExists(string, symbol); + return string_table()->LookupStringIfExists(string, result); } + void Heap::ZapFromSpace() { NewSpacePageIterator it(new_space_.FromSpaceStart(), new_space_.FromSpaceEnd()); @@ -5744,12 +6266,13 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { - v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); - v->Synchronize(VisitorSynchronization::kSymbolTable); + v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex])); + v->Synchronize(VisitorSynchronization::kStringTable); if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) { // Scavenge collections have special processing for this. 
external_string_table_.Iterate(v); + error_object_list_.Iterate(v); } v->Synchronize(VisitorSynchronization::kExternalStringsTable); } @@ -5759,8 +6282,8 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) { v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]); v->Synchronize(VisitorSynchronization::kStrongRootList); - v->VisitPointer(BitCast<Object**>(&hidden_symbol_)); - v->Synchronize(VisitorSynchronization::kSymbol); + v->VisitPointer(BitCast<Object**>(&hidden_string_)); + v->Synchronize(VisitorSynchronization::kInternalizedString); isolate_->bootstrapper()->Iterate(v); v->Synchronize(VisitorSynchronization::kBootstrapper); @@ -5931,7 +6454,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) { *stats->os_error = OS::GetLastError(); isolate()->memory_allocator()->Available(); if (take_snapshot) { - HeapIterator iterator; + HeapIterator iterator(this); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { @@ -5961,172 +6484,6 @@ intptr_t Heap::PromotedExternalMemorySize() { - amount_of_external_allocated_memory_at_last_global_gc_; } -#ifdef DEBUG - -// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject. -static const int kMarkTag = 2; - - -class HeapDebugUtils { - public: - explicit HeapDebugUtils(Heap* heap) - : search_for_any_global_(false), - search_target_(NULL), - found_target_(false), - object_stack_(20), - heap_(heap) { - } - - class MarkObjectVisitor : public ObjectVisitor { - public: - explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { } - - void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - utils_->MarkObjectRecursively(p); - } - } - - HeapDebugUtils* utils_; - }; - - void MarkObjectRecursively(Object** p) { - if (!(*p)->IsHeapObject()) return; - - HeapObject* obj = HeapObject::cast(*p); - - Object* map = obj->map(); - - if (!map->IsHeapObject()) return; // visited before - - if (found_target_) return; // stop if target found - object_stack_.Add(obj); - if ((search_for_any_global_ && obj->IsJSGlobalObject()) || - (!search_for_any_global_ && (obj == search_target_))) { - found_target_ = true; - return; - } - - // not visited yet - Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); - - Address map_addr = map_p->address(); - - obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag)); - - MarkObjectRecursively(&map); - - MarkObjectVisitor mark_visitor(this); - - obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p), - &mark_visitor); - - if (!found_target_) // don't pop if found the target - object_stack_.RemoveLast(); - } - - - class UnmarkObjectVisitor : public ObjectVisitor { - public: - explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { } - - void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - utils_->UnmarkObjectRecursively(p); - } - } - - HeapDebugUtils* utils_; - }; - - - void UnmarkObjectRecursively(Object** p) { - if (!(*p)->IsHeapObject()) return; - - HeapObject* obj = HeapObject::cast(*p); - - Object* map = obj->map(); - - if (map->IsHeapObject()) return; // unmarked already - - Address map_addr = reinterpret_cast<Address>(map); - - map_addr -= kMarkTag; - - ASSERT_TAG_ALIGNED(map_addr); - - HeapObject* map_p = HeapObject::FromAddress(map_addr); - - 
obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p)); - - UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p)); - - UnmarkObjectVisitor unmark_visitor(this); - - obj->IterateBody(Map::cast(map_p)->instance_type(), - obj->SizeFromMap(Map::cast(map_p)), - &unmark_visitor); - } - - - void MarkRootObjectRecursively(Object** root) { - if (search_for_any_global_) { - ASSERT(search_target_ == NULL); - } else { - ASSERT(search_target_->IsHeapObject()); - } - found_target_ = false; - object_stack_.Clear(); - - MarkObjectRecursively(root); - UnmarkObjectRecursively(root); - - if (found_target_) { - PrintF("=====================================\n"); - PrintF("==== Path to object ====\n"); - PrintF("=====================================\n\n"); - - ASSERT(!object_stack_.is_empty()); - for (int i = 0; i < object_stack_.length(); i++) { - if (i > 0) PrintF("\n |\n |\n V\n\n"); - Object* obj = object_stack_[i]; - obj->Print(); - } - PrintF("=====================================\n"); - } - } - - // Helper class for visiting HeapObjects recursively. - class MarkRootVisitor: public ObjectVisitor { - public: - explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { } - - void VisitPointers(Object** start, Object** end) { - // Visit all HeapObject pointers in [start, end) - for (Object** p = start; p < end; p++) { - if ((*p)->IsHeapObject()) - utils_->MarkRootObjectRecursively(p); - } - } - - HeapDebugUtils* utils_; - }; - - bool search_for_any_global_; - Object* search_target_; - bool found_target_; - List<Object*> object_stack_; - Heap* heap_; - - friend class Heap; -}; - -#endif - V8_DECLARE_ONCE(initialize_gc_once); @@ -6136,10 +6493,9 @@ static void InitializeGCOnce() { MarkCompactCollector::Initialize(); } -bool Heap::SetUp(bool create_heap_objects) { +bool Heap::SetUp() { #ifdef DEBUG allocation_timeout_ = FLAG_gc_interval; - debug_utils_ = new HeapDebugUtils(this); #endif // Initialize heap spaces and initial maps and objects. Whenever something @@ -6228,27 +6584,31 @@ bool Heap::SetUp(bool create_heap_objects) { } } - if (create_heap_objects) { - // Create initial maps. - if (!CreateInitialMaps()) return false; - if (!CreateApiObjects()) return false; - - // Create initial objects - if (!CreateInitialObjects()) return false; - - native_contexts_list_ = undefined_value(); - } - LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); LOG(isolate_, IntPtrTEvent("heap-available", Available())); store_buffer()->SetUp(); if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex(); +#ifdef DEBUG + relocation_mutex_locked_ = false; +#endif // DEBUG return true; } +bool Heap::CreateHeapObjects() { + // Create initial maps. 
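Heap::SetUp() above no longer takes a create_heap_objects flag: preparing the spaces and the memory allocator is now separated from building the initial maps and objects, which moves into the new Heap::CreateHeapObjects() (useful when those objects will come from a deserialized snapshot instead). A sketch of that two-phase initialization, with names that are illustrative rather than the real interfaces:

class DemoHeap {
 public:
  bool SetUp() { spaces_ready_ = true; return true; }  // reserve memory only
  bool CreateHeapObjects() {                            // build initial objects
    if (!spaces_ready_) return false;  // must follow a successful SetUp()
    objects_ready_ = true;
    return true;
  }
 private:
  bool spaces_ready_ = false;
  bool objects_ready_ = false;
};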
+ if (!CreateInitialMaps()) return false; + if (!CreateApiObjects()) return false; + + // Create initial objects + if (!CreateInitialObjects()) return false; + + native_contexts_list_ = undefined_value(); + return true; +} + void Heap::SetStackLimits() { ASSERT(isolate_ != NULL); @@ -6275,14 +6635,16 @@ void Heap::TearDown() { #endif if (FLAG_print_cumulative_gc_stat) { - PrintF("\n\n"); + PrintF("\n"); PrintF("gc_count=%d ", gc_count_); PrintF("mark_sweep_count=%d ", ms_count_); - PrintF("max_gc_pause=%d ", get_max_gc_pause()); - PrintF("total_gc_time=%d ", total_gc_time_ms_); - PrintF("min_in_mutator=%d ", get_min_in_mutator()); + PrintF("max_gc_pause=%.1f ", get_max_gc_pause()); + PrintF("total_gc_time=%.1f ", total_gc_time_ms_); + PrintF("min_in_mutator=%.1f ", get_min_in_mutator()); PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc()); + PrintF("total_marking_time=%.1f ", marking_time()); + PrintF("total_sweeping_time=%.1f ", sweeping_time()); PrintF("\n\n"); } @@ -6290,6 +6652,8 @@ void Heap::TearDown() { external_string_table_.TearDown(); + error_object_list_.TearDown(); + new_space_.TearDown(); if (old_pointer_space_ != NULL) { @@ -6334,22 +6698,6 @@ void Heap::TearDown() { isolate_->memory_allocator()->TearDown(); delete relocation_mutex_; - -#ifdef DEBUG - delete debug_utils_; - debug_utils_ = NULL; -#endif -} - - -void Heap::Shrink() { - // Try to shrink all paged spaces. - PagedSpaces spaces; - for (PagedSpace* space = spaces.next(); - space != NULL; - space = spaces.next()) { - space->ReleaseAllUnusedPages(); - } } @@ -6417,19 +6765,19 @@ void Heap::PrintHandles() { Space* AllSpaces::next() { switch (counter_++) { case NEW_SPACE: - return HEAP->new_space(); + return heap_->new_space(); case OLD_POINTER_SPACE: - return HEAP->old_pointer_space(); + return heap_->old_pointer_space(); case OLD_DATA_SPACE: - return HEAP->old_data_space(); + return heap_->old_data_space(); case CODE_SPACE: - return HEAP->code_space(); + return heap_->code_space(); case MAP_SPACE: - return HEAP->map_space(); + return heap_->map_space(); case CELL_SPACE: - return HEAP->cell_space(); + return heap_->cell_space(); case LO_SPACE: - return HEAP->lo_space(); + return heap_->lo_space(); default: return NULL; } @@ -6439,15 +6787,15 @@ Space* AllSpaces::next() { PagedSpace* PagedSpaces::next() { switch (counter_++) { case OLD_POINTER_SPACE: - return HEAP->old_pointer_space(); + return heap_->old_pointer_space(); case OLD_DATA_SPACE: - return HEAP->old_data_space(); + return heap_->old_data_space(); case CODE_SPACE: - return HEAP->code_space(); + return heap_->code_space(); case MAP_SPACE: - return HEAP->map_space(); + return heap_->map_space(); case CELL_SPACE: - return HEAP->cell_space(); + return heap_->cell_space(); default: return NULL; } @@ -6458,26 +6806,28 @@ PagedSpace* PagedSpaces::next() { OldSpace* OldSpaces::next() { switch (counter_++) { case OLD_POINTER_SPACE: - return HEAP->old_pointer_space(); + return heap_->old_pointer_space(); case OLD_DATA_SPACE: - return HEAP->old_data_space(); + return heap_->old_data_space(); case CODE_SPACE: - return HEAP->code_space(); + return heap_->code_space(); default: return NULL; } } -SpaceIterator::SpaceIterator() - : current_space_(FIRST_SPACE), +SpaceIterator::SpaceIterator(Heap* heap) + : heap_(heap), + current_space_(FIRST_SPACE), iterator_(NULL), size_func_(NULL) { } -SpaceIterator::SpaceIterator(HeapObjectCallback size_func) - : current_space_(FIRST_SPACE), +SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func) + : 
heap_(heap), + current_space_(FIRST_SPACE), iterator_(NULL), size_func_(size_func) { } @@ -6517,25 +6867,26 @@ ObjectIterator* SpaceIterator::CreateIterator() { switch (current_space_) { case NEW_SPACE: - iterator_ = new SemiSpaceIterator(HEAP->new_space(), size_func_); + iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_); break; case OLD_POINTER_SPACE: - iterator_ = new HeapObjectIterator(HEAP->old_pointer_space(), size_func_); + iterator_ = + new HeapObjectIterator(heap_->old_pointer_space(), size_func_); break; case OLD_DATA_SPACE: - iterator_ = new HeapObjectIterator(HEAP->old_data_space(), size_func_); + iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_); break; case CODE_SPACE: - iterator_ = new HeapObjectIterator(HEAP->code_space(), size_func_); + iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_); break; case MAP_SPACE: - iterator_ = new HeapObjectIterator(HEAP->map_space(), size_func_); + iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_); break; case CELL_SPACE: - iterator_ = new HeapObjectIterator(HEAP->cell_space(), size_func_); + iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_); break; case LO_SPACE: - iterator_ = new LargeObjectIterator(HEAP->lo_space(), size_func_); + iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_); break; } @@ -6606,15 +6957,18 @@ class UnreachableObjectsFilter : public HeapObjectsFilter { }; -HeapIterator::HeapIterator() - : filtering_(HeapIterator::kNoFiltering), +HeapIterator::HeapIterator(Heap* heap) + : heap_(heap), + filtering_(HeapIterator::kNoFiltering), filter_(NULL) { Init(); } -HeapIterator::HeapIterator(HeapIterator::HeapObjectsFiltering filtering) - : filtering_(filtering), +HeapIterator::HeapIterator(Heap* heap, + HeapIterator::HeapObjectsFiltering filtering) + : heap_(heap), + filtering_(filtering), filter_(NULL) { Init(); } @@ -6627,7 +6981,7 @@ HeapIterator::~HeapIterator() { void HeapIterator::Init() { // Start the iteration. - space_iterator_ = new SpaceIterator; + space_iterator_ = new SpaceIterator(heap_); switch (filtering_) { case kFilterUnreachable: filter_ = new UnreachableObjectsFilter; @@ -6694,7 +7048,7 @@ void HeapIterator::reset() { } -#if defined(DEBUG) || defined(LIVE_OBJECT_LIST) +#ifdef DEBUG Object* const PathTracer::kAnyGlobalObject = reinterpret_cast<Object*>(NULL); @@ -6861,10 +7215,8 @@ void PathTracer::ProcessResults() { PrintF("=====================================\n"); } } -#endif // DEBUG || LIVE_OBJECT_LIST -#ifdef DEBUG // Triggers a depth-first traversal of reachable objects from one // given root object and finds a path to a specific heap object and // prints it. 
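The space and heap iterators above now carry an explicit Heap* instead of reading the process-global HEAP macro, so the same code can serve more than one heap. A compact sketch of the counter-plus-switch enumeration style they use, with illustrative types:

struct DemoSpace { const char* name; };
struct DemoSpaces { DemoSpace new_space, old_space, code_space; };

class AllSpacesDemo {
 public:
  explicit AllSpacesDemo(DemoSpaces* spaces) : spaces_(spaces), counter_(0) {}
  DemoSpace* next() {
    switch (counter_++) {
      case 0: return &spaces_->new_space;
      case 1: return &spaces_->old_space;
      case 2: return &spaces_->code_space;
      default: return nullptr;  // enumeration exhausted
    }
  }
 private:
  DemoSpaces* spaces_;
  int counter_;
};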
@@ -6894,9 +7246,9 @@ void Heap::TracePathToGlobal() { #endif -static intptr_t CountTotalHolesSize() { +static intptr_t CountTotalHolesSize(Heap* heap) { intptr_t holes_size = 0; - OldSpaces spaces; + OldSpaces spaces(heap); for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { @@ -6917,6 +7269,9 @@ GCTracer::GCTracer(Heap* heap, allocated_since_last_gc_(0), spent_in_mutator_(0), promoted_objects_size_(0), + nodes_died_in_new_space_(0), + nodes_copied_in_new_space_(0), + nodes_promoted_(0), heap_(heap), gc_reason_(gc_reason), collector_reason_(collector_reason) { @@ -6929,7 +7284,7 @@ GCTracer::GCTracer(Heap* heap, scopes_[i] = 0; } - in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(); + in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap); allocated_since_last_gc_ = heap_->SizeOfObjects() - heap_->alive_after_last_gc_; @@ -6957,7 +7312,7 @@ GCTracer::~GCTracer() { heap_->alive_after_last_gc_ = heap_->SizeOfObjects(); heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis(); - int time = static_cast<int>(heap_->last_gc_end_timestamp_ - start_time_); + double time = heap_->last_gc_end_timestamp_ - start_time_; // Update cumulative GC statistics if required. if (FLAG_print_cumulative_gc_stat) { @@ -6967,7 +7322,7 @@ GCTracer::~GCTracer() { heap_->alive_after_last_gc_); if (!first_gc) { heap_->min_in_mutator_ = Min(heap_->min_in_mutator_, - static_cast<int>(spent_in_mutator_)); + spent_in_mutator_); } } else if (FLAG_trace_gc_verbose) { heap_->total_gc_time_ms_ += time; @@ -6975,6 +7330,9 @@ GCTracer::~GCTracer() { if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return; + heap_->AddMarkingTime(scopes_[Scope::MC_MARK]); + + if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return; PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init()); if (!FLAG_trace_gc_nvp) { @@ -6991,16 +7349,16 @@ GCTracer::~GCTracer() { end_memory_size_mb); if (external_time > 0) PrintF("%d / ", external_time); - PrintF("%d ms", time); + PrintF("%.1f ms", time); if (steps_count_ > 0) { if (collector_ == SCAVENGER) { - PrintF(" (+ %d ms in %d steps since last GC)", - static_cast<int>(steps_took_since_last_gc_), + PrintF(" (+ %.1f ms in %d steps since last GC)", + steps_took_since_last_gc_, steps_count_since_last_gc_); } else { - PrintF(" (+ %d ms in %d steps since start of marking, " - "biggest step %f ms)", - static_cast<int>(steps_took_), + PrintF(" (+ %.1f ms in %d steps since start of marking, " + "biggest step %.1f ms)", + steps_took_, steps_count_, longest_step_); } @@ -7016,8 +7374,8 @@ GCTracer::~GCTracer() { PrintF(".\n"); } else { - PrintF("pause=%d ", time); - PrintF("mutator=%d ", static_cast<int>(spent_in_mutator_)); + PrintF("pause=%.1f ", time); + PrintF("mutator=%.1f ", spent_in_mutator_); PrintF("gc="); switch (collector_) { case SCAVENGER: @@ -7031,39 +7389,39 @@ GCTracer::~GCTracer() { } PrintF(" "); - PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL])); - PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK])); - PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP])); - PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE])); - PrintF("evacuate=%d ", static_cast<int>(scopes_[Scope::MC_EVACUATE_PAGES])); - PrintF("new_new=%d ", - static_cast<int>(scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS])); - PrintF("root_new=%d ", - static_cast<int>(scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS])); - PrintF("old_new=%d ", - static_cast<int>(scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS])); - 
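GCTracer above now keeps pause and mutator times as doubles (and, below, prints them with "%.1f") rather than truncating to whole milliseconds, so sub-millisecond scavenges stay visible in the traces. A small stand-alone illustration of measuring and reporting that way:

#include <chrono>
#include <cstdio>

double ElapsedMs(std::chrono::steady_clock::time_point start) {
  return std::chrono::duration<double, std::milli>(
             std::chrono::steady_clock::now() - start).count();
}

void ReportPause(std::chrono::steady_clock::time_point start) {
  std::printf("pause=%.1f ms\n", ElapsedMs(start));  // keeps fractional ms
}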
PrintF("compaction_ptrs=%d ", - static_cast<int>(scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED])); - PrintF("intracompaction_ptrs=%d ", static_cast<int>(scopes_[ - Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED])); - PrintF("misc_compaction=%d ", - static_cast<int>(scopes_[Scope::MC_UPDATE_MISC_POINTERS])); + PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]); + PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]); + PrintF("sweep=%.1f ", scopes_[Scope::MC_SWEEP]); + PrintF("sweepns=%.1f ", scopes_[Scope::MC_SWEEP_NEWSPACE]); + PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]); + PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]); + PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]); + PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]); + PrintF("compaction_ptrs=%.1f ", + scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]); + PrintF("intracompaction_ptrs=%.1f ", + scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]); + PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]); PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_); PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects()); PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", in_free_list_or_wasted_before_gc_); - PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize()); + PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_)); PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_); PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_); + PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_); + PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_); + PrintF("nodes_promoted=%d ", nodes_promoted_); if (collector_ == SCAVENGER) { PrintF("stepscount=%d ", steps_count_since_last_gc_); - PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_)); + PrintF("stepstook=%.1f ", steps_took_since_last_gc_); } else { PrintF("stepscount=%d ", steps_count_); - PrintF("stepstook=%d ", static_cast<int>(steps_took_)); + PrintF("stepstook=%.1f ", steps_took_); + PrintF("longeststep=%.1f ", longest_step_); } PrintF("\n"); @@ -7084,7 +7442,7 @@ const char* GCTracer::CollectorString() { } -int KeyedLookupCache::Hash(Map* map, String* name) { +int KeyedLookupCache::Hash(Map* map, Name* name) { // Uses only lower 32 bits if pointers are larger. uintptr_t addr_hash = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift; @@ -7092,7 +7450,7 @@ int KeyedLookupCache::Hash(Map* map, String* name) { } -int KeyedLookupCache::Lookup(Map* map, String* name) { +int KeyedLookupCache::Lookup(Map* map, Name* name) { int index = (Hash(map, name) & kHashMask); for (int i = 0; i < kEntriesPerBucket; i++) { Key& key = keys_[index + i]; @@ -7104,37 +7462,43 @@ int KeyedLookupCache::Lookup(Map* map, String* name) { } -void KeyedLookupCache::Update(Map* map, String* name, int field_offset) { - String* symbol; - if (HEAP->LookupSymbolIfExists(name, &symbol)) { - int index = (Hash(map, symbol) & kHashMask); - // After a GC there will be free slots, so we use them in order (this may - // help to get the most frequently used one in position 0). 
- for (int i = 0; i< kEntriesPerBucket; i++) { - Key& key = keys_[index]; - Object* free_entry_indicator = NULL; - if (key.map == free_entry_indicator) { - key.map = map; - key.name = symbol; - field_offsets_[index + i] = field_offset; - return; - } - } - // No free entry found in this bucket, so we move them all down one and - // put the new entry at position zero. - for (int i = kEntriesPerBucket - 1; i > 0; i--) { - Key& key = keys_[index + i]; - Key& key2 = keys_[index + i - 1]; - key = key2; - field_offsets_[index + i] = field_offsets_[index + i - 1]; +void KeyedLookupCache::Update(Map* map, Name* name, int field_offset) { + if (!name->IsUniqueName()) { + String* internalized_string; + if (!HEAP->InternalizeStringIfExists( + String::cast(name), &internalized_string)) { + return; } + name = internalized_string; + } - // Write the new first entry. + int index = (Hash(map, name) & kHashMask); + // After a GC there will be free slots, so we use them in order (this may + // help to get the most frequently used one in position 0). + for (int i = 0; i< kEntriesPerBucket; i++) { Key& key = keys_[index]; - key.map = map; - key.name = symbol; - field_offsets_[index] = field_offset; + Object* free_entry_indicator = NULL; + if (key.map == free_entry_indicator) { + key.map = map; + key.name = name; + field_offsets_[index + i] = field_offset; + return; + } } + // No free entry found in this bucket, so we move them all down one and + // put the new entry at position zero. + for (int i = kEntriesPerBucket - 1; i > 0; i--) { + Key& key = keys_[index + i]; + Key& key2 = keys_[index + i - 1]; + key = key2; + field_offsets_[index + i] = field_offsets_[index + i - 1]; + } + + // Write the new first entry. + Key& key = keys_[index]; + key.map = map; + key.name = name; + field_offsets_[index] = field_offset; } @@ -7184,7 +7548,7 @@ void TranscendentalCache::Clear() { void ExternalStringTable::CleanUp() { int last = 0; for (int i = 0; i < new_space_strings_.length(); ++i) { - if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) { + if (new_space_strings_[i] == heap_->the_hole_value()) { continue; } if (heap_->InNewSpace(new_space_strings_[i])) { @@ -7194,15 +7558,18 @@ void ExternalStringTable::CleanUp() { } } new_space_strings_.Rewind(last); + new_space_strings_.Trim(); + last = 0; for (int i = 0; i < old_space_strings_.length(); ++i) { - if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) { + if (old_space_strings_[i] == heap_->the_hole_value()) { continue; } ASSERT(!heap_->InNewSpace(old_space_strings_[i])); old_space_strings_[last++] = old_space_strings_[i]; } old_space_strings_.Rewind(last); + old_space_strings_.Trim(); #ifdef VERIFY_HEAP if (FLAG_verify_heap) { Verify(); @@ -7217,6 +7584,119 @@ void ExternalStringTable::TearDown() { } +// Update all references. +void ErrorObjectList::UpdateReferences() { + for (int i = 0; i < list_.length(); i++) { + HeapObject* object = HeapObject::cast(list_[i]); + MapWord first_word = object->map_word(); + if (first_word.IsForwardingAddress()) { + list_[i] = first_word.ToForwardingAddress(); + } + } +} + + +// Unforwarded objects in new space are dead and removed from the list. 
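The rewritten KeyedLookupCache::Update above internalizes the name first (bailing out if no internalized copy exists) and then applies the bucket policy the comments describe: use a free slot if the bucket has one, otherwise shift every entry down a position and install the new pair at the front, so the most recently used entry is probed first. An idealized, self-contained sketch of that policy, with illustrative types and constants:

constexpr int kEntriesPerBucketDemo = 4;

struct DemoEntry {
  const void* map = nullptr;   // nullptr marks a free slot
  const void* name = nullptr;
  int field_offset = 0;
};

void UpdateBucket(DemoEntry (&bucket)[kEntriesPerBucketDemo],
                  const void* map, const void* name, int field_offset) {
  for (int i = 0; i < kEntriesPerBucketDemo; i++) {
    if (bucket[i].map == nullptr) {            // free slot: fill it and stop
      bucket[i].map = map;
      bucket[i].name = name;
      bucket[i].field_offset = field_offset;
      return;
    }
  }
  for (int i = kEntriesPerBucketDemo - 1; i > 0; i--) {
    bucket[i] = bucket[i - 1];                 // evict by shifting down one
  }
  bucket[0].map = map;                         // newest entry goes first
  bucket[0].name = name;
  bucket[0].field_offset = field_offset;
}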
+void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) { + if (list_.is_empty()) return; + if (!nested_) { + int write_index = 0; + for (int i = 0; i < list_.length(); i++) { + MapWord first_word = HeapObject::cast(list_[i])->map_word(); + if (first_word.IsForwardingAddress()) { + list_[write_index++] = first_word.ToForwardingAddress(); + } + } + list_.Rewind(write_index); + } else { + // If a GC is triggered during DeferredFormatStackTrace, we do not move + // objects in the list, just remove dead ones, as to not confuse the + // loop in DeferredFormatStackTrace. + for (int i = 0; i < list_.length(); i++) { + MapWord first_word = HeapObject::cast(list_[i])->map_word(); + list_[i] = first_word.IsForwardingAddress() + ? first_word.ToForwardingAddress() + : heap->the_hole_value(); + } + } +} + + +void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) { + // If formatting the stack trace causes a GC, this method will be + // recursively called. In that case, skip the recursive call, since + // the loop modifies the list while iterating over it. + if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return; + nested_ = true; + HandleScope scope(isolate); + Handle<String> stack_key = isolate->factory()->stack_string(); + int write_index = 0; + int budget = kBudgetPerGC; + for (int i = 0; i < list_.length(); i++) { + Object* object = list_[i]; + JSFunction* getter_fun; + + { AssertNoAllocation assert; + // Skip possible holes in the list. + if (object->IsTheHole()) continue; + if (isolate->heap()->InNewSpace(object) || budget == 0) { + list_[write_index++] = object; + continue; + } + + // Check whether the stack property is backed by the original getter. + LookupResult lookup(isolate); + JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup); + if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue; + Object* callback = lookup.GetCallbackObject(); + if (!callback->IsAccessorPair()) continue; + Object* getter_obj = AccessorPair::cast(callback)->getter(); + if (!getter_obj->IsJSFunction()) continue; + getter_fun = JSFunction::cast(getter_obj); + String* key = isolate->heap()->hidden_stack_trace_string(); + if (key != getter_fun->GetHiddenProperty(key)) continue; + } + + budget--; + HandleScope scope(isolate); + bool has_exception = false; +#ifdef DEBUG + Handle<Map> map(HeapObject::cast(object)->map(), isolate); +#endif + Handle<Object> object_handle(object, isolate); + Handle<Object> getter_handle(getter_fun, isolate); + Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception); + ASSERT(*map == HeapObject::cast(*object_handle)->map()); + if (has_exception) { + // Hit an exception (most likely a stack overflow). + // Wrap up this pass and retry after another GC. + isolate->clear_pending_exception(); + // We use the handle since calling the getter might have caused a GC. 
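DeferredFormatStackTrace above does a bounded amount of work per GC (at most kBudgetPerGC getters invoked), keeps everything it could not process by compacting the list in place with a write index, and relies on the nested_ flag so a GC triggered by one of the getters does not restart the pass. A generic sketch of that bounded-work-per-pass loop, not the V8 code itself:

#include <cstddef>
#include <vector>

template <typename T, typename Fn>
void ProcessWithBudget(std::vector<T>& items, int budget, Fn process) {
  size_t write_index = 0;
  for (size_t i = 0; i < items.size(); ++i) {
    if (budget > 0 && process(items[i])) {
      --budget;                          // handled in this pass: drop it
    } else {
      items[write_index++] = items[i];   // keep for a later pass
    }
  }
  items.resize(write_index);             // analogous to List::Rewind + Trim
}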
+ list_[write_index++] = *object_handle; + budget = 0; + } + } + list_.Rewind(write_index); + list_.Trim(); + nested_ = false; +} + + +void ErrorObjectList::RemoveUnmarked(Heap* heap) { + for (int i = 0; i < list_.length(); i++) { + HeapObject* object = HeapObject::cast(list_[i]); + if (!Marking::MarkBitFrom(object).Get()) { + list_[i] = heap->the_hole_value(); + } + } +} + + +void ErrorObjectList::TearDown() { + list_.Free(); +} + + void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) { chunk->set_next_chunk(chunks_queued_for_free_); chunks_queued_for_free_ = chunk; diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h index 0ab7ae0bd8..d26c38476a 100644 --- a/deps/v8/src/heap.h +++ b/deps/v8/src/heap.h @@ -61,8 +61,6 @@ namespace internal { V(Map, global_property_cell_map, GlobalPropertyCellMap) \ V(Map, shared_function_info_map, SharedFunctionInfoMap) \ V(Map, meta_map, MetaMap) \ - V(Map, ascii_symbol_map, AsciiSymbolMap) \ - V(Map, ascii_string_map, AsciiStringMap) \ V(Map, heap_number_map, HeapNumberMap) \ V(Map, native_context_map, NativeContextMap) \ V(Map, fixed_array_map, FixedArrayMap) \ @@ -74,7 +72,6 @@ namespace internal { V(Map, hash_table_map, HashTableMap) \ V(FixedArray, empty_fixed_array, EmptyFixedArray) \ V(ByteArray, empty_byte_array, EmptyByteArray) \ - V(String, empty_string, EmptyString) \ V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray) \ V(Smi, stack_limit, StackLimit) \ V(Oddball, arguments_marker, ArgumentsMarker) \ @@ -90,29 +87,42 @@ namespace internal { V(FixedArray, regexp_multiple_cache, RegExpMultipleCache) \ V(Object, termination_exception, TerminationException) \ V(Smi, hash_seed, HashSeed) \ - V(Map, string_map, StringMap) \ V(Map, symbol_map, SymbolMap) \ + V(Map, string_map, StringMap) \ + V(Map, ascii_string_map, AsciiStringMap) \ V(Map, cons_string_map, ConsStringMap) \ V(Map, cons_ascii_string_map, ConsAsciiStringMap) \ V(Map, sliced_string_map, SlicedStringMap) \ V(Map, sliced_ascii_string_map, SlicedAsciiStringMap) \ - V(Map, cons_symbol_map, ConsSymbolMap) \ - V(Map, cons_ascii_symbol_map, ConsAsciiSymbolMap) \ - V(Map, external_symbol_map, ExternalSymbolMap) \ - V(Map, external_symbol_with_ascii_data_map, ExternalSymbolWithAsciiDataMap) \ - V(Map, external_ascii_symbol_map, ExternalAsciiSymbolMap) \ V(Map, external_string_map, ExternalStringMap) \ V(Map, external_string_with_ascii_data_map, ExternalStringWithAsciiDataMap) \ V(Map, external_ascii_string_map, ExternalAsciiStringMap) \ - V(Map, short_external_symbol_map, ShortExternalSymbolMap) \ - V(Map, \ - short_external_symbol_with_ascii_data_map, \ - ShortExternalSymbolWithAsciiDataMap) \ - V(Map, short_external_ascii_symbol_map, ShortExternalAsciiSymbolMap) \ V(Map, short_external_string_map, ShortExternalStringMap) \ V(Map, \ short_external_string_with_ascii_data_map, \ ShortExternalStringWithAsciiDataMap) \ + V(Map, internalized_string_map, InternalizedStringMap) \ + V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap) \ + V(Map, cons_internalized_string_map, ConsInternalizedStringMap) \ + V(Map, cons_ascii_internalized_string_map, ConsAsciiInternalizedStringMap) \ + V(Map, \ + external_internalized_string_map, \ + ExternalInternalizedStringMap) \ + V(Map, \ + external_internalized_string_with_ascii_data_map, \ + ExternalInternalizedStringWithAsciiDataMap) \ + V(Map, \ + external_ascii_internalized_string_map, \ + ExternalAsciiInternalizedStringMap) \ + V(Map, \ + short_external_internalized_string_map, \ + ShortExternalInternalizedStringMap) \ + V(Map, \ + 
short_external_internalized_string_with_ascii_data_map, \ + ShortExternalInternalizedStringWithAsciiDataMap) \ + V(Map, \ + short_external_ascii_internalized_string_map, \ + ShortExternalAsciiInternalizedStringMap) \ V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap) \ V(Map, undetectable_string_map, UndetectableStringMap) \ V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap) \ @@ -150,107 +160,111 @@ namespace internal { V(Object, last_script_id, LastScriptId) \ V(Script, empty_script, EmptyScript) \ V(Smi, real_stack_limit, RealStackLimit) \ - V(StringDictionary, intrinsic_function_names, IntrinsicFunctionNames) \ + V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames) \ V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \ V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \ V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \ - V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) + V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \ + V(JSObject, observation_state, ObservationState) \ + V(Map, external_map, ExternalMap) #define ROOT_LIST(V) \ STRONG_ROOT_LIST(V) \ - V(SymbolTable, symbol_table, SymbolTable) - -#define SYMBOL_LIST(V) \ - V(Array_symbol, "Array") \ - V(Object_symbol, "Object") \ - V(Proto_symbol, "__proto__") \ - V(StringImpl_symbol, "StringImpl") \ - V(arguments_symbol, "arguments") \ - V(Arguments_symbol, "Arguments") \ - V(call_symbol, "call") \ - V(apply_symbol, "apply") \ - V(caller_symbol, "caller") \ - V(boolean_symbol, "boolean") \ - V(Boolean_symbol, "Boolean") \ - V(callee_symbol, "callee") \ - V(constructor_symbol, "constructor") \ - V(code_symbol, ".code") \ - V(result_symbol, ".result") \ - V(dot_for_symbol, ".for.") \ - V(catch_var_symbol, ".catch-var") \ - V(empty_symbol, "") \ - V(eval_symbol, "eval") \ - V(function_symbol, "function") \ - V(length_symbol, "length") \ - V(module_symbol, "module") \ - V(name_symbol, "name") \ - V(native_symbol, "native") \ - V(null_symbol, "null") \ - V(number_symbol, "number") \ - V(Number_symbol, "Number") \ - V(nan_symbol, "NaN") \ - V(RegExp_symbol, "RegExp") \ - V(source_symbol, "source") \ - V(global_symbol, "global") \ - V(ignore_case_symbol, "ignoreCase") \ - V(multiline_symbol, "multiline") \ - V(input_symbol, "input") \ - V(index_symbol, "index") \ - V(last_index_symbol, "lastIndex") \ - V(object_symbol, "object") \ - V(prototype_symbol, "prototype") \ - V(string_symbol, "string") \ - V(String_symbol, "String") \ - V(Date_symbol, "Date") \ - V(this_symbol, "this") \ - V(to_string_symbol, "toString") \ - V(char_at_symbol, "CharAt") \ - V(undefined_symbol, "undefined") \ - V(value_of_symbol, "valueOf") \ - V(InitializeVarGlobal_symbol, "InitializeVarGlobal") \ - V(InitializeConstGlobal_symbol, "InitializeConstGlobal") \ - V(KeyedLoadElementMonomorphic_symbol, \ + V(StringTable, string_table, StringTable) + +#define INTERNALIZED_STRING_LIST(V) \ + V(Array_string, "Array") \ + V(Object_string, "Object") \ + V(proto_string, "__proto__") \ + V(StringImpl_string, "StringImpl") \ + V(arguments_string, "arguments") \ + V(Arguments_string, "Arguments") \ + V(call_string, "call") \ + V(apply_string, "apply") \ + V(caller_string, "caller") \ + V(boolean_string, "boolean") \ + V(Boolean_string, "Boolean") \ + V(callee_string, "callee") \ + V(constructor_string, "constructor") \ + V(code_string, ".code") \ + V(result_string, ".result") \ + V(dot_for_string, ".for.") \ + V(catch_var_string, ".catch-var") \ + 
V(empty_string, "") \ + V(eval_string, "eval") \ + V(function_string, "function") \ + V(length_string, "length") \ + V(module_string, "module") \ + V(name_string, "name") \ + V(native_string, "native") \ + V(null_string, "null") \ + V(number_string, "number") \ + V(Number_string, "Number") \ + V(nan_string, "NaN") \ + V(RegExp_string, "RegExp") \ + V(source_string, "source") \ + V(global_string, "global") \ + V(ignore_case_string, "ignoreCase") \ + V(multiline_string, "multiline") \ + V(input_string, "input") \ + V(index_string, "index") \ + V(last_index_string, "lastIndex") \ + V(object_string, "object") \ + V(prototype_string, "prototype") \ + V(string_string, "string") \ + V(String_string, "String") \ + V(Date_string, "Date") \ + V(this_string, "this") \ + V(to_string_string, "toString") \ + V(char_at_string, "CharAt") \ + V(undefined_string, "undefined") \ + V(value_of_string, "valueOf") \ + V(stack_string, "stack") \ + V(InitializeVarGlobal_string, "InitializeVarGlobal") \ + V(InitializeConstGlobal_string, "InitializeConstGlobal") \ + V(KeyedLoadElementMonomorphic_string, \ "KeyedLoadElementMonomorphic") \ - V(KeyedStoreElementMonomorphic_symbol, \ + V(KeyedStoreElementMonomorphic_string, \ "KeyedStoreElementMonomorphic") \ - V(KeyedStoreAndGrowElementMonomorphic_symbol, \ - "KeyedStoreAndGrowElementMonomorphic") \ - V(stack_overflow_symbol, "kStackOverflowBoilerplate") \ - V(illegal_access_symbol, "illegal access") \ - V(out_of_memory_symbol, "out-of-memory") \ - V(illegal_execution_state_symbol, "illegal execution state") \ - V(get_symbol, "get") \ - V(set_symbol, "set") \ - V(function_class_symbol, "Function") \ - V(illegal_argument_symbol, "illegal argument") \ - V(MakeReferenceError_symbol, "MakeReferenceError") \ - V(MakeSyntaxError_symbol, "MakeSyntaxError") \ - V(MakeTypeError_symbol, "MakeTypeError") \ - V(invalid_lhs_in_assignment_symbol, "invalid_lhs_in_assignment") \ - V(invalid_lhs_in_for_in_symbol, "invalid_lhs_in_for_in") \ - V(invalid_lhs_in_postfix_op_symbol, "invalid_lhs_in_postfix_op") \ - V(invalid_lhs_in_prefix_op_symbol, "invalid_lhs_in_prefix_op") \ - V(illegal_return_symbol, "illegal_return") \ - V(illegal_break_symbol, "illegal_break") \ - V(illegal_continue_symbol, "illegal_continue") \ - V(unknown_label_symbol, "unknown_label") \ - V(redeclaration_symbol, "redeclaration") \ - V(failure_symbol, "<failure>") \ - V(space_symbol, " ") \ - V(exec_symbol, "exec") \ - V(zero_symbol, "0") \ - V(global_eval_symbol, "GlobalEval") \ - V(identity_hash_symbol, "v8::IdentityHash") \ - V(closure_symbol, "(closure)") \ - V(use_strict, "use strict") \ - V(dot_symbol, ".") \ - V(anonymous_function_symbol, "(anonymous function)") \ - V(compare_ic_symbol, "==") \ - V(strict_compare_ic_symbol, "===") \ - V(infinity_symbol, "Infinity") \ - V(minus_infinity_symbol, "-Infinity") \ - V(hidden_stack_trace_symbol, "v8::hidden_stack_trace") \ - V(query_colon_symbol, "(?:)") + V(stack_overflow_string, "kStackOverflowBoilerplate") \ + V(illegal_access_string, "illegal access") \ + V(out_of_memory_string, "out-of-memory") \ + V(illegal_execution_state_string, "illegal execution state") \ + V(get_string, "get") \ + V(set_string, "set") \ + V(map_field_string, "%map") \ + V(elements_field_string, "%elements") \ + V(length_field_string, "%length") \ + V(function_class_string, "Function") \ + V(illegal_argument_string, "illegal argument") \ + V(MakeReferenceError_string, "MakeReferenceError") \ + V(MakeSyntaxError_string, "MakeSyntaxError") \ + V(MakeTypeError_string, "MakeTypeError") \ + 
V(invalid_lhs_in_assignment_string, "invalid_lhs_in_assignment") \ + V(invalid_lhs_in_for_in_string, "invalid_lhs_in_for_in") \ + V(invalid_lhs_in_postfix_op_string, "invalid_lhs_in_postfix_op") \ + V(invalid_lhs_in_prefix_op_string, "invalid_lhs_in_prefix_op") \ + V(illegal_return_string, "illegal_return") \ + V(illegal_break_string, "illegal_break") \ + V(illegal_continue_string, "illegal_continue") \ + V(unknown_label_string, "unknown_label") \ + V(redeclaration_string, "redeclaration") \ + V(failure_string, "<failure>") \ + V(space_string, " ") \ + V(exec_string, "exec") \ + V(zero_string, "0") \ + V(global_eval_string, "GlobalEval") \ + V(identity_hash_string, "v8::IdentityHash") \ + V(closure_string, "(closure)") \ + V(use_strict_string, "use strict") \ + V(dot_string, ".") \ + V(anonymous_function_string, "(anonymous function)") \ + V(compare_ic_string, "==") \ + V(strict_compare_ic_string, "===") \ + V(infinity_string, "Infinity") \ + V(minus_infinity_string, "-Infinity") \ + V(hidden_stack_trace_string, "v8::hidden_stack_trace") \ + V(query_colon_string, "(?:)") // Forward declarations. class GCTracer; @@ -284,14 +298,6 @@ class StoreBufferRebuilder { -// The all static Heap captures the interface to the global object heap. -// All JavaScript contexts by this process share the same object heap. - -#ifdef DEBUG -class HeapDebugUtils; -#endif - - // A queue of objects promoted during scavenge. Each object is accompanied // by it's size to avoid dereferencing a map pointer for scanning. class PromotionQueue { @@ -433,6 +439,41 @@ class ExternalStringTable { }; +// The stack property of an error object is implemented as a getter that +// formats the attached raw stack trace into a string. This raw stack trace +// keeps code and function objects alive until the getter is called the first +// time. To release those objects, we call the getter after each GC for +// newly tenured error objects that are kept in a list. +class ErrorObjectList { + public: + inline void Add(JSObject* object); + + inline void Iterate(ObjectVisitor* v); + + void TearDown(); + + void RemoveUnmarked(Heap* heap); + + void DeferredFormatStackTrace(Isolate* isolate); + + void UpdateReferences(); + + void UpdateReferencesInNewSpace(Heap* heap); + + private: + static const int kBudgetPerGC = 16; + + ErrorObjectList() : nested_(false) { } + + friend class Heap; + + List<Object*> list_; + bool nested_; + + DISALLOW_COPY_AND_ASSIGN(ErrorObjectList); +}; + + enum ArrayStorageAllocationMode { DONT_INITIALIZE_ARRAY_ELEMENTS, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE @@ -447,10 +488,13 @@ class Heap { intptr_t max_executable_size); bool ConfigureHeapDefault(); - // Initializes the global object heap. If create_heap_objects is true, - // also creates the basic non-mutable objects. + // Prepares the heap, setting up memory areas that are needed in the isolate + // without actually creating any objects. + bool SetUp(); + + // Bootstraps the object heap with the core set of objects required to run. // Returns whether it succeeded. - bool SetUp(bool create_heap_objects); + bool CreateHeapObjects(); // Destroys all memory allocated by the heap. void TearDown(); @@ -487,6 +531,9 @@ class Heap { // Returns the amount of executable memory currently committed for the heap. intptr_t CommittedMemoryExecutable(); + // Returns the amount of phyical memory currently committed for the heap. + size_t CommittedPhysicalMemory(); + // Returns the available bytes in space w/o growing. 
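The ErrorObjectList declared above exists because the stack property of an error object is a getter over a raw stack trace that keeps code and function objects alive; invoking the getter after GC for newly tenured errors releases them, and the nested_ member guards against a GC triggered from inside that pass re-entering it. A generic sketch of such a re-entrancy guard, not tied to the V8 types:

class GuardedPass {
 public:
  void Run() {
    if (running_) return;   // re-entered from inside the pass: do nothing
    running_ = true;
    DoWork();               // may indirectly trigger Run() again
    running_ = false;
  }
 private:
  void DoWork() {}
  bool running_ = false;
};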
// Heap doesn't guarantee that it can allocate an object that requires // all available bytes. Check MaxHeapObjectSize() instead. @@ -543,6 +590,13 @@ class Heap { return new_space_.allocation_limit_address(); } + Address* OldPointerSpaceAllocationTopAddress() { + return old_pointer_space_->allocation_top_address(); + } + Address* OldPointerSpaceAllocationLimitAddress() { + return old_pointer_space_->allocation_limit_address(); + } + // Uncommit unused semi space. bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); } @@ -552,7 +606,12 @@ class Heap { // failed. // Please note this does not perform a garbage collection. MUST_USE_RESULT MaybeObject* AllocateJSObject( - JSFunction* constructor, PretenureFlag pretenure = NOT_TENURED); + JSFunction* constructor, + PretenureFlag pretenure = NOT_TENURED); + + MUST_USE_RESULT MaybeObject* AllocateJSObjectWithAllocationSite( + JSFunction* constructor, + Handle<Object> allocation_site_info_payload); MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context, ScopeInfo* scope_info); @@ -566,6 +625,10 @@ class Heap { pretenure); } + inline MUST_USE_RESULT MaybeObject* AllocateEmptyJSArrayWithAllocationSite( + ElementsKind elements_kind, + Handle<Object> allocation_site_payload); + // Allocate a JSArray with a specified length but elements that are left // uninitialized. MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorage( @@ -575,10 +638,24 @@ class Heap { ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS, PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT MaybeObject* AllocateJSArrayAndStorageWithAllocationSite( + ElementsKind elements_kind, + int length, + int capacity, + Handle<Object> allocation_site_payload, + ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS); + + MUST_USE_RESULT MaybeObject* AllocateJSArrayStorage( + JSArray* array, + int length, + int capacity, + ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS); + // Allocate a JSArray with no elements MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements( FixedArrayBase* array_base, ElementsKind elements_kind, + int length, PretenureFlag pretenure = NOT_TENURED); // Allocates and initializes a new global object based on a constructor. @@ -592,6 +669,8 @@ class Heap { // Returns failure if allocation failed. MUST_USE_RESULT MaybeObject* CopyJSObject(JSObject* source); + MUST_USE_RESULT MaybeObject* CopyJSObjectWithAllocationSite(JSObject* source); + // Allocates the function prototype. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -633,12 +712,18 @@ class Heap { MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMap( Map* map, PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT MaybeObject* AllocateJSObjectFromMapWithAllocationSite( + Map* map, Handle<Object> allocation_site_info_payload); + // Allocates a heap object based on the map. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. MUST_USE_RESULT MaybeObject* Allocate(Map* map, AllocationSpace space); + MUST_USE_RESULT MaybeObject* AllocateWithAllocationSite(Map* map, + AllocationSpace space, Handle<Object> allocation_site_info_payload); + // Allocates a JS Map in the heap. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -661,6 +746,9 @@ class Heap { // Allocates a serialized scope info. 
MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length); + // Allocates an External object for v8's external API. + MUST_USE_RESULT MaybeObject* AllocateExternal(void* value); + // Allocates an empty PolymorphicCodeCache. MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache(); @@ -697,9 +785,16 @@ class Heap { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateStringFromAscii( - Vector<const char> str, + MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte( + Vector<const uint8_t> str, PretenureFlag pretenure = NOT_TENURED); + // TODO(dcarney): remove this function. + MUST_USE_RESULT inline MaybeObject* AllocateStringFromOneByte( + Vector<const char> str, + PretenureFlag pretenure = NOT_TENURED) { + return AllocateStringFromOneByte(Vector<const uint8_t>::cast(str), + pretenure); + } MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8( Vector<const char> str, PretenureFlag pretenure = NOT_TENURED); @@ -711,28 +806,33 @@ class Heap { Vector<const uc16> str, PretenureFlag pretenure = NOT_TENURED); - // Allocates a symbol in old space based on the character stream. - // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation - // failed. + // Allocates an internalized string in old space based on the character + // stream. Returns Failure::RetryAfterGC(requested_bytes, space) if the + // allocation failed. // Please note this function does not perform a garbage collection. - MUST_USE_RESULT inline MaybeObject* AllocateSymbol(Vector<const char> str, - int chars, - uint32_t hash_field); + MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringFromUtf8( + Vector<const char> str, + int chars, + uint32_t hash_field); - MUST_USE_RESULT inline MaybeObject* AllocateAsciiSymbol( - Vector<const char> str, + MUST_USE_RESULT inline MaybeObject* AllocateOneByteInternalizedString( + Vector<const uint8_t> str, uint32_t hash_field); - MUST_USE_RESULT inline MaybeObject* AllocateTwoByteSymbol( + MUST_USE_RESULT inline MaybeObject* AllocateTwoByteInternalizedString( Vector<const uc16> str, uint32_t hash_field); - MUST_USE_RESULT MaybeObject* AllocateInternalSymbol( - unibrow::CharacterStream* buffer, int chars, uint32_t hash_field); + template<typename T> + static inline bool IsOneByte(T t, int chars); - MUST_USE_RESULT MaybeObject* AllocateExternalSymbol( - Vector<const char> str, - int chars); + template<typename T> + MUST_USE_RESULT inline MaybeObject* AllocateInternalizedStringImpl( + T t, int chars, uint32_t hash_field); + + template<bool is_one_byte, typename T> + MUST_USE_RESULT MaybeObject* AllocateInternalizedStringImpl( + T t, int chars, uint32_t hash_field); // Allocates and partially initializes a String. There are two String // encodings: ASCII and two byte. These functions allocate a string of the @@ -741,7 +841,7 @@ class Heap { // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* AllocateRawAsciiString( + MUST_USE_RESULT MaybeObject* AllocateRawOneByteString( int length, PretenureFlag pretenure = NOT_TENURED); MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString( @@ -778,6 +878,13 @@ class Heap { void* external_pointer, PretenureFlag pretenure); + // Allocate a symbol. + // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation + // failed. 
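The templated internalized-string allocation declared above is split into a cheap IsOneByte(t, chars) decision plus a width-specific implementation. A sketch of the essential decision for UTF-16 input, illustrative rather than the V8 implementation:

#include <cstdint>
#include <vector>

bool FitsInOneByte(const std::vector<uint16_t>& code_units) {
  for (uint16_t u : code_units) {
    if (u > 0xFF) return false;  // needs the two-byte representation
  }
  return true;
}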
+ // Please note this does not perform a garbage collection. + MUST_USE_RESULT MaybeObject* AllocateSymbol( + PretenureFlag pretenure = NOT_TENURED); + // Allocate a tenured JS global property cell. // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. @@ -800,6 +907,10 @@ class Heap { // Please note this does not perform a garbage collection. MUST_USE_RESULT MaybeObject* AllocateUninitializedFixedArray(int length); + // Move len elements within a given array from src_index index to dst_index + // index. + void MoveElements(FixedArray* array, int dst_index, int src_index, int len); + // Make a copy of src and return it. Returns // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed. MUST_USE_RESULT inline MaybeObject* CopyFixedArray(FixedArray* src); @@ -1024,28 +1135,28 @@ class Heap { // the provided data as the relocation information. MUST_USE_RESULT MaybeObject* CopyCode(Code* code, Vector<byte> reloc_info); - // Finds the symbol for string in the symbol table. - // If not found, a new symbol is added to the table and returned. + // Finds the internalized copy for string in the string table. + // If not found, a new string is added to the table and returned. // Returns Failure::RetryAfterGC(requested_bytes, space) if allocation // failed. // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str); - MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str); - MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str); - MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(const char* str) { - return LookupSymbol(CStrVector(str)); + MUST_USE_RESULT MaybeObject* InternalizeUtf8String(Vector<const char> str); + MUST_USE_RESULT MaybeObject* InternalizeUtf8String(const char* str) { + return InternalizeUtf8String(CStrVector(str)); } - MUST_USE_RESULT MaybeObject* LookupSymbol(String* str); - MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string, - int from, - int length); + MUST_USE_RESULT MaybeObject* InternalizeOneByteString( + Vector<const uint8_t> str); + MUST_USE_RESULT MaybeObject* InternalizeTwoByteString(Vector<const uc16> str); + MUST_USE_RESULT MaybeObject* InternalizeString(String* str); + MUST_USE_RESULT MaybeObject* InternalizeOneByteString( + Handle<SeqOneByteString> string, int from, int length); - bool LookupSymbolIfExists(String* str, String** symbol); - bool LookupTwoCharsSymbolIfExists(String* str, String** symbol); + bool InternalizeStringIfExists(String* str, String** result); + bool InternalizeTwoCharsStringIfExists(String* str, String** result); - // Compute the matching symbol map for a string if possible. + // Compute the matching internalized string map for a string if possible. // NULL is returned if string is in new space or not flattened. - Map* SymbolMapForString(String* str); + Map* InternalizedStringMapForString(String* str); // Tries to flatten a string before compare operation. 
// @@ -1173,15 +1284,15 @@ class Heap { STRUCT_LIST(STRUCT_MAP_ACCESSOR) #undef STRUCT_MAP_ACCESSOR -#define SYMBOL_ACCESSOR(name, str) String* name() { \ +#define STRING_ACCESSOR(name, str) String* name() { \ return String::cast(roots_[k##name##RootIndex]); \ } - SYMBOL_LIST(SYMBOL_ACCESSOR) -#undef SYMBOL_ACCESSOR + INTERNALIZED_STRING_LIST(STRING_ACCESSOR) +#undef STRING_ACCESSOR - // The hidden_symbol is special because it is the empty string, but does + // The hidden_string is special because it is the empty string, but does // not match the empty string. - String* hidden_symbol() { return hidden_symbol_; } + String* hidden_string() { return hidden_string_; } void set_native_contexts_list(Object* object) { native_contexts_list_ = object; @@ -1206,11 +1317,15 @@ class Heap { // Returns whether the object resides in new space. inline bool InNewSpace(Object* object); - inline bool InNewSpace(Address addr); - inline bool InNewSpacePage(Address addr); + inline bool InNewSpace(Address address); + inline bool InNewSpacePage(Address address); inline bool InFromSpace(Object* object); inline bool InToSpace(Object* object); + // Returns whether the object resides in old pointer space. + inline bool InOldPointerSpace(Address address); + inline bool InOldPointerSpace(Object* object); + // Checks whether an address/object in the heap (including auxiliary // area and unused area). bool Contains(Address addr); @@ -1268,6 +1383,11 @@ class Heap { #ifdef VERIFY_HEAP // Verify the heap is in its normal state before or after a GC. void Verify(); + + + bool weak_embedded_maps_verification_enabled() { + return no_weak_embedded_maps_verification_scope_depth_ == 0; + } #endif #ifdef DEBUG @@ -1302,26 +1422,23 @@ class Heap { // Print short heap statistics. void PrintShortHeapStatistics(); - // Makes a new symbol object + // Makes a new internalized string object // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // failed. // Please note this function does not perform a garbage collection. - MUST_USE_RESULT MaybeObject* CreateSymbol( + MUST_USE_RESULT MaybeObject* CreateInternalizedString( const char* str, int length, int hash); - MUST_USE_RESULT MaybeObject* CreateSymbol(String* str); + MUST_USE_RESULT MaybeObject* CreateInternalizedString(String* str); // Write barrier support for address[offset] = o. - inline void RecordWrite(Address address, int offset); + INLINE(void RecordWrite(Address address, int offset)); // Write barrier support for address[start : start + len[ = o. - inline void RecordWrites(Address address, int start, int len); + INLINE(void RecordWrites(Address address, int start, int len)); // Given an address occupied by a live code object, return that object. Object* FindCodeObject(Address a); - // Invoke Shrink on shrinkable spaces. 
- void Shrink(); - enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT }; inline HeapState gc_state() { return gc_state_; } @@ -1428,17 +1545,17 @@ class Heap { STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION) #undef ROOT_INDEX_DECLARATION -#define SYMBOL_INDEX_DECLARATION(name, str) k##name##RootIndex, - SYMBOL_LIST(SYMBOL_INDEX_DECLARATION) -#undef SYMBOL_DECLARATION +#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex, + INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION) +#undef STRING_DECLARATION // Utility type maps #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex, STRUCT_LIST(DECLARE_STRUCT_MAP) #undef DECLARE_STRUCT_MAP - kSymbolTableRootIndex, - kStrongRootListLength = kSymbolTableRootIndex, + kStringTableRootIndex, + kStrongRootListLength = kStringTableRootIndex, kRootListLength }; @@ -1446,7 +1563,11 @@ class Heap { STATIC_CHECK(kNullValueRootIndex == Internals::kNullValueRootIndex); STATIC_CHECK(kTrueValueRootIndex == Internals::kTrueValueRootIndex); STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex); - STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex); + STATIC_CHECK(kempty_stringRootIndex == Internals::kEmptyStringRootIndex); + + // Generated code can embed direct references to non-writable roots if + // they are in new space. + static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index); MUST_USE_RESULT MaybeObject* NumberToString( Object* number, bool check_number_string_cache = true); @@ -1530,13 +1651,31 @@ class Heap { } // Returns maximum GC pause. - int get_max_gc_pause() { return max_gc_pause_; } + double get_max_gc_pause() { return max_gc_pause_; } // Returns maximum size of objects alive after GC. intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; } // Returns minimal interval between two subsequent collections. - int get_min_in_mutator() { return min_in_mutator_; } + double get_min_in_mutator() { return min_in_mutator_; } + + // TODO(hpayer): remove, should be handled by GCTracer + void AddMarkingTime(double marking_time) { + marking_time_ += marking_time; + } + + double marking_time() const { + return marking_time_; + } + + // TODO(hpayer): remove, should be handled by GCTracer + void AddSweepingTime(double sweeping_time) { + sweeping_time_ += sweeping_time; + } + + double sweeping_time() const { + return sweeping_time_; + } MarkCompactCollector* mark_compact_collector() { return &mark_compact_collector_; @@ -1555,20 +1694,32 @@ class Heap { } bool IsSweepingComplete() { - return old_data_space()->IsSweepingComplete() && - old_pointer_space()->IsSweepingComplete(); + return !mark_compact_collector()->IsConcurrentSweepingInProgress() && + old_data_space()->IsLazySweepingComplete() && + old_pointer_space()->IsLazySweepingComplete(); } bool AdvanceSweepers(int step_size) { + ASSERT(!FLAG_parallel_sweeping && !FLAG_concurrent_sweeping); bool sweeping_complete = old_data_space()->AdvanceSweeper(step_size); sweeping_complete &= old_pointer_space()->AdvanceSweeper(step_size); return sweeping_complete; } + bool EnsureSweepersProgressed(int step_size) { + bool sweeping_complete = old_data_space()->EnsureSweeperProgress(step_size); + sweeping_complete &= old_pointer_space()->EnsureSweeperProgress(step_size); + return sweeping_complete; + } + ExternalStringTable* external_string_table() { return &external_string_table_; } + ErrorObjectList* error_object_list() { + return &error_object_list_; + } + // Returns the current sweep generation. 
int sweep_generation() { return sweep_generation_; @@ -1576,13 +1727,8 @@ class Heap { inline Isolate* isolate(); - inline void CallGlobalGCPrologueCallback() { - if (global_gc_prologue_callback_ != NULL) global_gc_prologue_callback_(); - } - - inline void CallGlobalGCEpilogueCallback() { - if (global_gc_epilogue_callback_ != NULL) global_gc_epilogue_callback_(); - } + void CallGCPrologueCallbacks(GCType gc_type); + void CallGCEpilogueCallbacks(GCType gc_type); inline bool OldGenerationAllocationLimitReached(); @@ -1683,14 +1829,27 @@ class Heap { explicit RelocationLock(Heap* heap) : heap_(heap) { if (FLAG_parallel_recompilation) { heap_->relocation_mutex_->Lock(); +#ifdef DEBUG + heap_->relocation_mutex_locked_ = true; +#endif // DEBUG } } + ~RelocationLock() { if (FLAG_parallel_recompilation) { +#ifdef DEBUG + heap_->relocation_mutex_locked_ = false; +#endif // DEBUG heap_->relocation_mutex_->Unlock(); } } +#ifdef DEBUG + static bool IsLocked(Heap* heap) { + return heap->relocation_mutex_locked_; + } +#endif // DEBUG + private: Heap* heap_; }; @@ -1781,8 +1940,6 @@ class Heap { // Do we expect to be able to handle allocation failure at this // time? bool disallow_allocation_failure_; - - HeapDebugUtils* debug_utils_; #endif // DEBUG // Indicates that the new space should be kept small due to high promotion @@ -1831,7 +1988,7 @@ class Heap { RootListIndex index; }; - struct ConstantSymbolTable { + struct ConstantStringTable { const char* contents; RootListIndex index; }; @@ -1843,12 +2000,12 @@ class Heap { }; static const StringTypeTable string_type_table[]; - static const ConstantSymbolTable constant_symbol_table[]; + static const ConstantStringTable constant_string_table[]; static const StructTable struct_table[]; - // The special hidden symbol which is an empty string, but does not match + // The special hidden string which is an empty string, but does not match // any string when looked up in properties. - String* hidden_symbol_; + String* hidden_string_; // GC callback function, called before and after mark-compact GC. // Allocations in the callback function are disallowed. @@ -1899,7 +2056,6 @@ class Heap { bool PerformGarbageCollection(GarbageCollector collector, GCTracer* tracer); - inline void UpdateOldSpaceLimits(); // Allocate an uninitialized object in map space. The behavior is identical @@ -1926,15 +2082,19 @@ class Heap { void CreateFixedStubs(); - MaybeObject* CreateOddball(const char* to_string, - Object* to_number, - byte kind); + MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string, + Object* to_number, + byte kind); // Allocate a JSArray with no elements MUST_USE_RESULT MaybeObject* AllocateJSArray( ElementsKind elements_kind, PretenureFlag pretenure = NOT_TENURED); + MUST_USE_RESULT MaybeObject* AllocateJSArrayWithAllocationSite( + ElementsKind elements_kind, + Handle<Object> allocation_site_info_payload); + // Allocate empty fixed array. MUST_USE_RESULT MaybeObject* AllocateEmptyFixedArray(); @@ -1981,7 +2141,6 @@ class Heap { GCTracer* tracer_; - // Allocates a small number to string cache. MUST_USE_RESULT MaybeObject* AllocateInitialNumberStringCache(); // Creates and installs the full-sized number string cache. 
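Editor's note: the RelocationLock hunk above adds DEBUG-only bookkeeping so that assertions elsewhere can ask RelocationLock::IsLocked(heap) while relocation is in progress. Below is a minimal standalone sketch of the same RAII-plus-debug-flag pattern; ToyHeap and ScopedRelocationLock are illustrative names, not V8 classes, and the FLAG_parallel_recompilation guard is omitted for brevity.

#include <mutex>

struct ToyHeap {
  std::mutex relocation_mutex;
#ifdef DEBUG
  bool relocation_mutex_locked = false;  // mirrors relocation_mutex_locked_
#endif
};

class ScopedRelocationLock {
 public:
  explicit ScopedRelocationLock(ToyHeap* heap) : heap_(heap) {
    heap_->relocation_mutex.lock();
#ifdef DEBUG
    heap_->relocation_mutex_locked = true;  // record ownership for assertions
#endif
  }
  ~ScopedRelocationLock() {
#ifdef DEBUG
    heap_->relocation_mutex_locked = false;
#endif
    heap_->relocation_mutex.unlock();
  }
#ifdef DEBUG
  static bool IsLocked(ToyHeap* heap) { return heap->relocation_mutex_locked; }
#endif
 private:
  ToyHeap* heap_;
};

Usage brackets a relocation-sensitive region with a ScopedRelocationLock on the stack, exactly as the patch does with Heap::RelocationLock.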
@@ -2092,7 +2251,7 @@ class Heap { void ClearObjectStats(bool clear_last_time_stats = false); - static const int kInitialSymbolTableSize = 2048; + static const int kInitialStringTableSize = 2048; static const int kInitialEvalCacheSize = 64; static const int kInitialNumberStringCacheSize = 256; @@ -2103,22 +2262,28 @@ class Heap { size_t object_sizes_last_time_[OBJECT_STATS_COUNT]; // Maximum GC pause. - int max_gc_pause_; + double max_gc_pause_; // Total time spent in GC. - int total_gc_time_ms_; + double total_gc_time_ms_; // Maximum size of objects alive after GC. intptr_t max_alive_after_gc_; // Minimal interval between two subsequent collections. - int min_in_mutator_; + double min_in_mutator_; // Size of objects alive after last GC. intptr_t alive_after_last_gc_; double last_gc_end_timestamp_; + // Cumulative GC time spent in marking + double marking_time_; + + // Cumulative GC time spent in sweeping + double sweeping_time_; + MarkCompactCollector mark_compact_collector_; StoreBuffer store_buffer_; @@ -2136,6 +2301,10 @@ class Heap { unsigned int gc_count_at_last_idle_gc_; int scavenges_since_last_idle_round_; +#ifdef VERIFY_HEAP + int no_weak_embedded_maps_verification_scope_depth_; +#endif + static const int kMaxMarkSweepsInIdleRound = 7; static const int kIdleScavengeThreshold = 5; @@ -2148,11 +2317,16 @@ class Heap { ExternalStringTable external_string_table_; + ErrorObjectList error_object_list_; + VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; MemoryChunk* chunks_queued_for_free_; Mutex* relocation_mutex_; +#ifdef DEBUG + bool relocation_mutex_locked_; +#endif // DEBUG; friend class Factory; friend class GCTracer; @@ -2163,6 +2337,9 @@ class Heap { friend class MarkCompactCollector; friend class MarkCompactMarkingVisitor; friend class MapCompact; +#ifdef VERIFY_HEAP + friend class NoWeakEmbeddedMapsVerificationScope; +#endif DISALLOW_COPY_AND_ASSIGN(Heap); }; @@ -2223,6 +2400,14 @@ class AlwaysAllocateScope { DisallowAllocationFailure disallow_allocation_failure_; }; +#ifdef VERIFY_HEAP +class NoWeakEmbeddedMapsVerificationScope { + public: + inline NoWeakEmbeddedMapsVerificationScope(); + inline ~NoWeakEmbeddedMapsVerificationScope(); +}; +#endif + // Visitor class to verify interior pointers in spaces that do not contain // or care about intergenerational references. All heap object pointers have to @@ -2235,37 +2420,40 @@ class VerifyPointersVisitor: public ObjectVisitor { }; -// Space iterator for iterating over all spaces of the heap. -// Returns each space in turn, and null when it is done. +// Space iterator for iterating over all spaces of the heap. Returns each space +// in turn, and null when it is done. class AllSpaces BASE_EMBEDDED { public: + explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {} Space* next(); - AllSpaces() { counter_ = FIRST_SPACE; } private: + Heap* heap_; int counter_; }; // Space iterator for iterating over all old spaces of the heap: Old pointer -// space, old data space and code space. -// Returns each space in turn, and null when it is done. +// space, old data space and code space. Returns each space in turn, and null +// when it is done. 
class OldSpaces BASE_EMBEDDED { public: + explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} OldSpace* next(); - OldSpaces() { counter_ = OLD_POINTER_SPACE; } private: + Heap* heap_; int counter_; }; -// Space iterator for iterating over all the paged spaces of the heap: -// Map space, old pointer space, old data space, code space and cell space. -// Returns each space in turn, and null when it is done. +// Space iterator for iterating over all the paged spaces of the heap: Map +// space, old pointer space, old data space, code space and cell space. Returns +// each space in turn, and null when it is done. class PagedSpaces BASE_EMBEDDED { public: + explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {} PagedSpace* next(); - PagedSpaces() { counter_ = OLD_POINTER_SPACE; } private: + Heap* heap_; int counter_; }; @@ -2275,8 +2463,8 @@ class PagedSpaces BASE_EMBEDDED { // returned object iterators is handled by the space iterator. class SpaceIterator : public Malloced { public: - SpaceIterator(); - explicit SpaceIterator(HeapObjectCallback size_func); + explicit SpaceIterator(Heap* heap); + SpaceIterator(Heap* heap, HeapObjectCallback size_func); virtual ~SpaceIterator(); bool has_next(); @@ -2285,6 +2473,7 @@ class SpaceIterator : public Malloced { private: ObjectIterator* CreateIterator(); + Heap* heap_; int current_space_; // from enum AllocationSpace. ObjectIterator* iterator_; // object iterator for the current space. HeapObjectCallback size_func_; @@ -2309,8 +2498,8 @@ class HeapIterator BASE_EMBEDDED { kFilterUnreachable }; - HeapIterator(); - explicit HeapIterator(HeapObjectsFiltering filtering); + explicit HeapIterator(Heap* heap); + HeapIterator(Heap* heap, HeapObjectsFiltering filtering); ~HeapIterator(); HeapObject* next(); @@ -2323,6 +2512,7 @@ class HeapIterator BASE_EMBEDDED { void Shutdown(); HeapObject* NextObject(); + Heap* heap_; HeapObjectsFiltering filtering_; HeapObjectsFilter* filter_; // Space iterator for iterating all the spaces. @@ -2337,10 +2527,10 @@ class HeapIterator BASE_EMBEDDED { class KeyedLookupCache { public: // Lookup field offset for (map, name). If absent, -1 is returned. - int Lookup(Map* map, String* name); + int Lookup(Map* map, Name* name); // Update an element in the cache. - void Update(Map* map, String* name, int field_offset); + void Update(Map* map, Name* name, int field_offset); // Clear the cache. void Clear(); @@ -2365,7 +2555,7 @@ class KeyedLookupCache { } } - static inline int Hash(Map* map, String* name); + static inline int Hash(Map* map, Name* name); // Get the address of the keys and field_offsets arrays. Used in // generated code to perform cache lookups. @@ -2379,7 +2569,7 @@ class KeyedLookupCache { struct Key { Map* map; - String* name; + Name* name; }; Key keys_[kLength]; @@ -2399,8 +2589,8 @@ class DescriptorLookupCache { public: // Lookup descriptor index for (map, name). // If absent, kAbsent is returned. - int Lookup(Map* source, String* name) { - if (!StringShape(name).IsSymbol()) return kAbsent; + int Lookup(Map* source, Name* name) { + if (!name->IsUniqueName()) return kAbsent; int index = Hash(source, name); Key& key = keys_[index]; if ((key.source == source) && (key.name == name)) return results_[index]; @@ -2408,9 +2598,9 @@ class DescriptorLookupCache { } // Update an element in the cache. 
- void Update(Map* source, String* name, int result) { + void Update(Map* source, Name* name, int result) { ASSERT(result != kAbsent); - if (StringShape(name).IsSymbol()) { + if (name->IsUniqueName()) { int index = Hash(source, name); Key& key = keys_[index]; key.source = source; @@ -2433,7 +2623,7 @@ class DescriptorLookupCache { } } - static int Hash(Object* source, String* name) { + static int Hash(Object* source, Name* name) { // Uses only lower 32 bits if pointers are larger. uint32_t source_hash = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) @@ -2447,7 +2637,7 @@ class DescriptorLookupCache { static const int kLength = 64; struct Key { Map* source; - String* name; + Name* name; }; Key keys_[kLength]; @@ -2548,6 +2738,18 @@ class GCTracer BASE_EMBEDDED { promoted_objects_size_ += object_size; } + void increment_nodes_died_in_new_space() { + nodes_died_in_new_space_++; + } + + void increment_nodes_copied_in_new_space() { + nodes_copied_in_new_space_++; + } + + void increment_nodes_promoted() { + nodes_promoted_++; + } + private: // Returns a string matching the collector. const char* CollectorString(); @@ -2592,6 +2794,15 @@ class GCTracer BASE_EMBEDDED { // Size of objects promoted during the current collection. intptr_t promoted_objects_size_; + // Number of died nodes in the new space. + int nodes_died_in_new_space_; + + // Number of copied nodes to the new space. + int nodes_copied_in_new_space_; + + // Number of promoted nodes to the old space. + int nodes_promoted_; + // Incremental marking steps counters. int steps_count_; double steps_took_; @@ -2760,7 +2971,7 @@ class IntrusiveMarking { }; -#if defined(DEBUG) || defined(LIVE_OBJECT_LIST) +#ifdef DEBUG // Helper class for tracing paths to a search target Object from all roots. // The TracePathFrom() method can be used to trace paths from a specific // object to the search target object. @@ -2817,7 +3028,7 @@ class PathTracer : public ObjectVisitor { private: DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer); }; -#endif // DEBUG || LIVE_OBJECT_LIST +#endif // DEBUG } } // namespace v8::internal diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc index 79550f3eac..fc80748a6a 100644 --- a/deps/v8/src/hydrogen-instructions.cc +++ b/deps/v8/src/hydrogen-instructions.cc @@ -27,6 +27,7 @@ #include "v8.h" +#include "double.h" #include "factory.h" #include "hydrogen.h" @@ -75,6 +76,12 @@ int HValue::LoopWeight() const { } +Isolate* HValue::isolate() const { + ASSERT(block() != NULL); + return block()->isolate(); +} + + void HValue::AssumeRepresentation(Representation r) { if (CheckFlag(kFlexibleRepresentation)) { ChangeRepresentation(r); @@ -85,6 +92,191 @@ void HValue::AssumeRepresentation(Representation r) { } +void HValue::InferRepresentation(HInferRepresentation* h_infer) { + ASSERT(CheckFlag(kFlexibleRepresentation)); + Representation new_rep = RepresentationFromInputs(); + UpdateRepresentation(new_rep, h_infer, "inputs"); + new_rep = RepresentationFromUses(); + UpdateRepresentation(new_rep, h_infer, "uses"); +} + + +Representation HValue::RepresentationFromUses() { + if (HasNoUses()) return Representation::None(); + + // Array of use counts for each representation. 
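// Editor's gloss (not part of the patch): each use's observed representation
// is tallied below, weighted by that use's LoopWeight(), and the result is
// chosen by fixed priority rather than by majority: any Tagged use forces
// Tagged, otherwise any Double use forces Double, otherwise any Integer32
// use forces Integer32. For example, a value with ten Integer32 uses inside
// a loop and a single Tagged use outside it still ends up Tagged.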
+ int use_count[Representation::kNumRepresentations] = { 0 }; + + for (HUseIterator it(uses()); !it.Done(); it.Advance()) { + HValue* use = it.value(); + Representation rep = use->observed_input_representation(it.index()); + if (rep.IsNone()) continue; + if (FLAG_trace_representation) { + PrintF("#%d %s is used by #%d %s as %s%s\n", + id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(), + (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : "")); + } + use_count[rep.kind()] += use->LoopWeight(); + } + if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]); + int tagged_count = use_count[Representation::kTagged]; + int double_count = use_count[Representation::kDouble]; + int int32_count = use_count[Representation::kInteger32]; + + if (tagged_count > 0) return Representation::Tagged(); + if (double_count > 0) return Representation::Double(); + if (int32_count > 0) return Representation::Integer32(); + + return Representation::None(); +} + + +void HValue::UpdateRepresentation(Representation new_rep, + HInferRepresentation* h_infer, + const char* reason) { + Representation r = representation(); + if (new_rep.is_more_general_than(r)) { + // When an HConstant is marked "not convertible to integer", then + // never try to represent it as an integer. + if (new_rep.IsInteger32() && !IsConvertibleToInteger()) { + new_rep = Representation::Tagged(); + if (FLAG_trace_representation) { + PrintF("Changing #%d %s representation %s -> %s because it's NCTI" + " (%s want i)\n", + id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason); + } + } else { + if (FLAG_trace_representation) { + PrintF("Changing #%d %s representation %s -> %s based on %s\n", + id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason); + } + } + ChangeRepresentation(new_rep); + AddDependantsToWorklist(h_infer); + } +} + + +void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) { + for (HUseIterator it(uses()); !it.Done(); it.Advance()) { + h_infer->AddToWorklist(it.value()); + } + for (int i = 0; i < OperandCount(); ++i) { + h_infer->AddToWorklist(OperandAt(i)); + } +} + + +// This method is recursive but it is guaranteed to terminate because +// RedefinedOperand() always dominates "this". +bool HValue::IsRelationTrue(NumericRelation relation, + HValue* other, + int offset, + int scale) { + if (this == other) { + return scale == 0 && relation.IsExtendable(offset); + } + + // Test the direct relation. + if (IsRelationTrueInternal(relation, other, offset, scale)) return true; + + // If scale is 0 try the reversed relation. + if (scale == 0 && + // TODO(mmassi): do we need the full, recursive IsRelationTrue? + other->IsRelationTrueInternal(relation.Reversed(), this, -offset)) { + return true; + } + + // Try decomposition (but do not accept scaled compounds). + DecompositionResult decomposition; + if (TryDecompose(&decomposition) && + decomposition.scale() == 0 && + decomposition.base()->IsRelationTrue(relation, other, + offset + decomposition.offset(), + scale)) { + return true; + } + + // Pass the request to the redefined value. 
+ HValue* redefined = RedefinedOperand(); + return redefined != NULL && redefined->IsRelationTrue(relation, other, + offset, scale); +} + + +bool HValue::TryGuaranteeRange(HValue* upper_bound) { + RangeEvaluationContext context = RangeEvaluationContext(this, upper_bound); + TryGuaranteeRangeRecursive(&context); + bool result = context.is_range_satisfied(); + if (result) { + context.lower_bound_guarantee()->SetResponsibilityForRange(DIRECTION_LOWER); + context.upper_bound_guarantee()->SetResponsibilityForRange(DIRECTION_UPPER); + } + return result; +} + + +void HValue::TryGuaranteeRangeRecursive(RangeEvaluationContext* context) { + // Check if we already know that this value satisfies the lower bound. + if (context->lower_bound_guarantee() == NULL) { + if (IsRelationTrueInternal(NumericRelation::Ge(), context->lower_bound(), + context->offset(), context->scale())) { + context->set_lower_bound_guarantee(this); + } + } + + // Check if we already know that this value satisfies the upper bound. + if (context->upper_bound_guarantee() == NULL) { + if (IsRelationTrueInternal(NumericRelation::Lt(), context->upper_bound(), + context->offset(), context->scale()) || + (context->scale() == 0 && + context->upper_bound()->IsRelationTrue(NumericRelation::Gt(), + this, -context->offset()))) { + context->set_upper_bound_guarantee(this); + } + } + + if (context->is_range_satisfied()) return; + + // See if our RedefinedOperand() satisfies the constraints. + if (RedefinedOperand() != NULL) { + RedefinedOperand()->TryGuaranteeRangeRecursive(context); + } + if (context->is_range_satisfied()) return; + + // See if the constraints can be satisfied by decomposition. + DecompositionResult decomposition; + if (TryDecompose(&decomposition)) { + context->swap_candidate(&decomposition); + context->candidate()->TryGuaranteeRangeRecursive(context); + context->swap_candidate(&decomposition); + } + if (context->is_range_satisfied()) return; + + // Try to modify this to satisfy the constraint. + + TryGuaranteeRangeChanging(context); +} + + +RangeEvaluationContext::RangeEvaluationContext(HValue* value, HValue* upper) + : lower_bound_(upper->block()->graph()->GetConstant0()), + lower_bound_guarantee_(NULL), + candidate_(value), + upper_bound_(upper), + upper_bound_guarantee_(NULL), + offset_(0), + scale_(0) { +} + + +HValue* RangeEvaluationContext::ConvertGuarantee(HValue* guarantee) { + return guarantee->IsBoundsCheckBaseIndexInformation() + ? HBoundsCheckBaseIndexInformation::cast(guarantee)->bounds_check() + : guarantee; +} + + static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) { if (result > kMaxInt) { *overflow = true; @@ -285,6 +477,55 @@ HType HType::TypeFromValue(Handle<Object> value) { } +bool HValue::Dominates(HValue* dominator, HValue* dominated) { + if (dominator->block() != dominated->block()) { + // If they are in different blocks we can use the dominance relation + // between the blocks. + return dominator->block()->Dominates(dominated->block()); + } else { + // Otherwise we must see which instruction comes first, considering + // that phis always precede regular instructions. 
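// Editor's gloss (not part of the patch): within one block the order is
// resolved structurally by the branches below. When both values are
// instructions, the instruction list is walked forward from the candidate
// dominator; reaching the other value means it appears later and is therefore
// dominated. A phi dominates every instruction in its block, an instruction
// never dominates a phi, and the relative order of two phis is not decidable
// here.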
+ if (dominator->IsInstruction()) { + if (dominated->IsInstruction()) { + for (HInstruction* next = HInstruction::cast(dominator)->next(); + next != NULL; + next = next->next()) { + if (next == dominated) return true; + } + return false; + } else if (dominated->IsPhi()) { + return false; + } else { + UNREACHABLE(); + } + } else if (dominator->IsPhi()) { + if (dominated->IsInstruction()) { + return true; + } else { + // We cannot compare which phi comes first. + UNREACHABLE(); + } + } else { + UNREACHABLE(); + } + return false; + } +} + + +bool HValue::TestDominanceUsingProcessedFlag(HValue* dominator, + HValue* dominated) { + if (dominator->block() != dominated->block()) { + return dominator->block()->Dominates(dominated->block()); + } else { + // If both arguments are in the same block we check if dominator is a phi + // or if dominated has not already been processed: in either case we know + // that dominator precedes dominated. + return dominator->IsPhi() || !dominated->CheckFlag(kIDefsProcessingDone); + } +} + + bool HValue::IsDefinedAfter(HBasicBlock* other) const { return block()->block_id() > other->block_id(); } @@ -301,6 +542,7 @@ HUseListNode* HUseListNode::tail() { bool HValue::CheckUsesForFlag(Flag f) { for (HUseIterator it(uses()); !it.Done(); it.Advance()) { + if (it.value()->IsSimulate()) continue; if (!it.value()->CheckFlag(f)) return false; } return true; @@ -396,6 +638,16 @@ const char* HValue::Mnemonic() const { } +bool HValue::IsInteger32Constant() { + return IsConstant() && HConstant::cast(this)->HasInteger32Value(); +} + + +int32_t HValue::GetInteger32Constant() { + return HConstant::cast(this)->Integer32Value(); +} + + void HValue::SetOperandAt(int index, HValue* value) { RegisterUse(index, value); InternalSetOperandAt(index, value); @@ -491,6 +743,11 @@ void HValue::PrintNameTo(StringStream* stream) { } +bool HValue::HasMonomorphicJSObjectType() { + return !GetMonomorphicJSObjectMap().is_null(); +} + + bool HValue::UpdateInferredType() { HType type = CalculateInferredType(); bool result = (!type.Equals(type_)); @@ -657,10 +914,74 @@ void HInstruction::Verify() { // HValue::DataEquals. The default implementation is UNREACHABLE. We // don't actually care whether DataEquals returns true or false here. if (CheckFlag(kUseGVN)) DataEquals(this); + + // Verify that all uses are in the graph. 
+ for (HUseIterator use = uses(); !use.Done(); use.Advance()) { + if (use.value()->IsInstruction()) { + ASSERT(HInstruction::cast(use.value())->IsLinked()); + } + } } #endif +HNumericConstraint* HNumericConstraint::AddToGraph( + HValue* constrained_value, + NumericRelation relation, + HValue* related_value, + HInstruction* insertion_point) { + if (insertion_point == NULL) { + if (constrained_value->IsInstruction()) { + insertion_point = HInstruction::cast(constrained_value); + } else if (constrained_value->IsPhi()) { + insertion_point = constrained_value->block()->first(); + } else { + UNREACHABLE(); + } + } + HNumericConstraint* result = + new(insertion_point->block()->zone()) HNumericConstraint( + constrained_value, relation, related_value); + result->InsertAfter(insertion_point); + return result; +} + + +void HNumericConstraint::PrintDataTo(StringStream* stream) { + stream->Add("("); + constrained_value()->PrintNameTo(stream); + stream->Add(" %s ", relation().Mnemonic()); + related_value()->PrintNameTo(stream); + stream->Add(")"); +} + + +HInductionVariableAnnotation* HInductionVariableAnnotation::AddToGraph( + HPhi* phi, + NumericRelation relation, + int operand_index) { + HInductionVariableAnnotation* result = + new(phi->block()->zone()) HInductionVariableAnnotation(phi, relation, + operand_index); + result->InsertAfter(phi->block()->first()); + return result; +} + + +void HInductionVariableAnnotation::PrintDataTo(StringStream* stream) { + stream->Add("("); + RedefinedOperand()->PrintNameTo(stream); + stream->Add(" %s ", relation().Mnemonic()); + induction_base()->PrintNameTo(stream); + stream->Add(")"); +} + + +void HDummyUse::PrintDataTo(StringStream* stream) { + value()->PrintNameTo(stream); +} + + void HUnaryCall::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); stream->Add(" "); @@ -677,10 +998,187 @@ void HBinaryCall::PrintDataTo(StringStream* stream) { } +void HBoundsCheck::TryGuaranteeRangeChanging(RangeEvaluationContext* context) { + if (context->candidate()->ActualValue() != base()->ActualValue() || + context->scale() < scale()) { + return; + } + + // TODO(mmassi) + // Instead of checking for "same basic block" we should check for + // "dominates and postdominates". 
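// Editor's gloss (not part of the patch): the two branches below let this
// bounds check take over the missing half of the range proof. If the lower
// bound is already guaranteed by a different check in another block, the
// index can only increase, and no upper-bound guarantee exists yet, this
// check widens its own offset to the context's offset and registers itself
// as the upper-bound guarantee; the second branch is the mirrored case for
// the lower bound.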
+ if (context->upper_bound() == length() && + context->lower_bound_guarantee() != NULL && + context->lower_bound_guarantee() != this && + context->lower_bound_guarantee()->block() != block() && + offset() < context->offset() && + index_can_increase() && + context->upper_bound_guarantee() == NULL) { + offset_ = context->offset(); + SetResponsibilityForRange(DIRECTION_UPPER); + context->set_upper_bound_guarantee(this); + } else if (context->upper_bound_guarantee() != NULL && + context->upper_bound_guarantee() != this && + context->upper_bound_guarantee()->block() != block() && + offset() > context->offset() && + index_can_decrease() && + context->lower_bound_guarantee() == NULL) { + offset_ = context->offset(); + SetResponsibilityForRange(DIRECTION_LOWER); + context->set_lower_bound_guarantee(this); + } +} + + +void HBoundsCheck::ApplyIndexChange() { + if (skip_check()) return; + + DecompositionResult decomposition; + bool index_is_decomposable = index()->TryDecompose(&decomposition); + if (index_is_decomposable) { + ASSERT(decomposition.base() == base()); + if (decomposition.offset() == offset() && + decomposition.scale() == scale()) return; + } else { + return; + } + + ReplaceAllUsesWith(index()); + + HValue* current_index = decomposition.base(); + int actual_offset = decomposition.offset() + offset(); + int actual_scale = decomposition.scale() + scale(); + + if (actual_offset != 0) { + HConstant* add_offset = new(block()->graph()->zone()) HConstant( + actual_offset, index()->representation()); + add_offset->InsertBefore(this); + HInstruction* add = HAdd::New(block()->graph()->zone(), + block()->graph()->GetInvalidContext(), current_index, add_offset); + add->InsertBefore(this); + add->AssumeRepresentation(index()->representation()); + current_index = add; + } + + if (actual_scale != 0) { + HConstant* sar_scale = new(block()->graph()->zone()) HConstant( + actual_scale, index()->representation()); + sar_scale->InsertBefore(this); + HInstruction* sar = HSar::New(block()->graph()->zone(), + block()->graph()->GetInvalidContext(), current_index, sar_scale); + sar->InsertBefore(this); + sar->AssumeRepresentation(index()->representation()); + current_index = sar; + } + + SetOperandAt(0, current_index); + + base_ = NULL; + offset_ = 0; + scale_ = 0; + responsibility_direction_ = DIRECTION_NONE; +} + + +void HBoundsCheck::AddInformativeDefinitions() { + // TODO(mmassi): Executing this code during AddInformativeDefinitions + // is a hack. Move it to some other HPhase. + if (FLAG_array_bounds_checks_elimination) { + if (index()->TryGuaranteeRange(length())) { + set_skip_check(true); + } + if (DetectCompoundIndex()) { + HBoundsCheckBaseIndexInformation* base_index_info = + new(block()->graph()->zone()) + HBoundsCheckBaseIndexInformation(this); + base_index_info->InsertAfter(this); + } + } +} + + +bool HBoundsCheck::IsRelationTrueInternal(NumericRelation relation, + HValue* related_value, + int offset, + int scale) { + if (related_value == length()) { + // A HBoundsCheck is smaller than the length it compared against. + return NumericRelation::Lt().CompoundImplies(relation, 0, 0, offset, scale); + } else if (related_value == block()->graph()->GetConstant0()) { + // A HBoundsCheck is greater than or equal to zero. 
+ return NumericRelation::Ge().CompoundImplies(relation, 0, 0, offset, scale); + } else { + return false; + } +} + + void HBoundsCheck::PrintDataTo(StringStream* stream) { index()->PrintNameTo(stream); stream->Add(" "); length()->PrintNameTo(stream); + if (base() != NULL && (offset() != 0 || scale() != 0)) { + stream->Add(" base: (("); + if (base() != index()) { + index()->PrintNameTo(stream); + } else { + stream->Add("index"); + } + stream->Add(" + %d) >> %d)", offset(), scale()); + } + if (skip_check()) { + stream->Add(" [DISABLED]"); + } +} + + +void HBoundsCheck::InferRepresentation(HInferRepresentation* h_infer) { + ASSERT(CheckFlag(kFlexibleRepresentation)); + Representation r; + HValue* actual_length = length()->ActualValue(); + HValue* actual_index = index()->ActualValue(); + if (key_mode_ == DONT_ALLOW_SMI_KEY || + !actual_length->representation().IsTagged()) { + r = Representation::Integer32(); + } else if (actual_index->representation().IsTagged() || + (actual_index->IsConstant() && + HConstant::cast(actual_index)->HasSmiValue())) { + // If the index is tagged, or a constant that holds a Smi, allow the length + // to be tagged, since it is usually already tagged from loading it out of + // the length field of a JSArray. This allows for direct comparison without + // untagging. + r = Representation::Tagged(); + } else { + r = Representation::Integer32(); + } + UpdateRepresentation(r, h_infer, "boundscheck"); +} + + +bool HBoundsCheckBaseIndexInformation::IsRelationTrueInternal( + NumericRelation relation, + HValue* related_value, + int offset, + int scale) { + if (related_value == bounds_check()->length()) { + return NumericRelation::Lt().CompoundImplies( + relation, + bounds_check()->offset(), bounds_check()->scale(), offset, scale); + } else if (related_value == block()->graph()->GetConstant0()) { + return NumericRelation::Ge().CompoundImplies( + relation, + bounds_check()->offset(), bounds_check()->scale(), offset, scale); + } else { + return false; + } +} + + +void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) { + stream->Add("base: "); + base_index()->PrintNameTo(stream); + stream->Add(", check: "); + base_index()->PrintNameTo(stream); } @@ -768,6 +1266,27 @@ void HIsNilAndBranch::PrintDataTo(StringStream* stream) { void HReturn::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); + stream->Add(" (pop "); + parameter_count()->PrintNameTo(stream); + stream->Add(" values)"); +} + + +Representation HBranch::observed_input_representation(int index) { + static const ToBooleanStub::Types tagged_types( + ToBooleanStub::UNDEFINED | + ToBooleanStub::NULL_TYPE | + ToBooleanStub::SPEC_OBJECT | + ToBooleanStub::STRING); + if (expected_input_types_.ContainsAnyOf(tagged_types)) { + return Representation::Tagged(); + } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) { + return Representation::Double(); + } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) { + return Representation::Integer32(); + } else { + return Representation::None(); + } } @@ -866,16 +1385,6 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) { } -HValue* HConstant::Canonicalize() { - return HasNoUses() ? NULL : this; -} - - -HValue* HTypeof::Canonicalize() { - return HasNoUses() ? NULL : this; -} - - HValue* HBitwise::Canonicalize() { if (!representation().IsInteger32()) return this; // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x. 
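Editor's note: the HBitwise::Canonicalize context above relies on the int32 identities x & -1 == x, x | 0 == x and x ^ 0 == x to drop no-op bitwise operations. A minimal, self-contained illustration of those identities and of the corresponding folding predicate follows; IsBitwiseIdentity is a hypothetical helper for this sketch, not a V8 function.

#include <cassert>
#include <cstdint>

// True when "x op constant" is guaranteed to equal x for any int32 x,
// so the whole operation can be replaced by x itself.
static bool IsBitwiseIdentity(char op, int32_t constant) {
  switch (op) {
    case '&': return constant == -1;  // x & -1 == x (all bits set)
    case '|': return constant == 0;   // x | 0  == x
    case '^': return constant == 0;   // x ^ 0  == x
    default:  return false;
  }
}

int main() {
  int32_t x = -123456;
  assert((x & -1) == x);
  assert((x | 0) == x);
  assert((x ^ 0) == x);
  assert(IsBitwiseIdentity('&', -1) && !IsBitwiseIdentity('&', 0));
  return 0;
}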
@@ -942,6 +1451,12 @@ void HTypeof::PrintDataTo(StringStream* stream) { } +void HForceRepresentation::PrintDataTo(StringStream* stream) { + stream->Add("%s ", representation().Mnemonic()); + value()->PrintNameTo(stream); +} + + void HChange::PrintDataTo(StringStream* stream) { HUnaryOperation::PrintDataTo(stream); stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic()); @@ -954,8 +1469,10 @@ void HChange::PrintDataTo(StringStream* stream) { void HJSArrayLength::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); - stream->Add(" "); - typecheck()->PrintNameTo(stream); + if (HasTypeCheck()) { + stream->Add(" "); + typecheck()->PrintNameTo(stream); + } } @@ -1022,10 +1539,9 @@ HValue* HCheckInstanceType::Canonicalize() { value()->type().IsString()) { return NULL; } - if (check_ == IS_SYMBOL && - value()->IsConstant() && - HConstant::cast(value())->handle()->IsSymbol()) { - return NULL; + + if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) { + if (HConstant::cast(value())->HasInternalizedStringValue()) return NULL; } return this; } @@ -1055,9 +1571,9 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) { *mask = kIsNotStringMask; *tag = kStringTag; return; - case IS_SYMBOL: - *mask = kIsSymbolMask; - *tag = kSymbolTag; + case IS_INTERNALIZED_STRING: + *mask = kIsInternalizedMask; + *tag = kInternalizedTag; return; default: UNREACHABLE(); @@ -1065,10 +1581,32 @@ void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) { } +void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect, + HValue* dominator) { + ASSERT(side_effect == kChangesMaps); + // TODO(mstarzinger): For now we specialize on HStoreNamedField, but once + // type information is rich enough we should generalize this to any HType + // for which the map is known. + if (HasNoUses() && dominator->IsStoreNamedField()) { + HStoreNamedField* store = HStoreNamedField::cast(dominator); + Handle<Map> map = store->transition(); + if (map.is_null() || store->object() != value()) return; + for (int i = 0; i < map_set()->length(); i++) { + if (map.is_identical_to(map_set()->at(i))) { + DeleteAndReplaceWith(NULL); + return; + } + } + } +} + + void HLoadElements::PrintDataTo(StringStream* stream) { value()->PrintNameTo(stream); - stream->Add(" "); - typecheck()->PrintNameTo(stream); + if (HasTypeCheck()) { + stream->Add(" "); + typecheck()->PrintNameTo(stream); + } } @@ -1093,7 +1631,7 @@ const char* HCheckInstanceType::GetCheckName() { case IS_SPEC_OBJECT: return "object"; case IS_JS_ARRAY: return "array"; case IS_STRING: return "string"; - case IS_SYMBOL: return "symbol"; + case IS_INTERNALIZED_STRING: return "internalized_string"; } UNREACHABLE(); return ""; @@ -1106,7 +1644,9 @@ void HCheckInstanceType::PrintDataTo(StringStream* stream) { void HCheckPrototypeMaps::PrintDataTo(StringStream* stream) { - stream->Add("[receiver_prototype=%p,holder=%p]", *prototype(), *holder()); + stream->Add("[receiver_prototype=%p,holder=%p]%s", + *prototypes_.first(), *prototypes_.last(), + CanOmitPrototypeChecks() ? 
" (omitted)" : ""); } @@ -1261,6 +1801,11 @@ Range* HMod::InferRange(Zone* zone) { if (a->CanBeMinusZero() || a->CanBeNegative()) { result->set_can_be_minus_zero(true); } + + if (right()->range()->Includes(-1) && left()->range()->Includes(kMinInt)) { + SetFlag(HValue::kCanOverflow); + } + if (!right()->range()->CanBeZero()) { ClearFlag(HValue::kCanBeDivByZero); } @@ -1271,6 +1816,60 @@ Range* HMod::InferRange(Zone* zone) { } +void HPhi::AddInformativeDefinitions() { + if (OperandCount() == 2) { + // If one of the operands is an OSR block give up (this cannot be an + // induction variable). + if (OperandAt(0)->block()->is_osr_entry() || + OperandAt(1)->block()->is_osr_entry()) return; + + for (int operand_index = 0; operand_index < 2; operand_index++) { + int other_operand_index = (operand_index + 1) % 2; + + static NumericRelation relations[] = { + NumericRelation::Ge(), + NumericRelation::Le() + }; + + // Check if this phi is an induction variable. If, e.g., we know that + // its first input is greater than the phi itself, then that must be + // the back edge, and the phi is always greater than its second input. + for (int relation_index = 0; relation_index < 2; relation_index++) { + if (OperandAt(operand_index)->IsRelationTrue(relations[relation_index], + this)) { + HInductionVariableAnnotation::AddToGraph(this, + relations[relation_index], + other_operand_index); + } + } + } + } +} + + +bool HPhi::IsRelationTrueInternal(NumericRelation relation, + HValue* other, + int offset, + int scale) { + if (CheckFlag(kNumericConstraintEvaluationInProgress)) return false; + + SetFlag(kNumericConstraintEvaluationInProgress); + bool result = true; + for (int i = 0; i < OperandCount(); i++) { + // Skip OSR entry blocks + if (OperandAt(i)->block()->is_osr_entry()) continue; + + if (!OperandAt(i)->IsRelationTrue(relation, other, offset, scale)) { + result = false; + break; + } + } + ClearFlag(kNumericConstraintEvaluationInProgress); + + return result; +} + + Range* HMathMinMax::InferRange(Zone* zone) { if (representation().IsInteger32()) { Range* a = left()->range(); @@ -1356,15 +1955,11 @@ void HPhi::InitRealUses(int phi_id) { for (HUseIterator it(uses()); !it.Done(); it.Advance()) { HValue* value = it.value(); if (!value->IsPhi()) { - Representation rep = value->ObservedInputRepresentation(it.index()); + Representation rep = value->observed_input_representation(it.index()); non_phi_uses_[rep.kind()] += value->LoopWeight(); if (FLAG_trace_representation) { - PrintF("%d %s is used by %d %s as %s\n", - this->id(), - this->Mnemonic(), - value->id(), - value->Mnemonic(), - rep.Mnemonic()); + PrintF("#%d Phi is used by real #%d %s as %s\n", + id(), value->id(), value->Mnemonic(), rep.Mnemonic()); } } } @@ -1373,11 +1968,8 @@ void HPhi::InitRealUses(int phi_id) { void HPhi::AddNonPhiUsesFrom(HPhi* other) { if (FLAG_trace_representation) { - PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n", - this->id(), - this->Mnemonic(), - other->id(), - other->Mnemonic(), + PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n", + id(), other->id(), other->non_phi_uses_[Representation::kInteger32], other->non_phi_uses_[Representation::kDouble], other->non_phi_uses_[Representation::kTagged]); @@ -1396,9 +1988,20 @@ void HPhi::AddIndirectUsesTo(int* dest) { } -void HPhi::ResetInteger32Uses() { - non_phi_uses_[Representation::kInteger32] = 0; - indirect_uses_[Representation::kInteger32] = 0; +void HSimulate::MergeInto(HSimulate* other) { + for (int i = 0; i < values_.length(); ++i) { + HValue* value = values_[i]; + 
if (HasAssignedIndexAt(i)) { + other->AddAssignedValue(GetAssignedIndexAt(i), value); + } else { + if (other->pop_count_ > 0) { + other->pop_count_--; + } else { + other->AddPushedValue(value); + } + } + } + other->pop_count_ += pop_count(); } @@ -1407,7 +2010,7 @@ void HSimulate::PrintDataTo(StringStream* stream) { if (pop_count_ > 0) stream->Add(" pop %d", pop_count_); if (values_.length() > 0) { if (pop_count_ > 0) stream->Add(" /"); - for (int i = 0; i < values_.length(); ++i) { + for (int i = values_.length() - 1; i >= 0; --i) { if (i > 0) stream->Add(","); if (HasAssignedIndexAt(i)) { stream->Add(" var[%d] = ", GetAssignedIndexAt(i)); @@ -1443,89 +2046,111 @@ static bool IsInteger32(double value) { HConstant::HConstant(Handle<Object> handle, Representation r) - : handle_(handle), - has_int32_value_(false), - has_double_value_(false) { - set_representation(r); - SetFlag(kUseGVN); + : handle_(handle), + has_int32_value_(false), + has_double_value_(false), + is_internalized_string_(false), + boolean_value_(handle->BooleanValue()) { if (handle_->IsNumber()) { double n = handle_->Number(); has_int32_value_ = IsInteger32(n); int32_value_ = DoubleToInt32(n); double_value_ = n; has_double_value_ = true; + } else { + type_from_value_ = HType::TypeFromValue(handle_); + is_internalized_string_ = handle_->IsInternalizedString(); + } + if (r.IsNone()) { + if (has_int32_value_) { + r = Representation::Integer32(); + } else if (has_double_value_) { + r = Representation::Double(); + } else { + r = Representation::Tagged(); + } } + Initialize(r); } -HConstant::HConstant(int32_t integer_value, Representation r) +HConstant::HConstant(Handle<Object> handle, + Representation r, + HType type, + bool is_internalize_string, + bool boolean_value) + : handle_(handle), + has_int32_value_(false), + has_double_value_(false), + is_internalized_string_(is_internalize_string), + boolean_value_(boolean_value), + type_from_value_(type) { + ASSERT(!handle.is_null()); + ASSERT(!type.IsUninitialized()); + ASSERT(!type.IsTaggedNumber()); + Initialize(r); +} + + +HConstant::HConstant(int32_t integer_value, + Representation r, + Handle<Object> optional_handle) : has_int32_value_(true), has_double_value_(true), + is_internalized_string_(false), + boolean_value_(integer_value != 0), int32_value_(integer_value), double_value_(FastI2D(integer_value)) { - set_representation(r); - SetFlag(kUseGVN); + Initialize(r); } -HConstant::HConstant(double double_value, Representation r) +HConstant::HConstant(double double_value, + Representation r, + Handle<Object> optional_handle) : has_int32_value_(IsInteger32(double_value)), has_double_value_(true), + is_internalized_string_(false), + boolean_value_(double_value != 0 && !isnan(double_value)), int32_value_(DoubleToInt32(double_value)), double_value_(double_value) { + Initialize(r); +} + + +void HConstant::Initialize(Representation r) { set_representation(r); SetFlag(kUseGVN); + if (representation().IsInteger32()) { + ClearGVNFlag(kDependsOnOsrEntries); + } } HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const { if (r.IsInteger32() && !has_int32_value_) return NULL; if (r.IsDouble() && !has_double_value_) return NULL; - if (handle_.is_null()) { - ASSERT(has_int32_value_ || has_double_value_); - if (has_int32_value_) return new(zone) HConstant(int32_value_, r); - return new(zone) HConstant(double_value_, r); - } - return new(zone) HConstant(handle_, r); + if (has_int32_value_) return new(zone) HConstant(int32_value_, r, handle_); + if (has_double_value_) return 
new(zone) HConstant(double_value_, r, handle_); + ASSERT(!handle_.is_null()); + return new(zone) HConstant( + handle_, r, type_from_value_, is_internalized_string_, boolean_value_); } HConstant* HConstant::CopyToTruncatedInt32(Zone* zone) const { if (has_int32_value_) { - if (handle_.is_null()) { - return new(zone) HConstant(int32_value_, Representation::Integer32()); - } else { - // Re-use the existing Handle if possible. - return new(zone) HConstant(handle_, Representation::Integer32()); - } - } else if (has_double_value_) { - return new(zone) HConstant(DoubleToInt32(double_value_), - Representation::Integer32()); - } else { - return NULL; - } -} - - -bool HConstant::ToBoolean() { - // Converts the constant's boolean value according to - // ECMAScript section 9.2 ToBoolean conversion. - if (HasInteger32Value()) return Integer32Value() != 0; - if (HasDoubleValue()) { - double v = DoubleValue(); - return v != 0 && !isnan(v); + return new(zone) HConstant( + int32_value_, Representation::Integer32(), handle_); } - Handle<Object> literal = handle(); - if (literal->IsTrue()) return true; - if (literal->IsFalse()) return false; - if (literal->IsUndefined()) return false; - if (literal->IsNull()) return false; - if (literal->IsString() && String::cast(*literal)->length() == 0) { - return false; + if (has_double_value_) { + return new(zone) HConstant( + DoubleToInt32(double_value_), Representation::Integer32(), handle_); } - return true; + return NULL; } + void HConstant::PrintDataTo(StringStream* stream) { if (has_int32_value_) { stream->Add("%d ", int32_value_); @@ -1553,6 +2178,60 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) { } +void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) { + ASSERT(CheckFlag(kFlexibleRepresentation)); + Representation new_rep = RepresentationFromInputs(); + UpdateRepresentation(new_rep, h_infer, "inputs"); + // When the operation has information about its own output type, don't look + // at uses. + if (!observed_output_representation_.IsNone()) return; + new_rep = RepresentationFromUses(); + UpdateRepresentation(new_rep, h_infer, "uses"); +} + + +Representation HBinaryOperation::RepresentationFromInputs() { + // Determine the worst case of observed input representations and + // the currently assumed output representation. + Representation rep = representation(); + if (observed_output_representation_.is_more_general_than(rep)) { + rep = observed_output_representation_; + } + for (int i = 1; i <= 2; ++i) { + Representation input_rep = observed_input_representation(i); + if (input_rep.is_more_general_than(rep)) rep = input_rep; + } + // If any of the actual input representation is more general than what we + // have so far but not Tagged, use that representation instead. + Representation left_rep = left()->representation(); + Representation right_rep = right()->representation(); + + if (left_rep.is_more_general_than(rep) && + left()->CheckFlag(kFlexibleRepresentation)) { + rep = left_rep; + } + if (right_rep.is_more_general_than(rep) && + right()->CheckFlag(kFlexibleRepresentation)) { + rep = right_rep; + } + return rep; +} + + +void HBinaryOperation::AssumeRepresentation(Representation r) { + set_observed_input_representation(r, r); + HValue::AssumeRepresentation(r); +} + + +void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) { + ASSERT(CheckFlag(kFlexibleRepresentation)); + Representation new_rep = RepresentationFromInputs(); + UpdateRepresentation(new_rep, h_infer, "inputs"); + // Do not care about uses. 
+} + + Range* HBitwise::InferRange(Zone* zone) { if (op() == Token::BIT_XOR) return HValue::InferRange(zone); const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff); @@ -1629,7 +2308,7 @@ Range* HShl::InferRange(Zone* zone) { } -Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) { +Range* HLoadKeyed::InferRange(Zone* zone) { switch (elements_kind()) { case EXTERNAL_PIXEL_ELEMENTS: return new(zone) Range(0, 255); @@ -1661,6 +2340,16 @@ void HStringCompareAndBranch::PrintDataTo(StringStream* stream) { } +void HCompareIDAndBranch::AddInformativeDefinitions() { + NumericRelation r = NumericRelation::FromToken(token()); + if (r.IsNone()) return; + + HNumericConstraint::AddToGraph(left(), r, right(), SuccessorAt(0)->first()); + HNumericConstraint::AddToGraph( + left(), r.Negated(), right(), SuccessorAt(1)->first()); +} + + void HCompareIDAndBranch::PrintDataTo(StringStream* stream) { stream->Add(Token::Name(token())); stream->Add(" "); @@ -1684,9 +2373,19 @@ void HGoto::PrintDataTo(StringStream* stream) { } -void HCompareIDAndBranch::SetInputRepresentation(Representation r) { - input_representation_ = r; - if (r.IsDouble()) { +void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) { + Representation rep = Representation::None(); + Representation left_rep = left()->representation(); + Representation right_rep = right()->representation(); + bool observed_integers = + observed_input_representation(0).IsInteger32() && + observed_input_representation(1).IsInteger32(); + bool inputs_are_not_doubles = + !left_rep.IsDouble() && !right_rep.IsDouble(); + if (observed_integers && inputs_are_not_doubles) { + rep = Representation::Integer32(); + } else { + rep = Representation::Double(); // According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, === // and !=) have special handling of undefined, e.g. undefined == undefined // is 'true'. 
Relational comparisons have a different semantic, first @@ -1703,9 +2402,8 @@ void HCompareIDAndBranch::SetInputRepresentation(Representation r) { if (!Token::IsOrderedRelationalCompareOp(token_)) { SetFlag(kDeoptimizeOnUndefined); } - } else { - ASSERT(r.IsInteger32()); } + ChangeRepresentation(rep); } @@ -1856,38 +2554,64 @@ void HLoadNamedGeneric::PrintDataTo(StringStream* stream) { } -void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); +void HLoadKeyed::PrintDataTo(StringStream* stream) { + if (!is_external()) { + elements()->PrintNameTo(stream); + } else { + ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && + elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND); + elements()->PrintNameTo(stream); + stream->Add("."); + stream->Add(ElementsKindToString(elements_kind())); + } + stream->Add("["); key()->PrintNameTo(stream); - stream->Add("] "); - dependency()->PrintNameTo(stream); + if (IsDehoisted()) { + stream->Add(" + %d]", index_offset()); + } else { + stream->Add("]"); + } + + if (HasDependency()) { + stream->Add(" "); + dependency()->PrintNameTo(stream); + } + if (RequiresHoleCheck()) { stream->Add(" check_hole"); } } -bool HLoadKeyedFastElement::RequiresHoleCheck() const { +bool HLoadKeyed::UsesMustHandleHole() const { if (IsFastPackedElementsKind(elements_kind())) { return false; } + if (hole_mode() == ALLOW_RETURN_HOLE) return true; + + if (IsFastDoubleElementsKind(elements_kind())) { + return false; + } + for (HUseIterator it(uses()); !it.Done(); it.Advance()) { HValue* use = it.value(); - if (!use->IsChange()) return true; + if (!use->IsChange()) { + return false; + } } - return false; + return true; } -void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { - elements()->PrintNameTo(stream); - stream->Add("["); - key()->PrintNameTo(stream); - stream->Add("] "); - dependency()->PrintNameTo(stream); +bool HLoadKeyed::RequiresHoleCheck() const { + if (IsFastPackedElementsKind(elements_kind())) { + return false; + } + + return !UsesMustHandleHole(); } @@ -1903,21 +2627,22 @@ HValue* HLoadKeyedGeneric::Canonicalize() { // Recognize generic keyed loads that use property name generated // by for-in statement as a key and rewrite them into fast property load // by index. 
- if (key()->IsLoadKeyedFastElement()) { - HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key()); - if (key_load->object()->IsForInCacheArray()) { + if (key()->IsLoadKeyed()) { + HLoadKeyed* key_load = HLoadKeyed::cast(key()); + if (key_load->elements()->IsForInCacheArray()) { HForInCacheArray* names_cache = - HForInCacheArray::cast(key_load->object()); + HForInCacheArray::cast(key_load->elements()); if (names_cache->enumerable() == object()) { HForInCacheArray* index_cache = names_cache->index_cache(); HCheckMapValue* map_check = new(block()->zone()) HCheckMapValue(object(), names_cache->map()); - HInstruction* index = new(block()->zone()) HLoadKeyedFastElement( + HInstruction* index = new(block()->zone()) HLoadKeyed( index_cache, key_load->key(), - key_load->key()); + key_load->key(), + key_load->elements_kind()); map_check->InsertBefore(this); index->InsertBefore(this); HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex( @@ -1932,56 +2657,6 @@ HValue* HLoadKeyedGeneric::Canonicalize() { } -void HLoadKeyedSpecializedArrayElement::PrintDataTo( - StringStream* stream) { - external_pointer()->PrintNameTo(stream); - stream->Add("."); - switch (elements_kind()) { - case EXTERNAL_BYTE_ELEMENTS: - stream->Add("byte"); - break; - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - stream->Add("u_byte"); - break; - case EXTERNAL_SHORT_ELEMENTS: - stream->Add("short"); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - stream->Add("u_short"); - break; - case EXTERNAL_INT_ELEMENTS: - stream->Add("int"); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - stream->Add("u_int"); - break; - case EXTERNAL_FLOAT_ELEMENTS: - stream->Add("float"); - break; - case EXTERNAL_DOUBLE_ELEMENTS: - stream->Add("double"); - break; - case EXTERNAL_PIXEL_ELEMENTS: - stream->Add("pixel"); - break; - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - stream->Add("["); - key()->PrintNameTo(stream); - stream->Add("] "); - dependency()->PrintNameTo(stream); -} - - void HStoreNamedGeneric::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); stream->Add("."); @@ -2008,20 +2683,25 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) { } -void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) { - object()->PrintNameTo(stream); - stream->Add("["); - key()->PrintNameTo(stream); - stream->Add("] = "); - value()->PrintNameTo(stream); -} - +void HStoreKeyed::PrintDataTo(StringStream* stream) { + if (!is_external()) { + elements()->PrintNameTo(stream); + } else { + elements()->PrintNameTo(stream); + stream->Add("."); + stream->Add(ElementsKindToString(elements_kind())); + ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND && + elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND); + } -void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { - elements()->PrintNameTo(stream); stream->Add("["); key()->PrintNameTo(stream); - stream->Add("] = "); + if (IsDehoisted()) { + stream->Add(" + %d] = ", index_offset()); + } else { + stream->Add("] = "); + } + value()->PrintNameTo(stream); } @@ -2035,56 +2715,6 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) { } -void HStoreKeyedSpecializedArrayElement::PrintDataTo( - StringStream* stream) { - external_pointer()->PrintNameTo(stream); - stream->Add("."); - switch (elements_kind()) { - case 
EXTERNAL_BYTE_ELEMENTS: - stream->Add("byte"); - break; - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - stream->Add("u_byte"); - break; - case EXTERNAL_SHORT_ELEMENTS: - stream->Add("short"); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - stream->Add("u_short"); - break; - case EXTERNAL_INT_ELEMENTS: - stream->Add("int"); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - stream->Add("u_int"); - break; - case EXTERNAL_FLOAT_ELEMENTS: - stream->Add("float"); - break; - case EXTERNAL_DOUBLE_ELEMENTS: - stream->Add("double"); - break; - case EXTERNAL_PIXEL_ELEMENTS: - stream->Add("pixel"); - break; - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - stream->Add("["); - key()->PrintNameTo(stream); - stream->Add("] = "); - value()->PrintNameTo(stream); -} - - void HTransitionElementsKind::PrintDataTo(StringStream* stream) { object()->PrintNameTo(stream); ElementsKind from_kind = original_map()->elements_kind(); @@ -2119,6 +2749,12 @@ void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) { } +void HInnerAllocatedObject::PrintDataTo(StringStream* stream) { + base_object()->PrintNameTo(stream); + stream->Add(" offset %d", offset()); +} + + void HStoreGlobalCell::PrintDataTo(StringStream* stream) { stream->Add("[%p] = ", *cell()); value()->PrintNameTo(stream); @@ -2175,6 +2811,15 @@ HType HCheckSmi::CalculateInferredType() { } +void HCheckSmiOrInt32::InferRepresentation(HInferRepresentation* h_infer) { + ASSERT(CheckFlag(kFlexibleRepresentation)); + ASSERT(UseCount() == 1); + HUseIterator use = uses(); + Representation r = use.value()->RequiredInputRepresentation(use.index()); + UpdateRepresentation(r, h_infer, "checksmiorint32"); +} + + HType HPhi::CalculateInferredType() { HType result = HType::Uninitialized(); for (int i = 0; i < OperandCount(); ++i) { @@ -2190,7 +2835,8 @@ HType HConstant::CalculateInferredType() { return Smi::IsValid(int32_value_) ? HType::Smi() : HType::HeapNumber(); } if (has_double_value_) return HType::HeapNumber(); - return HType::TypeFromValue(handle_); + ASSERT(!type_from_value_.IsUninitialized()); + return type_from_value_; } @@ -2255,6 +2901,17 @@ HType HAllocateObject::CalculateInferredType() { } +HType HAllocate::CalculateInferredType() { + return type_; +} + + +void HAllocate::PrintDataTo(StringStream* stream) { + size()->PrintNameTo(stream); + if (!GuaranteedInNewSpace()) stream->Add(" (pretenure)"); +} + + HType HFastLiteral::CalculateInferredType() { // TODO(mstarzinger): Be smarter, could also be JSArray here. return HType::JSObject(); @@ -2375,36 +3032,41 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) { } -bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() { - // If value was loaded from unboxed double backing store or - // converted from an integer then we don't have to canonicalize it. - if (value()->IsLoadKeyedFastDoubleElement() || - (value()->IsChange() && HChange::cast(value())->from().IsInteger32())) { +bool HStoreKeyed::NeedsCanonicalization() { + // If value is an integer or smi or comes from the result of a keyed load or + // constant then it is either be a non-hole value or in the case of a constant + // the hole is only being stored explicitly: no need for canonicalization. 
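A short sketch of what "canonicalization" means for a value stored into an unboxed double backing store, under the assumption (as in V8's FixedDoubleArray) that the hole is encoded as one reserved NaN bit pattern: every other NaN must be collapsed to the canonical quiet NaN so user data can never alias the hole. CanonicalizeForDoubleStore is an illustrative helper, not the hydrogen code.

#include <cmath>
#include <limits>

static double CanonicalizeForDoubleStore(double v) {
  if (std::isnan(v)) {
    // Collapse every NaN payload to the single canonical quiet NaN.
    return std::numeric_limits<double>::quiet_NaN();
  }
  return v;  // ordinary values (including +/-0 and infinities) pass through
}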
+ if (value()->IsLoadKeyed() || value()->IsConstant()) { return false; } + + if (value()->IsChange()) { + if (HChange::cast(value())->from().IsInteger32()) { + return false; + } + if (HChange::cast(value())->value()->type().IsSmi()) { + return false; + } + } return true; } #define H_CONSTANT_INT32(val) \ -new(zone) HConstant(FACTORY->NewNumberFromInt(val, TENURED), \ - Representation::Integer32()) +new(zone) HConstant(static_cast<int32_t>(val), Representation::Integer32()) #define H_CONSTANT_DOUBLE(val) \ -new(zone) HConstant(FACTORY->NewNumber(val, TENURED), \ - Representation::Double()) +new(zone) HConstant(static_cast<double>(val), Representation::Double()) #define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op) \ -HInstruction* HInstr::New##HInstr(Zone* zone, \ - HValue* context, \ - HValue* left, \ - HValue* right) { \ - if (left->IsConstant() && right->IsConstant()) { \ +HInstruction* HInstr::New( \ + Zone* zone, HValue* context, HValue* left, HValue* right) { \ + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \ HConstant* c_left = HConstant::cast(left); \ HConstant* c_right = HConstant::cast(right); \ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \ double double_res = c_left->DoubleValue() op c_right->DoubleValue(); \ if (TypeInfo::IsInt32Double(double_res)) { \ - return H_CONSTANT_INT32(static_cast<int32_t>(double_res)); \ + return H_CONSTANT_INT32(double_res); \ } \ return H_CONSTANT_DOUBLE(double_res); \ } \ @@ -2420,11 +3082,170 @@ DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -) #undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR -HInstruction* HMod::NewHMod(Zone* zone, - HValue* context, - HValue* left, - HValue* right) { - if (left->IsConstant() && right->IsConstant()) { +HInstruction* HStringAdd::New( + Zone* zone, HValue* context, HValue* left, HValue* right) { + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { + HConstant* c_right = HConstant::cast(right); + HConstant* c_left = HConstant::cast(left); + if (c_left->HasStringValue() && c_right->HasStringValue()) { + return new(zone) HConstant(FACTORY->NewConsString(c_left->StringValue(), + c_right->StringValue()), + Representation::Tagged()); + } + } + return new(zone) HStringAdd(context, left, right); +} + + +HInstruction* HStringCharFromCode::New( + Zone* zone, HValue* context, HValue* char_code) { + if (FLAG_fold_constants && char_code->IsConstant()) { + HConstant* c_code = HConstant::cast(char_code); + Isolate* isolate = Isolate::Current(); + if (c_code->HasNumberValue()) { + if (isfinite(c_code->DoubleValue())) { + uint32_t code = c_code->NumberValueAsInteger32() & 0xffff; + return new(zone) HConstant(LookupSingleCharacterStringFromCode(isolate, + code), + Representation::Tagged()); + } + return new(zone) HConstant(isolate->factory()->empty_string(), + Representation::Tagged()); + } + } + return new(zone) HStringCharFromCode(context, char_code); +} + + +HInstruction* HStringLength::New(Zone* zone, HValue* string) { + if (FLAG_fold_constants && string->IsConstant()) { + HConstant* c_string = HConstant::cast(string); + if (c_string->HasStringValue()) { + return H_CONSTANT_INT32(c_string->StringValue()->length()); + } + } + return new(zone) HStringLength(string); +} + + +HInstruction* HUnaryMathOperation::New( + Zone* zone, HValue* context, HValue* value, BuiltinFunctionId op) { + do { + if (!FLAG_fold_constants) break; + if (!value->IsConstant()) break; + HConstant* constant = HConstant::cast(value); + if (!constant->HasNumberValue()) break; + double d = 
constant->DoubleValue(); + if (isnan(d)) { // NaN poisons everything. + return H_CONSTANT_DOUBLE(OS::nan_value()); + } + if (isinf(d)) { // +Infinity and -Infinity. + switch (op) { + case kMathSin: + case kMathCos: + case kMathTan: + return H_CONSTANT_DOUBLE(OS::nan_value()); + case kMathExp: + return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0); + case kMathLog: + case kMathSqrt: + return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value()); + case kMathPowHalf: + case kMathAbs: + return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d); + case kMathRound: + case kMathFloor: + return H_CONSTANT_DOUBLE(d); + default: + UNREACHABLE(); + break; + } + } + switch (op) { + case kMathSin: + return H_CONSTANT_DOUBLE(fast_sin(d)); + case kMathCos: + return H_CONSTANT_DOUBLE(fast_cos(d)); + case kMathTan: + return H_CONSTANT_DOUBLE(fast_tan(d)); + case kMathExp: + return H_CONSTANT_DOUBLE(fast_exp(d)); + case kMathLog: + return H_CONSTANT_DOUBLE(fast_log(d)); + case kMathSqrt: + return H_CONSTANT_DOUBLE(fast_sqrt(d)); + case kMathPowHalf: + return H_CONSTANT_DOUBLE(power_double_double(d, 0.5)); + case kMathAbs: + return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d); + case kMathRound: + // -0.5 .. -0.0 round to -0.0. + if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0); + // Doubles are represented as Significant * 2 ^ Exponent. If the + // Exponent is not negative, the double value is already an integer. + if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d); + return H_CONSTANT_DOUBLE(floor(d + 0.5)); + case kMathFloor: + return H_CONSTANT_DOUBLE(floor(d)); + default: + UNREACHABLE(); + break; + } + } while (false); + return new(zone) HUnaryMathOperation(context, value, op); +} + + +HInstruction* HPower::New(Zone* zone, HValue* left, HValue* right) { + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { + HConstant* c_left = HConstant::cast(left); + HConstant* c_right = HConstant::cast(right); + if (c_left->HasNumberValue() && c_right->HasNumberValue()) { + double result = power_helper(c_left->DoubleValue(), + c_right->DoubleValue()); + return H_CONSTANT_DOUBLE(isnan(result) ? OS::nan_value() : result); + } + } + return new(zone) HPower(left, right); +} + + +HInstruction* HMathMinMax::New( + Zone* zone, HValue* context, HValue* left, HValue* right, Operation op) { + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { + HConstant* c_left = HConstant::cast(left); + HConstant* c_right = HConstant::cast(right); + if (c_left->HasNumberValue() && c_right->HasNumberValue()) { + double d_left = c_left->DoubleValue(); + double d_right = c_right->DoubleValue(); + if (op == kMathMin) { + if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right); + if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left); + if (d_left == d_right) { + // Handle +0 and -0. + return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left + : d_right); + } + } else { + if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right); + if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left); + if (d_left == d_right) { + // Handle +0 and -0. + return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right + : d_left); + } + } + // All comparisons failed, must be NaN. 
+ return H_CONSTANT_DOUBLE(OS::nan_value()); + } + } + return new(zone) HMathMinMax(context, left, right, op); +} + + +HInstruction* HMod::New( + Zone* zone, HValue* context, HValue* left, HValue* right) { + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { HConstant* c_left = HConstant::cast(left); HConstant* c_right = HConstant::cast(right); if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) { @@ -2443,21 +3264,23 @@ HInstruction* HMod::NewHMod(Zone* zone, } -HInstruction* HDiv::NewHDiv(Zone* zone, - HValue* context, - HValue* left, - HValue* right) { +HInstruction* HDiv::New( + Zone* zone, HValue* context, HValue* left, HValue* right) { // If left and right are constant values, try to return a constant value. - if (left->IsConstant() && right->IsConstant()) { + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { HConstant* c_left = HConstant::cast(left); HConstant* c_right = HConstant::cast(right); if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { if (c_right->DoubleValue() != 0) { double double_res = c_left->DoubleValue() / c_right->DoubleValue(); if (TypeInfo::IsInt32Double(double_res)) { - return H_CONSTANT_INT32(static_cast<int32_t>(double_res)); + return H_CONSTANT_INT32(double_res); } return H_CONSTANT_DOUBLE(double_res); + } else { + int sign = Double(c_left->DoubleValue()).Sign() * + Double(c_right->DoubleValue()).Sign(); // Right could be -0. + return H_CONSTANT_DOUBLE(sign * V8_INFINITY); } } } @@ -2465,12 +3288,9 @@ HInstruction* HDiv::NewHDiv(Zone* zone, } -HInstruction* HBitwise::NewHBitwise(Zone* zone, - Token::Value op, - HValue* context, - HValue* left, - HValue* right) { - if (left->IsConstant() && right->IsConstant()) { +HInstruction* HBitwise::New( + Zone* zone, Token::Value op, HValue* context, HValue* left, HValue* right) { + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { HConstant* c_left = HConstant::cast(left); HConstant* c_right = HConstant::cast(right); if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { @@ -2499,11 +3319,9 @@ HInstruction* HBitwise::NewHBitwise(Zone* zone, #define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result) \ -HInstruction* HInstr::New##HInstr(Zone* zone, \ - HValue* context, \ - HValue* left, \ - HValue* right) { \ - if (left->IsConstant() && right->IsConstant()) { \ +HInstruction* HInstr::New( \ + Zone* zone, HValue* context, HValue* left, HValue* right) { \ + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { \ HConstant* c_left = HConstant::cast(left); \ HConstant* c_right = HConstant::cast(right); \ if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { \ @@ -2522,19 +3340,16 @@ c_left->NumberValueAsInteger32() << (c_right->NumberValueAsInteger32() & 0x1f)) #undef DEFINE_NEW_H_BITWISE_INSTR -HInstruction* HShr::NewHShr(Zone* zone, - HValue* context, - HValue* left, - HValue* right) { - if (left->IsConstant() && right->IsConstant()) { +HInstruction* HShr::New( + Zone* zone, HValue* context, HValue* left, HValue* right) { + if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) { HConstant* c_left = HConstant::cast(left); HConstant* c_right = HConstant::cast(right); if ((c_left->HasNumberValue() && c_right->HasNumberValue())) { int32_t left_val = c_left->NumberValueAsInteger32(); int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f; if ((right_val == 0) && (left_val < 0)) { - return H_CONSTANT_DOUBLE( - static_cast<double>(static_cast<uint32_t>(left_val))); + return 
H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val)); } return H_CONSTANT_INT32(static_cast<uint32_t>(left_val) >> right_val); } @@ -2561,7 +3376,41 @@ void HBitwise::PrintDataTo(StringStream* stream) { } -Representation HPhi::InferredRepresentation() { +void HPhi::InferRepresentation(HInferRepresentation* h_infer) { + ASSERT(CheckFlag(kFlexibleRepresentation)); + // If there are non-Phi uses, and all of them have observed the same + // representation, than that's what this Phi is going to use. + Representation new_rep = RepresentationObservedByAllNonPhiUses(); + if (!new_rep.IsNone()) { + UpdateRepresentation(new_rep, h_infer, "unanimous use observations"); + return; + } + new_rep = RepresentationFromInputs(); + UpdateRepresentation(new_rep, h_infer, "inputs"); + new_rep = RepresentationFromUses(); + UpdateRepresentation(new_rep, h_infer, "uses"); + new_rep = RepresentationFromUseRequirements(); + UpdateRepresentation(new_rep, h_infer, "use requirements"); +} + + +Representation HPhi::RepresentationObservedByAllNonPhiUses() { + int non_phi_use_count = 0; + for (int i = Representation::kInteger32; + i < Representation::kNumRepresentations; ++i) { + non_phi_use_count += non_phi_uses_[i]; + } + if (non_phi_use_count <= 1) return Representation::None(); + for (int i = 0; i < Representation::kNumRepresentations; ++i) { + if (non_phi_uses_[i] == non_phi_use_count) { + return Representation::FromKind(static_cast<Representation::Kind>(i)); + } + } + return Representation::None(); +} + + +Representation HPhi::RepresentationFromInputs() { bool double_occurred = false; bool int32_occurred = false; for (int i = 0; i < OperandCount(); ++i) { @@ -2570,6 +3419,7 @@ Representation HPhi::InferredRepresentation() { HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value(); if (hint_value != NULL) { Representation hint = hint_value->representation(); + if (hint.IsTagged()) return hint; if (hint.IsDouble()) double_occurred = true; if (hint.IsInteger32()) int32_occurred = true; } @@ -2588,7 +3438,9 @@ Representation HPhi::InferredRepresentation() { return Representation::Tagged(); } } else { - return Representation::Tagged(); + if (value->IsPhi() && !IsConvertibleToInteger()) { + return Representation::Tagged(); + } } } } @@ -2601,6 +3453,37 @@ Representation HPhi::InferredRepresentation() { } +Representation HPhi::RepresentationFromUseRequirements() { + Representation all_uses_require = Representation::None(); + bool all_uses_require_the_same = true; + for (HUseIterator it(uses()); !it.Done(); it.Advance()) { + // We check for observed_input_representation elsewhere. + Representation use_rep = + it.value()->RequiredInputRepresentation(it.index()); + // No useful info from this use -> look at the next one. + if (use_rep.IsNone()) { + continue; + } + if (use_rep.Equals(all_uses_require)) { + continue; + } + // This use's representation contradicts what we've seen so far. + if (!all_uses_require.IsNone()) { + ASSERT(!use_rep.Equals(all_uses_require)); + all_uses_require_the_same = false; + break; + } + // Otherwise, initialize observed representation. + all_uses_require = use_rep; + } + if (all_uses_require_the_same) { + return all_uses_require; + } + + return Representation::None(); +} + + // Node-specific verification code is only included in debug mode. 
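The new phi inference above first asks whether every non-phi use observed the same representation; here is a standalone sketch of that "unanimous use observations" rule, with a simplified Kind enum standing in for Representation::Kind (names and counts array are illustrative, not the real data structures).

enum Kind { kNone = 0, kInteger32, kDouble, kTagged, kNumKinds };

// Returns the single representation observed by all non-phi uses, or kNone
// when the uses disagree or there is at most one such use (mirroring the
// shape of RepresentationObservedByAllNonPhiUses above).
static Kind UnanimousNonPhiUse(const int counts[kNumKinds]) {
  int total = 0;
  for (int i = kInteger32; i < kNumKinds; ++i) total += counts[i];
  if (total <= 1) return kNone;
  for (int i = 0; i < kNumKinds; ++i) {
    if (counts[i] == total) return static_cast<Kind>(i);
  }
  return kNone;
}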
#ifdef DEBUG @@ -2639,12 +3522,6 @@ void HCheckFunction::Verify() { ASSERT(HasNoUses()); } - -void HCheckPrototypeMaps::Verify() { - HInstruction::Verify(); - ASSERT(HasNoUses()); -} - #endif } } // namespace v8::internal diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h index 015212dd7b..f741f292e8 100644 --- a/deps/v8/src/hydrogen-instructions.h +++ b/deps/v8/src/hydrogen-instructions.h @@ -45,6 +45,7 @@ namespace internal { // Forward declarations. class HBasicBlock; class HEnvironment; +class HInferRepresentation; class HInstruction; class HLoopInformation; class HValue; @@ -63,6 +64,7 @@ class LChunkBuilder; V(AbnormalExit) \ V(AccessArgumentsAt) \ V(Add) \ + V(Allocate) \ V(AllocateObject) \ V(ApplyArguments) \ V(ArgumentsElements) \ @@ -73,6 +75,7 @@ class LChunkBuilder; V(BitNot) \ V(BlockEntry) \ V(BoundsCheck) \ + V(BoundsCheckBaseIndexInformation) \ V(Branch) \ V(CallConstantFunction) \ V(CallFunction) \ @@ -81,6 +84,7 @@ class LChunkBuilder; V(CallKnownGlobal) \ V(CallNamed) \ V(CallNew) \ + V(CallNewArray) \ V(CallRuntime) \ V(CallStub) \ V(Change) \ @@ -90,6 +94,7 @@ class LChunkBuilder; V(CheckNonSmi) \ V(CheckPrototypeMaps) \ V(CheckSmi) \ + V(CheckSmiOrInt32) \ V(ClampToUint8) \ V(ClassOfTestAndBranch) \ V(CompareIDAndBranch) \ @@ -103,6 +108,7 @@ class LChunkBuilder; V(DeleteProperty) \ V(Deoptimize) \ V(Div) \ + V(DummyUse) \ V(ElementsKind) \ V(EnterInlined) \ V(FastLiteral) \ @@ -115,9 +121,12 @@ class LChunkBuilder; V(Goto) \ V(HasCachedArrayIndexAndBranch) \ V(HasInstanceTypeAndBranch) \ + V(InductionVariableAnnotation) \ V(In) \ + V(InnerAllocatedObject) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ + V(InstanceSize) \ V(InvokeFunction) \ V(IsConstructCallAndBranch) \ V(IsNilAndBranch) \ @@ -133,10 +142,8 @@ class LChunkBuilder; V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ - V(LoadKeyedFastDoubleElement) \ - V(LoadKeyedFastElement) \ + V(LoadKeyed) \ V(LoadKeyedGeneric) \ - V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ @@ -145,6 +152,7 @@ class LChunkBuilder; V(MathMinMax) \ V(Mod) \ V(Mul) \ + V(NumericConstraint) \ V(ObjectLiteral) \ V(OsrEntry) \ V(OuterContext) \ @@ -154,7 +162,9 @@ class LChunkBuilder; V(Random) \ V(RegExpLiteral) \ V(Return) \ + V(Ror) \ V(Sar) \ + V(SeqStringSetChar) \ V(Shl) \ V(Shr) \ V(Simulate) \ @@ -163,10 +173,8 @@ class LChunkBuilder; V(StoreContextSlot) \ V(StoreGlobalCell) \ V(StoreGlobalGeneric) \ - V(StoreKeyedFastDoubleElement) \ - V(StoreKeyedFastElement) \ + V(StoreKeyed) \ V(StoreKeyedGeneric) \ - V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ V(StringAdd) \ @@ -179,6 +187,7 @@ class LChunkBuilder; V(Throw) \ V(ToFastProperties) \ V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ V(Typeof) \ V(TypeofIsAndBranch) \ V(UnaryMathOperation) \ @@ -193,6 +202,7 @@ class LChunkBuilder; V(WrapReceiver) #define GVN_TRACKED_FLAG_LIST(V) \ + V(Maps) \ V(NewSpacePromotion) #define GVN_UNTRACKED_FLAG_LIST(V) \ @@ -205,7 +215,6 @@ class LChunkBuilder; V(DoubleArrayElements) \ V(SpecializedArrayElements) \ V(GlobalVars) \ - V(Maps) \ V(ArrayLengths) \ V(ContextSlots) \ V(OsrEntries) @@ -228,11 +237,9 @@ class LChunkBuilder; #ifdef DEBUG -#define ASSERT_ALLOCATION_DISABLED do { \ - OptimizingCompilerThread* thread = \ - ISOLATE->optimizing_compiler_thread(); \ - ASSERT(thread->IsOptimizerThread() || !HEAP->IsAllocationAllowed()); \ - } while (0) +#define 
ASSERT_ALLOCATION_DISABLED \ + ASSERT(isolate()->optimizing_compiler_thread()->IsOptimizerThread() || \ + !isolate()->heap()->IsAllocationAllowed()) #else #define ASSERT_ALLOCATION_DISABLED do {} while (0) #endif @@ -311,9 +318,9 @@ class Representation { public: enum Kind { kNone, - kTagged, - kDouble, kInteger32, + kDouble, + kTagged, kExternal, kNumRepresentations }; @@ -326,10 +333,18 @@ class Representation { static Representation Double() { return Representation(kDouble); } static Representation External() { return Representation(kExternal); } + static Representation FromKind(Kind kind) { return Representation(kind); } + bool Equals(const Representation& other) { return kind_ == other.kind_; } + bool is_more_general_than(const Representation& other) { + ASSERT(kind_ != kExternal); + ASSERT(other.kind_ != kExternal); + return kind_ > other.kind_; + } + Kind kind() const { return static_cast<Kind>(kind_); } bool IsNone() const { return kind_ == kNone; } bool IsTagged() const { return kind_ == kTagged; } @@ -372,7 +387,7 @@ class HType { return HType(static_cast<Type>(type_ & other.type_)); } - bool Equals(const HType& other) { + bool Equals(const HType& other) const { return type_ == other.type_; } @@ -380,61 +395,61 @@ class HType { return Combine(other).Equals(other); } - bool IsTagged() { + bool IsTagged() const { ASSERT(type_ != kUninitialized); return ((type_ & kTagged) == kTagged); } - bool IsTaggedPrimitive() { + bool IsTaggedPrimitive() const { ASSERT(type_ != kUninitialized); return ((type_ & kTaggedPrimitive) == kTaggedPrimitive); } - bool IsTaggedNumber() { + bool IsTaggedNumber() const { ASSERT(type_ != kUninitialized); return ((type_ & kTaggedNumber) == kTaggedNumber); } - bool IsSmi() { + bool IsSmi() const { ASSERT(type_ != kUninitialized); return ((type_ & kSmi) == kSmi); } - bool IsHeapNumber() { + bool IsHeapNumber() const { ASSERT(type_ != kUninitialized); return ((type_ & kHeapNumber) == kHeapNumber); } - bool IsString() { + bool IsString() const { ASSERT(type_ != kUninitialized); return ((type_ & kString) == kString); } - bool IsBoolean() { + bool IsBoolean() const { ASSERT(type_ != kUninitialized); return ((type_ & kBoolean) == kBoolean); } - bool IsNonPrimitive() { + bool IsNonPrimitive() const { ASSERT(type_ != kUninitialized); return ((type_ & kNonPrimitive) == kNonPrimitive); } - bool IsJSArray() { + bool IsJSArray() const { ASSERT(type_ != kUninitialized); return ((type_ & kJSArray) == kJSArray); } - bool IsJSObject() { + bool IsJSObject() const { ASSERT(type_ != kUninitialized); return ((type_ & kJSObject) == kJSObject); } - bool IsUninitialized() { + bool IsUninitialized() const { return type_ == kUninitialized; } - bool IsHeapObject() { + bool IsHeapObject() const { ASSERT(type_ != kUninitialized); return IsHeapNumber() || IsString() || IsNonPrimitive(); } @@ -539,6 +554,244 @@ enum GVNFlag { #undef COUNT_FLAG }; + +class NumericRelation { + public: + enum Kind { NONE, EQ, GT, GE, LT, LE, NE }; + static const char* MnemonicFromKind(Kind kind) { + switch (kind) { + case NONE: return "NONE"; + case EQ: return "EQ"; + case GT: return "GT"; + case GE: return "GE"; + case LT: return "LT"; + case LE: return "LE"; + case NE: return "NE"; + } + UNREACHABLE(); + return NULL; + } + const char* Mnemonic() const { return MnemonicFromKind(kind_); } + + static NumericRelation None() { return NumericRelation(NONE); } + static NumericRelation Eq() { return NumericRelation(EQ); } + static NumericRelation Gt() { return NumericRelation(GT); } + static NumericRelation Ge() { 
return NumericRelation(GE); } + static NumericRelation Lt() { return NumericRelation(LT); } + static NumericRelation Le() { return NumericRelation(LE); } + static NumericRelation Ne() { return NumericRelation(NE); } + + bool IsNone() { return kind_ == NONE; } + + static NumericRelation FromToken(Token::Value token) { + switch (token) { + case Token::EQ: return Eq(); + case Token::EQ_STRICT: return Eq(); + case Token::LT: return Lt(); + case Token::GT: return Gt(); + case Token::LTE: return Le(); + case Token::GTE: return Ge(); + case Token::NE: return Ne(); + case Token::NE_STRICT: return Ne(); + default: return None(); + } + } + + // The semantics of "Reversed" is that if "x rel y" is true then also + // "y rel.Reversed() x" is true, and that rel.Reversed().Reversed() == rel. + NumericRelation Reversed() { + switch (kind_) { + case NONE: return None(); + case EQ: return Eq(); + case GT: return Lt(); + case GE: return Le(); + case LT: return Gt(); + case LE: return Ge(); + case NE: return Ne(); + } + UNREACHABLE(); + return None(); + } + + // The semantics of "Negated" is that if "x rel y" is true then also + // "!(x rel.Negated() y)" is true. + NumericRelation Negated() { + switch (kind_) { + case NONE: return None(); + case EQ: return Ne(); + case GT: return Le(); + case GE: return Lt(); + case LT: return Ge(); + case LE: return Gt(); + case NE: return Eq(); + } + UNREACHABLE(); + return None(); + } + + // The semantics of "Implies" is that if "x rel y" is true + // then also "x other_relation y" is true. + bool Implies(NumericRelation other_relation) { + switch (kind_) { + case NONE: return false; + case EQ: return (other_relation.kind_ == EQ) + || (other_relation.kind_ == GE) + || (other_relation.kind_ == LE); + case GT: return (other_relation.kind_ == GT) + || (other_relation.kind_ == GE) + || (other_relation.kind_ == NE); + case LT: return (other_relation.kind_ == LT) + || (other_relation.kind_ == LE) + || (other_relation.kind_ == NE); + case GE: return (other_relation.kind_ == GE); + case LE: return (other_relation.kind_ == LE); + case NE: return (other_relation.kind_ == NE); + } + UNREACHABLE(); + return false; + } + + // The semantics of "IsExtendable" is that if + // "rel.IsExtendable(direction)" is true then + // "x rel y" implies "(x + direction) rel y" . + bool IsExtendable(int direction) { + switch (kind_) { + case NONE: return false; + case EQ: return false; + case GT: return (direction >= 0); + case GE: return (direction >= 0); + case LT: return (direction <= 0); + case LE: return (direction <= 0); + case NE: return false; + } + UNREACHABLE(); + return false; + } + + // CompoundImplies returns true when + // "((x + my_offset) >> my_scale) rel y" implies + // "((x + other_offset) >> other_scale) other_relation y". + bool CompoundImplies(NumericRelation other_relation, + int my_offset, + int my_scale, + int other_offset = 0, + int other_scale = 0) { + return Implies(other_relation) && ComponentsImply( + my_offset, my_scale, other_offset, other_scale); + } + + private: + // ComponentsImply returns true when + // "((x + my_offset) >> my_scale) rel y" implies + // "((x + other_offset) >> other_scale) rel y". + bool ComponentsImply(int my_offset, + int my_scale, + int other_offset, + int other_scale) { + switch (kind_) { + case NONE: break; // Fall through to UNREACHABLE(). 
+ case EQ: + case NE: return my_offset == other_offset && my_scale == other_scale; + case GT: + case GE: return my_offset <= other_offset && my_scale >= other_scale; + case LT: + case LE: return my_offset >= other_offset && my_scale <= other_scale; + } + UNREACHABLE(); + return false; + } + + explicit NumericRelation(Kind kind) : kind_(kind) {} + + Kind kind_; +}; + + +class DecompositionResult BASE_EMBEDDED { + public: + DecompositionResult() : base_(NULL), offset_(0), scale_(0) {} + + HValue* base() { return base_; } + int offset() { return offset_; } + int scale() { return scale_; } + + bool Apply(HValue* other_base, int other_offset, int other_scale = 0) { + if (base_ == NULL) { + base_ = other_base; + offset_ = other_offset; + scale_ = other_scale; + return true; + } else { + if (scale_ == 0) { + base_ = other_base; + offset_ += other_offset; + scale_ = other_scale; + return true; + } else { + return false; + } + } + } + + void SwapValues(HValue** other_base, int* other_offset, int* other_scale) { + swap(&base_, other_base); + swap(&offset_, other_offset); + swap(&scale_, other_scale); + } + + private: + template <class T> void swap(T* a, T* b) { + T c(*a); + *a = *b; + *b = c; + } + + HValue* base_; + int offset_; + int scale_; +}; + + +class RangeEvaluationContext BASE_EMBEDDED { + public: + RangeEvaluationContext(HValue* value, HValue* upper); + + HValue* lower_bound() { return lower_bound_; } + HValue* lower_bound_guarantee() { return lower_bound_guarantee_; } + HValue* candidate() { return candidate_; } + HValue* upper_bound() { return upper_bound_; } + HValue* upper_bound_guarantee() { return upper_bound_guarantee_; } + int offset() { return offset_; } + int scale() { return scale_; } + + bool is_range_satisfied() { + return lower_bound_guarantee() != NULL && upper_bound_guarantee() != NULL; + } + + void set_lower_bound_guarantee(HValue* guarantee) { + lower_bound_guarantee_ = ConvertGuarantee(guarantee); + } + void set_upper_bound_guarantee(HValue* guarantee) { + upper_bound_guarantee_ = ConvertGuarantee(guarantee); + } + + void swap_candidate(DecompositionResult* other_candicate) { + other_candicate->SwapValues(&candidate_, &offset_, &scale_); + } + + private: + HValue* ConvertGuarantee(HValue* guarantee); + + HValue* lower_bound_; + HValue* lower_bound_guarantee_; + HValue* candidate_; + HValue* upper_bound_; + HValue* upper_bound_guarantee_; + int offset_; + int scale_; +}; + + typedef EnumSet<GVNFlag> GVNFlagSet; @@ -571,7 +824,13 @@ class HValue: public ZoneObject { // HGraph::ComputeSafeUint32Operations is responsible for setting this // flag. kUint32, - kLastFlag = kUint32 + // If a phi is involved in the evaluation of a numeric constraint the + // recursion can cause an endless cycle: we use this flag to exit the loop. + kNumericConstraintEvaluationInProgress, + // This flag is set to true after the SetupInformativeDefinitions() pass + // has processed this instruction. + kIDefsProcessingDone, + kLastFlag = kIDefsProcessingDone }; STATIC_ASSERT(kLastFlag < kBitsPerInt); @@ -624,6 +883,9 @@ class HValue: public ZoneObject { void SetBlock(HBasicBlock* block); int LoopWeight() const; + // Note: Never call this method for an unlinked value. 
+ Isolate* isolate() const; + int id() const { return id_; } void set_id(int id) { id_ = id; } @@ -632,13 +894,15 @@ class HValue: public ZoneObject { virtual bool EmitAtUses() { return false; } Representation representation() const { return representation_; } void ChangeRepresentation(Representation r) { - // Representation was already set and is allowed to be changed. - ASSERT(!r.IsNone()); ASSERT(CheckFlag(kFlexibleRepresentation)); RepresentationChanged(r); representation_ = r; + if (r.IsTagged()) { + // Tagged is the bottom of the lattice, don't go any further. + ClearFlag(kFlexibleRepresentation); + } } - void AssumeRepresentation(Representation r); + virtual void AssumeRepresentation(Representation r); virtual bool IsConvertibleToInteger() const { return true; } @@ -663,6 +927,48 @@ class HValue: public ZoneObject { return NULL; } + // There are HInstructions that do not really change a value, they + // only add pieces of information to it (like bounds checks, map checks, + // smi checks...). + // We call these instructions "informative definitions", or "iDef". + // One of the iDef operands is special because it is the value that is + // "transferred" to the output, we call it the "redefined operand". + // If an HValue is an iDef it must override RedefinedOperandIndex() so that + // it does not return kNoRedefinedOperand; + static const int kNoRedefinedOperand = -1; + virtual int RedefinedOperandIndex() { return kNoRedefinedOperand; } + bool IsInformativeDefinition() { + return RedefinedOperandIndex() != kNoRedefinedOperand; + } + HValue* RedefinedOperand() { + return IsInformativeDefinition() ? OperandAt(RedefinedOperandIndex()) + : NULL; + } + + // A purely informative definition is an idef that will not emit code and + // should therefore be removed from the graph in the RestoreActualValues + // phase (so that live ranges will be shorter). + virtual bool IsPurelyInformativeDefinition() { return false; } + + // This method must always return the original HValue SSA definition + // (regardless of any iDef of this value). + HValue* ActualValue() { + return IsInformativeDefinition() ? RedefinedOperand()->ActualValue() + : this; + } + + virtual void AddInformativeDefinitions() {} + + void UpdateRedefinedUsesWhileSettingUpInformativeDefinitions() { + UpdateRedefinedUsesInner<TestDominanceUsingProcessedFlag>(); + } + void UpdateRedefinedUses() { + UpdateRedefinedUsesInner<Dominates>(); + } + + bool IsInteger32Constant(); + int32_t GetInteger32Constant(); + bool IsDefinedAfter(HBasicBlock* other) const; // Operands. @@ -736,16 +1042,11 @@ class HValue: public ZoneObject { void ComputeInitialRange(Zone* zone); // Representation helpers. - virtual Representation RequiredInputRepresentation(int index) = 0; - - virtual Representation InferredRepresentation() { - return representation(); - } - - // Type feedback access. - virtual Representation ObservedInputRepresentation(int index) { - return RequiredInputRepresentation(index); + virtual Representation observed_input_representation(int index) { + return Representation::None(); } + virtual Representation RequiredInputRepresentation(int index) = 0; + virtual void InferRepresentation(HInferRepresentation* h_infer); // This gives the instruction an opportunity to replace itself with an // instruction that does the same in some better way. To replace an @@ -765,6 +1066,14 @@ class HValue: public ZoneObject { const char* Mnemonic() const; + // Type information helpers. 
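A self-contained sketch of the "informative definition" chain described in the comments above: an iDef merely redefines one of its operands, and ActualValue() walks those redefinitions back to the original SSA definition. MockValue is a stand-in with the same shape, not the real HValue.

#include <vector>

struct MockValue {
  // Index of the operand this instruction redefines (e.g. a bounds check
  // redefines its index), or -1 when it is an ordinary definition.
  int redefined_operand_index = -1;
  std::vector<MockValue*> operands;

  bool IsInformativeDefinition() const { return redefined_operand_index >= 0; }

  // Walks the chain of informative definitions back to the original SSA
  // definition, as ActualValue() does in the declarations above.
  MockValue* ActualValue() {
    return IsInformativeDefinition()
        ? operands[redefined_operand_index]->ActualValue()
        : this;
  }
};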
+ bool HasMonomorphicJSObjectType(); + + // TODO(mstarzinger): For now instructions can override this function to + // specify statically known types, once HType can convey more information + // it should be based on the HType. + virtual Handle<Map> GetMonomorphicJSObjectMap() { return Handle<Map>(); } + // Updated the inferred type of this instruction and returns true if // it has changed. bool UpdateInferredType(); @@ -786,14 +1095,50 @@ class HValue: public ZoneObject { virtual void Verify() = 0; #endif + bool IsRelationTrue(NumericRelation relation, + HValue* other, + int offset = 0, + int scale = 0); + + bool TryGuaranteeRange(HValue* upper_bound); + virtual bool TryDecompose(DecompositionResult* decomposition) { + if (RedefinedOperand() != NULL) { + return RedefinedOperand()->TryDecompose(decomposition); + } else { + return false; + } + } + protected: + void TryGuaranteeRangeRecursive(RangeEvaluationContext* context); + + enum RangeGuaranteeDirection { + DIRECTION_NONE = 0, + DIRECTION_UPPER = 1, + DIRECTION_LOWER = 2, + DIRECTION_BOTH = DIRECTION_UPPER | DIRECTION_LOWER + }; + virtual void SetResponsibilityForRange(RangeGuaranteeDirection direction) {} + virtual void TryGuaranteeRangeChanging(RangeEvaluationContext* context) {} + // This function must be overridden for instructions with flag kUseGVN, to // compare the non-Operand parts of the instruction. virtual bool DataEquals(HValue* other) { UNREACHABLE(); return false; } + + virtual Representation RepresentationFromInputs() { + return representation(); + } + Representation RepresentationFromUses(); + virtual void UpdateRepresentation(Representation new_rep, + HInferRepresentation* h_infer, + const char* reason); + void AddDependantsToWorklist(HInferRepresentation* h_infer); + virtual void RepresentationChanged(Representation to) { } + virtual Range* InferRange(Zone* zone); virtual void DeleteFromGraph() = 0; virtual void InternalSetOperandAt(int index, HValue* value) = 0; @@ -803,11 +1148,51 @@ class HValue: public ZoneObject { } void set_representation(Representation r) { - // Representation is set-once. ASSERT(representation_.IsNone() && !r.IsNone()); representation_ = r; } + // Signature of a function testing if a HValue properly dominates another. + typedef bool (*DominanceTest)(HValue*, HValue*); + + // Simple implementation of DominanceTest implemented walking the chain + // of Hinstructions (used in UpdateRedefinedUsesInner). + static bool Dominates(HValue* dominator, HValue* dominated); + + // A fast implementation of DominanceTest that works only for the + // "current" instruction in the SetupInformativeDefinitions() phase. + // During that phase we use a flag to mark processed instructions, and by + // checking the flag we can quickly test if an instruction comes before or + // after the "current" one. + static bool TestDominanceUsingProcessedFlag(HValue* dominator, + HValue* dominated); + + // If we are redefining an operand, update all its dominated uses (the + // function that checks if a use is dominated is the template argument). + template<DominanceTest TestDominance> + void UpdateRedefinedUsesInner() { + HValue* input = RedefinedOperand(); + if (input != NULL) { + for (HUseIterator uses = input->uses(); !uses.Done(); uses.Advance()) { + HValue* use = uses.value(); + if (TestDominance(this, use)) { + use->SetOperandAt(uses.index(), this); + } + } + } + } + + // Informative definitions can override this method to state any numeric + // relation they provide on the redefined value. 
+ // Returns true if it is guaranteed that: + // ((this + offset) >> scale) relation other + virtual bool IsRelationTrueInternal(NumericRelation relation, + HValue* other, + int offset = 0, + int scale = 0) { + return false; + } + static GVNFlagSet AllDependsOnFlagSet() { GVNFlagSet result; // Create changes mask. @@ -1010,6 +1395,74 @@ class HBlockEntry: public HTemplateInstruction<0> { }; +class HDummyUse: public HTemplateInstruction<1> { + public: + explicit HDummyUse(HValue* value) { + SetOperandAt(0, value); + // Pretend to be a Smi so that the HChange instructions inserted + // before any use generate as little code as possible. + set_representation(Representation::Tagged()); + set_type(HType::Smi()); + } + + HValue* value() { return OperandAt(0); } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::None(); + } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(DummyUse); +}; + + +class HNumericConstraint : public HTemplateInstruction<2> { + public: + static HNumericConstraint* AddToGraph(HValue* constrained_value, + NumericRelation relation, + HValue* related_value, + HInstruction* insertion_point = NULL); + + HValue* constrained_value() { return OperandAt(0); } + HValue* related_value() { return OperandAt(1); } + NumericRelation relation() { return relation_; } + + virtual int RedefinedOperandIndex() { return 0; } + virtual bool IsPurelyInformativeDefinition() { return true; } + + virtual Representation RequiredInputRepresentation(int index) { + return representation(); + } + + virtual void PrintDataTo(StringStream* stream); + + virtual bool IsRelationTrueInternal(NumericRelation other_relation, + HValue* other_related_value, + int offset = 0, + int scale = 0) { + if (related_value() == other_related_value) { + return relation().CompoundImplies(other_relation, offset, scale); + } else { + return false; + } + } + + DECLARE_CONCRETE_INSTRUCTION(NumericConstraint) + + private: + HNumericConstraint(HValue* constrained_value, + NumericRelation relation, + HValue* related_value) + : relation_(relation) { + SetOperandAt(0, constrained_value); + SetOperandAt(1, related_value); + } + + NumericRelation relation_; +}; + + // We insert soft-deoptimize when we hit code with unknown typefeedback, // so that we get a chance of re-optimizing with useful typefeedback. // HSoftDeoptimize does not end a basic block as opposed to HDeoptimize. 
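Because the Implies()/Reversed()/Negated() semantics of the NumericRelation helper introduced earlier in this header are easy to misread, here is a small self-checking example restating the documented Implies() table for LT and EQ. The Rel enum and Implies helper below are a standalone restatement for illustration, not the class itself.

#include <cassert>

enum Rel { EQ, GT, GE, LT, LE, NE };

// Restates the table: EQ implies GE and LE; GT implies GE and NE;
// LT implies LE and NE; GE, LE and NE imply only themselves.
static bool Implies(Rel a, Rel b) {
  if (a == b) return true;
  switch (a) {
    case EQ: return b == GE || b == LE;
    case GT: return b == GE || b == NE;
    case LT: return b == LE || b == NE;
    default: return false;
  }
}

int main() {
  assert(Implies(LT, LE) && Implies(LT, NE) && !Implies(LT, GE));
  assert(Implies(EQ, GE) && Implies(EQ, LE) && !Implies(EQ, NE));
  return 0;
}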
@@ -1116,6 +1569,7 @@ class HBranch: public HUnaryControlInstruction { virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); } + virtual Representation observed_input_representation(int index); ToBooleanStub::Types expected_input_types() const { return expected_input_types_; @@ -1156,10 +1610,12 @@ class HCompareMap: public HUnaryControlInstruction { }; -class HReturn: public HTemplateControlInstruction<0, 1> { +class HReturn: public HTemplateControlInstruction<0, 3> { public: - explicit HReturn(HValue* value) { + HReturn(HValue* value, HValue* context, HValue* parameter_count) { SetOperandAt(0, value); + SetOperandAt(1, context); + SetOperandAt(2, parameter_count); } virtual Representation RequiredInputRepresentation(int index) { @@ -1169,6 +1625,8 @@ class HReturn: public HTemplateControlInstruction<0, 1> { virtual void PrintDataTo(StringStream* stream); HValue* value() { return OperandAt(0); } + HValue* context() { return OperandAt(1); } + HValue* parameter_count() { return OperandAt(2); } DECLARE_CONCRETE_INSTRUCTION(Return) }; @@ -1245,6 +1703,8 @@ class HForceRepresentation: public HTemplateInstruction<1> { return representation(); // Same as the output representation. } + virtual void PrintDataTo(StringStream* stream); + DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation) }; @@ -1320,14 +1780,24 @@ class HClampToUint8: public HUnaryOperation { }; +enum RemovableSimulate { + REMOVABLE_SIMULATE, + FIXED_SIMULATE +}; + + class HSimulate: public HInstruction { public: - HSimulate(BailoutId ast_id, int pop_count, Zone* zone) + HSimulate(BailoutId ast_id, + int pop_count, + Zone* zone, + RemovableSimulate removable) : ast_id_(ast_id), pop_count_(pop_count), values_(2, zone), assigned_indexes_(2, zone), - zone_(zone) {} + zone_(zone), + removable_(removable) {} virtual ~HSimulate() {} virtual void PrintDataTo(StringStream* stream); @@ -1361,6 +1831,9 @@ class HSimulate: public HInstruction { return Representation::None(); } + void MergeInto(HSimulate* other); + bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; } + DECLARE_CONCRETE_INSTRUCTION(Simulate) #ifdef DEBUG @@ -1387,6 +1860,7 @@ class HSimulate: public HInstruction { ZoneList<HValue*> values_; ZoneList<int> assigned_indexes_; Zone* zone_; + RemovableSimulate removable_; }; @@ -1412,7 +1886,7 @@ class HStackCheck: public HTemplateInstruction<1> { // The stack check eliminator might try to eliminate the same stack // check instruction multiple times. 
if (IsLinked()) { - DeleteFromGraph(); + DeleteAndReplaceWith(NULL); } } @@ -1440,18 +1914,18 @@ class HEnterInlined: public HTemplateInstruction<0> { HEnterInlined(Handle<JSFunction> closure, int arguments_count, FunctionLiteral* function, - CallKind call_kind, InliningKind inlining_kind, Variable* arguments_var, - ZoneList<HValue*>* arguments_values) + ZoneList<HValue*>* arguments_values, + bool undefined_receiver) : closure_(closure), arguments_count_(arguments_count), arguments_pushed_(false), function_(function), - call_kind_(call_kind), inlining_kind_(inlining_kind), arguments_var_(arguments_var), - arguments_values_(arguments_values) { + arguments_values_(arguments_values), + undefined_receiver_(undefined_receiver) { } virtual void PrintDataTo(StringStream* stream); @@ -1461,8 +1935,8 @@ class HEnterInlined: public HTemplateInstruction<0> { bool arguments_pushed() const { return arguments_pushed_; } void set_arguments_pushed() { arguments_pushed_ = true; } FunctionLiteral* function() const { return function_; } - CallKind call_kind() const { return call_kind_; } InliningKind inlining_kind() const { return inlining_kind_; } + bool undefined_receiver() const { return undefined_receiver_; } virtual Representation RequiredInputRepresentation(int index) { return Representation::None(); @@ -1478,10 +1952,10 @@ class HEnterInlined: public HTemplateInstruction<0> { int arguments_count_; bool arguments_pushed_; FunctionLiteral* function_; - CallKind call_kind_; InliningKind inlining_kind_; Variable* arguments_var_; ZoneList<HValue*>* arguments_values_; + bool undefined_receiver_; }; @@ -1871,6 +2345,25 @@ class HCallNew: public HBinaryCall { }; +class HCallNewArray: public HCallNew { + public: + HCallNewArray(HValue* context, HValue* constructor, int argument_count, + Handle<JSGlobalPropertyCell> type_cell) + : HCallNew(context, constructor, argument_count), + type_cell_(type_cell) { + } + + Handle<JSGlobalPropertyCell> property_cell() const { + return type_cell_; + } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray) + + private: + Handle<JSGlobalPropertyCell> type_cell_; +}; + + class HCallRuntime: public HCall<1> { public: HCallRuntime(HValue* context, @@ -1908,7 +2401,7 @@ class HJSArrayLength: public HTemplateInstruction<2> { // object. It is guaranteed to be 32 bit integer, but it can be // represented as either a smi or heap number. SetOperandAt(0, value); - SetOperandAt(1, typecheck); + SetOperandAt(1, typecheck != NULL ? 
typecheck : value); set_representation(Representation::Tagged()); SetFlag(kUseGVN); SetGVNFlag(kDependsOnArrayLengths); @@ -1922,7 +2415,11 @@ class HJSArrayLength: public HTemplateInstruction<2> { virtual void PrintDataTo(StringStream* stream); HValue* value() { return OperandAt(0); } - HValue* typecheck() { return OperandAt(1); } + HValue* typecheck() { + ASSERT(HasTypeCheck()); + return OperandAt(1); + } + bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); } DECLARE_CONCRETE_INSTRUCTION(JSArrayLength) @@ -2013,6 +2510,9 @@ class HBitNot: public HUnaryOperation { virtual Representation RequiredInputRepresentation(int index) { return Representation::Integer32(); } + virtual Representation observed_input_representation(int index) { + return Representation::Integer32(); + } virtual HType CalculateInferredType(); virtual HValue* Canonicalize(); @@ -2029,35 +2529,10 @@ class HBitNot: public HUnaryOperation { class HUnaryMathOperation: public HTemplateInstruction<2> { public: - HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op) - : op_(op) { - SetOperandAt(0, context); - SetOperandAt(1, value); - switch (op) { - case kMathFloor: - case kMathRound: - case kMathCeil: - set_representation(Representation::Integer32()); - break; - case kMathAbs: - set_representation(Representation::Tagged()); - SetFlag(kFlexibleRepresentation); - SetGVNFlag(kChangesNewSpacePromotion); - break; - case kMathSqrt: - case kMathPowHalf: - case kMathLog: - case kMathSin: - case kMathCos: - case kMathTan: - set_representation(Representation::Double()); - SetGVNFlag(kChangesNewSpacePromotion); - break; - default: - UNREACHABLE(); - } - SetFlag(kUseGVN); - } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* value, + BuiltinFunctionId op); HValue* context() { return OperandAt(0); } HValue* value() { return OperandAt(1); } @@ -2075,10 +2550,10 @@ class HUnaryMathOperation: public HTemplateInstruction<2> { switch (op_) { case kMathFloor: case kMathRound: - case kMathCeil: case kMathSqrt: case kMathPowHalf: case kMathLog: + case kMathExp: case kMathSin: case kMathCos: case kMathTan: @@ -2106,6 +2581,39 @@ class HUnaryMathOperation: public HTemplateInstruction<2> { } private: + HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op) + : op_(op) { + SetOperandAt(0, context); + SetOperandAt(1, value); + switch (op) { + case kMathFloor: + case kMathRound: + case kMathCeil: + set_representation(Representation::Integer32()); + break; + case kMathAbs: + // Not setting representation here: it is None intentionally. + SetFlag(kFlexibleRepresentation); + SetGVNFlag(kChangesNewSpacePromotion); + break; + case kMathSqrt: + case kMathPowHalf: + case kMathLog: + case kMathSin: + case kMathCos: + case kMathTan: + set_representation(Representation::Double()); + SetGVNFlag(kChangesNewSpacePromotion); + break; + case kMathExp: + set_representation(Representation::Double()); + break; + default: + UNREACHABLE(); + } + SetFlag(kUseGVN); + } + virtual bool IsDeletable() const { return true; } BuiltinFunctionId op_; @@ -2116,14 +2624,18 @@ class HLoadElements: public HTemplateInstruction<2> { public: HLoadElements(HValue* value, HValue* typecheck) { SetOperandAt(0, value); - SetOperandAt(1, typecheck); + SetOperandAt(1, typecheck != NULL ? 
typecheck : value); set_representation(Representation::Tagged()); SetFlag(kUseGVN); SetGVNFlag(kDependsOnElementsPointer); } HValue* value() { return OperandAt(0); } - HValue* typecheck() { return OperandAt(1); } + HValue* typecheck() { + ASSERT(HasTypeCheck()); + return OperandAt(1); + } + bool HasTypeCheck() const { return OperandAt(0) != OperandAt(1); } virtual void PrintDataTo(StringStream* stream); @@ -2177,6 +2689,7 @@ class HCheckMaps: public HTemplateInstruction<2> { SetOperandAt(1, typecheck != NULL ? typecheck : value); set_representation(Representation::Tagged()); SetFlag(kUseGVN); + SetFlag(kTrackSideEffectDominators); SetGVNFlag(kDependsOnMaps); SetGVNFlag(kDependsOnElementsKind); map_set()->Add(map, zone); @@ -2186,6 +2699,7 @@ class HCheckMaps: public HTemplateInstruction<2> { SetOperandAt(1, value); set_representation(Representation::Tagged()); SetFlag(kUseGVN); + SetFlag(kTrackSideEffectDominators); SetGVNFlag(kDependsOnMaps); SetGVNFlag(kDependsOnElementsKind); for (int i = 0; i < maps->length(); i++) { @@ -2220,6 +2734,7 @@ class HCheckMaps: public HTemplateInstruction<2> { virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } + virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator); virtual void PrintDataTo(StringStream* stream); virtual HType CalculateInferredType(); @@ -2250,6 +2765,7 @@ class HCheckFunction: public HUnaryOperation { : HUnaryOperation(value), target_(function) { set_representation(Representation::Tagged()); SetFlag(kUseGVN); + target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function); } virtual Representation RequiredInputRepresentation(int index) { @@ -2263,6 +2779,7 @@ class HCheckFunction: public HUnaryOperation { #endif Handle<JSFunction> target() const { return target_; } + bool target_in_new_space() const { return target_in_new_space_; } DECLARE_CONCRETE_INSTRUCTION(CheckFunction) @@ -2274,6 +2791,7 @@ class HCheckFunction: public HUnaryOperation { private: Handle<JSFunction> target_; + bool target_in_new_space_; }; @@ -2288,8 +2806,9 @@ class HCheckInstanceType: public HUnaryOperation { static HCheckInstanceType* NewIsString(HValue* value, Zone* zone) { return new(zone) HCheckInstanceType(value, IS_STRING); } - static HCheckInstanceType* NewIsSymbol(HValue* value, Zone* zone) { - return new(zone) HCheckInstanceType(value, IS_SYMBOL); + static HCheckInstanceType* NewIsInternalizedString( + HValue* value, Zone* zone) { + return new(zone) HCheckInstanceType(value, IS_INTERNALIZED_STRING); } virtual void PrintDataTo(StringStream* stream); @@ -2320,7 +2839,7 @@ class HCheckInstanceType: public HUnaryOperation { IS_SPEC_OBJECT, IS_JS_ARRAY, IS_STRING, - IS_SYMBOL, + IS_INTERNALIZED_STRING, LAST_INTERVAL_CHECK = IS_JS_ARRAY }; @@ -2374,18 +2893,24 @@ class HCheckNonSmi: public HUnaryOperation { class HCheckPrototypeMaps: public HTemplateInstruction<0> { public: - HCheckPrototypeMaps(Handle<JSObject> prototype, Handle<JSObject> holder) - : prototype_(prototype), holder_(holder) { + HCheckPrototypeMaps(Handle<JSObject> prototype, + Handle<JSObject> holder, + Zone* zone) : prototypes_(2, zone), maps_(2, zone) { SetFlag(kUseGVN); SetGVNFlag(kDependsOnMaps); + // Keep a list of all objects on the prototype chain up to the holder + // and the expected maps. 
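The constructor body that follows records every object between the given prototype and the holder, together with the map each one currently has. A minimal model of that walk, where MockObject stands in for Handle<JSObject> and the holder is assumed to be reachable on the chain (as the loop below also assumes):

#include <vector>

struct MockObject {
  const void* map;
  MockObject* prototype;  // next object on the prototype chain
};

static void CollectPrototypeChain(MockObject* prototype,
                                  MockObject* holder,
                                  std::vector<MockObject*>* prototypes,
                                  std::vector<const void*>* maps) {
  while (true) {
    prototypes->push_back(prototype);
    maps->push_back(prototype->map);
    if (prototype == holder) break;  // the holder is included, then we stop
    prototype = prototype->prototype;
  }
}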
+ while (true) { + prototypes_.Add(prototype, zone); + maps_.Add(Handle<Map>(prototype->map()), zone); + if (prototype.is_identical_to(holder)) break; + prototype = Handle<JSObject>(JSObject::cast(prototype->GetPrototype())); + } } -#ifdef DEBUG - virtual void Verify(); -#endif + ZoneList<Handle<JSObject> >* prototypes() { return &prototypes_; } - Handle<JSObject> prototype() const { return prototype_; } - Handle<JSObject> holder() const { return holder_; } + ZoneList<Handle<Map> >* maps() { return &maps_; } DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps) @@ -2397,21 +2922,45 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> { virtual intptr_t Hashcode() { ASSERT_ALLOCATION_DISABLED; - intptr_t hash = reinterpret_cast<intptr_t>(*prototype()); - hash = 17 * hash + reinterpret_cast<intptr_t>(*holder()); + // Dereferencing to use the object's raw address for hashing is safe. + HandleDereferenceGuard allow_handle_deref(isolate(), + HandleDereferenceGuard::ALLOW); + SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) || + !isolate()->optimizing_compiler_thread()->IsOptimizerThread()); + intptr_t hash = 0; + for (int i = 0; i < prototypes_.length(); i++) { + hash = 17 * hash + reinterpret_cast<intptr_t>(*prototypes_[i]); + hash = 17 * hash + reinterpret_cast<intptr_t>(*maps_[i]); + } return hash; } + bool CanOmitPrototypeChecks() { + for (int i = 0; i < maps()->length(); i++) { + if (!maps()->at(i)->CanOmitPrototypeChecks()) return false; + } + return true; + } + protected: virtual bool DataEquals(HValue* other) { HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other); - return prototype_.is_identical_to(b->prototype()) && - holder_.is_identical_to(b->holder()); +#ifdef DEBUG + if (prototypes_.length() != b->prototypes()->length()) return false; + for (int i = 0; i < prototypes_.length(); i++) { + if (!prototypes_[i].is_identical_to(b->prototypes()->at(i))) return false; + if (!maps_[i].is_identical_to(b->maps()->at(i))) return false; + } + return true; +#else + return prototypes_.first().is_identical_to(b->prototypes()->first()) && + prototypes_.last().is_identical_to(b->prototypes()->last()); +#endif // DEBUG } private: - Handle<JSObject> prototype_; - Handle<JSObject> holder_; + ZoneList<Handle<JSObject> > prototypes_; + ZoneList<Handle<Map> > maps_; }; @@ -2438,6 +2987,38 @@ class HCheckSmi: public HUnaryOperation { }; +class HCheckSmiOrInt32: public HUnaryOperation { + public: + explicit HCheckSmiOrInt32(HValue* value) : HUnaryOperation(value) { + SetFlag(kFlexibleRepresentation); + SetFlag(kUseGVN); + } + + virtual int RedefinedOperandIndex() { return 0; } + virtual Representation RequiredInputRepresentation(int index) { + return representation(); + } + virtual void InferRepresentation(HInferRepresentation* h_infer); + + virtual Representation observed_input_representation(int index) { + return Representation::Integer32(); + } + + virtual HValue* Canonicalize() { + if (representation().IsTagged() && !value()->type().IsSmi()) { + return this; + } else { + return value(); + } + } + + DECLARE_CONCRETE_INSTRUCTION(CheckSmiOrInt32) + + protected: + virtual bool DataEquals(HValue* other) { return true; } +}; + + class HPhi: public HValue { public: HPhi(int merged_index, Zone* zone) @@ -2451,13 +3032,15 @@ class HPhi: public HValue { indirect_uses_[i] = 0; } ASSERT(merged_index >= 0); - set_representation(Representation::Tagged()); SetFlag(kFlexibleRepresentation); } - virtual Representation InferredRepresentation(); + virtual Representation RepresentationFromInputs(); 
virtual Range* InferRange(Zone* zone); + virtual void InferRepresentation(HInferRepresentation* h_infer); + Representation RepresentationObservedByAllNonPhiUses(); + Representation RepresentationFromUseRequirements(); virtual Representation RequiredInputRepresentation(int index) { return representation(); } @@ -2472,6 +3055,8 @@ class HPhi: public HValue { int merged_index() const { return merged_index_; } + virtual void AddInformativeDefinitions(); + virtual void PrintTo(StringStream* stream); #ifdef DEBUG @@ -2521,20 +3106,28 @@ class HPhi: public HValue { bool AllOperandsConvertibleToInteger() { for (int i = 0; i < OperandCount(); ++i) { if (!OperandAt(i)->IsConvertibleToInteger()) { + if (FLAG_trace_representation) { + HValue* input = OperandAt(i); + PrintF("#%d %s: Input #%d %s at %d is NCTI\n", + id(), Mnemonic(), input->id(), input->Mnemonic(), i); + } return false; } } return true; } - void ResetInteger32Uses(); - protected: virtual void DeleteFromGraph(); virtual void InternalSetOperandAt(int index, HValue* value) { inputs_[index] = value; } + virtual bool IsRelationTrueInternal(NumericRelation relation, + HValue* other, + int offset = 0, + int scale = 0); + private: ZoneList<HValue*> inputs_; int merged_index_; @@ -2547,6 +3140,53 @@ class HPhi: public HValue { }; +class HInductionVariableAnnotation : public HUnaryOperation { + public: + static HInductionVariableAnnotation* AddToGraph(HPhi* phi, + NumericRelation relation, + int operand_index); + + NumericRelation relation() { return relation_; } + HValue* induction_base() { return phi_->OperandAt(operand_index_); } + + virtual int RedefinedOperandIndex() { return 0; } + virtual bool IsPurelyInformativeDefinition() { return true; } + virtual Representation RequiredInputRepresentation(int index) { + return representation(); + } + + virtual void PrintDataTo(StringStream* stream); + + virtual bool IsRelationTrueInternal(NumericRelation other_relation, + HValue* other_related_value, + int offset = 0, + int scale = 0) { + if (induction_base() == other_related_value) { + return relation().CompoundImplies(other_relation, offset, scale); + } else { + return false; + } + } + + DECLARE_CONCRETE_INSTRUCTION(InductionVariableAnnotation) + + private: + HInductionVariableAnnotation(HPhi* phi, + NumericRelation relation, + int operand_index) + : HUnaryOperation(phi), + phi_(phi), relation_(relation), operand_index_(operand_index) { + } + + // We need to store the phi both here and in the instruction operand because + // the operand can change if a new idef of the phi is added between the phi + // and this instruction (inserting an idef updates every use). 
+ HPhi* phi_; + NumericRelation relation_; + int operand_index_; +}; + + class HArgumentsObject: public HTemplateInstruction<0> { public: HArgumentsObject() { @@ -2568,8 +3208,17 @@ class HArgumentsObject: public HTemplateInstruction<0> { class HConstant: public HTemplateInstruction<0> { public: HConstant(Handle<Object> handle, Representation r); - HConstant(int32_t value, Representation r); - HConstant(double value, Representation r); + HConstant(int32_t value, + Representation r, + Handle<Object> optional_handle = Handle<Object>::null()); + HConstant(double value, + Representation r, + Handle<Object> optional_handle = Handle<Object>::null()); + HConstant(Handle<Object> handle, + Representation r, + HType type, + bool is_internalized_string, + bool boolean_value); Handle<Object> handle() { if (handle_.is_null()) { @@ -2594,18 +3243,20 @@ class HConstant: public HTemplateInstruction<0> { } ASSERT(!handle_.is_null()); - Heap* heap = HEAP; + Heap* heap = isolate()->heap(); // We should have handled minus_zero_value and nan_value in the // has_double_value_ clause above. + // Dereferencing is safe to compare against immovable singletons. + HandleDereferenceGuard allow_handle_deref(isolate(), + HandleDereferenceGuard::ALLOW); ASSERT(*handle_ != heap->minus_zero_value()); ASSERT(*handle_ != heap->nan_value()); - if (*handle_ == heap->undefined_value()) return true; - if (*handle_ == heap->null_value()) return true; - if (*handle_ == heap->true_value()) return true; - if (*handle_ == heap->false_value()) return true; - if (*handle_ == heap->the_hole_value()) return true; - if (*handle_ == heap->empty_string()) return true; - return false; + return *handle_ == heap->undefined_value() || + *handle_ == heap->null_value() || + *handle_ == heap->true_value() || + *handle_ == heap->false_value() || + *handle_ == heap->the_hole_value() || + *handle_ == heap->empty_string(); } virtual Representation RequiredInputRepresentation(int index) { @@ -2617,7 +3268,6 @@ class HConstant: public HTemplateInstruction<0> { } virtual bool EmitAtUses() { return !representation().IsDouble(); } - virtual HValue* Canonicalize(); virtual void PrintDataTo(StringStream* stream); virtual HType CalculateInferredType(); bool IsInteger() { return handle()->IsSmi(); } @@ -2628,6 +3278,9 @@ class HConstant: public HTemplateInstruction<0> { ASSERT(HasInteger32Value()); return int32_value_; } + bool HasSmiValue() const { + return HasInteger32Value() && Smi::IsValid(Integer32Value()); + } bool HasDoubleValue() const { return has_double_value_; } double DoubleValue() const { ASSERT(HasDoubleValue()); @@ -2641,8 +3294,20 @@ class HConstant: public HTemplateInstruction<0> { // representation of the number in int32_value_. return int32_value_; } + bool HasStringValue() const { + if (has_double_value_ || has_int32_value_) return false; + ASSERT(!handle_.is_null()); + return type_from_value_.IsString(); + } + Handle<String> StringValue() const { + ASSERT(HasStringValue()); + return Handle<String>::cast(handle_); + } + bool HasInternalizedStringValue() const { + return HasStringValue() && is_internalized_string_; + } - bool ToBoolean(); + bool BooleanValue() const { return boolean_value_; } bool IsUint32() { return HasInteger32Value() && (Integer32Value() >= 0); @@ -2658,6 +3323,11 @@ class HConstant: public HTemplateInstruction<0> { hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_)); } else { ASSERT(!handle_.is_null()); + // Dereferencing to use the object's raw address for hashing is safe. 
+ HandleDereferenceGuard allow_handle_deref(isolate(), + HandleDereferenceGuard::ALLOW); + SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) || + !isolate()->optimizing_compiler_thread()->IsOptimizerThread()); hash = reinterpret_cast<intptr_t>(*handle_); } @@ -2685,11 +3355,13 @@ class HConstant: public HTemplateInstruction<0> { } else { ASSERT(!handle_.is_null()); return !other_constant->handle_.is_null() && - *handle_ == *other_constant->handle_; + handle_.is_identical_to(other_constant->handle_); } } private: + void Initialize(Representation r); + virtual bool IsDeletable() const { return true; } // If this is a numerical constant, handle_ either points to to the @@ -2705,18 +3377,24 @@ class HConstant: public HTemplateInstruction<0> { // not the converse. bool has_int32_value_ : 1; bool has_double_value_ : 1; + bool is_internalized_string_ : 1; // TODO(yangguo): make this part of HType. + bool boolean_value_ : 1; int32_t int32_value_; double double_value_; + HType type_from_value_; }; class HBinaryOperation: public HTemplateInstruction<3> { public: - HBinaryOperation(HValue* context, HValue* left, HValue* right) { + HBinaryOperation(HValue* context, HValue* left, HValue* right) + : observed_output_representation_(Representation::None()) { ASSERT(left != NULL && right != NULL); SetOperandAt(0, context); SetOperandAt(1, left); SetOperandAt(2, right); + observed_input_representation_[0] = Representation::None(); + observed_input_representation_[1] = Representation::None(); } HValue* context() { return OperandAt(0); } @@ -2735,11 +3413,34 @@ class HBinaryOperation: public HTemplateInstruction<3> { return right(); } + void set_observed_input_representation(Representation left, + Representation right) { + observed_input_representation_[0] = left; + observed_input_representation_[1] = right; + } + + virtual void initialize_output_representation(Representation observed) { + observed_output_representation_ = observed; + } + + virtual Representation observed_input_representation(int index) { + if (index == 0) return Representation::Tagged(); + return observed_input_representation_[index - 1]; + } + + virtual void InferRepresentation(HInferRepresentation* h_infer); + virtual Representation RepresentationFromInputs(); + virtual void AssumeRepresentation(Representation r); + virtual bool IsCommutative() const { return false; } virtual void PrintDataTo(StringStream* stream); DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation) + + private: + Representation observed_input_representation_[2]; + Representation observed_output_representation_; }; @@ -2879,49 +3580,147 @@ enum BoundsCheckKeyMode { }; +class HBoundsCheckBaseIndexInformation; + + class HBoundsCheck: public HTemplateInstruction<2> { public: - HBoundsCheck(HValue* index, HValue* length, - BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY) - : key_mode_(key_mode) { + // Normally HBoundsCheck should be created using the + // HGraphBuilder::AddBoundsCheck() helper, which also guards the index with + // a HCheckSmiOrInt32 check. + // However when building stubs, where we know that the arguments are Int32, + // it makes sense to invoke this constructor directly. 
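The helper path described in the comment above guards the key first, so by the time the bounds check executes the index is already an int32. A minimal sketch of the invariant the instruction then enforces, with hypothetical names rather than V8's generated code:

#include <cstdint>

// Sketch only: the access is legal exactly when 0 <= index < length.
// A single unsigned comparison covers both halves, a common lowering.
inline bool BoundsCheckWouldPass(int32_t index, int32_t length) {
  return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
}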
+ HBoundsCheck(HValue* index, + HValue* length, + BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY, + Representation r = Representation::None()) + : key_mode_(key_mode), skip_check_(false), + base_(NULL), offset_(0), scale_(0), + responsibility_direction_(DIRECTION_NONE) { SetOperandAt(0, index); SetOperandAt(1, length); - set_representation(Representation::Integer32()); + if (r.IsNone()) { + // In the normal compilation pipeline the representation is flexible + // (see InferRepresentation). + SetFlag(kFlexibleRepresentation); + } else { + // When compiling stubs we want to set the representation explicitly + // so the compilation pipeline can skip the HInferRepresentation phase. + set_representation(r); + } SetFlag(kUseGVN); } - virtual Representation RequiredInputRepresentation(int arg_index) { - if (key_mode_ == DONT_ALLOW_SMI_KEY || - !length()->representation().IsTagged()) { - return Representation::Integer32(); - } - // If the index is tagged and isn't constant, then allow the length - // to be tagged, since it is usually already tagged from loading it out of - // the length field of a JSArray. This allows for direct comparison without - // untagging. - if (index()->representation().IsTagged() && !index()->IsConstant()) { - return Representation::Tagged(); - } - // Also allow the length to be tagged if the index is constant, because - // it can be tagged to allow direct comparison. - if (index()->IsConstant() && - index()->representation().IsInteger32() && - arg_index == 1) { - return Representation::Tagged(); + bool skip_check() { return skip_check_; } + void set_skip_check(bool skip_check) { skip_check_ = skip_check; } + HValue* base() { return base_; } + int offset() { return offset_; } + int scale() { return scale_; } + bool index_can_increase() { + return (responsibility_direction_ & DIRECTION_LOWER) == 0; + } + bool index_can_decrease() { + return (responsibility_direction_ & DIRECTION_UPPER) == 0; + } + + void ApplyIndexChange(); + bool DetectCompoundIndex() { + ASSERT(base() == NULL); + + DecompositionResult decomposition; + if (index()->TryDecompose(&decomposition)) { + base_ = decomposition.base(); + offset_ = decomposition.offset(); + scale_ = decomposition.scale(); + return true; + } else { + base_ = index(); + offset_ = 0; + scale_ = 0; + return false; } + } + + virtual Representation RequiredInputRepresentation(int arg_index) { + return representation(); + } + virtual Representation observed_input_representation(int index) { return Representation::Integer32(); } + virtual bool IsRelationTrueInternal(NumericRelation relation, + HValue* related_value, + int offset = 0, + int scale = 0); + virtual void PrintDataTo(StringStream* stream); + virtual void InferRepresentation(HInferRepresentation* h_infer); HValue* index() { return OperandAt(0); } HValue* length() { return OperandAt(1); } + virtual int RedefinedOperandIndex() { return 0; } + virtual bool IsPurelyInformativeDefinition() { return skip_check(); } + virtual void AddInformativeDefinitions(); + DECLARE_CONCRETE_INSTRUCTION(BoundsCheck) protected: + friend class HBoundsCheckBaseIndexInformation; + + virtual void SetResponsibilityForRange(RangeGuaranteeDirection direction) { + responsibility_direction_ = static_cast<RangeGuaranteeDirection>( + responsibility_direction_ | direction); + } + virtual bool DataEquals(HValue* other) { return true; } + virtual void TryGuaranteeRangeChanging(RangeEvaluationContext* context); BoundsCheckKeyMode key_mode_; + bool skip_check_; + HValue* base_; + int offset_; + int scale_; + 
RangeGuaranteeDirection responsibility_direction_; +}; + + +class HBoundsCheckBaseIndexInformation: public HTemplateInstruction<2> { + public: + explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) { + DecompositionResult decomposition; + if (check->index()->TryDecompose(&decomposition)) { + SetOperandAt(0, decomposition.base()); + SetOperandAt(1, check); + } else { + UNREACHABLE(); + } + } + + HValue* base_index() { return OperandAt(0); } + HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); } + + DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation) + + virtual Representation RequiredInputRepresentation(int arg_index) { + return representation(); + } + + virtual bool IsRelationTrueInternal(NumericRelation relation, + HValue* related_value, + int offset = 0, + int scale = 0); + virtual void PrintDataTo(StringStream* stream); + + virtual int RedefinedOperandIndex() { return 0; } + virtual bool IsPurelyInformativeDefinition() { return true; } + + protected: + virtual void SetResponsibilityForRange(RangeGuaranteeDirection direction) { + bounds_check()->SetResponsibilityForRange(direction); + } + virtual void TryGuaranteeRangeChanging(RangeEvaluationContext* context) { + bounds_check()->TryGuaranteeRangeChanging(context); + } }; @@ -2929,12 +3728,9 @@ class HBitwiseBinaryOperation: public HBinaryOperation { public: HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right) : HBinaryOperation(context, left, right) { - set_representation(Representation::Tagged()); SetFlag(kFlexibleRepresentation); + SetFlag(kTruncatingToInt32); SetAllSideEffects(); - observed_input_representation_[0] = Representation::Tagged(); - observed_input_representation_[1] = Representation::None(); - observed_input_representation_[2] = Representation::None(); } virtual Representation RequiredInputRepresentation(int index) { @@ -2947,28 +3743,32 @@ class HBitwiseBinaryOperation: public HBinaryOperation { if (!to.IsTagged()) { ASSERT(to.IsInteger32()); ClearAllSideEffects(); - SetFlag(kTruncatingToInt32); SetFlag(kUseGVN); + } else { + SetAllSideEffects(); + ClearFlag(kUseGVN); } } - virtual HType CalculateInferredType(); - - virtual Representation ObservedInputRepresentation(int index) { - return observed_input_representation_[index]; + virtual void UpdateRepresentation(Representation new_rep, + HInferRepresentation* h_infer, + const char* reason) { + // We only generate either int32 or generic tagged bitwise operations. 
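As the comment above notes, bitwise operations are only emitted in int32 or generic tagged form, so UpdateRepresentation demotes a double request to int32. A rough sketch of that rule using a simplified, hypothetical representation enum:

// Hypothetical stand-in for Representation; illustration only.
enum class SketchRep { None, Int32, Double, Tagged };

// Mirrors the demotion in UpdateRepresentation: a double request becomes int32.
inline SketchRep ClampBitwiseRep(SketchRep requested) {
  return requested == SketchRep::Double ? SketchRep::Int32 : requested;
}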
+ if (new_rep.IsDouble()) new_rep = Representation::Integer32(); + HValue::UpdateRepresentation(new_rep, h_infer, reason); } - void InitializeObservedInputRepresentation(Representation r) { - observed_input_representation_[1] = r; - observed_input_representation_[2] = r; + virtual void initialize_output_representation(Representation observed) { + if (observed.IsDouble()) observed = Representation::Integer32(); + HBinaryOperation::initialize_output_representation(observed); } + virtual HType CalculateInferredType(); + DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation) private: virtual bool IsDeletable() const { return true; } - - Representation observed_input_representation_[3]; }; @@ -2979,6 +3779,9 @@ class HMathFloorOfDiv: public HBinaryOperation { set_representation(Representation::Integer32()); SetFlag(kUseGVN); SetFlag(kCanOverflow); + if (!right->IsConstant()) { + SetFlag(kCanBeDivByZero); + } } virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); @@ -3001,13 +3804,15 @@ class HArithmeticBinaryOperation: public HBinaryOperation { public: HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right) : HBinaryOperation(context, left, right) { - set_representation(Representation::Tagged()); - SetFlag(kFlexibleRepresentation); SetAllSideEffects(); + SetFlag(kFlexibleRepresentation); } virtual void RepresentationChanged(Representation to) { - if (!to.IsTagged()) { + if (to.IsTagged()) { + SetAllSideEffects(); + ClearFlag(kUseGVN); + } else { ClearAllSideEffects(); SetFlag(kUseGVN); } @@ -3020,13 +3825,6 @@ class HArithmeticBinaryOperation: public HBinaryOperation { : representation(); } - virtual Representation InferredRepresentation() { - if (left()->representation().Equals(right()->representation())) { - return left()->representation(); - } - return HValue::InferredRepresentation(); - } - private: virtual bool IsDeletable() const { return true; } }; @@ -3045,11 +3843,9 @@ class HCompareGeneric: public HBinaryOperation { } virtual Representation RequiredInputRepresentation(int index) { - return Representation::Tagged(); - } - - Representation GetInputRepresentation() const { - return Representation::Tagged(); + return index == 0 + ? 
Representation::Tagged() + : representation(); } Token::Value token() const { return token_; } @@ -3068,6 +3864,7 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> { public: HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token) : token_(token) { + SetFlag(kFlexibleRepresentation); ASSERT(Token::IsCompareOp(token)); SetOperandAt(0, left); SetOperandAt(1, right); @@ -3077,20 +3874,28 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> { HValue* right() { return OperandAt(1); } Token::Value token() const { return token_; } - void SetInputRepresentation(Representation r); - Representation GetInputRepresentation() const { - return input_representation_; + void set_observed_input_representation(Representation left, + Representation right) { + observed_input_representation_[0] = left; + observed_input_representation_[1] = right; } + virtual void InferRepresentation(HInferRepresentation* h_infer); + virtual Representation RequiredInputRepresentation(int index) { - return input_representation_; + return representation(); + } + virtual Representation observed_input_representation(int index) { + return observed_input_representation_[index]; } virtual void PrintDataTo(StringStream* stream); + virtual void AddInformativeDefinitions(); + DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch) private: - Representation input_representation_; + Representation observed_input_representation_[2]; Token::Value token_; }; @@ -3151,6 +3956,9 @@ class HIsNilAndBranch: public HUnaryControlInstruction { virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } + virtual Representation observed_input_representation(int index) { + return Representation::Tagged(); + } DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch) @@ -3410,16 +4218,30 @@ class HInstanceOfKnownGlobal: public HTemplateInstruction<2> { }; -class HPower: public HTemplateInstruction<2> { +// TODO(mstarzinger): This instruction should be modeled as a load of the map +// field followed by a load of the instance size field once HLoadNamedField is +// flexible enough to accommodate byte-field loads. +class HInstanceSize: public HTemplateInstruction<1> { public: - HPower(HValue* left, HValue* right) { - SetOperandAt(0, left); - SetOperandAt(1, right); - set_representation(Representation::Double()); - SetFlag(kUseGVN); - SetGVNFlag(kChangesNewSpacePromotion); + explicit HInstanceSize(HValue* object) { + SetOperandAt(0, object); + set_representation(Representation::Integer32()); } + HValue* object() { return OperandAt(0); } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(InstanceSize) +}; + + +class HPower: public HTemplateInstruction<2> { + public: + static HInstruction* New(Zone* zone, HValue* left, HValue* right); + HValue* left() { return OperandAt(0); } HValue* right() const { return OperandAt(1); } @@ -3428,6 +4250,9 @@ class HPower: public HTemplateInstruction<2> { ? 
Representation::Double() : Representation::None(); } + virtual Representation observed_input_representation(int index) { + return RequiredInputRepresentation(index); + } DECLARE_CONCRETE_INSTRUCTION(Power) @@ -3435,6 +4260,14 @@ class HPower: public HTemplateInstruction<2> { virtual bool DataEquals(HValue* other) { return true; } private: + HPower(HValue* left, HValue* right) { + SetOperandAt(0, left); + SetOperandAt(1, right); + set_representation(Representation::Double()); + SetFlag(kUseGVN); + SetGVNFlag(kChangesNewSpacePromotion); + } + virtual bool IsDeletable() const { return !right()->representation().IsTagged(); } @@ -3463,10 +4296,10 @@ class HRandom: public HTemplateInstruction<1> { class HAdd: public HArithmeticBinaryOperation { public: - HAdd(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanOverflow); - } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); // Add is only commutative if two integer values are added and not if two // tagged values are added (because it might be a String concatenation). @@ -3476,39 +4309,56 @@ class HAdd: public HArithmeticBinaryOperation { virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - static HInstruction* NewHAdd(Zone* zone, - HValue* context, - HValue* left, - HValue* right); - virtual HType CalculateInferredType(); virtual HValue* Canonicalize(); + virtual bool TryDecompose(DecompositionResult* decomposition) { + if (left()->IsInteger32Constant()) { + decomposition->Apply(right(), left()->GetInteger32Constant()); + return true; + } else if (right()->IsInteger32Constant()) { + decomposition->Apply(left(), right()->GetInteger32Constant()); + return true; + } else { + return false; + } + } + DECLARE_CONCRETE_INSTRUCTION(Add) protected: virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(Zone* zone); + + private: + HAdd(HValue* context, HValue* left, HValue* right) + : HArithmeticBinaryOperation(context, left, right) { + SetFlag(kCanOverflow); + } }; class HSub: public HArithmeticBinaryOperation { public: - HSub(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanOverflow); - } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); virtual HValue* Canonicalize(); - static HInstruction* NewHSub(Zone* zone, - HValue* context, - HValue* left, - HValue* right); + virtual bool TryDecompose(DecompositionResult* decomposition) { + if (right()->IsInteger32Constant()) { + decomposition->Apply(left(), -right()->GetInteger32Constant()); + return true; + } else { + return false; + } + } DECLARE_CONCRETE_INSTRUCTION(Sub) @@ -3516,15 +4366,21 @@ class HSub: public HArithmeticBinaryOperation { virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(Zone* zone); + + private: + HSub(HValue* context, HValue* left, HValue* right) + : HArithmeticBinaryOperation(context, left, right) { + SetFlag(kCanOverflow); + } }; class HMul: public HArithmeticBinaryOperation { public: - HMul(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanOverflow); - } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); @@ -3533,26 +4389,27 @@ class HMul: public 
HArithmeticBinaryOperation { return !representation().IsTagged(); } - static HInstruction* NewHMul(Zone* zone, - HValue* context, - HValue* left, - HValue* right); - DECLARE_CONCRETE_INSTRUCTION(Mul) protected: virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(Zone* zone); + + private: + HMul(HValue* context, HValue* left, HValue* right) + : HArithmeticBinaryOperation(context, left, right) { + SetFlag(kCanOverflow); + } }; class HMod: public HArithmeticBinaryOperation { public: - HMod(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanBeDivByZero); - } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); bool HasPowerOf2Divisor() { if (right()->IsConstant() && @@ -3566,41 +4423,53 @@ class HMod: public HArithmeticBinaryOperation { virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - static HInstruction* NewHMod(Zone* zone, - HValue* context, - HValue* left, - HValue* right); - DECLARE_CONCRETE_INSTRUCTION(Mod) protected: virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(Zone* zone); + + private: + HMod(HValue* context, HValue* left, HValue* right) + : HArithmeticBinaryOperation(context, left, right) { + SetFlag(kCanBeDivByZero); + } }; class HDiv: public HArithmeticBinaryOperation { public: - HDiv(HValue* context, HValue* left, HValue* right) - : HArithmeticBinaryOperation(context, left, right) { - SetFlag(kCanBeDivByZero); - SetFlag(kCanOverflow); + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); + + bool HasPowerOf2Divisor() { + if (right()->IsConstant() && + HConstant::cast(right())->HasInteger32Value()) { + int32_t value = HConstant::cast(right())->Integer32Value(); + return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value)); + } + + return false; } virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited); - static HInstruction* NewHDiv(Zone* zone, - HValue* context, - HValue* left, - HValue* right); - DECLARE_CONCRETE_INSTRUCTION(Div) protected: virtual bool DataEquals(HValue* other) { return true; } virtual Range* InferRange(Zone* zone); + + private: + HDiv(HValue* context, HValue* left, HValue* right) + : HArithmeticBinaryOperation(context, left, right) { + SetFlag(kCanBeDivByZero); + SetFlag(kCanOverflow); + } }; @@ -3608,19 +4477,28 @@ class HMathMinMax: public HArithmeticBinaryOperation { public: enum Operation { kMathMin, kMathMax }; - HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op) - : HArithmeticBinaryOperation(context, left, right), - operation_(op) { } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right, + Operation op); virtual Representation RequiredInputRepresentation(int index) { - return index == 0 - ? Representation::Tagged() - : representation(); - } + return index == 0 ? 
Representation::Tagged() + : representation(); + } + + virtual Representation observed_input_representation(int index) { + return RequiredInputRepresentation(index); + } - virtual Representation InferredRepresentation() { - if (left()->representation().IsInteger32() && - right()->representation().IsInteger32()) { + virtual void InferRepresentation(HInferRepresentation* h_infer); + + virtual Representation RepresentationFromInputs() { + Representation left_rep = left()->representation(); + Representation right_rep = right()->representation(); + if ((left_rep.IsNone() || left_rep.IsInteger32()) && + (right_rep.IsNone() || right_rep.IsInteger32())) { return Representation::Integer32(); } return Representation::Double(); @@ -3641,18 +4519,21 @@ class HMathMinMax: public HArithmeticBinaryOperation { virtual Range* InferRange(Zone* zone); private: + HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op) + : HArithmeticBinaryOperation(context, left, right), + operation_(op) { } + Operation operation_; }; class HBitwise: public HBitwiseBinaryOperation { public: - HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right), op_(op) { - ASSERT(op == Token::BIT_AND || - op == Token::BIT_OR || - op == Token::BIT_XOR); - } + static HInstruction* New(Zone* zone, + Token::Value op, + HValue* context, + HValue* left, + HValue* right); Token::Value op() const { return op_; } @@ -3660,12 +4541,6 @@ class HBitwise: public HBitwiseBinaryOperation { virtual HValue* Canonicalize(); - static HInstruction* NewHBitwise(Zone* zone, - Token::Value op, - HValue* context, - HValue* left, - HValue* right); - virtual void PrintDataTo(StringStream* stream); DECLARE_CONCRETE_INSTRUCTION(Bitwise) @@ -3678,61 +4553,107 @@ class HBitwise: public HBitwiseBinaryOperation { virtual Range* InferRange(Zone* zone); private: + HBitwise(Token::Value op, HValue* context, HValue* left, HValue* right) + : HBitwiseBinaryOperation(context, left, right), op_(op) { + ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR); + } + Token::Value op_; }; class HShl: public HBitwiseBinaryOperation { public: - HShl(HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right) { } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); virtual Range* InferRange(Zone* zone); - static HInstruction* NewHShl(Zone* zone, - HValue* context, - HValue* left, - HValue* right); - DECLARE_CONCRETE_INSTRUCTION(Shl) protected: virtual bool DataEquals(HValue* other) { return true; } + + private: + HShl(HValue* context, HValue* left, HValue* right) + : HBitwiseBinaryOperation(context, left, right) { } }; class HShr: public HBitwiseBinaryOperation { public: - HShr(HValue* context, HValue* left, HValue* right) - : HBitwiseBinaryOperation(context, left, right) { } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); + + virtual bool TryDecompose(DecompositionResult* decomposition) { + if (right()->IsInteger32Constant()) { + if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) { + // This is intended to look for HAdd and HSub, to handle compounds + // like ((base + offset) >> scale) with one single decomposition. 
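The decomposition mentioned in the comment above views an index expression as (base + offset) >> scale, so a later bounds check can reason about the base directly. A simplified, hypothetical illustration of the bookkeeping (DecompositionResult itself is not shown in this diff):

#include <cstdint>
#include <cassert>

// Illustration only: for an index such as ((i + 4) >> 1), walking HShr and
// then HAdd would record base = i, offset = 4, scale = 1.
struct SketchDecomposition {
  int32_t base;
  int32_t offset;
  int32_t scale;
  int32_t Evaluate() const { return (base + offset) >> scale; }
};

inline void SketchDecompositionExample() {
  SketchDecomposition d = {10, 4, 1};   // i = 10
  assert(d.Evaluate() == 7);            // (10 + 4) >> 1 == 7
}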
+ left()->TryDecompose(decomposition); + return true; + } + } + return false; + } virtual Range* InferRange(Zone* zone); - static HInstruction* NewHShr(Zone* zone, - HValue* context, - HValue* left, - HValue* right); - DECLARE_CONCRETE_INSTRUCTION(Shr) protected: virtual bool DataEquals(HValue* other) { return true; } + + private: + HShr(HValue* context, HValue* left, HValue* right) + : HBitwiseBinaryOperation(context, left, right) { } }; class HSar: public HBitwiseBinaryOperation { public: + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); + + virtual bool TryDecompose(DecompositionResult* decomposition) { + if (right()->IsInteger32Constant()) { + if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) { + // This is intended to look for HAdd and HSub, to handle compounds + // like ((base + offset) >> scale) with one single decomposition. + left()->TryDecompose(decomposition); + return true; + } + } + return false; + } + + virtual Range* InferRange(Zone* zone); + + DECLARE_CONCRETE_INSTRUCTION(Sar) + + protected: + virtual bool DataEquals(HValue* other) { return true; } + + private: HSar(HValue* context, HValue* left, HValue* right) : HBitwiseBinaryOperation(context, left, right) { } +}; - virtual Range* InferRange(Zone* zone); - static HInstruction* NewHSar(Zone* zone, - HValue* context, - HValue* left, - HValue* right); +class HRor: public HBitwiseBinaryOperation { + public: + HRor(HValue* context, HValue* left, HValue* right) + : HBitwiseBinaryOperation(context, left, right) { + ChangeRepresentation(Representation::Integer32()); + } - DECLARE_CONCRETE_INSTRUCTION(Sar) + DECLARE_CONCRETE_INSTRUCTION(Ror) protected: virtual bool DataEquals(HValue* other) { return true; } @@ -3760,11 +4681,20 @@ class HOsrEntry: public HTemplateInstruction<0> { class HParameter: public HTemplateInstruction<0> { public: - explicit HParameter(unsigned index) : index_(index) { + enum ParameterKind { + STACK_PARAMETER, + REGISTER_PARAMETER + }; + + explicit HParameter(unsigned index, + ParameterKind kind = STACK_PARAMETER) + : index_(index), + kind_(kind) { set_representation(Representation::Tagged()); } unsigned index() const { return index_; } + ParameterKind kind() const { return kind_; } virtual void PrintDataTo(StringStream* stream); @@ -3776,6 +4706,7 @@ class HParameter: public HTemplateInstruction<0> { private: unsigned index_; + ParameterKind kind_; }; @@ -3847,13 +4778,18 @@ class HLoadGlobalCell: public HTemplateInstruction<0> { SetGVNFlag(kDependsOnGlobalVars); } - Handle<JSGlobalPropertyCell> cell() const { return cell_; } + Handle<JSGlobalPropertyCell> cell() const { return cell_; } bool RequiresHoleCheck() const; virtual void PrintDataTo(StringStream* stream); virtual intptr_t Hashcode() { ASSERT_ALLOCATION_DISABLED; + // Dereferencing to use the object's raw address for hashing is safe. 
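The dereference guarded below is used purely for identity: two handles to the same global property cell resolve to the same raw address, which is a stable hash value as long as the collector cannot move objects. A trivial, hypothetical sketch of the idea:

#include <cstdint>

// Pointer identity as a hash; only valid while the object cannot move.
inline intptr_t PointerIdentityHash(const void* object) {
  return reinterpret_cast<intptr_t>(object);
}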
+ HandleDereferenceGuard allow_handle_deref(isolate(), + HandleDereferenceGuard::ALLOW); + SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) || + !isolate()->optimizing_compiler_thread()->IsOptimizerThread()); return reinterpret_cast<intptr_t>(*cell_); } @@ -3910,6 +4846,132 @@ class HLoadGlobalGeneric: public HTemplateInstruction<2> { }; +class HAllocateObject: public HTemplateInstruction<1> { + public: + HAllocateObject(HValue* context, Handle<JSFunction> constructor) + : constructor_(constructor) { + SetOperandAt(0, context); + set_representation(Representation::Tagged()); + SetGVNFlag(kChangesNewSpacePromotion); + } + + // Maximum instance size for which allocations will be inlined. + static const int kMaxSize = 64 * kPointerSize; + + HValue* context() { return OperandAt(0); } + Handle<JSFunction> constructor() { return constructor_; } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + virtual Handle<Map> GetMonomorphicJSObjectMap() { + ASSERT(constructor()->has_initial_map()); + return Handle<Map>(constructor()->initial_map()); + } + virtual HType CalculateInferredType(); + + DECLARE_CONCRETE_INSTRUCTION(AllocateObject) + + private: + // TODO(svenpanne) Might be safe, but leave it out until we know for sure. + // virtual bool IsDeletable() const { return true; } + + Handle<JSFunction> constructor_; +}; + + +class HAllocate: public HTemplateInstruction<2> { + public: + enum Flags { + CAN_ALLOCATE_IN_NEW_SPACE = 1 << 0, + CAN_ALLOCATE_IN_OLD_DATA_SPACE = 1 << 1, + CAN_ALLOCATE_IN_OLD_POINTER_SPACE = 1 << 2, + ALLOCATE_DOUBLE_ALIGNED = 1 << 3 + }; + + HAllocate(HValue* context, HValue* size, HType type, Flags flags) + : type_(type), + flags_(flags) { + ASSERT((flags & CAN_ALLOCATE_IN_OLD_DATA_SPACE) == 0); // unimplemented + SetOperandAt(0, context); + SetOperandAt(1, size); + set_representation(Representation::Tagged()); + SetGVNFlag(kChangesNewSpacePromotion); + } + + HValue* context() { return OperandAt(0); } + HValue* size() { return OperandAt(1); } + + virtual Representation RequiredInputRepresentation(int index) { + if (index == 0) { + return Representation::Tagged(); + } else { + return Representation::Integer32(); + } + } + + virtual HType CalculateInferredType(); + + bool CanAllocateInNewSpace() const { + return (flags_ & CAN_ALLOCATE_IN_NEW_SPACE) != 0; + } + + bool CanAllocateInOldDataSpace() const { + return (flags_ & CAN_ALLOCATE_IN_OLD_DATA_SPACE) != 0; + } + + bool CanAllocateInOldPointerSpace() const { + return (flags_ & CAN_ALLOCATE_IN_OLD_POINTER_SPACE) != 0; + } + + bool CanAllocateInOldSpace() const { + return CanAllocateInOldDataSpace() || + CanAllocateInOldPointerSpace(); + } + + bool GuaranteedInNewSpace() const { + return CanAllocateInNewSpace() && !CanAllocateInOldSpace(); + } + + bool MustAllocateDoubleAligned() const { + return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0; + } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(Allocate) + + private: + HType type_; + Flags flags_; +}; + + +class HInnerAllocatedObject: public HTemplateInstruction<1> { + public: + HInnerAllocatedObject(HValue* value, int offset) + : offset_(offset) { + ASSERT(value->IsAllocate()); + SetOperandAt(0, value); + set_representation(Representation::Tagged()); + } + + HValue* base_object() { return OperandAt(0); } + int offset() { return offset_; } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + virtual void 
PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject) + + private: + int offset_; +}; + + inline bool StoringValueNeedsWriteBarrier(HValue* value) { return !value->type().IsBoolean() && !value->type().IsSmi() @@ -3919,7 +4981,18 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) { inline bool ReceiverObjectNeedsWriteBarrier(HValue* object, HValue* new_space_dominator) { - return !object->IsAllocateObject() || (object != new_space_dominator); + if (object->IsInnerAllocatedObject()) { + return ReceiverObjectNeedsWriteBarrier( + HInnerAllocatedObject::cast(object)->base_object(), + new_space_dominator); + } + if (object != new_space_dominator) return true; + if (object->IsFastLiteral()) return false; + if (object->IsAllocateObject()) return false; + if (object->IsAllocate()) { + return !HAllocate::cast(object)->GuaranteedInNewSpace(); + } + return true; } @@ -4243,33 +5316,79 @@ class ArrayInstructionInterface { virtual bool IsDehoisted() = 0; virtual void SetDehoisted(bool is_dehoisted) = 0; virtual ~ArrayInstructionInterface() { }; + + static Representation KeyedAccessIndexRequirement(Representation r) { + return r.IsInteger32() ? Representation::Integer32() + : Representation::Tagged(); + } }; -class HLoadKeyedFastElement + +enum LoadKeyedHoleMode { + NEVER_RETURN_HOLE, + ALLOW_RETURN_HOLE +}; + + +class HLoadKeyed : public HTemplateInstruction<3>, public ArrayInstructionInterface { public: - HLoadKeyedFastElement(HValue* obj, - HValue* key, - HValue* dependency, - ElementsKind elements_kind = FAST_ELEMENTS) + HLoadKeyed(HValue* obj, + HValue* key, + HValue* dependency, + ElementsKind elements_kind, + LoadKeyedHoleMode mode = NEVER_RETURN_HOLE) : bit_field_(0) { - ASSERT(IsFastSmiOrObjectElementsKind(elements_kind)); - bit_field_ = ElementsKindField::encode(elements_kind); - if (IsFastSmiElementsKind(elements_kind) && - IsFastPackedElementsKind(elements_kind)) { - set_type(HType::Smi()); - } + bit_field_ = ElementsKindField::encode(elements_kind) | + HoleModeField::encode(mode); + SetOperandAt(0, obj); SetOperandAt(1, key); - SetOperandAt(2, dependency); - set_representation(Representation::Tagged()); - SetGVNFlag(kDependsOnArrayElements); + SetOperandAt(2, dependency != NULL ? dependency : obj); + + if (!is_external()) { + // I can detect the case between storing double (holey and fast) and + // smi/object by looking at elements_kind_. + ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) || + IsFastDoubleElementsKind(elements_kind)); + + if (IsFastSmiOrObjectElementsKind(elements_kind)) { + if (IsFastSmiElementsKind(elements_kind)) { + set_type(HType::Smi()); + } + + set_representation(Representation::Tagged()); + SetGVNFlag(kDependsOnArrayElements); + } else { + set_representation(Representation::Double()); + SetGVNFlag(kDependsOnDoubleArrayElements); + } + } else { + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || + elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + set_representation(Representation::Double()); + } else { + set_representation(Representation::Integer32()); + } + + SetGVNFlag(kDependsOnSpecializedArrayElements); + // Native code could change the specialized array. 
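The branches above derive the load's result representation from the elements kind: fast smi/object arrays produce tagged values, double-backed storage produces unboxed doubles, and the remaining external kinds produce int32. A condensed sketch of that mapping, using made-up enums in place of ElementsKind and Representation:

// Hypothetical kinds for illustration; the real ElementsKind has more cases.
enum class SketchElementsKind {
  FastSmi, FastObject, FastDouble, ExternalFloatOrDouble, ExternalInt
};
enum class SketchRepresentation { Tagged, Double, Int32 };

inline SketchRepresentation KeyedLoadResultRep(SketchElementsKind kind) {
  switch (kind) {
    case SketchElementsKind::FastSmi:
    case SketchElementsKind::FastObject:
      return SketchRepresentation::Tagged;          // boxed element from a fast array
    case SketchElementsKind::FastDouble:
    case SketchElementsKind::ExternalFloatOrDouble:
      return SketchRepresentation::Double;          // unboxed double
    default:
      return SketchRepresentation::Int32;           // other external kinds
  }
}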
+ SetGVNFlag(kDependsOnCalls); + } + SetFlag(kUseGVN); } - HValue* object() { return OperandAt(0); } + bool is_external() const { + return IsExternalArrayElementsKind(elements_kind()); + } + HValue* elements() { return OperandAt(0); } HValue* key() { return OperandAt(1); } - HValue* dependency() { return OperandAt(2); } + HValue* dependency() { + ASSERT(HasDependency()); + return OperandAt(2); + } + bool HasDependency() const { return OperandAt(0) != OperandAt(2); } uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); } void SetIndexOffset(uint32_t index_offset) { bit_field_ = IndexOffsetField::update(bit_field_, index_offset); @@ -4283,166 +5402,82 @@ class HLoadKeyedFastElement ElementsKind elements_kind() const { return ElementsKindField::decode(bit_field_); } + LoadKeyedHoleMode hole_mode() const { + return HoleModeField::decode(bit_field_); + } virtual Representation RequiredInputRepresentation(int index) { - // The key is supposed to be Integer32. - if (index == 0) return Representation::Tagged(); - if (index == 1) return Representation::Integer32(); + // kind_fast: tagged[int32] (none) + // kind_double: tagged[int32] (none) + // kind_external: external[int32] (none) + if (index == 0) { + return is_external() ? Representation::External() + : Representation::Tagged(); + } + if (index == 1) { + return ArrayInstructionInterface::KeyedAccessIndexRequirement( + OperandAt(1)->representation()); + } return Representation::None(); } + virtual Representation observed_input_representation(int index) { + return RequiredInputRepresentation(index); + } + virtual void PrintDataTo(StringStream* stream); + bool UsesMustHandleHole() const; bool RequiresHoleCheck() const; - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement) + virtual Range* InferRange(Zone* zone); + + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed) protected: virtual bool DataEquals(HValue* other) { - if (!other->IsLoadKeyedFastElement()) return false; - HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other); + if (!other->IsLoadKeyed()) return false; + HLoadKeyed* other_load = HLoadKeyed::cast(other); + if (IsDehoisted() && index_offset() != other_load->index_offset()) return false; return elements_kind() == other_load->elements_kind(); } private: - virtual bool IsDeletable() const { return !RequiresHoleCheck(); } - - class ElementsKindField: public BitField<ElementsKind, 0, 4> {}; - class IndexOffsetField: public BitField<uint32_t, 4, 27> {}; - class IsDehoistedField: public BitField<bool, 31, 1> {}; - uint32_t bit_field_; -}; - - -enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK }; - - -class HLoadKeyedFastDoubleElement - : public HTemplateInstruction<3>, public ArrayInstructionInterface { - public: - HLoadKeyedFastDoubleElement( - HValue* elements, - HValue* key, - HValue* dependency, - HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK) - : index_offset_(0), - is_dehoisted_(false), - hole_check_mode_(hole_check_mode) { - SetOperandAt(0, elements); - SetOperandAt(1, key); - SetOperandAt(2, dependency); - set_representation(Representation::Double()); - SetGVNFlag(kDependsOnDoubleArrayElements); - SetFlag(kUseGVN); - } - - HValue* elements() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* dependency() { return OperandAt(2); } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return 
is_dehoisted_; } - void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } - - virtual Representation RequiredInputRepresentation(int index) { - // The key is supposed to be Integer32. - if (index == 0) return Representation::Tagged(); - if (index == 1) return Representation::Integer32(); - return Representation::None(); - } - - bool RequiresHoleCheck() const { - return hole_check_mode_ == PERFORM_HOLE_CHECK; - } - - virtual void PrintDataTo(StringStream* stream); - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement) - - protected: - virtual bool DataEquals(HValue* other) { - if (!other->IsLoadKeyedFastDoubleElement()) return false; - HLoadKeyedFastDoubleElement* other_load = - HLoadKeyedFastDoubleElement::cast(other); - return hole_check_mode_ == other_load->hole_check_mode_; - } - - private: - virtual bool IsDeletable() const { return !RequiresHoleCheck(); } - - uint32_t index_offset_; - bool is_dehoisted_; - HoleCheckMode hole_check_mode_; -}; - - -class HLoadKeyedSpecializedArrayElement - : public HTemplateInstruction<3>, public ArrayInstructionInterface { - public: - HLoadKeyedSpecializedArrayElement(HValue* external_elements, - HValue* key, - HValue* dependency, - ElementsKind elements_kind) - : elements_kind_(elements_kind), - index_offset_(0), - is_dehoisted_(false) { - SetOperandAt(0, external_elements); - SetOperandAt(1, key); - SetOperandAt(2, dependency); - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || - elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - set_representation(Representation::Double()); - } else { - set_representation(Representation::Integer32()); - } - SetGVNFlag(kDependsOnSpecializedArrayElements); - // Native code could change the specialized array. - SetGVNFlag(kDependsOnCalls); - SetFlag(kUseGVN); - } - - virtual void PrintDataTo(StringStream* stream); - - virtual Representation RequiredInputRepresentation(int index) { - // The key is supposed to be Integer32. 
- if (index == 0) return Representation::External(); - if (index == 1) return Representation::Integer32(); - return Representation::None(); + virtual bool IsDeletable() const { + return !RequiresHoleCheck(); } - HValue* external_pointer() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* dependency() { return OperandAt(2); } - ElementsKind elements_kind() const { return elements_kind_; } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } - void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } - - virtual Range* InferRange(Zone* zone); - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement) - - protected: - virtual bool DataEquals(HValue* other) { - if (!other->IsLoadKeyedSpecializedArrayElement()) return false; - HLoadKeyedSpecializedArrayElement* cast_other = - HLoadKeyedSpecializedArrayElement::cast(other); - return elements_kind_ == cast_other->elements_kind(); - } + // Establish some checks around our packed fields + enum LoadKeyedBits { + kBitsForElementsKind = 5, + kBitsForHoleMode = 1, + kBitsForIndexOffset = 25, + kBitsForIsDehoisted = 1, - private: - virtual bool IsDeletable() const { return true; } + kStartElementsKind = 0, + kStartHoleMode = kStartElementsKind + kBitsForElementsKind, + kStartIndexOffset = kStartHoleMode + kBitsForHoleMode, + kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset + }; - ElementsKind elements_kind_; - uint32_t index_offset_; - bool is_dehoisted_; + STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset + + kBitsForIsDehoisted) <= sizeof(uint32_t)*8); + STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind)); + class ElementsKindField: + public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind> + {}; // NOLINT + class HoleModeField: + public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode> + {}; // NOLINT + class IndexOffsetField: + public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset> + {}; // NOLINT + class IsDehoistedField: + public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted> + {}; // NOLINT + uint32_t bit_field_; }; @@ -4463,6 +5498,7 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> { virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) { + // tagged[tagged] return Representation::Tagged(); } @@ -4568,84 +5604,83 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> { }; -class HStoreKeyedFastElement +class HStoreKeyed : public HTemplateInstruction<3>, public ArrayInstructionInterface { public: - HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val, - ElementsKind elements_kind = FAST_ELEMENTS) - : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) { + HStoreKeyed(HValue* obj, HValue* key, HValue* val, + ElementsKind elements_kind) + : elements_kind_(elements_kind), + index_offset_(0), + is_dehoisted_(false), + new_space_dominator_(NULL) { SetOperandAt(0, obj); SetOperandAt(1, key); SetOperandAt(2, val); - SetGVNFlag(kChangesArrayElements); - } - - virtual Representation RequiredInputRepresentation(int index) { - // The key is supposed to be Integer32. - return index == 1 - ? 
Representation::Integer32() - : Representation::Tagged(); - } - HValue* object() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* value() { return OperandAt(2); } - bool value_is_smi() { - return IsFastSmiElementsKind(elements_kind_); - } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } - void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } - - bool NeedsWriteBarrier() { - if (value_is_smi()) { - return false; + if (IsFastObjectElementsKind(elements_kind)) { + SetFlag(kTrackSideEffectDominators); + SetGVNFlag(kDependsOnNewSpacePromotion); + } + if (is_external()) { + SetGVNFlag(kChangesSpecializedArrayElements); + } else if (IsFastDoubleElementsKind(elements_kind)) { + SetGVNFlag(kChangesDoubleArrayElements); + SetFlag(kDeoptimizeOnUndefined); } else { - return StoringValueNeedsWriteBarrier(value()); + SetGVNFlag(kChangesArrayElements); } - } - virtual void PrintDataTo(StringStream* stream); + // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating. + if (elements_kind >= EXTERNAL_BYTE_ELEMENTS && + elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) { + SetFlag(kTruncatingToInt32); + } + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement) + virtual Representation RequiredInputRepresentation(int index) { + // kind_fast: tagged[int32] = tagged + // kind_double: tagged[int32] = double + // kind_external: external[int32] = (double | int32) + if (index == 0) { + return is_external() ? Representation::External() + : Representation::Tagged(); + } else if (index == 1) { + return ArrayInstructionInterface::KeyedAccessIndexRequirement( + OperandAt(1)->representation()); + } - private: - ElementsKind elements_kind_; - uint32_t index_offset_; - bool is_dehoisted_; -}; + ASSERT_EQ(index, 2); + if (IsDoubleOrFloatElementsKind(elements_kind())) { + return Representation::Double(); + } + return is_external() ? Representation::Integer32() + : Representation::Tagged(); + } -class HStoreKeyedFastDoubleElement - : public HTemplateInstruction<3>, public ArrayInstructionInterface { - public: - HStoreKeyedFastDoubleElement(HValue* elements, - HValue* key, - HValue* val) - : index_offset_(0), is_dehoisted_(false) { - SetOperandAt(0, elements); - SetOperandAt(1, key); - SetOperandAt(2, val); - SetFlag(kDeoptimizeOnUndefined); - SetGVNFlag(kChangesDoubleArrayElements); + bool is_external() const { + return IsExternalArrayElementsKind(elements_kind()); } - virtual Representation RequiredInputRepresentation(int index) { - if (index == 1) { - return Representation::Integer32(); - } else if (index == 2) { + virtual Representation observed_input_representation(int index) { + if (index < 2) return RequiredInputRepresentation(index); + if (IsDoubleOrFloatElementsKind(elements_kind())) { return Representation::Double(); - } else { - return Representation::Tagged(); } + if (is_external()) { + return Representation::Integer32(); + } + // For fast object elements kinds, don't assume anything. 
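Stores to the external integer element kinds are flagged as truncating in the constructor above: the stored number is reduced to the element's low-order bits, so extra precision in the input never matters. A rough, hypothetical illustration for an unsigned byte element (not V8's conversion routine):

#include <cstdint>
#include <cmath>

inline uint8_t SketchStoreToUint8Element(double value) {
  if (!std::isfinite(value)) return 0;                   // NaN and infinities store as 0
  double truncated = std::trunc(value);                  // drop the fractional part
  double wrapped = std::fmod(truncated, 4294967296.0);   // reduce modulo 2^32
  if (wrapped < 0) wrapped += 4294967296.0;              // move into [0, 2^32)
  uint32_t bits = static_cast<uint32_t>(wrapped);
  return static_cast<uint8_t>(bits);                     // keep the low 8 bits
}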
+ return Representation::None(); } HValue* elements() { return OperandAt(0); } HValue* key() { return OperandAt(1); } HValue* value() { return OperandAt(2); } + bool value_is_smi() const { + return IsFastSmiElementsKind(elements_kind_); + } + ElementsKind elements_kind() const { return elements_kind_; } uint32_t index_offset() { return index_offset_; } void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } HValue* GetKey() { return key(); } @@ -4653,70 +5688,33 @@ class HStoreKeyedFastDoubleElement bool IsDehoisted() { return is_dehoisted_; } void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } - bool NeedsWriteBarrier() { - return StoringValueNeedsWriteBarrier(value()); - } - - bool NeedsCanonicalization(); - - virtual void PrintDataTo(StringStream* stream); - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement) - - private: - uint32_t index_offset_; - bool is_dehoisted_; -}; - - -class HStoreKeyedSpecializedArrayElement - : public HTemplateInstruction<3>, public ArrayInstructionInterface { - public: - HStoreKeyedSpecializedArrayElement(HValue* external_elements, - HValue* key, - HValue* val, - ElementsKind elements_kind) - : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) { - SetGVNFlag(kChangesSpecializedArrayElements); - SetOperandAt(0, external_elements); - SetOperandAt(1, key); - SetOperandAt(2, val); + virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) { + ASSERT(side_effect == kChangesNewSpacePromotion); + new_space_dominator_ = dominator; } - virtual void PrintDataTo(StringStream* stream); + HValue* new_space_dominator() const { return new_space_dominator_; } - virtual Representation RequiredInputRepresentation(int index) { - if (index == 0) { - return Representation::External(); + bool NeedsWriteBarrier() { + if (value_is_smi()) { + return false; } else { - bool float_or_double_elements = - elements_kind() == EXTERNAL_FLOAT_ELEMENTS || - elements_kind() == EXTERNAL_DOUBLE_ELEMENTS; - if (index == 2 && float_or_double_elements) { - return Representation::Double(); - } else { - return Representation::Integer32(); - } + return StoringValueNeedsWriteBarrier(value()) && + ReceiverObjectNeedsWriteBarrier(elements(), new_space_dominator()); } } - HValue* external_pointer() { return OperandAt(0); } - HValue* key() { return OperandAt(1); } - HValue* value() { return OperandAt(2); } - ElementsKind elements_kind() const { return elements_kind_; } - uint32_t index_offset() { return index_offset_; } - void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; } - HValue* GetKey() { return key(); } - void SetKey(HValue* key) { SetOperandAt(1, key); } - bool IsDehoisted() { return is_dehoisted_; } - void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; } + bool NeedsCanonicalization(); + + virtual void PrintDataTo(StringStream* stream); - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed) private: ElementsKind elements_kind_; uint32_t index_offset_; bool is_dehoisted_; + HValue* new_space_dominator_; }; @@ -4742,6 +5740,7 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> { StrictModeFlag strict_mode_flag() { return strict_mode_flag_; } virtual Representation RequiredInputRepresentation(int index) { + // tagged[tagged] = tagged return Representation::Tagged(); } @@ -4754,14 +5753,18 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> { }; -class HTransitionElementsKind: public HTemplateInstruction<1> 
{ +class HTransitionElementsKind: public HTemplateInstruction<2> { public: - HTransitionElementsKind(HValue* object, + HTransitionElementsKind(HValue* context, + HValue* object, Handle<Map> original_map, Handle<Map> transitioned_map) : original_map_(original_map), - transitioned_map_(transitioned_map) { + transitioned_map_(transitioned_map), + from_kind_(original_map->elements_kind()), + to_kind_(transitioned_map->elements_kind()) { SetOperandAt(0, object); + SetOperandAt(1, context); SetFlag(kUseGVN); SetGVNFlag(kChangesElementsKind); if (original_map->has_fast_double_elements()) { @@ -4780,8 +5783,11 @@ class HTransitionElementsKind: public HTemplateInstruction<1> { } HValue* object() { return OperandAt(0); } + HValue* context() { return OperandAt(1); } Handle<Map> original_map() { return original_map_; } Handle<Map> transitioned_map() { return transitioned_map_; } + ElementsKind from_kind() { return from_kind_; } + ElementsKind to_kind() { return to_kind_; } virtual void PrintDataTo(StringStream* stream); @@ -4797,18 +5803,17 @@ class HTransitionElementsKind: public HTemplateInstruction<1> { private: Handle<Map> original_map_; Handle<Map> transitioned_map_; + ElementsKind from_kind_; + ElementsKind to_kind_; }; class HStringAdd: public HBinaryOperation { public: - HStringAdd(HValue* context, HValue* left, HValue* right) - : HBinaryOperation(context, left, right) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetGVNFlag(kDependsOnMaps); - SetGVNFlag(kChangesNewSpacePromotion); - } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* left, + HValue* right); virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); @@ -4823,8 +5828,17 @@ class HStringAdd: public HBinaryOperation { protected: virtual bool DataEquals(HValue* other) { return true; } + + private: + HStringAdd(HValue* context, HValue* left, HValue* right) + : HBinaryOperation(context, left, right) { + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + SetGVNFlag(kDependsOnMaps); + SetGVNFlag(kChangesNewSpacePromotion); + } + // TODO(svenpanne) Might be safe, but leave it out until we know for sure. - // private: // virtual bool IsDeletable() const { return true; } }; @@ -4869,13 +5883,9 @@ class HStringCharCodeAt: public HTemplateInstruction<3> { class HStringCharFromCode: public HTemplateInstruction<2> { public: - HStringCharFromCode(HValue* context, HValue* char_code) { - SetOperandAt(0, context); - SetOperandAt(1, char_code); - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetGVNFlag(kChangesNewSpacePromotion); - } + static HInstruction* New(Zone* zone, + HValue* context, + HValue* char_code); virtual Representation RequiredInputRepresentation(int index) { return index == 0 @@ -4891,19 +5901,23 @@ class HStringCharFromCode: public HTemplateInstruction<2> { DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode) + private: + HStringCharFromCode(HValue* context, HValue* char_code) { + SetOperandAt(0, context); + SetOperandAt(1, char_code); + set_representation(Representation::Tagged()); + SetFlag(kUseGVN); + SetGVNFlag(kChangesNewSpacePromotion); + } + // TODO(svenpanne) Might be safe, but leave it out until we know for sure. 
- // private: - // virtual bool IsDeletable() const { return true; } + // virtual bool IsDeletable() const { return true; } }; class HStringLength: public HUnaryOperation { public: - explicit HStringLength(HValue* string) : HUnaryOperation(string) { - set_representation(Representation::Tagged()); - SetFlag(kUseGVN); - SetGVNFlag(kDependsOnMaps); - } + static HInstruction* New(Zone* zone, HValue* string); virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); @@ -4924,56 +5938,42 @@ class HStringLength: public HUnaryOperation { } private: - virtual bool IsDeletable() const { return true; } -}; - - -class HAllocateObject: public HTemplateInstruction<1> { - public: - HAllocateObject(HValue* context, Handle<JSFunction> constructor) - : constructor_(constructor) { - SetOperandAt(0, context); + explicit HStringLength(HValue* string) : HUnaryOperation(string) { set_representation(Representation::Tagged()); - SetGVNFlag(kChangesNewSpacePromotion); - } - - // Maximum instance size for which allocations will be inlined. - static const int kMaxSize = 64 * kPointerSize; - - HValue* context() { return OperandAt(0); } - Handle<JSFunction> constructor() { return constructor_; } - - virtual Representation RequiredInputRepresentation(int index) { - return Representation::Tagged(); + SetFlag(kUseGVN); + SetGVNFlag(kDependsOnMaps); } - virtual HType CalculateInferredType(); - - DECLARE_CONCRETE_INSTRUCTION(AllocateObject) - - private: - // TODO(svenpanne) Might be safe, but leave it out until we know for sure. - // virtual bool IsDeletable() const { return true; } - Handle<JSFunction> constructor_; + virtual bool IsDeletable() const { return true; } }; template <int V> class HMaterializedLiteral: public HTemplateInstruction<V> { public: + HMaterializedLiteral<V>(int index, int depth, AllocationSiteMode mode) + : literal_index_(index), depth_(depth), allocation_site_mode_(mode) { + this->set_representation(Representation::Tagged()); + } + HMaterializedLiteral<V>(int index, int depth) - : literal_index_(index), depth_(depth) { + : literal_index_(index), depth_(depth), + allocation_site_mode_(DONT_TRACK_ALLOCATION_SITE) { this->set_representation(Representation::Tagged()); } int literal_index() const { return literal_index_; } int depth() const { return depth_; } + AllocationSiteMode allocation_site_mode() const { + return allocation_site_mode_; + } private: virtual bool IsDeletable() const { return true; } int literal_index_; int depth_; + AllocationSiteMode allocation_site_mode_; }; @@ -4983,8 +5983,9 @@ class HFastLiteral: public HMaterializedLiteral<1> { Handle<JSObject> boilerplate, int total_size, int literal_index, - int depth) - : HMaterializedLiteral<1>(literal_index, depth), + int depth, + AllocationSiteMode mode) + : HMaterializedLiteral<1>(literal_index, depth, mode), boilerplate_(boilerplate), total_size_(total_size) { SetOperandAt(0, context); @@ -4999,10 +6000,12 @@ class HFastLiteral: public HMaterializedLiteral<1> { HValue* context() { return OperandAt(0); } Handle<JSObject> boilerplate() const { return boilerplate_; } int total_size() const { return total_size_; } - virtual Representation RequiredInputRepresentation(int index) { return Representation::Tagged(); } + virtual Handle<Map> GetMonomorphicJSObjectMap() { + return Handle<Map>(boilerplate()->map()); + } virtual HType CalculateInferredType(); DECLARE_CONCRETE_INSTRUCTION(FastLiteral) @@ -5019,8 +6022,9 @@ class HArrayLiteral: public HMaterializedLiteral<1> { Handle<HeapObject> 
boilerplate_object, int length, int literal_index, - int depth) - : HMaterializedLiteral<1>(literal_index, depth), + int depth, + AllocationSiteMode mode) + : HMaterializedLiteral<1>(literal_index, depth, mode), length_(length), boilerplate_object_(boilerplate_object) { SetOperandAt(0, context); @@ -5036,7 +6040,6 @@ class HArrayLiteral: public HMaterializedLiteral<1> { } Handle<HeapObject> boilerplate_object() const { return boilerplate_object_; } int length() const { return length_; } - bool IsCopyOnWrite() const; virtual Representation RequiredInputRepresentation(int index) { @@ -5165,7 +6168,6 @@ class HTypeof: public HTemplateInstruction<2> { HValue* context() { return OperandAt(0); } HValue* value() { return OperandAt(1); } - virtual HValue* Canonicalize(); virtual void PrintDataTo(StringStream* stream); virtual Representation RequiredInputRepresentation(int index) { @@ -5179,6 +6181,22 @@ class HTypeof: public HTemplateInstruction<2> { }; +class HTrapAllocationMemento : public HTemplateInstruction<1> { + public: + explicit HTrapAllocationMemento(HValue* obj) { + SetOperandAt(0, obj); + } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + HValue* object() { return OperandAt(0); } + + DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento) +}; + + class HToFastProperties: public HUnaryOperation { public: explicit HToFastProperties(HValue* value) : HUnaryOperation(value) { @@ -5237,6 +6255,33 @@ class HDateField: public HUnaryOperation { }; +class HSeqStringSetChar: public HTemplateInstruction<3> { + public: + HSeqStringSetChar(String::Encoding encoding, + HValue* string, + HValue* index, + HValue* value) : encoding_(encoding) { + SetOperandAt(0, string); + SetOperandAt(1, index); + SetOperandAt(2, value); + } + + String::Encoding encoding() { return encoding_; } + HValue* string() { return OperandAt(0); } + HValue* index() { return OperandAt(1); } + HValue* value() { return OperandAt(2); } + + virtual Representation RequiredInputRepresentation(int index) { + return Representation::Tagged(); + } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar) + + private: + String::Encoding encoding_; +}; + + class HDeleteProperty: public HBinaryOperation { public: HDeleteProperty(HValue* context, HValue* obj, HValue* key) diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc index 8393e51f9e..14feb5f132 100644 --- a/deps/v8/src/hydrogen.cc +++ b/deps/v8/src/hydrogen.cc @@ -71,7 +71,13 @@ HBasicBlock::HBasicBlock(HGraph* graph) parent_loop_header_(NULL), is_inline_return_target_(false), is_deoptimizing_(false), - dominates_loop_successors_(false) { } + dominates_loop_successors_(false), + is_osr_entry_(false) { } + + +Isolate* HBasicBlock::isolate() const { + return graph_->isolate(); +} void HBasicBlock::AttachLoopInformation() { @@ -133,21 +139,30 @@ HDeoptimize* HBasicBlock::CreateDeoptimize( } -HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) { +HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id, + RemovableSimulate removable) { ASSERT(HasEnvironment()); HEnvironment* environment = last_environment(); ASSERT(ast_id.IsNone() || + ast_id == BailoutId::StubEntry() || environment->closure()->shared()->VerifyBailoutId(ast_id)); int push_count = environment->push_count(); int pop_count = environment->pop_count(); - HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone()); - for (int i = push_count - 1; i >= 0; --i) { + HSimulate* instr = + new(zone()) HSimulate(ast_id, pop_count, zone(), removable); + // Order of 
pushed values: newest (top of stack) first. This allows + // HSimulate::MergeInto() to easily append additional pushed values + // that are older (from further down the stack). + for (int i = 0; i < push_count; ++i) { instr->AddPushedValue(environment->ExpressionStackAt(i)); } - for (int i = 0; i < environment->assigned_variables()->length(); ++i) { - int index = environment->assigned_variables()->at(i); + for (GrowableBitVector::Iterator it(environment->assigned_variables(), + zone()); + !it.Done(); + it.Advance()) { + int index = it.Current(); instr->AddAssignedValue(index, environment->Lookup(index)); } environment->ClearHistory(); @@ -212,8 +227,9 @@ void HBasicBlock::SetJoinId(BailoutId ast_id) { HSimulate* simulate = HSimulate::cast(predecessor->end()->previous()); // We only need to verify the ID once. ASSERT(i != 0 || - predecessor->last_environment()->closure()->shared() - ->VerifyBailoutId(ast_id)); + (predecessor->last_environment()->closure().is_null() || + predecessor->last_environment()->closure()->shared() + ->VerifyBailoutId(ast_id))); simulate->set_ast_id(ast_id); } } @@ -488,6 +504,10 @@ class ReachabilityAnalyzer BASE_EMBEDDED { void HGraph::Verify(bool do_full_verify) const { + // Allow dereferencing for debug mode verification. + Heap::RelocationLock(isolate()->heap()); + HandleDereferenceGuard allow_handle_deref(isolate(), + HandleDereferenceGuard::ALLOW); for (int i = 0; i < blocks_.length(); i++) { HBasicBlock* block = blocks_.at(i); @@ -567,18 +587,6 @@ void HGraph::Verify(bool do_full_verify) const { #endif -HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer, - Handle<Object> value) { - if (!pointer->is_set()) { - HConstant* constant = new(zone()) HConstant(value, - Representation::Tagged()); - constant->InsertAfter(GetConstantUndefined()); - pointer->set(constant); - } - return pointer->get(); -} - - HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer, int32_t value) { if (!pointer->is_set()) { @@ -591,6 +599,11 @@ HConstant* HGraph::GetConstantInt32(SetOncePointer<HConstant>* pointer, } +HConstant* HGraph::GetConstant0() { + return GetConstantInt32(&constant_0_, 0); +} + + HConstant* HGraph::GetConstant1() { return GetConstantInt32(&constant_1_, 1); } @@ -601,48 +614,579 @@ HConstant* HGraph::GetConstantMinus1() { } -HConstant* HGraph::GetConstantTrue() { - return GetConstant(&constant_true_, isolate()->factory()->true_value()); +#define DEFINE_GET_CONSTANT(Name, name, htype, boolean_value) \ +HConstant* HGraph::GetConstant##Name() { \ + if (!constant_##name##_.is_set()) { \ + HConstant* constant = new(zone()) HConstant( \ + isolate()->factory()->name##_value(), \ + Representation::Tagged(), \ + htype, \ + false, \ + boolean_value); \ + constant->InsertAfter(GetConstantUndefined()); \ + constant_##name##_.set(constant); \ + } \ + return constant_##name##_.get(); \ +} + + +DEFINE_GET_CONSTANT(True, true, HType::Boolean(), true) +DEFINE_GET_CONSTANT(False, false, HType::Boolean(), false) +DEFINE_GET_CONSTANT(Hole, the_hole, HType::Tagged(), false) + +#undef DEFINE_GET_CONSTANT + + +HGraphBuilder::CheckBuilder::CheckBuilder(HGraphBuilder* builder, BailoutId id) + : builder_(builder), + finished_(false), + id_(id) { + HEnvironment* env = builder->environment(); + failure_block_ = builder->CreateBasicBlock(env->Copy()); + merge_block_ = builder->CreateBasicBlock(env->Copy()); +} + + +void HGraphBuilder::CheckBuilder::CheckNotUndefined(HValue* value) { + HEnvironment* env = builder_->environment(); + HIsNilAndBranch* compare = + 
new(zone()) HIsNilAndBranch(value, kStrictEquality, kUndefinedValue); + HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy()); + HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy()); + compare->SetSuccessorAt(0, failure_block); + compare->SetSuccessorAt(1, success_block); + failure_block->Goto(failure_block_); + builder_->current_block()->Finish(compare); + builder_->set_current_block(success_block); +} + + +void HGraphBuilder::CheckBuilder::CheckIntegerEq(HValue* left, HValue* right) { + HEnvironment* env = builder_->environment(); + HCompareIDAndBranch* compare = + new(zone()) HCompareIDAndBranch(left, right, Token::EQ); + compare->AssumeRepresentation(Representation::Integer32()); + HBasicBlock* success_block = builder_->CreateBasicBlock(env->Copy()); + HBasicBlock* failure_block = builder_->CreateBasicBlock(env->Copy()); + compare->SetSuccessorAt(0, success_block); + compare->SetSuccessorAt(1, failure_block); + failure_block->Goto(failure_block_); + builder_->current_block()->Finish(compare); + builder_->set_current_block(success_block); +} + + +void HGraphBuilder::CheckBuilder::End() { + ASSERT(!finished_); + builder_->current_block()->Goto(merge_block_); + failure_block_->FinishExitWithDeoptimization(HDeoptimize::kUseAll); + failure_block_->SetJoinId(id_); + builder_->set_current_block(merge_block_); + merge_block_->SetJoinId(id_); + finished_ = true; +} + + +HConstant* HGraph::GetInvalidContext() { + return GetConstantInt32(&constant_invalid_context_, 0xFFFFC0C7); +} + + +HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder, BailoutId id) + : builder_(builder), + finished_(false), + id_(id) { + HEnvironment* env = builder->environment(); + first_true_block_ = builder->CreateBasicBlock(env->Copy()); + last_true_block_ = NULL; + first_false_block_ = builder->CreateBasicBlock(env->Copy()); +} + + +HInstruction* HGraphBuilder::IfBuilder::BeginTrue( + HValue* left, + HValue* right, + Token::Value token, + Representation input_representation) { + HCompareIDAndBranch* compare = + new(zone()) HCompareIDAndBranch(left, right, token); + compare->set_observed_input_representation(input_representation, + input_representation); + compare->ChangeRepresentation(input_representation); + compare->SetSuccessorAt(0, first_true_block_); + compare->SetSuccessorAt(1, first_false_block_); + builder_->current_block()->Finish(compare); + builder_->set_current_block(first_true_block_); + return compare; +} + + +void HGraphBuilder::IfBuilder::BeginFalse() { + last_true_block_ = builder_->current_block(); + ASSERT(!last_true_block_->IsFinished()); + builder_->set_current_block(first_false_block_); +} + + +void HGraphBuilder::IfBuilder::End() { + ASSERT(!finished_); + ASSERT(!last_true_block_->IsFinished()); + HBasicBlock* last_false_block = builder_->current_block(); + ASSERT(!last_false_block->IsFinished()); + HEnvironment* merge_env = + last_true_block_->last_environment()->Copy(); + merge_block_ = builder_->CreateBasicBlock(merge_env); + last_true_block_->Goto(merge_block_); + last_false_block->Goto(merge_block_); + merge_block_->SetJoinId(id_); + builder_->set_current_block(merge_block_); + finished_ = true; +} + + +HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, + HValue* context, + LoopBuilder::Direction direction, + BailoutId id) + : builder_(builder), + context_(context), + direction_(direction), + id_(id), + finished_(false) { + header_block_ = builder->CreateLoopHeaderBlock(); + body_block_ = NULL; + exit_block_ = NULL; +} + + +HValue* 
HGraphBuilder::LoopBuilder::BeginBody( + HValue* initial, + HValue* terminating, + Token::Value token, + Representation input_representation) { + HEnvironment* env = builder_->environment(); + phi_ = new(zone()) HPhi(env->values()->length(), zone()); + header_block_->AddPhi(phi_); + phi_->AddInput(initial); + phi_->ChangeRepresentation(Representation::Integer32()); + env->Push(initial); + builder_->current_block()->Goto(header_block_); + + HEnvironment* body_env = env->Copy(); + HEnvironment* exit_env = env->Copy(); + body_block_ = builder_->CreateBasicBlock(body_env); + exit_block_ = builder_->CreateBasicBlock(exit_env); + // Remove the phi from the expression stack + body_env->Pop(); + + builder_->set_current_block(header_block_); + HCompareIDAndBranch* compare = + new(zone()) HCompareIDAndBranch(phi_, terminating, token); + compare->set_observed_input_representation(input_representation, + input_representation); + compare->ChangeRepresentation(input_representation); + compare->SetSuccessorAt(0, body_block_); + compare->SetSuccessorAt(1, exit_block_); + builder_->current_block()->Finish(compare); + + builder_->set_current_block(body_block_); + if (direction_ == kPreIncrement || direction_ == kPreDecrement) { + HValue* one = builder_->graph()->GetConstant1(); + if (direction_ == kPreIncrement) { + increment_ = HAdd::New(zone(), context_, phi_, one); + } else { + increment_ = HSub::New(zone(), context_, phi_, one); + } + increment_->ClearFlag(HValue::kCanOverflow); + increment_->ChangeRepresentation(Representation::Integer32()); + builder_->AddInstruction(increment_); + return increment_; + } else { + return phi_; + } +} + + +void HGraphBuilder::LoopBuilder::EndBody() { + ASSERT(!finished_); + + if (direction_ == kPostIncrement || direction_ == kPostDecrement) { + HValue* one = builder_->graph()->GetConstant1(); + if (direction_ == kPostIncrement) { + increment_ = HAdd::New(zone(), context_, phi_, one); + } else { + increment_ = HSub::New(zone(), context_, phi_, one); + } + increment_->ClearFlag(HValue::kCanOverflow); + increment_->ChangeRepresentation(Representation::Integer32()); + builder_->AddInstruction(increment_); + } + + // Push the new increment value on the expression stack to merge into the phi. 
+ builder_->environment()->Push(increment_); + builder_->current_block()->Goto(header_block_); + header_block_->loop_information()->RegisterBackEdge(body_block_); + header_block_->SetJoinId(id_); + + builder_->set_current_block(exit_block_); + // Pop the phi from the expression stack + builder_->environment()->Pop(); + finished_ = true; } -HConstant* HGraph::GetConstantFalse() { - return GetConstant(&constant_false_, isolate()->factory()->false_value()); +HGraph* HGraphBuilder::CreateGraph() { + graph_ = new(zone()) HGraph(info_); + if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_); + HPhase phase("H_Block building", isolate()); + set_current_block(graph()->entry_block()); + if (!BuildGraph()) return NULL; + return graph_; } -HConstant* HGraph::GetConstantHole() { - return GetConstant(&constant_hole_, isolate()->factory()->the_hole_value()); +HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { + ASSERT(current_block() != NULL); + current_block()->AddInstruction(instr); + return instr; } -HGraphBuilder::HGraphBuilder(CompilationInfo* info, - TypeFeedbackOracle* oracle) - : function_state_(NULL), +void HGraphBuilder::AddSimulate(BailoutId id, + RemovableSimulate removable) { + ASSERT(current_block() != NULL); + current_block()->AddSimulate(id, removable); +} + + +HBoundsCheck* HGraphBuilder::AddBoundsCheck(HValue* index, + HValue* length, + BoundsCheckKeyMode key_mode, + Representation r) { + if (!index->type().IsSmi()) { + index = new(graph()->zone()) HCheckSmiOrInt32(index); + AddInstruction(HCheckSmiOrInt32::cast(index)); + } + if (!length->type().IsSmi()) { + length = new(graph()->zone()) HCheckSmiOrInt32(length); + AddInstruction(HCheckSmiOrInt32::cast(length)); + } + HBoundsCheck* result = new(graph()->zone()) HBoundsCheck( + index, length, key_mode, r); + AddInstruction(result); + return result; +} + + +HReturn* HGraphBuilder::AddReturn(HValue* value) { + HValue* context = environment()->LookupContext(); + int num_parameters = graph()->info()->num_parameters(); + HValue* params = AddInstruction(new(graph()->zone()) + HConstant(num_parameters, Representation::Integer32())); + HReturn* return_instruction = new(graph()->zone()) + HReturn(value, context, params); + current_block()->FinishExit(return_instruction); + return return_instruction; +} + + +HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) { + HBasicBlock* b = graph()->CreateBasicBlock(); + b->SetInitialEnvironment(env); + return b; +} + + +HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() { + HBasicBlock* header = graph()->CreateBasicBlock(); + HEnvironment* entry_env = environment()->CopyAsLoopHeader(header); + header->SetInitialEnvironment(entry_env); + header->AttachLoopInformation(); + return header; +} + + +HInstruction* HGraphBuilder::BuildExternalArrayElementAccess( + HValue* external_elements, + HValue* checked_key, + HValue* val, + HValue* dependency, + ElementsKind elements_kind, + bool is_store) { + Zone* zone = this->zone(); + if (is_store) { + ASSERT(val != NULL); + switch (elements_kind) { + case EXTERNAL_PIXEL_ELEMENTS: { + val = AddInstruction(new(zone) HClampToUint8(val)); + break; + } + case EXTERNAL_BYTE_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_SHORT_ELEMENTS: + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + case EXTERNAL_INT_ELEMENTS: + case EXTERNAL_UNSIGNED_INT_ELEMENTS: { + break; + } + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + break; + case FAST_SMI_ELEMENTS: + case FAST_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + 
case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case DICTIONARY_ELEMENTS: + case NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + return new(zone) HStoreKeyed(external_elements, checked_key, + val, elements_kind); + } else { + ASSERT(val == NULL); + HLoadKeyed* load = + new(zone) HLoadKeyed( + external_elements, checked_key, dependency, elements_kind); + if (FLAG_opt_safe_uint32_operations && + elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { + graph()->RecordUint32Instruction(load); + } + return load; + } +} + + +HInstruction* HGraphBuilder::BuildFastElementAccess( + HValue* elements, + HValue* checked_key, + HValue* val, + HValue* load_dependency, + ElementsKind elements_kind, + bool is_store) { + Zone* zone = this->zone(); + if (is_store) { + ASSERT(val != NULL); + switch (elements_kind) { + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + // Smi-only arrays need a smi check. + AddInstruction(new(zone) HCheckSmi(val)); + // Fall through. + case FAST_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + return new(zone) HStoreKeyed(elements, checked_key, val, elements_kind); + default: + UNREACHABLE(); + return NULL; + } + } + // It's an element load (!is_store). + return new(zone) HLoadKeyed(elements, + checked_key, + load_dependency, + elements_kind); +} + + +HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( + HValue* object, + HValue* key, + HValue* val, + HCheckMaps* mapcheck, + bool is_js_array, + ElementsKind elements_kind, + bool is_store, + Representation checked_index_representation) { + Zone* zone = this->zone(); + // No GVNFlag is necessary for ElementsKind if there is an explicit dependency + // on a HElementsTransition instruction. The flag can also be removed if the + // map to check has FAST_HOLEY_ELEMENTS, since there can be no further + // ElementsKind transitions. Finally, the dependency can be removed for stores + // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the + // generated store code. 
+ if ((elements_kind == FAST_HOLEY_ELEMENTS) || + (elements_kind == FAST_ELEMENTS && is_store)) { + if (mapcheck != NULL) { + mapcheck->ClearGVNFlag(kDependsOnElementsKind); + } + } + bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind); + bool fast_elements = IsFastObjectElementsKind(elements_kind); + HInstruction* elements = + AddInstruction(new(zone) HLoadElements(object, mapcheck)); + if (is_store && (fast_elements || fast_smi_only_elements)) { + HCheckMaps* check_cow_map = new(zone) HCheckMaps( + elements, isolate()->factory()->fixed_array_map(), zone); + check_cow_map->ClearGVNFlag(kDependsOnElementsKind); + AddInstruction(check_cow_map); + } + HInstruction* length = NULL; + HInstruction* checked_key = NULL; + if (IsExternalArrayElementsKind(elements_kind)) { + length = AddInstruction(new(zone) HFixedArrayBaseLength(elements)); + checked_key = AddBoundsCheck( + key, length, ALLOW_SMI_KEY, checked_index_representation); + HLoadExternalArrayPointer* external_elements = + new(zone) HLoadExternalArrayPointer(elements); + AddInstruction(external_elements); + return BuildExternalArrayElementAccess( + external_elements, checked_key, val, mapcheck, + elements_kind, is_store); + } + ASSERT(fast_smi_only_elements || + fast_elements || + IsFastDoubleElementsKind(elements_kind)); + if (is_js_array) { + length = AddInstruction(new(zone) HJSArrayLength(object, mapcheck, + HType::Smi())); + } else { + length = AddInstruction(new(zone) HFixedArrayBaseLength(elements)); + } + checked_key = AddBoundsCheck( + key, length, ALLOW_SMI_KEY, checked_index_representation); + return BuildFastElementAccess(elements, checked_key, val, mapcheck, + elements_kind, is_store); +} + + +HValue* HGraphBuilder::BuildAllocateElements(HContext* context, + ElementsKind kind, + HValue* capacity) { + Zone* zone = this->zone(); + + int elements_size = IsFastDoubleElementsKind(kind) + ? kDoubleSize : kPointerSize; + HConstant* elements_size_value = + new(zone) HConstant(elements_size, Representation::Integer32()); + AddInstruction(elements_size_value); + HValue* mul = AddInstruction( + HMul::New(zone, context, capacity, elements_size_value)); + mul->ChangeRepresentation(Representation::Integer32()); + mul->ClearFlag(HValue::kCanOverflow); + + HConstant* header_size = + new(zone) HConstant(FixedArray::kHeaderSize, Representation::Integer32()); + AddInstruction(header_size); + HValue* total_size = AddInstruction( + HAdd::New(zone, context, mul, header_size)); + total_size->ChangeRepresentation(Representation::Integer32()); + total_size->ClearFlag(HValue::kCanOverflow); + + HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE; + // TODO(hpayer): add support for old data space + if (FLAG_pretenure_literals && !IsFastDoubleElementsKind(kind)) { + flags = static_cast<HAllocate::Flags>( + flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE); + } + if (IsFastDoubleElementsKind(kind)) { + flags = static_cast<HAllocate::Flags>( + flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED); + } + + HValue* elements = + AddInstruction(new(zone) HAllocate(context, total_size, + HType::JSArray(), flags)); + + Factory* factory = isolate()->factory(); + Handle<Map> map = IsFastDoubleElementsKind(kind) + ? 
factory->fixed_double_array_map() + : factory->fixed_array_map(); + BuildStoreMap(elements, map, BailoutId::StubEntry()); + + Handle<String> fixed_array_length_field_name = factory->length_field_string(); + HInstruction* store_length = + new(zone) HStoreNamedField(elements, fixed_array_length_field_name, + capacity, true, FixedArray::kLengthOffset); + AddInstruction(store_length); + AddSimulate(BailoutId::StubEntry(), FIXED_SIMULATE); + + return elements; +} + + +HInstruction* HGraphBuilder::BuildStoreMap(HValue* object, + HValue* map, + BailoutId id) { + Zone* zone = this->zone(); + Factory* factory = isolate()->factory(); + Handle<String> map_field_name = factory->map_field_string(); + HInstruction* store_map = + new(zone) HStoreNamedField(object, map_field_name, map, + true, JSObject::kMapOffset); + store_map->SetGVNFlag(kChangesMaps); + AddInstruction(store_map); + AddSimulate(id, FIXED_SIMULATE); + return store_map; +} + + +HInstruction* HGraphBuilder::BuildStoreMap(HValue* object, + Handle<Map> map, + BailoutId id) { + Zone* zone = this->zone(); + HValue* map_constant = + AddInstruction(new(zone) HConstant(map, Representation::Tagged())); + return BuildStoreMap(object, map_constant, id); +} + + +void HGraphBuilder::BuildCopyElements(HContext* context, + HValue* from_elements, + ElementsKind from_elements_kind, + HValue* to_elements, + ElementsKind to_elements_kind, + HValue* length) { + LoopBuilder builder(this, context, LoopBuilder::kPostIncrement, + BailoutId::StubEntry()); + + HValue* key = builder.BeginBody(graph()->GetConstant0(), + length, Token::LT); + + HValue* element = + AddInstruction(new(zone()) HLoadKeyed(from_elements, key, NULL, + from_elements_kind, + ALLOW_RETURN_HOLE)); + + AddInstruction(new(zone()) HStoreKeyed(to_elements, key, element, + to_elements_kind)); + AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE); + + builder.EndBody(); +} + + +HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info, + TypeFeedbackOracle* oracle) + : HGraphBuilder(info), + function_state_(NULL), initial_function_state_(this, info, oracle, NORMAL_RETURN), ast_context_(NULL), break_scope_(NULL), - graph_(NULL), - current_block_(NULL), inlined_count_(0), globals_(10, info->zone()), - zone_(info->zone()), inline_bailout_(false) { // This is not initialized in the initializer list because the // constructor for the initial state relies on function_state_ == NULL // to know it's the initial state. 
function_state_= &initial_function_state_; + InitializeAstVisitor(); } -HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first, - HBasicBlock* second, - BailoutId join_id) { + +HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first, + HBasicBlock* second, + BailoutId join_id) { if (first == NULL) { return second; } else if (second == NULL) { return first; } else { - HBasicBlock* join_block = graph_->CreateBasicBlock(); + HBasicBlock* join_block = graph()->CreateBasicBlock(); first->Goto(join_block); second->Goto(join_block); join_block->SetJoinId(join_id); @@ -651,9 +1195,9 @@ HBasicBlock* HGraphBuilder::CreateJoin(HBasicBlock* first, } -HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement, - HBasicBlock* exit_block, - HBasicBlock* continue_block) { +HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement, + HBasicBlock* exit_block, + HBasicBlock* continue_block) { if (continue_block != NULL) { if (exit_block != NULL) exit_block->Goto(continue_block); continue_block->SetJoinId(statement->ContinueId()); @@ -663,11 +1207,11 @@ HBasicBlock* HGraphBuilder::JoinContinue(IterationStatement* statement, } -HBasicBlock* HGraphBuilder::CreateLoop(IterationStatement* statement, - HBasicBlock* loop_entry, - HBasicBlock* body_exit, - HBasicBlock* loop_successor, - HBasicBlock* break_block) { +HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement, + HBasicBlock* loop_entry, + HBasicBlock* body_exit, + HBasicBlock* loop_successor, + HBasicBlock* break_block) { if (body_exit != NULL) body_exit->Goto(loop_entry); loop_entry->PostProcessLoopHeader(statement); if (break_block != NULL) { @@ -697,9 +1241,18 @@ HGraph::HGraph(CompilationInfo* info) zone_(info->zone()), is_recursive_(false), use_optimistic_licm_(false), + has_soft_deoptimize_(false), type_change_checksum_(0) { - start_environment_ = - new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_); + if (info->IsStub()) { + HydrogenCodeStub* stub = info->code_stub(); + CodeStubInterfaceDescriptor* descriptor = + stub->GetInterfaceDescriptor(isolate_); + start_environment_ = + new(zone_) HEnvironment(zone_, descriptor->environment_length()); + } else { + start_environment_ = + new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_); + } start_environment_->set_ast_id(BailoutId::FunctionEntry()); entry_block_ = CreateBasicBlock(); entry_block_->SetInitialEnvironment(start_environment_); @@ -1022,7 +1575,7 @@ class PostorderProcessor : public ZoneObject { void HGraph::OrderBlocks() { - HPhase phase("H_Block ordering"); + HPhase phase("H_Block ordering", isolate()); BitVector visited(blocks_.length(), zone()); ZoneList<HBasicBlock*> reverse_result(8, zone()); @@ -1059,13 +1612,18 @@ void HGraph::AssignDominators() { } } + // Mark all blocks that are dominated by an unconditional soft deoptimize to // prevent code motion across those blocks. void HGraph::PropagateDeoptimizingMark() { HPhase phase("H_Propagate deoptimizing mark", this); + // Skip this phase if there is nothing to be done anyway. 
+ if (!has_soft_deoptimize()) return; MarkAsDeoptimizingRecursively(entry_block()); + NullifyUnreachableInstructions(); } + void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) { for (int i = 0; i < block->dominated_blocks()->length(); ++i) { HBasicBlock* dominated = block->dominated_blocks()->at(i); @@ -1074,37 +1632,112 @@ void HGraph::MarkAsDeoptimizingRecursively(HBasicBlock* block) { } } -void HGraph::EliminateRedundantPhis() { - HPhase phase("H_Redundant phi elimination", this); - // Worklist of phis that can potentially be eliminated. Initialized with - // all phi nodes. When elimination of a phi node modifies another phi node - // the modified phi node is added to the worklist. - ZoneList<HPhi*> worklist(blocks_.length(), zone()); - for (int i = 0; i < blocks_.length(); ++i) { - worklist.AddAll(*blocks_[i]->phis(), zone()); +void HGraph::NullifyUnreachableInstructions() { + if (!FLAG_unreachable_code_elimination) return; + int block_count = blocks_.length(); + for (int i = 0; i < block_count; ++i) { + HBasicBlock* block = blocks_.at(i); + bool nullify = false; + const ZoneList<HBasicBlock*>* predecessors = block->predecessors(); + int predecessors_length = predecessors->length(); + bool all_predecessors_deoptimizing = (predecessors_length > 0); + for (int j = 0; j < predecessors_length; ++j) { + if (!predecessors->at(j)->IsDeoptimizing()) { + all_predecessors_deoptimizing = false; + break; + } + } + if (all_predecessors_deoptimizing) nullify = true; + for (HInstruction* instr = block->first(); instr != NULL; + instr = instr->next()) { + // Leave the basic structure of the graph intact. + if (instr->IsBlockEntry()) continue; + if (instr->IsControlInstruction()) continue; + if (instr->IsSimulate()) continue; + if (instr->IsEnterInlined()) continue; + if (instr->IsLeaveInlined()) continue; + if (nullify) { + HInstruction* last_dummy = NULL; + for (int j = 0; j < instr->OperandCount(); ++j) { + HValue* operand = instr->OperandAt(j); + // Insert an HDummyUse for each operand, unless the operand + // is an HDummyUse itself. If it's even from the same block, + // remember it as a potential replacement for the instruction. + if (operand->IsDummyUse()) { + if (operand->block() == instr->block() && + last_dummy == NULL) { + last_dummy = HInstruction::cast(operand); + } + continue; + } + if (operand->IsControlInstruction()) { + // Inserting a dummy use for a value that's not defined anywhere + // will fail. Some instructions define fake inputs on such + // values as control flow dependencies. + continue; + } + HDummyUse* dummy = new(zone()) HDummyUse(operand); + dummy->InsertBefore(instr); + last_dummy = dummy; + } + if (last_dummy == NULL) last_dummy = GetConstant1(); + instr->DeleteAndReplaceWith(last_dummy); + continue; + } + if (instr->IsSoftDeoptimize()) { + ASSERT(block->IsDeoptimizing()); + nullify = true; + } + } } +} - while (!worklist.is_empty()) { - HPhi* phi = worklist.RemoveLast(); - HBasicBlock* block = phi->block(); - - // Skip phi node if it was already replaced. - if (block == NULL) continue; - // Get replacement value if phi is redundant. - HValue* replacement = phi->GetRedundantReplacement(); +// Replace all phis consisting of a single non-loop operand plus any number of +// loop operands by that single non-loop operand. +void HGraph::EliminateRedundantPhis() { + HPhase phase("H_Redundant phi elimination", this); - if (replacement != NULL) { - // Iterate through the uses and replace them all. 
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) { - HValue* value = it.value(); - value->SetOperandAt(it.index(), replacement); - if (value->IsPhi()) worklist.Add(HPhi::cast(value), zone()); + // We do a simple fixed point iteration without any work list, because + // machine-generated JavaScript can lead to a very dense Hydrogen graph with + // an enormous work list and will consequently result in OOM. Experiments + // showed that this simple algorithm is good enough, and even e.g. tracking + // the set or range of blocks to consider is not a real improvement. + bool need_another_iteration; + ZoneList<HPhi*> redundant_phis(blocks_.length(), zone()); + do { + need_another_iteration = false; + for (int i = 0; i < blocks_.length(); ++i) { + HBasicBlock* block = blocks_[i]; + for (int j = 0; j < block->phis()->length(); j++) { + HPhi* phi = block->phis()->at(j); + HValue* replacement = phi->GetRedundantReplacement(); + if (replacement != NULL) { + // Remember phi to avoid concurrent modification of the block's phis. + redundant_phis.Add(phi, zone()); + for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) { + HValue* value = it.value(); + value->SetOperandAt(it.index(), replacement); + need_another_iteration |= value->IsPhi(); + } + } } - block->RemovePhi(phi); + for (int i = 0; i < redundant_phis.length(); i++) { + block->RemovePhi(redundant_phis[i]); + } + redundant_phis.Clear(); + } + } while (need_another_iteration); + +#if DEBUG + // Make sure that we *really* removed all redundant phis. + for (int i = 0; i < blocks_.length(); ++i) { + for (int j = 0; j < blocks_[i]->phis()->length(); j++) { + ASSERT(blocks_[i]->phis()->at(j)->GetRedundantReplacement() == NULL); } } +#endif } @@ -1291,12 +1924,12 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) { void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test, HBasicBlock* dest) { ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest)); - if (test->GetInputRepresentation().IsInteger32()) { + if (test->representation().IsInteger32()) { Token::Value op = test->token(); if (test->SecondSuccessor() == dest) { op = Token::NegateCompareOp(op); } - Token::Value inverted_op = Token::InvertCompareOp(op); + Token::Value inverted_op = Token::ReverseCompareOp(op); UpdateControlFlowRange(op, test->left(), test->right()); UpdateControlFlowRange(inverted_op, test->right(), test->left()); } @@ -1989,7 +2622,7 @@ void HGlobalValueNumberer::ProcessLoopBlock( bool HGlobalValueNumberer::AllowCodeMotion() { - return info()->shared_info()->opt_count() + 1 < FLAG_max_opt_count; + return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count; } @@ -2190,7 +2823,8 @@ void HGlobalValueNumberer::AnalyzeGraph() { map->Add(instr, zone()); } } - if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) { + if (instr->IsLinked() && + instr->CheckFlag(HValue::kTrackSideEffectDominators)) { for (int i = 0; i < kNumberOfTrackedSideEffects; i++) { HValue* other = dominators->at(i); GVNFlag changes_flag = HValue::ChangesFlagFromInt(i); @@ -2239,32 +2873,8 @@ void HGlobalValueNumberer::AnalyzeGraph() { } -class HInferRepresentation BASE_EMBEDDED { - public: - explicit HInferRepresentation(HGraph* graph) - : graph_(graph), - worklist_(8, graph->zone()), - in_worklist_(graph->GetMaximumValueID(), graph->zone()) { } - - void Analyze(); - - private: - Representation TryChange(HValue* current); - void AddToWorklist(HValue* current); - void InferBasedOnInputs(HValue* current); - void AddDependantsToWorklist(HValue* 
current); - void InferBasedOnUses(HValue* current); - - Zone* zone() const { return graph_->zone(); } - - HGraph* graph_; - ZoneList<HValue*> worklist_; - BitVector in_worklist_; -}; - - void HInferRepresentation::AddToWorklist(HValue* current) { - if (current->representation().IsSpecialization()) return; + if (current->representation().IsTagged()) return; if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return; if (in_worklist_.Contains(current->id())) return; worklist_.Add(current, zone()); @@ -2272,105 +2882,6 @@ void HInferRepresentation::AddToWorklist(HValue* current) { } -// This method tries to specialize the representation type of the value -// given as a parameter. The value is asked to infer its representation type -// based on its inputs. If the inferred type is more specialized, then this -// becomes the new representation type of the node. -void HInferRepresentation::InferBasedOnInputs(HValue* current) { - Representation r = current->representation(); - if (r.IsSpecialization()) return; - ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation)); - Representation inferred = current->InferredRepresentation(); - if (inferred.IsSpecialization()) { - if (FLAG_trace_representation) { - PrintF("Changing #%d representation %s -> %s based on inputs\n", - current->id(), - r.Mnemonic(), - inferred.Mnemonic()); - } - current->ChangeRepresentation(inferred); - AddDependantsToWorklist(current); - } -} - - -void HInferRepresentation::AddDependantsToWorklist(HValue* value) { - for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) { - AddToWorklist(it.value()); - } - for (int i = 0; i < value->OperandCount(); ++i) { - AddToWorklist(value->OperandAt(i)); - } -} - - -// This method calculates whether specializing the representation of the value -// given as the parameter has a benefit in terms of less necessary type -// conversions. If there is a benefit, then the representation of the value is -// specialized. -void HInferRepresentation::InferBasedOnUses(HValue* value) { - Representation r = value->representation(); - if (r.IsSpecialization() || value->HasNoUses()) return; - ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation)); - Representation new_rep = TryChange(value); - if (!new_rep.IsNone()) { - if (!value->representation().Equals(new_rep)) { - if (FLAG_trace_representation) { - PrintF("Changing #%d representation %s -> %s based on uses\n", - value->id(), - r.Mnemonic(), - new_rep.Mnemonic()); - } - value->ChangeRepresentation(new_rep); - AddDependantsToWorklist(value); - } - } -} - - -Representation HInferRepresentation::TryChange(HValue* value) { - // Array of use counts for each representation. - int use_count[Representation::kNumRepresentations] = { 0 }; - - for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) { - HValue* use = it.value(); - Representation rep = use->ObservedInputRepresentation(it.index()); - if (rep.IsNone()) continue; - if (FLAG_trace_representation) { - PrintF("%d %s is used by %d %s as %s\n", - value->id(), - value->Mnemonic(), - use->id(), - use->Mnemonic(), - rep.Mnemonic()); - } - if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]); - use_count[rep.kind()] += use->LoopWeight(); - } - int tagged_count = use_count[Representation::kTagged]; - int double_count = use_count[Representation::kDouble]; - int int32_count = use_count[Representation::kInteger32]; - int non_tagged_count = double_count + int32_count; - - // If a non-loop phi has tagged uses, don't convert it to untagged. 
- if (value->IsPhi() && !value->block()->IsLoopHeader() && tagged_count > 0) { - return Representation::None(); - } - - // Prefer unboxing over boxing, the latter is more expensive. - if (tagged_count > non_tagged_count) return Representation::None(); - - // Prefer Integer32 over Double, if possible. - if (int32_count > 0 && value->IsConvertibleToInteger()) { - return Representation::Integer32(); - } - - if (double_count > 0) return Representation::Double(); - - return Representation::None(); -} - - void HInferRepresentation::Analyze() { HPhase phase("H_Infer representations", graph_); @@ -2421,7 +2932,6 @@ void HInferRepresentation::Analyze() { it.Advance()) { HPhi* phi = phi_list->at(it.Current()); phi->set_is_convertible_to_integer(false); - phi->ResetInteger32Uses(); } } @@ -2457,8 +2967,74 @@ void HInferRepresentation::Analyze() { while (!worklist_.is_empty()) { HValue* current = worklist_.RemoveLast(); in_worklist_.Remove(current->id()); - InferBasedOnInputs(current); - InferBasedOnUses(current); + current->InferRepresentation(this); + } + + // Lastly: any instruction that we don't have representation information + // for defaults to Tagged. + for (int i = 0; i < graph_->blocks()->length(); ++i) { + HBasicBlock* block = graph_->blocks()->at(i); + const ZoneList<HPhi*>* phis = block->phis(); + for (int j = 0; j < phis->length(); ++j) { + HPhi* phi = phis->at(j); + if (phi->representation().IsNone()) { + phi->ChangeRepresentation(Representation::Tagged()); + } + } + for (HInstruction* current = block->first(); + current != NULL; current = current->next()) { + if (current->representation().IsNone() && + current->CheckFlag(HInstruction::kFlexibleRepresentation)) { + current->ChangeRepresentation(Representation::Tagged()); + } + } + } +} + + +void HGraph::MergeRemovableSimulates() { + for (int i = 0; i < blocks()->length(); ++i) { + HBasicBlock* block = blocks()->at(i); + // Always reset the folding candidate at the start of a block. + HSimulate* folding_candidate = NULL; + // Nasty heuristic: Never remove the first simulate in a block. This + // just so happens to have a beneficial effect on register allocation. + bool first = true; + for (HInstruction* current = block->first(); + current != NULL; current = current->next()) { + if (current->IsLeaveInlined()) { + // Never fold simulates from inlined environments into simulates + // in the outer environment. + // (Before each HEnterInlined, there is a non-foldable HSimulate + // anyway, so we get the barrier in the other direction for free.) + if (folding_candidate != NULL) { + folding_candidate->DeleteAndReplaceWith(NULL); + } + folding_candidate = NULL; + continue; + } + // If we have an HSimulate and a candidate, perform the folding. + if (!current->IsSimulate()) continue; + if (first) { + first = false; + continue; + } + HSimulate* current_simulate = HSimulate::cast(current); + if (folding_candidate != NULL) { + folding_candidate->MergeInto(current_simulate); + folding_candidate->DeleteAndReplaceWith(NULL); + folding_candidate = NULL; + } + // Check if the current simulate is a candidate for folding. 
+      if (current_simulate->previous()->HasObservableSideEffects() && +          !current_simulate->next()->IsSimulate()) { +        continue; +      } +      if (!current_simulate->is_candidate_for_removal()) { +        continue; +      } +      folding_candidate = current_simulate; +    } } } @@ -2553,7 +3129,6 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value, } else { next = HInstruction::cast(use_value); } - // For constants we try to make the representation change at compile // time. When a representation change is not possible without loss of // information we treat constants like normal instructions and insert the @@ -2565,7 +3140,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value, if (value->IsConstant()) { HConstant* constant = HConstant::cast(value); // Try to create a new copy of the constant with the new representation. - new_value = is_truncating + new_value = (is_truncating && to.IsInteger32()) ? constant->CopyToTruncatedInt32(zone()) : constant->CopyToRepresentation(to, zone()); } @@ -2625,9 +3200,23 @@ void HGraph::InsertRepresentationChanges() { for (int i = 0; i < phi_list()->length(); i++) { HPhi* phi = phi_list()->at(i); if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue; - if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) { - phi->ClearFlag(HValue::kTruncatingToInt32); - change = true; + for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) { + // If a Phi is used as a non-truncating int32 or as a double, + // clear its "truncating" flag. + HValue* use = it.value(); + Representation input_representation = + use->RequiredInputRepresentation(it.index()); + if ((input_representation.IsInteger32() && + !use->CheckFlag(HValue::kTruncatingToInt32)) || + input_representation.IsDouble()) { + if (FLAG_trace_representation) { + PrintF("#%d Phi is not truncating because of #%d %s\n", + phi->id(), it.value()->id(), it.value()->Mnemonic()); + } + phi->ClearFlag(HValue::kTruncatingToInt32); + change = true; + break; + } } } } @@ -2642,8 +3231,9 @@ void HGraph::InsertRepresentationChanges() { // Process normal instructions. HInstruction* current = blocks_[i]->first(); while (current != NULL) { + HInstruction* next = current->next(); InsertRepresentationChangesForValue(current); - current = current->next(); + current = next; } } } @@ -2715,17 +3305,18 @@ bool Uint32Analysis::IsSafeUint32Use(HValue* val, HValue* use) { } else if (use->IsChange() || use->IsSimulate()) { // Conversions and deoptimization have special support for uint32. return true; - } else if (use->IsStoreKeyedSpecializedArrayElement()) { - // Storing a value into an external integer array is a bit level operation. - HStoreKeyedSpecializedArrayElement* store = - HStoreKeyedSpecializedArrayElement::cast(use); - - if (store->value() == val) { - // Clamping or a conversion to double should have been inserted. - ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS); - ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS); - ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS); - return true; + } else if (use->IsStoreKeyed()) { + HStoreKeyed* store = HStoreKeyed::cast(use); + if (store->is_external()) { + // Storing a value into an external integer array is a bit level + // operation. + if (store->value() == val) { + // Clamping or a conversion to double should have been inserted.
+ ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS); + ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS); + ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS); + return true; + } } } @@ -2931,7 +3522,7 @@ void HGraph::ComputeMinusZeroChecks() { // Implementation of utility class to encapsulate the translation state for // a (possibly inlined) function. -FunctionState::FunctionState(HGraphBuilder* owner, +FunctionState::FunctionState(HOptimizedGraphBuilder* owner, CompilationInfo* info, TypeFeedbackOracle* oracle, InliningKind inlining_kind) @@ -2980,7 +3571,7 @@ FunctionState::~FunctionState() { // Implementation of utility classes to represent an expression's context in // the AST. -AstContext::AstContext(HGraphBuilder* owner, Expression::Context kind) +AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind) : owner_(owner), kind_(kind), outer_(owner->ast_context()), @@ -3037,7 +3628,9 @@ void TestContext::ReturnValue(HValue* value) { void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) { ASSERT(!instr->IsControlInstruction()); owner()->AddInstruction(instr); - if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id); + if (instr->HasObservableSideEffects()) { + owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE); + } } @@ -3061,7 +3654,9 @@ void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) { } owner()->AddInstruction(instr); owner()->Push(instr); - if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id); + if (instr->HasObservableSideEffects()) { + owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE); + } } @@ -3087,13 +3682,13 @@ void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) { void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) { ASSERT(!instr->IsControlInstruction()); - HGraphBuilder* builder = owner(); + HOptimizedGraphBuilder* builder = owner(); builder->AddInstruction(instr); // We expect a simulate after every expression with side effects, though // this one isn't actually needed (and wouldn't work if it were targeted). if (instr->HasObservableSideEffects()) { builder->Push(instr); - builder->AddSimulate(ast_id); + builder->AddSimulate(ast_id, REMOVABLE_SIMULATE); builder->Pop(); } BuildBranch(instr); @@ -3118,10 +3713,20 @@ void TestContext::BuildBranch(HValue* value) { // connects a branch node to a join node. We conservatively ensure that // property by always adding an empty block on the outgoing edges of this // branch. 
- HGraphBuilder* builder = owner(); + HOptimizedGraphBuilder* builder = owner(); if (value != NULL && value->CheckFlag(HValue::kIsArguments)) { builder->Bailout("arguments object value in a test context"); } + if (value->IsConstant()) { + HConstant* constant_value = HConstant::cast(value); + if (constant_value->BooleanValue()) { + builder->current_block()->Goto(if_true(), builder->function_state()); + } else { + builder->current_block()->Goto(if_false(), builder->function_state()); + } + builder->set_current_block(NULL); + return; + } HBasicBlock* empty_true = builder->graph()->CreateBasicBlock(); HBasicBlock* empty_false = builder->graph()->CreateBasicBlock(); TypeFeedbackId test_id = condition()->test_id(); @@ -3129,13 +3734,13 @@ void TestContext::BuildBranch(HValue* value) { HBranch* test = new(zone()) HBranch(value, empty_true, empty_false, expected); builder->current_block()->Finish(test); - empty_true->Goto(if_true(), owner()->function_state()); - empty_false->Goto(if_false(), owner()->function_state()); + empty_true->Goto(if_true(), builder->function_state()); + empty_false->Goto(if_false(), builder->function_state()); builder->set_current_block(NULL); } -// HGraphBuilder infrastructure for bailing out and checking bailouts. +// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts. #define CHECK_BAILOUT(call) \ do { \ call; \ @@ -3150,25 +3755,26 @@ void TestContext::BuildBranch(HValue* value) { } while (false) -void HGraphBuilder::Bailout(const char* reason) { +void HOptimizedGraphBuilder::Bailout(const char* reason) { info()->set_bailout_reason(reason); SetStackOverflow(); } -void HGraphBuilder::VisitForEffect(Expression* expr) { +void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) { EffectContext for_effect(this); Visit(expr); } -void HGraphBuilder::VisitForValue(Expression* expr, ArgumentsAllowedFlag flag) { +void HOptimizedGraphBuilder::VisitForValue(Expression* expr, + ArgumentsAllowedFlag flag) { ValueContext for_value(this, flag); Visit(expr); } -void HGraphBuilder::VisitForTypeOf(Expression* expr) { +void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) { ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED); for_value.set_for_typeof(true); Visit(expr); @@ -3176,119 +3782,137 @@ void HGraphBuilder::VisitForTypeOf(Expression* expr) { -void HGraphBuilder::VisitForControl(Expression* expr, - HBasicBlock* true_block, - HBasicBlock* false_block) { +void HOptimizedGraphBuilder::VisitForControl(Expression* expr, + HBasicBlock* true_block, + HBasicBlock* false_block) { TestContext for_test(this, expr, oracle(), true_block, false_block); Visit(expr); } -void HGraphBuilder::VisitArgument(Expression* expr) { +void HOptimizedGraphBuilder::VisitArgument(Expression* expr) { CHECK_ALIVE(VisitForValue(expr)); Push(AddInstruction(new(zone()) HPushArgument(Pop()))); } -void HGraphBuilder::VisitArgumentList(ZoneList<Expression*>* arguments) { +void HOptimizedGraphBuilder::VisitArgumentList( + ZoneList<Expression*>* arguments) { for (int i = 0; i < arguments->length(); i++) { CHECK_ALIVE(VisitArgument(arguments->at(i))); } } -void HGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs) { +void HOptimizedGraphBuilder::VisitExpressions( + ZoneList<Expression*>* exprs) { for (int i = 0; i < exprs->length(); ++i) { CHECK_ALIVE(VisitForValue(exprs->at(i))); } } -HGraph* HGraphBuilder::CreateGraph() { - graph_ = new(zone()) HGraph(info()); - if (FLAG_hydrogen_stats) HStatistics::Instance()->Initialize(info()); +bool HOptimizedGraphBuilder::BuildGraph() { 
+ Scope* scope = info()->scope(); + if (scope->HasIllegalRedeclaration()) { + Bailout("function with illegal redeclaration"); + return false; + } + if (scope->calls_eval()) { + Bailout("function calls eval"); + return false; + } + SetUpScope(scope); + + // Add an edge to the body entry. This is warty: the graph's start + // environment will be used by the Lithium translation as the initial + // environment on graph entry, but it has now been mutated by the + // Hydrogen translation of the instructions in the start block. This + // environment uses values which have not been defined yet. These + // Hydrogen instructions will then be replayed by the Lithium + // translation, so they cannot have an environment effect. The edge to + // the body's entry block (along with some special logic for the start + // block in HInstruction::InsertAfter) seals the start block from + // getting unwanted instructions inserted. + // + // TODO(kmillikin): Fix this. Stop mutating the initial environment. + // Make the Hydrogen instructions in the initial block into Hydrogen + // values (but not instructions), present in the initial environment and + // not replayed by the Lithium translation. + HEnvironment* initial_env = environment()->CopyWithoutHistory(); + HBasicBlock* body_entry = CreateBasicBlock(initial_env); + current_block()->Goto(body_entry); + body_entry->SetJoinId(BailoutId::FunctionEntry()); + set_current_block(body_entry); + + // Handle implicit declaration of the function name in named function + // expressions before other declarations. + if (scope->is_function_scope() && scope->function() != NULL) { + VisitVariableDeclaration(scope->function()); + } + VisitDeclarations(scope->declarations()); + AddSimulate(BailoutId::Declarations()); - { - HPhase phase("H_Block building"); - current_block_ = graph()->entry_block(); + HValue* context = environment()->LookupContext(); + AddInstruction( + new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry)); - Scope* scope = info()->scope(); - if (scope->HasIllegalRedeclaration()) { - Bailout("function with illegal redeclaration"); - return NULL; - } - if (scope->calls_eval()) { - Bailout("function calls eval"); - return NULL; - } - SetUpScope(scope); - - // Add an edge to the body entry. This is warty: the graph's start - // environment will be used by the Lithium translation as the initial - // environment on graph entry, but it has now been mutated by the - // Hydrogen translation of the instructions in the start block. This - // environment uses values which have not been defined yet. These - // Hydrogen instructions will then be replayed by the Lithium - // translation, so they cannot have an environment effect. The edge to - // the body's entry block (along with some special logic for the start - // block in HInstruction::InsertAfter) seals the start block from - // getting unwanted instructions inserted. - // - // TODO(kmillikin): Fix this. Stop mutating the initial environment. - // Make the Hydrogen instructions in the initial block into Hydrogen - // values (but not instructions), present in the initial environment and - // not replayed by the Lithium translation. - HEnvironment* initial_env = environment()->CopyWithoutHistory(); - HBasicBlock* body_entry = CreateBasicBlock(initial_env); - current_block()->Goto(body_entry); - body_entry->SetJoinId(BailoutId::FunctionEntry()); - set_current_block(body_entry); - - // Handle implicit declaration of the function name in named function - // expressions before other declarations. 
- if (scope->is_function_scope() && scope->function() != NULL) { - VisitVariableDeclaration(scope->function()); - } - VisitDeclarations(scope->declarations()); - AddSimulate(BailoutId::Declarations()); + VisitStatements(info()->function()->body()); + if (HasStackOverflow()) return false; - HValue* context = environment()->LookupContext(); - AddInstruction( - new(zone()) HStackCheck(context, HStackCheck::kFunctionEntry)); + if (current_block() != NULL) { + AddReturn(graph()->GetConstantUndefined()); + set_current_block(NULL); + } - VisitStatements(info()->function()->body()); - if (HasStackOverflow()) return NULL; + // If the checksum of the number of type info changes is the same as the + // last time this function was compiled, then this recompile is likely not + // due to missing/inadequate type feedback, but rather too aggressive + // optimization. Disable optimistic LICM in that case. + Handle<Code> unoptimized_code(info()->shared_info()->code()); + ASSERT(unoptimized_code->kind() == Code::FUNCTION); + Handle<TypeFeedbackInfo> type_info( + TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info())); + int checksum = type_info->own_type_change_checksum(); + int composite_checksum = graph()->update_type_change_checksum(checksum); + graph()->set_use_optimistic_licm( + !type_info->matches_inlined_type_change_checksum(composite_checksum)); + type_info->set_inlined_type_change_checksum(composite_checksum); - if (current_block() != NULL) { - HReturn* instr = new(zone()) HReturn(graph()->GetConstantUndefined()); - current_block()->FinishExit(instr); - set_current_block(NULL); - } + return true; +} - // If the checksum of the number of type info changes is the same as the - // last time this function was compiled, then this recompile is likely not - // due to missing/inadequate type feedback, but rather too aggressive - // optimization. Disable optimistic LICM in that case. - Handle<Code> unoptimized_code(info()->shared_info()->code()); - ASSERT(unoptimized_code->kind() == Code::FUNCTION); - Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info()); - Handle<TypeFeedbackInfo> type_info( - Handle<TypeFeedbackInfo>::cast(maybe_type_info)); - int checksum = type_info->own_type_change_checksum(); - int composite_checksum = graph()->update_type_change_checksum(checksum); - graph()->set_use_optimistic_licm( - !type_info->matches_inlined_type_change_checksum(composite_checksum)); - type_info->set_inlined_type_change_checksum(composite_checksum); - } - return graph(); +void HGraph::GlobalValueNumbering() { + // Perform common subexpression elimination and loop-invariant code motion. + if (FLAG_use_gvn) { + // We use objects' raw addresses for identification, so they must not move. + Heap::RelocationLock relocation_lock(isolate()->heap()); + HPhase phase("H_Global value numbering", this); + HGlobalValueNumberer gvn(this, info()); + bool removed_side_effects = gvn.Analyze(); + // Trigger a second analysis pass to further eliminate duplicate values that + // could only be discovered by removing side-effect-generating instructions + // during the first pass. + if (FLAG_smi_only_arrays && removed_side_effects) { + removed_side_effects = gvn.Analyze(); + ASSERT(!removed_side_effects); + } + } } + bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) { *bailout_reason = SmartArrayPointer<char>(); OrderBlocks(); AssignDominators(); + // We need to create a HConstant "zero" now so that GVN will fold every + // zero-valued constant in the graph together. 
+ // The constant is needed to make idef-based bounds check work: the pass + // evaluates relations with "zero" and that zero cannot be created after GVN. + GetConstant0(); + #ifdef DEBUG // Do a full verify after building the graph and computing dominators. Verify(true); @@ -3320,6 +3944,11 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) { HInferRepresentation rep(this); rep.Analyze(); + // Remove HSimulate instructions that have turned out not to be needed + // after all by folding them into the following HSimulate. + // This must happen after inferring representations. + MergeRemovableSimulates(); + MarkDeoptimizeOnUndefined(); InsertRepresentationChanges(); @@ -3332,19 +3961,7 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) { Canonicalize(); - // Perform common subexpression elimination and loop-invariant code motion. - if (FLAG_use_gvn) { - HPhase phase("H_Global value numbering", this); - HGlobalValueNumberer gvn(this, info()); - bool removed_side_effects = gvn.Analyze(); - // Trigger a second analysis pass to further eliminate duplicate values that - // could only be discovered by removing side-effect-generating instructions - // during the first pass. - if (FLAG_smi_only_arrays && removed_side_effects) { - removed_side_effects = gvn.Analyze(); - ASSERT(!removed_side_effects); - } - } + GlobalValueNumbering(); if (FLAG_use_range) { HRangeAnalysis rangeAnalysis(this); @@ -3356,14 +3973,61 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) { HStackCheckEliminator sce(this); sce.Process(); - EliminateRedundantBoundsChecks(); - DehoistSimpleArrayIndexComputations(); + if (FLAG_idefs) SetupInformativeDefinitions(); + if (FLAG_array_bounds_checks_elimination && !FLAG_idefs) { + EliminateRedundantBoundsChecks(); + } + if (FLAG_array_index_dehoisting) DehoistSimpleArrayIndexComputations(); if (FLAG_dead_code_elimination) DeadCodeElimination(); + RestoreActualValues(); + return true; } +void HGraph::SetupInformativeDefinitionsInBlock(HBasicBlock* block) { + for (int phi_index = 0; phi_index < block->phis()->length(); phi_index++) { + HPhi* phi = block->phis()->at(phi_index); + phi->AddInformativeDefinitions(); + phi->SetFlag(HValue::kIDefsProcessingDone); + // We do not support phis that "redefine just one operand". + ASSERT(!phi->IsInformativeDefinition()); + } + + for (HInstruction* i = block->first(); i != NULL; i = i->next()) { + i->AddInformativeDefinitions(); + i->SetFlag(HValue::kIDefsProcessingDone); + i->UpdateRedefinedUsesWhileSettingUpInformativeDefinitions(); + } +} + + +// This method is recursive, so if its stack frame is large it could +// cause a stack overflow. +// To keep the individual stack frames small we do the actual work inside +// SetupInformativeDefinitionsInBlock(); +void HGraph::SetupInformativeDefinitionsRecursively(HBasicBlock* block) { + SetupInformativeDefinitionsInBlock(block); + for (int i = 0; i < block->dominated_blocks()->length(); ++i) { + SetupInformativeDefinitionsRecursively(block->dominated_blocks()->at(i)); + } + + for (HInstruction* i = block->first(); i != NULL; i = i->next()) { + if (i->IsBoundsCheck()) { + HBoundsCheck* check = HBoundsCheck::cast(i); + check->ApplyIndexChange(); + } + } +} + + +void HGraph::SetupInformativeDefinitions() { + HPhase phase("H_Setup informative definitions", this); + SetupInformativeDefinitionsRecursively(entry_block()); +} + + // We try to "factor up" HBoundsCheck instructions towards the root of the // dominator tree. 
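
The new SetupInformativeDefinitions pass below walks the dominator tree: each block's phis and instructions are processed before its dominated blocks, and the bounds-check index rewrites for the block are applied after the dominated subtree has been handled (the comment in the hunk notes the recursion-depth concern). A sketch of that traversal shape with a stand-in block type:

    #include <vector>

    struct SketchBlock {
      std::vector<SketchBlock*> dominated;
      void ProcessPhisAndInstructions() { /* add informative definitions */ }
      void ApplyBoundsCheckIndexChanges() { /* rewrite bounds-check indices */ }
    };

    void WalkDominatorTree(SketchBlock* block) {
      block->ProcessPhisAndInstructions();           // pre-order work per block
      for (SketchBlock* child : block->dominated) {  // recurse into dominated blocks
        WalkDominatorTree(child);
      }
      block->ApplyBoundsCheckIndexChanges();         // applied after the subtree
    }
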
// For now we handle checks where the index is like "exp + int32value". @@ -3476,7 +4140,10 @@ class BoundsCheckBbData: public ZoneObject { // (either upper or lower; note that HasSingleCheck() becomes false). // Otherwise one of the current checks is modified so that it also covers // new_offset, and new_check is removed. - void CoverCheck(HBoundsCheck* new_check, + // + // If the check cannot be modified because the context is unknown it + // returns false, otherwise it returns true. + bool CoverCheck(HBoundsCheck* new_check, int32_t new_offset) { ASSERT(new_check->index()->representation().IsInteger32()); bool keep_new_check = false; @@ -3487,12 +4154,14 @@ class BoundsCheckBbData: public ZoneObject { keep_new_check = true; upper_check_ = new_check; } else { - BuildOffsetAdd(upper_check_, - &added_upper_index_, - &added_upper_offset_, - Key()->IndexBase(), - new_check->index()->representation(), - new_offset); + bool result = BuildOffsetAdd(upper_check_, + &added_upper_index_, + &added_upper_offset_, + Key()->IndexBase(), + new_check->index()->representation(), + new_offset); + if (!result) return false; + upper_check_->ReplaceAllUsesWith(upper_check_->index()); upper_check_->SetOperandAt(0, added_upper_index_); } } else if (new_offset < lower_offset_) { @@ -3501,12 +4170,14 @@ class BoundsCheckBbData: public ZoneObject { keep_new_check = true; lower_check_ = new_check; } else { - BuildOffsetAdd(lower_check_, - &added_lower_index_, - &added_lower_offset_, - Key()->IndexBase(), - new_check->index()->representation(), - new_offset); + bool result = BuildOffsetAdd(lower_check_, + &added_lower_index_, + &added_lower_offset_, + Key()->IndexBase(), + new_check->index()->representation(), + new_offset); + if (!result) return false; + lower_check_->ReplaceAllUsesWith(lower_check_->index()); lower_check_->SetOperandAt(0, added_lower_index_); } } else { @@ -3514,8 +4185,10 @@ class BoundsCheckBbData: public ZoneObject { } if (!keep_new_check) { - new_check->DeleteAndReplaceWith(NULL); + new_check->DeleteAndReplaceWith(new_check->ActualValue()); } + + return true; } void RemoveZeroOperations() { @@ -3551,29 +4224,42 @@ class BoundsCheckBbData: public ZoneObject { HBasicBlock* basic_block_; HBoundsCheck* lower_check_; HBoundsCheck* upper_check_; - HAdd* added_lower_index_; + HInstruction* added_lower_index_; HConstant* added_lower_offset_; - HAdd* added_upper_index_; + HInstruction* added_upper_index_; HConstant* added_upper_offset_; BoundsCheckBbData* next_in_bb_; BoundsCheckBbData* father_in_dt_; - void BuildOffsetAdd(HBoundsCheck* check, - HAdd** add, + // Given an existing add instruction and a bounds check it tries to + // find the current context (either of the add or of the check index). + HValue* IndexContext(HInstruction* add, HBoundsCheck* check) { + if (add != NULL && add->IsAdd()) { + return HAdd::cast(add)->context(); + } + if (check->index()->IsBinaryOperation()) { + return HBinaryOperation::cast(check->index())->context(); + } + return NULL; + } + + // This function returns false if it cannot build the add because the + // current context cannot be determined. 
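
The bounds-check grouping below keeps, per (basic block, index base), the lowest and highest constant offsets that are already guarded by a check. CoverCheck widens that interval, and after this patch it can report failure when the context needed to build the offset add is unknown. A simplified sketch of the coverage decision (the real code tracks the actual HBoundsCheck instructions, not just the offsets):

    #include <cstdint>

    struct SketchCoverage {
      int32_t lower_offset;
      int32_t upper_offset;

      bool OffsetIsCovered(int32_t offset) const {
        return offset >= lower_offset && offset <= upper_offset;
      }

      // Mirrors CoverCheck's new bool result: false means the boundary check
      // could not be widened; true means new_offset is now inside the interval.
      bool Cover(int32_t new_offset, bool can_build_offset_add) {
        if (OffsetIsCovered(new_offset)) return true;
        if (!can_build_offset_add) return false;
        if (new_offset > upper_offset) upper_offset = new_offset;
        if (new_offset < lower_offset) lower_offset = new_offset;
        return true;
      }
    };
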
+ bool BuildOffsetAdd(HBoundsCheck* check, + HInstruction** add, HConstant** constant, HValue* original_value, Representation representation, int32_t new_offset) { + HValue* index_context = IndexContext(*add, check); + if (index_context == NULL) return false; + HConstant* new_constant = new(BasicBlock()->zone()) HConstant(new_offset, Representation::Integer32()); if (*add == NULL) { new_constant->InsertBefore(check); - // Because of the bounds checks elimination algorithm, the index is always - // an HAdd or an HSub here, so we can safely cast to an HBinaryOperation. - HValue* context = HBinaryOperation::cast(check->index())->context(); - *add = new(BasicBlock()->zone()) HAdd(context, - original_value, - new_constant); + (*add) = HAdd::New( + BasicBlock()->zone(), index_context, original_value, new_constant); (*add)->AssumeRepresentation(representation); (*add)->InsertBefore(check); } else { @@ -3581,11 +4267,12 @@ class BoundsCheckBbData: public ZoneObject { (*constant)->DeleteAndReplaceWith(new_constant); } *constant = new_constant; + return true; } - void RemoveZeroAdd(HAdd** add, HConstant** constant) { - if (*add != NULL && (*constant)->Integer32Value() == 0) { - (*add)->DeleteAndReplaceWith((*add)->left()); + void RemoveZeroAdd(HInstruction** add, HConstant** constant) { + if (*add != NULL && (*add)->IsAdd() && (*constant)->Integer32Value() == 0) { + (*add)->DeleteAndReplaceWith(HAdd::cast(*add)->left()); (*constant)->DeleteAndReplaceWith(NULL); } } @@ -3633,10 +4320,6 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb, if (!i->IsBoundsCheck()) continue; HBoundsCheck* check = HBoundsCheck::cast(i); - check->ReplaceAllUsesWith(check->index()); - - if (!FLAG_array_bounds_checks_elimination) continue; - int32_t offset; BoundsCheckKey* key = BoundsCheckKey::Create(zone(), check, &offset); @@ -3654,10 +4337,12 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb, NULL); *data_p = bb_data_list; } else if (data->OffsetIsCovered(offset)) { - check->DeleteAndReplaceWith(NULL); - } else if (data->BasicBlock() == bb) { - data->CoverCheck(check, offset); - } else { + check->DeleteAndReplaceWith(check->ActualValue()); + } else if (data->BasicBlock() != bb || + !data->CoverCheck(check, offset)) { + // If the check is in the current BB we try to modify it by calling + // "CoverCheck", but if also that fails we record the current offsets + // in a new data instance because from now on they are covered. int32_t new_lower_offset = offset < data->LowerOffset() ? 
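
The rewritten condition in EliminateRedundantBoundsChecks above relies on short-circuit evaluation: CoverCheck is only attempted when the recorded data lives in the current block, and a failed CoverCheck falls through to recording a fresh coverage entry, exactly like the cross-block case. Spelled out as explicit branches (stand-in names, not V8 types):

    enum class SketchAction {
      kDeleteRedundantCheck,  // offset already covered
      kWidenExistingCheck,    // same block and CoverCheck succeeded
      kRecordNewCoverage      // different block, or widening was not possible
    };

    SketchAction ClassifyCheck(bool offset_is_covered,
                               bool data_is_in_current_block,
                               bool cover_check_succeeded) {
      if (offset_is_covered) return SketchAction::kDeleteRedundantCheck;
      if (data_is_in_current_block && cover_check_succeeded) {
        return SketchAction::kWidenExistingCheck;
      }
      // From now on the enlarged [lower, upper] range is considered covered.
      return SketchAction::kRecordNewCoverage;
    }
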
offset : data->LowerOffset(); @@ -3701,7 +4386,7 @@ void HGraph::EliminateRedundantBoundsChecks() { static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) { - HValue* index = array_operation->GetKey(); + HValue* index = array_operation->GetKey()->ActualValue(); if (!index->representation().IsInteger32()) return; HConstant* constant; @@ -3749,35 +4434,17 @@ static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) { void HGraph::DehoistSimpleArrayIndexComputations() { - if (!FLAG_array_index_dehoisting) return; - HPhase phase("H_Dehoist index computations", this); for (int i = 0; i < blocks()->length(); ++i) { for (HInstruction* instr = blocks()->at(i)->first(); instr != NULL; instr = instr->next()) { ArrayInstructionInterface* array_instruction = NULL; - if (instr->IsLoadKeyedFastElement()) { - HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr); - array_instruction = static_cast<ArrayInstructionInterface*>(op); - } else if (instr->IsLoadKeyedFastDoubleElement()) { - HLoadKeyedFastDoubleElement* op = - HLoadKeyedFastDoubleElement::cast(instr); - array_instruction = static_cast<ArrayInstructionInterface*>(op); - } else if (instr->IsLoadKeyedSpecializedArrayElement()) { - HLoadKeyedSpecializedArrayElement* op = - HLoadKeyedSpecializedArrayElement::cast(instr); + if (instr->IsLoadKeyed()) { + HLoadKeyed* op = HLoadKeyed::cast(instr); array_instruction = static_cast<ArrayInstructionInterface*>(op); - } else if (instr->IsStoreKeyedFastElement()) { - HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr); - array_instruction = static_cast<ArrayInstructionInterface*>(op); - } else if (instr->IsStoreKeyedFastDoubleElement()) { - HStoreKeyedFastDoubleElement* op = - HStoreKeyedFastDoubleElement::cast(instr); - array_instruction = static_cast<ArrayInstructionInterface*>(op); - } else if (instr->IsStoreKeyedSpecializedArrayElement()) { - HStoreKeyedSpecializedArrayElement* op = - HStoreKeyedSpecializedArrayElement::cast(instr); + } else if (instr->IsStoreKeyed()) { + HStoreKeyed* op = HStoreKeyed::cast(instr); array_instruction = static_cast<ArrayInstructionInterface*>(op); } else { continue; @@ -3818,33 +4485,58 @@ void HGraph::DeadCodeElimination() { } -HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) { - ASSERT(current_block() != NULL); - current_block()->AddInstruction(instr); - return instr; -} +void HGraph::RestoreActualValues() { + HPhase phase("H_Restore actual values", this); + for (int block_index = 0; block_index < blocks()->length(); block_index++) { + HBasicBlock* block = blocks()->at(block_index); -void HGraphBuilder::AddSimulate(BailoutId ast_id) { - ASSERT(current_block() != NULL); - current_block()->AddSimulate(ast_id); +#ifdef DEBUG + for (int i = 0; i < block->phis()->length(); i++) { + HPhi* phi = block->phis()->at(i); + ASSERT(phi->ActualValue() == phi); + } +#endif + + for (HInstruction* instruction = block->first(); + instruction != NULL; + instruction = instruction->next()) { + if (instruction->ActualValue() != instruction) { + ASSERT(instruction->IsInformativeDefinition()); + if (instruction->IsPurelyInformativeDefinition()) { + instruction->DeleteAndReplaceWith(instruction->RedefinedOperand()); + } else { + instruction->ReplaceAllUsesWith(instruction->ActualValue()); + } + } + } + } } -void HGraphBuilder::AddPhi(HPhi* instr) { +void HOptimizedGraphBuilder::AddPhi(HPhi* instr) { ASSERT(current_block() != NULL); current_block()->AddPhi(instr); } -void HGraphBuilder::PushAndAdd(HInstruction* instr) { +void 
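
Many hunks in this patch replace direct uses of a value with value->ActualValue(), and the new RestoreActualValues() pass above unwinds those redefinitions at the end of the pipeline. The idea, sketched with a stand-in type (the real HValue API differs in its details): an informative definition, such as a bounds check after this patch, redefines an existing value without computing anything new, so the redefinition chain has to be skipped when the underlying computation is wanted.

    struct SketchValue {
      SketchValue* redefined_operand = nullptr;  // set only on informative definitions

      bool IsInformativeDefinition() const { return redefined_operand != nullptr; }

      SketchValue* ActualValue() {
        SketchValue* current = this;
        while (current->IsInformativeDefinition()) {
          current = current->redefined_operand;  // follow the redefinition chain
        }
        return current;
      }
    };
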
HOptimizedGraphBuilder::PushAndAdd(HInstruction* instr) { Push(instr); AddInstruction(instr); } +void HOptimizedGraphBuilder::AddSoftDeoptimize() { + if (FLAG_always_opt) return; + if (current_block()->IsDeoptimizing()) return; + AddInstruction(new(zone()) HSoftDeoptimize()); + current_block()->MarkAsDeoptimizing(); + graph()->set_has_soft_deoptimize(true); +} + + template <class Instruction> -HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) { +HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) { int count = call->argument_count(); ZoneList<HValue*> arguments(count, zone()); for (int i = 0; i < count; ++i) { @@ -3858,11 +4550,11 @@ HInstruction* HGraphBuilder::PreProcessCall(Instruction* call) { } -void HGraphBuilder::SetUpScope(Scope* scope) { +void HOptimizedGraphBuilder::SetUpScope(Scope* scope) { HConstant* undefined_constant = new(zone()) HConstant( isolate()->factory()->undefined_value(), Representation::Tagged()); AddInstruction(undefined_constant); - graph_->set_undefined_constant(undefined_constant); + graph()->set_undefined_constant(undefined_constant); HArgumentsObject* object = new(zone()) HArgumentsObject; AddInstruction(object); @@ -3901,30 +4593,14 @@ void HGraphBuilder::SetUpScope(Scope* scope) { } -void HGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) { +void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) { for (int i = 0; i < statements->length(); i++) { CHECK_ALIVE(Visit(statements->at(i))); } } -HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) { - HBasicBlock* b = graph()->CreateBasicBlock(); - b->SetInitialEnvironment(env); - return b; -} - - -HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() { - HBasicBlock* header = graph()->CreateBasicBlock(); - HEnvironment* entry_env = environment()->CopyAsLoopHeader(header); - header->SetInitialEnvironment(entry_env); - header->AttachLoopInformation(); - return header; -} - - -void HGraphBuilder::VisitBlock(Block* stmt) { +void HOptimizedGraphBuilder::VisitBlock(Block* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -3944,7 +4620,8 @@ void HGraphBuilder::VisitBlock(Block* stmt) { } -void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) { +void HOptimizedGraphBuilder::VisitExpressionStatement( + ExpressionStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -3952,14 +4629,14 @@ void HGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) { } -void HGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) { +void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); } -void HGraphBuilder::VisitIfStatement(IfStatement* stmt) { +void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -3998,7 +4675,7 @@ void HGraphBuilder::VisitIfStatement(IfStatement* stmt) { } -HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get( +HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get( BreakableStatement* stmt, BreakType type, int* drop_extra) { @@ -4037,7 +4714,8 @@ HBasicBlock* HGraphBuilder::BreakAndContinueScope::Get( } -void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { +void 
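
AddSoftDeoptimize() above centralizes a pattern that older code spelled out by hand (compare the BuildLoadNamedGeneric hunk later in this section). A rough sketch of the intended call-site shape, with hypothetical helper names that are not part of V8:

    void AddSoftDeoptimizeSketch() {
      // The real helper is a no-op when --always-opt is on or when the current
      // block is already marked as deoptimizing, so repeated calls in one
      // block are safe.
    }

    void BuildGenericLoadSketch(bool is_uninitialized) {
      if (is_uninitialized) {
        // Type feedback says this site never ran: emit a soft deopt so the
        // unoptimized code can collect feedback instead of staying generic.
        AddSoftDeoptimizeSketch();
      }
      // ... fall through to building the generic load ...
    }
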
HOptimizedGraphBuilder::VisitContinueStatement( + ContinueStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4051,7 +4729,7 @@ void HGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) { } -void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { +void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4065,7 +4743,7 @@ void HGraphBuilder::VisitBreakStatement(BreakStatement* stmt) { } -void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { +void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4075,7 +4753,7 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { // Not an inlined return, so an actual one. CHECK_ALIVE(VisitForValue(stmt->expression())); HValue* result = environment()->Pop(); - current_block()->FinishExit(new(zone()) HReturn(result)); + AddReturn(result); } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) { // Return from an inlined construct call. In a test context the return value // will always evaluate to true, in a value context the return value needs @@ -4102,7 +4780,19 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { typecheck->SetSuccessorAt(1, not_spec_object); current_block()->Finish(typecheck); if_spec_object->AddLeaveInlined(return_value, state); - not_spec_object->AddLeaveInlined(receiver, state); + if (!FLAG_harmony_symbols) { + not_spec_object->AddLeaveInlined(receiver, state); + } else { + HHasInstanceTypeAndBranch* symbolcheck = + new(zone()) HHasInstanceTypeAndBranch(return_value, SYMBOL_TYPE); + HBasicBlock* is_symbol = graph()->CreateBasicBlock(); + HBasicBlock* not_symbol = graph()->CreateBasicBlock(); + symbolcheck->SetSuccessorAt(0, is_symbol); + symbolcheck->SetSuccessorAt(1, not_symbol); + not_spec_object->Finish(symbolcheck); + is_symbol->AddLeaveInlined(return_value, state); + not_symbol->AddLeaveInlined(receiver, state); + } } } else if (state->inlining_kind() == SETTER_CALL_RETURN) { // Return from an inlined setter call. 
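
The VisitReturnStatement change above extends the decision made when returning from an inlined construct call: the value of `return expr` is only used when it is a spec object, and with --harmony-symbols also when it is a symbol; otherwise the freshly allocated receiver is returned. A sketch of that decision with stand-in types:

    struct SketchHValue {
      bool is_spec_object;
      bool is_symbol;
    };

    const SketchHValue* ConstructReturnValue(const SketchHValue* return_value,
                                             const SketchHValue* receiver,
                                             bool harmony_symbols) {
      if (return_value->is_spec_object) return return_value;
      if (harmony_symbols && return_value->is_symbol) return return_value;
      return receiver;  // non-object (and non-symbol) returns are ignored
    }
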
The returned value is never used, the @@ -4137,7 +4827,7 @@ void HGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) { } -void HGraphBuilder::VisitWithStatement(WithStatement* stmt) { +void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4145,7 +4835,7 @@ void HGraphBuilder::VisitWithStatement(WithStatement* stmt) { } -void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { +void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4184,7 +4874,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { !clause->label()->IsStringLiteral()) || (switch_type == SMI_SWITCH && !clause->label()->IsSmiLiteral())) { - return Bailout("SwitchStatemnt: mixed label types are not supported"); + return Bailout("SwitchStatement: mixed label types are not supported"); } } @@ -4238,12 +4928,13 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { new(zone()) HCompareIDAndBranch(tag_value, label_value, Token::EQ_STRICT); - compare_->SetInputRepresentation(Representation::Integer32()); + compare_->set_observed_input_representation( + Representation::Integer32(), Representation::Integer32()); compare = compare_; } else { compare = new(zone()) HStringCompareAndBranch(context, tag_value, - label_value, - Token::EQ_STRICT); + label_value, + Token::EQ_STRICT); } compare->SetSuccessorAt(0, body_block); @@ -4330,12 +5021,12 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) { } -bool HGraphBuilder::HasOsrEntryAt(IterationStatement* statement) { +bool HOptimizedGraphBuilder::HasOsrEntryAt(IterationStatement* statement) { return statement->OsrEntryId() == info()->osr_ast_id(); } -bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) { +bool HOptimizedGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) { if (!HasOsrEntryAt(statement)) return false; HBasicBlock* non_osr_entry = graph()->CreateBasicBlock(); @@ -4348,6 +5039,7 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) { non_osr_entry->Goto(loop_predecessor); set_current_block(osr_entry); + osr_entry->set_osr_entry(); BailoutId osr_entry_id = statement->OsrEntryId(); int first_expression_index = environment()->first_expression_index(); int length = environment()->length(); @@ -4385,9 +5077,9 @@ bool HGraphBuilder::PreProcessOsrEntry(IterationStatement* statement) { } -void HGraphBuilder::VisitLoopBody(IterationStatement* stmt, - HBasicBlock* loop_entry, - BreakAndContinueInfo* break_info) { +void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt, + HBasicBlock* loop_entry, + BreakAndContinueInfo* break_info) { BreakAndContinueScope push(break_info, this); AddSimulate(stmt->StackCheckId()); HValue* context = environment()->LookupContext(); @@ -4400,7 +5092,7 @@ void HGraphBuilder::VisitLoopBody(IterationStatement* stmt, } -void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { +void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4443,7 +5135,7 @@ void HGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) { } -void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { +void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { 
ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4487,7 +5179,7 @@ void HGraphBuilder::VisitWhileStatement(WhileStatement* stmt) { } -void HGraphBuilder::VisitForStatement(ForStatement* stmt) { +void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4539,7 +5231,7 @@ void HGraphBuilder::VisitForStatement(ForStatement* stmt) { } -void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) { +void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4575,7 +5267,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) { HInstruction* enum_length = AddInstruction(new(zone()) HMapEnumLength(map)); HInstruction* start_index = AddInstruction(new(zone()) HConstant( - Handle<Object>(Smi::FromInt(0)), Representation::Integer32())); + Handle<Object>(Smi::FromInt(0), isolate()), Representation::Integer32())); Push(map); Push(array); @@ -4602,7 +5294,8 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) { // Check that we still have more keys. HCompareIDAndBranch* compare_index = new(zone()) HCompareIDAndBranch(index, limit, Token::LT); - compare_index->SetInputRepresentation(Representation::Integer32()); + compare_index->set_observed_input_representation( + Representation::Integer32(), Representation::Integer32()); HBasicBlock* loop_body = graph()->CreateBasicBlock(); HBasicBlock* loop_successor = graph()->CreateBasicBlock(); @@ -4617,10 +5310,11 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) { set_current_block(loop_body); HValue* key = AddInstruction( - new(zone()) HLoadKeyedFastElement( + new(zone()) HLoadKeyed( environment()->ExpressionStackAt(2), // Enum cache. environment()->ExpressionStackAt(0), // Iteration index. - environment()->ExpressionStackAt(0))); + environment()->ExpressionStackAt(0), + FAST_ELEMENTS)); // Check if the expected map still matches that of the enumerable. // If not just deoptimize. 
@@ -4640,9 +5334,10 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) { set_current_block(body_exit); HValue* current_index = Pop(); - HInstruction* new_index = new(zone()) HAdd(environment()->LookupContext(), - current_index, - graph()->GetConstant1()); + HInstruction* new_index = HAdd::New(zone(), + environment()->LookupContext(), + current_index, + graph()->GetConstant1()); new_index->AssumeRepresentation(Representation::Integer32()); PushAndAdd(new_index); body_exit = current_block(); @@ -4658,7 +5353,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) { } -void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) { +void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4666,7 +5361,8 @@ void HGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) { } -void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) { +void HOptimizedGraphBuilder::VisitTryFinallyStatement( + TryFinallyStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4674,7 +5370,7 @@ void HGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) { } -void HGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { +void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4702,7 +5398,7 @@ static Handle<SharedFunctionInfo> SearchSharedFunctionInfo( } -void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { +void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4721,7 +5417,7 @@ void HGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) { } -void HGraphBuilder::VisitSharedFunctionInfoLiteral( +void HOptimizedGraphBuilder::VisitSharedFunctionInfoLiteral( SharedFunctionInfoLiteral* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); @@ -4730,7 +5426,7 @@ void HGraphBuilder::VisitSharedFunctionInfoLiteral( } -void HGraphBuilder::VisitConditional(Conditional* expr) { +void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4768,8 +5464,9 @@ void HGraphBuilder::VisitConditional(Conditional* expr) { } -HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty( - Variable* var, LookupResult* lookup, bool is_store) { +HOptimizedGraphBuilder::GlobalPropertyAccess + HOptimizedGraphBuilder::LookupGlobalProperty( + Variable* var, LookupResult* lookup, bool is_store) { if (var->is_this() || !info()->has_global_object()) { return kUseGeneric; } @@ -4785,7 +5482,7 @@ HGraphBuilder::GlobalPropertyAccess HGraphBuilder::LookupGlobalProperty( } -HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) { +HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) { ASSERT(var->IsContextSlot()); HValue* context = environment()->LookupContext(); int length = info()->scope()->ContextChainLength(var->scope()); @@ -4798,7 +5495,7 @@ HValue* HGraphBuilder::BuildContextChainWalk(Variable* var) { } -void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) { +void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) { 
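
The for-in hunks above build the fast path over the enum cache. Expressed as an ordinary C++ loop (stand-in types; the real code emits Hydrogen instructions and basic blocks rather than executing anything), the lowered shape is roughly:

    #include <string>
    #include <vector>

    void ForInFastPathSketch(const std::vector<std::string>& enum_cache,
                             int enum_length,
                             const void* receiver_map,
                             const void* cached_map) {
      for (int index = 0; index < enum_length; ++index) {
        const std::string& key = enum_cache[index];  // HLoadKeyed from the enum cache
        if (receiver_map != cached_map) return;      // map mismatch would deoptimize
        (void)key;  // the loop body runs with `key` as the current property name
      }
    }
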
ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4871,17 +5568,17 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) { } -void HGraphBuilder::VisitLiteral(Literal* expr) { +void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); HConstant* instr = - new(zone()) HConstant(expr->handle(), Representation::Tagged()); + new(zone()) HConstant(expr->handle(), Representation::None()); return ast_context()->ReturnInstruction(instr, expr->id()); } -void HGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { +void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -4920,12 +5617,13 @@ static bool LookupAccessorPair(Handle<Map> map, Handle<String> name, Handle<AccessorPair>* accessors, Handle<JSObject>* holder) { - LookupResult lookup(map->GetIsolate()); + Isolate* isolate = map->GetIsolate(); + LookupResult lookup(isolate); // Check for a JavaScript accessor directly in the map. map->LookupDescriptor(NULL, *name, &lookup); if (lookup.IsPropertyCallbacks()) { - Handle<Object> callback(lookup.GetValueFromMap(*map)); + Handle<Object> callback(lookup.GetValueFromMap(*map), isolate); if (!callback->IsAccessorPair()) return false; *accessors = Handle<AccessorPair>::cast(callback); *holder = Handle<JSObject>(); @@ -4938,7 +5636,7 @@ static bool LookupAccessorPair(Handle<Map> map, // Check for a JavaScript accessor somewhere in the proto chain. LookupInPrototypes(map, name, &lookup); if (lookup.IsPropertyCallbacks()) { - Handle<Object> callback(lookup.GetValue()); + Handle<Object> callback(lookup.GetValue(), isolate); if (!callback->IsAccessorPair()) return false; *accessors = Handle<AccessorPair>::cast(callback); *holder = Handle<JSObject>(lookup.holder()); @@ -4988,9 +5686,10 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate, ASSERT(max_depth >= 0 && *max_properties >= 0); if (max_depth == 0) return false; + Isolate* isolate = boilerplate->GetIsolate(); Handle<FixedArrayBase> elements(boilerplate->elements()); if (elements->length() > 0 && - elements->map() != boilerplate->GetHeap()->fixed_cow_array_map()) { + elements->map() != isolate->heap()->fixed_cow_array_map()) { if (boilerplate->HasFastDoubleElements()) { *total_size += FixedDoubleArray::SizeFor(elements->length()); } else if (boilerplate->HasFastObjectElements()) { @@ -4998,7 +5697,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate, int length = elements->length(); for (int i = 0; i < length; i++) { if ((*max_properties)-- == 0) return false; - Handle<Object> value(fast_elements->get(i)); + Handle<Object> value(fast_elements->get(i), isolate); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); if (!IsFastLiteral(value_object, @@ -5022,7 +5721,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate, int nof = boilerplate->map()->inobject_properties(); for (int i = 0; i < nof; i++) { if ((*max_properties)-- == 0) return false; - Handle<Object> value(boilerplate->InObjectPropertyAt(i)); + Handle<Object> value(boilerplate->InObjectPropertyAt(i), isolate); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); if (!IsFastLiteral(value_object, @@ -5040,7 +5739,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate, } -void 
HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { +void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -5051,7 +5750,8 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { // Check whether to use fast or slow deep-copying for boilerplate. int total_size = 0; int max_properties = HFastLiteral::kMaxLiteralProperties; - Handle<Object> boilerplate(closure->literals()->get(expr->literal_index())); + Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()), + isolate()); if (boilerplate->IsJSObject() && IsFastLiteral(Handle<JSObject>::cast(boilerplate), HFastLiteral::kMaxLiteralDepth, @@ -5062,7 +5762,8 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { boilerplate_object, total_size, expr->literal_index(), - expr->depth()); + expr->depth(), + DONT_TRACK_ALLOCATION_SITE); } else { literal = new(zone()) HObjectLiteral(context, expr->constant_properties(), @@ -5090,7 +5791,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { ASSERT(!CompileTimeValue::IsCompileTimeValue(value)); // Fall through. case ObjectLiteral::Property::COMPUTED: - if (key->handle()->IsSymbol()) { + if (key->handle()->IsInternalizedString()) { if (property->emit_store()) { property->RecordTypeFeedback(oracle()); CHECK_ALIVE(VisitForValue(value)); @@ -5113,7 +5814,9 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { map)); } AddInstruction(store); - if (store->HasObservableSideEffects()) AddSimulate(key->id()); + if (store->HasObservableSideEffects()) { + AddSimulate(key->id(), REMOVABLE_SIMULATE); + } } else { CHECK_ALIVE(VisitForEffect(value)); } @@ -5143,7 +5846,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) { } -void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { +void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -5153,7 +5856,8 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { HInstruction* literal; Handle<FixedArray> literals(environment()->closure()->literals()); - Handle<Object> raw_boilerplate(literals->get(expr->literal_index())); + Handle<Object> raw_boilerplate(literals->get(expr->literal_index()), + isolate()); if (raw_boilerplate->IsUndefined()) { raw_boilerplate = Runtime::CreateArrayLiteralBoilerplate( @@ -5170,7 +5874,13 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate); ElementsKind boilerplate_elements_kind = - Handle<JSObject>::cast(boilerplate)->GetElementsKind(); + Handle<JSObject>::cast(boilerplate)->GetElementsKind(); + + // TODO(mvstanton): This heuristic is only a temporary solution. In the + // end, we want to quit creating allocation site info after a certain number + // of GCs for a call site. + AllocationSiteMode mode = AllocationSiteInfo::GetMode( + boilerplate_elements_kind); // Check whether to use fast or slow deep-copying for boilerplate. 
int total_size = 0; @@ -5179,17 +5889,22 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { HFastLiteral::kMaxLiteralDepth, &max_properties, &total_size)) { + if (mode == TRACK_ALLOCATION_SITE) { + total_size += AllocationSiteInfo::kSize; + } literal = new(zone()) HFastLiteral(context, boilerplate, total_size, expr->literal_index(), - expr->depth()); + expr->depth(), + mode); } else { literal = new(zone()) HArrayLiteral(context, boilerplate, length, expr->literal_index(), - expr->depth()); + expr->depth(), + mode); } // The array is expected in the bailout environment during computation @@ -5214,7 +5929,7 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { AddInstruction(elements); HValue* key = AddInstruction( - new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)), + new(zone()) HConstant(Handle<Object>(Smi::FromInt(i), isolate()), Representation::Integer32())); switch (boilerplate_elements_kind) { @@ -5225,18 +5940,14 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) { // Fall through. case FAST_ELEMENTS: case FAST_HOLEY_ELEMENTS: - AddInstruction(new(zone()) HStoreKeyedFastElement( + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + AddInstruction(new(zone()) HStoreKeyed( elements, key, value, boilerplate_elements_kind)); break; - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements, - key, - value)); - break; default: UNREACHABLE(); break; @@ -5253,6 +5964,10 @@ static bool ComputeLoadStoreField(Handle<Map> type, Handle<String> name, LookupResult* lookup, bool is_store) { + if (type->has_named_interceptor()) { + lookup->InterceptorResult(NULL); + return false; + } // If we directly find a field, the access can be inlined. type->LookupDescriptor(NULL, *name, lookup); if (lookup->IsField()) return true; @@ -5281,18 +5996,20 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type, } -HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object, - Handle<String> name, - HValue* value, - Handle<Map> map, - LookupResult* lookup, - bool smi_and_map_check) { - ASSERT(lookup->IsFound()); - if (smi_and_map_check) { - AddInstruction(new(zone()) HCheckNonSmi(object)); - AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone())); - } +void HOptimizedGraphBuilder::AddCheckMapsWithTransitions(HValue* object, + Handle<Map> map) { + AddInstruction(new(zone()) HCheckNonSmi(object)); + AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone())); +} + +HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField( + HValue* object, + Handle<String> name, + HValue* value, + Handle<Map> map, + LookupResult* lookup) { + ASSERT(lookup->IsFound()); // If the property does not exist yet, we have to check that it wasn't made // readonly or turned into a setter by some meanwhile modifications on the // prototype chain. @@ -5311,13 +6028,16 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object, proto = proto_result.holder(); } else { // Otherwise, find the top prototype. 
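
The size accounting added above reserves extra space in the fast-literal allocation when allocation-site tracking is enabled for this array literal. A sketch of that bookkeeping; the constant below is purely illustrative and stands in for AllocationSiteInfo::kSize:

    enum class SketchAllocationSiteMode { kDontTrack, kTrack };

    int FastLiteralAllocationSize(int boilerplate_size,
                                  SketchAllocationSiteMode mode) {
      const int kSketchAllocationSiteInfoSize = 16;  // illustrative value only
      int total = boilerplate_size;
      if (mode == SketchAllocationSiteMode::kTrack) {
        total += kSketchAllocationSiteInfoSize;  // room for the tracking record
      }
      return total;
    }
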
- while (proto->GetPrototype()->IsJSObject()) proto = proto->GetPrototype(); - ASSERT(proto->GetPrototype()->IsNull()); + while (proto->GetPrototype(isolate())->IsJSObject()) { + proto = proto->GetPrototype(isolate()); + } + ASSERT(proto->GetPrototype(isolate())->IsNull()); } ASSERT(proto->IsJSObject()); AddInstruction(new(zone()) HCheckPrototypeMaps( Handle<JSObject>(JSObject::cast(map->prototype())), - Handle<JSObject>(JSObject::cast(proto)))); + Handle<JSObject>(JSObject::cast(proto)), + zone())); } int index = ComputeLoadStoreFieldIndex(map, name, lookup); @@ -5343,9 +6063,10 @@ HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object, } -HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object, - Handle<String> name, - HValue* value) { +HInstruction* HOptimizedGraphBuilder::BuildStoreNamedGeneric( + HValue* object, + Handle<String> name, + HValue* value) { HValue* context = environment()->LookupContext(); return new(zone()) HStoreNamedGeneric( context, @@ -5356,27 +6077,29 @@ HInstruction* HGraphBuilder::BuildStoreNamedGeneric(HValue* object, } -HInstruction* HGraphBuilder::BuildCallSetter(HValue* object, - HValue* value, - Handle<Map> map, - Handle<JSFunction> setter, - Handle<JSObject> holder) { - AddCheckConstantFunction(holder, object, map, true); +HInstruction* HOptimizedGraphBuilder::BuildCallSetter( + HValue* object, + HValue* value, + Handle<Map> map, + Handle<JSFunction> setter, + Handle<JSObject> holder) { + AddCheckConstantFunction(holder, object, map); AddInstruction(new(zone()) HPushArgument(object)); AddInstruction(new(zone()) HPushArgument(value)); return new(zone()) HCallConstantFunction(setter, 2); } -HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object, - Handle<String> name, - HValue* value, - Handle<Map> map) { +HInstruction* HOptimizedGraphBuilder::BuildStoreNamedMonomorphic( + HValue* object, + Handle<String> name, + HValue* value, + Handle<Map> map) { // Handle a store to a known field. LookupResult lookup(isolate()); if (ComputeLoadStoreField(map, name, &lookup, true)) { - // true = needs smi and map check. - return BuildStoreNamedField(object, name, value, map, &lookup, true); + AddCheckMapsWithTransitions(object, map); + return BuildStoreNamedField(object, name, value, map, &lookup); } // No luck, do a generic store. 
@@ -5384,10 +6107,11 @@ HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object, } -void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr, - HValue* object, - SmallMapList* types, - Handle<String> name) { +void HOptimizedGraphBuilder::HandlePolymorphicLoadNamedField( + Property* expr, + HValue* object, + SmallMapList* types, + Handle<String> name) { int count = 0; int previous_field_offset = 0; bool previous_field_is_in_object = false; @@ -5424,7 +6148,7 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr, HInstruction* instr; if (count == types->length() && is_monomorphic_field) { AddInstruction(new(zone()) HCheckMaps(object, types, zone())); - instr = BuildLoadNamedField(object, map, &lookup, false); + instr = BuildLoadNamedField(object, map, &lookup); } else { HValue* context = environment()->LookupContext(); instr = new(zone()) HLoadNamedFieldPolymorphic(context, @@ -5439,11 +6163,12 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr, } -void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr, - HValue* object, - HValue* value, - SmallMapList* types, - Handle<String> name) { +void HOptimizedGraphBuilder::HandlePolymorphicStoreNamedField( + Assignment* expr, + HValue* object, + HValue* value, + SmallMapList* types, + Handle<String> name) { // TODO(ager): We should recognize when the prototype chains for different // maps are identical. In that case we can avoid repeatedly generating the // same prototype map checks. @@ -5467,7 +6192,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr, set_current_block(if_true); HInstruction* instr; CHECK_ALIVE(instr = - BuildStoreNamedField(object, name, value, map, &lookup, false)); + BuildStoreNamedField(object, name, value, map, &lookup)); instr->set_position(expr->position()); // Goto will add the HSimulate for the store. AddInstruction(instr); @@ -5497,10 +6222,10 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr, // unoptimized code). 
if (instr->HasObservableSideEffects()) { if (ast_context()->IsEffect()) { - AddSimulate(expr->id()); + AddSimulate(expr->id(), REMOVABLE_SIMULATE); } else { Push(value); - AddSimulate(expr->id()); + AddSimulate(expr->id(), REMOVABLE_SIMULATE); Drop(1); } } @@ -5515,7 +6240,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr, } -void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) { +void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) { Property* prop = expr->target()->AsProperty(); ASSERT(prop != NULL); expr->RecordTypeFeedback(oracle(), zone()); @@ -5543,7 +6268,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) { Handle<JSFunction> setter; Handle<JSObject> holder; if (LookupSetter(map, name, &setter, &holder)) { - AddCheckConstantFunction(holder, object, map, true); + AddCheckConstantFunction(holder, object, map); if (FLAG_inline_accessors && TryInlineSetter(setter, expr, value)) { return; } @@ -5570,7 +6295,9 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) { Push(value); instr->set_position(expr->position()); AddInstruction(instr); - if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId()); + if (instr->HasObservableSideEffects()) { + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); + } return ast_context()->ReturnValue(Pop()); } else { @@ -5587,7 +6314,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) { &has_side_effects); Push(value); ASSERT(has_side_effects); // Stores always have side effects. - AddSimulate(expr->AssignmentId()); + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); return ast_context()->ReturnValue(Pop()); } } @@ -5596,10 +6323,11 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) { // Because not every expression has a position and there is not common // superclass of Assignment and CountOperation, we cannot just pass the // owning expression instead of position and ast_id separately. 
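
The REMOVABLE_SIMULATE flag added at many AddSimulate() call sites in this patch marks simulates that the new MergeRemovableSimulates() pass (see the Optimize() hunk earlier in this section) may fold into the following simulate once representations are known. A stand-in sketch of the filtering shape; the real pass transfers the removable simulate's state forward rather than discarding it:

    #include <vector>

    struct SketchSimulate {
      bool removable;
      int ast_id;
    };

    std::vector<SketchSimulate> MergeRemovableSimulatesSketch(
        const std::vector<SketchSimulate>& in) {
      std::vector<SketchSimulate> out;
      for (const SketchSimulate& s : in) {
        if (s.removable) continue;  // the real pass folds this into the following
                                    // simulate; the sketch simply skips it
        out.push_back(s);
      }
      return out;
    }
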
-void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var, - HValue* value, - int position, - BailoutId ast_id) { +void HOptimizedGraphBuilder::HandleGlobalVariableAssignment( + Variable* var, + HValue* value, + int position, + BailoutId ast_id) { LookupResult lookup(isolate()); GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, true); if (type == kUseCell) { @@ -5609,7 +6337,9 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var, new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails()); instr->set_position(position); AddInstruction(instr); - if (instr->HasObservableSideEffects()) AddSimulate(ast_id); + if (instr->HasObservableSideEffects()) { + AddSimulate(ast_id, REMOVABLE_SIMULATE); + } } else { HValue* context = environment()->LookupContext(); HGlobalObject* global_object = new(zone()) HGlobalObject(context); @@ -5623,12 +6353,12 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var, instr->set_position(position); AddInstruction(instr); ASSERT(instr->HasObservableSideEffects()); - if (instr->HasObservableSideEffects()) AddSimulate(ast_id); + AddSimulate(ast_id, REMOVABLE_SIMULATE); } } -void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { +void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) { Expression* target = expr->target(); VariableProxy* proxy = target->AsVariableProxy(); Property* prop = target->AsProperty(); @@ -5700,7 +6430,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { new(zone()) HStoreContextSlot(context, var->index(), mode, Top()); AddInstruction(instr); if (instr->HasObservableSideEffects()) { - AddSimulate(expr->AssignmentId()); + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); } break; } @@ -5740,7 +6470,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { load = BuildLoadNamedGeneric(object, name, prop); } PushAndAdd(load); - if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId()); + if (load->HasObservableSideEffects()) { + AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE); + } CHECK_ALIVE(VisitForValue(expr->value())); HValue* right = Pop(); @@ -5748,10 +6480,12 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { HInstruction* instr = BuildBinaryOperation(operation, left, right); PushAndAdd(instr); - if (instr->HasObservableSideEffects()) AddSimulate(operation->id()); + if (instr->HasObservableSideEffects()) { + AddSimulate(operation->id(), REMOVABLE_SIMULATE); + } HInstruction* store; - if (!monomorphic) { + if (!monomorphic || map->is_observed()) { // If we don't know the monomorphic type, do a generic store. CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, instr)); } else { @@ -5770,7 +6504,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { // Drop the simulated receiver and value. Return the value. 
Drop(2); Push(instr); - if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId()); + if (store->HasObservableSideEffects()) { + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); + } return ast_context()->ReturnValue(Pop()); } else { @@ -5786,7 +6522,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { false, // is_store &has_side_effects); Push(load); - if (has_side_effects) AddSimulate(prop->LoadId()); + if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE); CHECK_ALIVE(VisitForValue(expr->value())); @@ -5795,7 +6531,9 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { HInstruction* instr = BuildBinaryOperation(operation, left, right); PushAndAdd(instr); - if (instr->HasObservableSideEffects()) AddSimulate(operation->id()); + if (instr->HasObservableSideEffects()) { + AddSimulate(operation->id(), REMOVABLE_SIMULATE); + } expr->RecordTypeFeedback(oracle(), zone()); HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(), @@ -5807,7 +6545,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { Drop(3); Push(instr); ASSERT(has_side_effects); // Stores always have side effects. - AddSimulate(expr->AssignmentId()); + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); return ast_context()->ReturnValue(Pop()); } @@ -5817,7 +6555,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) { } -void HGraphBuilder::VisitAssignment(Assignment* expr) { +void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -5930,7 +6668,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) { context, var->index(), mode, Top()); AddInstruction(instr); if (instr->HasObservableSideEffects()) { - AddSimulate(expr->AssignmentId()); + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); } return ast_context()->ReturnValue(Pop()); } @@ -5944,7 +6682,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) { } -void HGraphBuilder::VisitThrow(Throw* expr) { +void HOptimizedGraphBuilder::VisitThrow(Throw* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -5965,15 +6703,10 @@ void HGraphBuilder::VisitThrow(Throw* expr) { } -HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object, - Handle<Map> map, - LookupResult* lookup, - bool smi_and_map_check) { - if (smi_and_map_check) { - AddInstruction(new(zone()) HCheckNonSmi(object)); - AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone())); - } - +HLoadNamedField* HOptimizedGraphBuilder::BuildLoadNamedField( + HValue* object, + Handle<Map> map, + LookupResult* lookup) { int index = lookup->GetLocalFieldIndexFromMap(*map); if (index < 0) { // Negative property indices are in-object properties, indexed @@ -5988,234 +6721,106 @@ HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object, } -HInstruction* HGraphBuilder::BuildLoadNamedGeneric(HValue* object, - Handle<String> name, - Property* expr) { - if (expr->IsUninitialized() && !FLAG_always_opt) { - AddInstruction(new(zone()) HSoftDeoptimize); - current_block()->MarkAsDeoptimizing(); +HInstruction* HOptimizedGraphBuilder::BuildLoadNamedGeneric( + HValue* object, + Handle<String> name, + Property* expr) { + if (expr->IsUninitialized()) { + AddSoftDeoptimize(); } HValue* context = environment()->LookupContext(); return new(zone()) HLoadNamedGeneric(context, object, name); } -HInstruction* 
HGraphBuilder::BuildCallGetter(HValue* object, - Handle<Map> map, - Handle<JSFunction> getter, - Handle<JSObject> holder) { - AddCheckConstantFunction(holder, object, map, true); +HInstruction* HOptimizedGraphBuilder::BuildCallGetter( + HValue* object, + Handle<Map> map, + Handle<JSFunction> getter, + Handle<JSObject> holder) { + AddCheckConstantFunction(holder, object, map); AddInstruction(new(zone()) HPushArgument(object)); return new(zone()) HCallConstantFunction(getter, 1); } -HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object, - Handle<String> name, - Property* expr, - Handle<Map> map) { +HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic( + HValue* object, + Handle<String> name, + Property* expr, + Handle<Map> map) { // Handle a load from a known field. ASSERT(!map->is_dictionary_map()); LookupResult lookup(isolate()); map->LookupDescriptor(NULL, *name, &lookup); if (lookup.IsField()) { - return BuildLoadNamedField(object, map, &lookup, true); + AddCheckMapsWithTransitions(object, map); + return BuildLoadNamedField(object, map, &lookup); } // Handle a load of a constant known function. if (lookup.IsConstantFunction()) { - AddInstruction(new(zone()) HCheckNonSmi(object)); - AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone())); + AddCheckMapsWithTransitions(object, map); Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map)); return new(zone()) HConstant(function, Representation::Tagged()); } + // Handle a load from a known field somewhere in the prototype chain. + LookupInPrototypes(map, name, &lookup); + if (lookup.IsField()) { + Handle<JSObject> prototype(JSObject::cast(map->prototype())); + Handle<JSObject> holder(lookup.holder()); + Handle<Map> holder_map(holder->map()); + AddCheckMapsWithTransitions(object, map); + HInstruction* holder_value = AddInstruction( + new(zone()) HCheckPrototypeMaps(prototype, holder, zone())); + return BuildLoadNamedField(holder_value, holder_map, &lookup); + } + + // Handle a load of a constant function somewhere in the prototype chain. + if (lookup.IsConstantFunction()) { + Handle<JSObject> prototype(JSObject::cast(map->prototype())); + Handle<JSObject> holder(lookup.holder()); + Handle<Map> holder_map(holder->map()); + AddCheckMapsWithTransitions(object, map); + AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder, zone())); + Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*holder_map)); + return new(zone()) HConstant(function, Representation::Tagged()); + } + // No luck, do a generic load. 
return BuildLoadNamedGeneric(object, name, expr); } -HInstruction* HGraphBuilder::BuildLoadKeyedGeneric(HValue* object, - HValue* key) { +HInstruction* HOptimizedGraphBuilder::BuildLoadKeyedGeneric(HValue* object, + HValue* key) { HValue* context = environment()->LookupContext(); return new(zone()) HLoadKeyedGeneric(context, object, key); } -HInstruction* HGraphBuilder::BuildExternalArrayElementAccess( - HValue* external_elements, - HValue* checked_key, +HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess( + HValue* object, + HValue* key, HValue* val, HValue* dependency, - ElementsKind elements_kind, + Handle<Map> map, bool is_store) { - if (is_store) { - ASSERT(val != NULL); - switch (elements_kind) { - case EXTERNAL_PIXEL_ELEMENTS: { - val = AddInstruction(new(zone()) HClampToUint8(val)); - break; - } - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: { - if (!val->representation().IsInteger32()) { - val = AddInstruction(new(zone()) HChange( - val, - Representation::Integer32(), - true, // Truncate to int32. - false)); // Don't deoptimize undefined (irrelevant here). - } - break; - } - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - break; - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - return new(zone()) HStoreKeyedSpecializedArrayElement( - external_elements, checked_key, val, elements_kind); - } else { - ASSERT(val == NULL); - HLoadKeyedSpecializedArrayElement* load = - new(zone()) HLoadKeyedSpecializedArrayElement( - external_elements, checked_key, dependency, elements_kind); - if (FLAG_opt_safe_uint32_operations && - elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { - graph()->RecordUint32Instruction(load); - } - return load; - } -} - - -HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements, - HValue* checked_key, - HValue* val, - HValue* load_dependency, - ElementsKind elements_kind, - bool is_store) { - if (is_store) { - ASSERT(val != NULL); - switch (elements_kind) { - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - return new(zone()) HStoreKeyedFastDoubleElement( - elements, checked_key, val); - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - // Smi-only arrays need a smi check. - AddInstruction(new(zone()) HCheckSmi(val)); - // Fall through. - case FAST_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - return new(zone()) HStoreKeyedFastElement( - elements, checked_key, val, elements_kind); - default: - UNREACHABLE(); - return NULL; - } - } - // It's an element load (!is_store). - HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ? - OMIT_HOLE_CHECK : - PERFORM_HOLE_CHECK; - if (IsFastDoubleElementsKind(elements_kind)) { - return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key, - load_dependency, mode); - } else { // Smi or Object elements. 
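
The extended BuildLoadNamedMonomorphic hunk above now tries, in order: an own field, an own constant function, a field found somewhere on the prototype chain, a constant function on the prototype chain, and finally the generic load. A sketch of that fallback order with stand-in types:

    enum class SketchLoadKind {
      kOwnField, kOwnConstant, kProtoField, kProtoConstant, kGeneric
    };

    struct SketchLookup {
      bool is_field;
      bool is_constant_function;
    };

    SketchLoadKind ClassifyNamedLoad(const SketchLookup& own,
                                     const SketchLookup& proto_chain) {
      if (own.is_field) return SketchLoadKind::kOwnField;            // map check + field load
      if (own.is_constant_function) return SketchLoadKind::kOwnConstant;
      if (proto_chain.is_field) return SketchLoadKind::kProtoField;  // adds prototype map checks
      if (proto_chain.is_constant_function) return SketchLoadKind::kProtoConstant;
      return SketchLoadKind::kGeneric;
    }
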
- return new(zone()) HLoadKeyedFastElement(elements, checked_key, - load_dependency, elements_kind); - } -} - - -HInstruction* HGraphBuilder::BuildMonomorphicElementAccess(HValue* object, - HValue* key, - HValue* val, - HValue* dependency, - Handle<Map> map, - bool is_store) { HCheckMaps* mapcheck = new(zone()) HCheckMaps(object, map, zone(), dependency); AddInstruction(mapcheck); if (dependency) { mapcheck->ClearGVNFlag(kDependsOnElementsKind); } - return BuildUncheckedMonomorphicElementAccess(object, key, val, - mapcheck, map, is_store); + return BuildUncheckedMonomorphicElementAccess( + object, key, val, + mapcheck, map->instance_type() == JS_ARRAY_TYPE, + map->elements_kind(), is_store); } -HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess( - HValue* object, - HValue* key, - HValue* val, - HCheckMaps* mapcheck, - Handle<Map> map, - bool is_store) { - // No GVNFlag is necessary for ElementsKind if there is an explicit dependency - // on a HElementsTransition instruction. The flag can also be removed if the - // map to check has FAST_HOLEY_ELEMENTS, since there can be no further - // ElementsKind transitions. Finally, the dependency can be removed for stores - // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the - // generated store code. - if ((map->elements_kind() == FAST_HOLEY_ELEMENTS) || - (map->elements_kind() == FAST_ELEMENTS && is_store)) { - mapcheck->ClearGVNFlag(kDependsOnElementsKind); - } - bool fast_smi_only_elements = map->has_fast_smi_elements(); - bool fast_elements = map->has_fast_object_elements(); - HInstruction* elements = - AddInstruction(new(zone()) HLoadElements(object, mapcheck)); - if (is_store && (fast_elements || fast_smi_only_elements)) { - HCheckMaps* check_cow_map = new(zone()) HCheckMaps( - elements, isolate()->factory()->fixed_array_map(), zone()); - check_cow_map->ClearGVNFlag(kDependsOnElementsKind); - AddInstruction(check_cow_map); - } - HInstruction* length = NULL; - HInstruction* checked_key = NULL; - if (map->has_external_array_elements()) { - length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); - checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length, - ALLOW_SMI_KEY)); - HLoadExternalArrayPointer* external_elements = - new(zone()) HLoadExternalArrayPointer(elements); - AddInstruction(external_elements); - return BuildExternalArrayElementAccess( - external_elements, checked_key, val, mapcheck, - map->elements_kind(), is_store); - } - ASSERT(fast_smi_only_elements || - fast_elements || - map->has_fast_double_elements()); - if (map->instance_type() == JS_ARRAY_TYPE) { - length = AddInstruction(new(zone()) HJSArrayLength(object, mapcheck, - HType::Smi())); - } else { - length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); - } - checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length, - ALLOW_SMI_KEY)); - return BuildFastElementAccess(elements, checked_key, val, mapcheck, - map->elements_kind(), is_store); -} - - -HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad( +HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad( HValue* object, HValue* key, HValue* val, @@ -6263,19 +6868,23 @@ HInstruction* HGraphBuilder::TryBuildConsolidatedElementLoad( HCheckMaps* check_maps = new(zone()) HCheckMaps(object, maps, zone()); AddInstruction(check_maps); HInstruction* instr = BuildUncheckedMonomorphicElementAccess( - object, key, val, check_maps, most_general_consolidated_map, false); + object, key, val, check_maps, + 
most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE, + most_general_consolidated_map->elements_kind(), + false); return instr; } -HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, - HValue* key, - HValue* val, - Expression* prop, - BailoutId ast_id, - int position, - bool is_store, - bool* has_side_effects) { +HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess( + HValue* object, + HValue* key, + HValue* val, + Expression* prop, + BailoutId ast_id, + int position, + bool is_store, + bool* has_side_effects) { *has_side_effects = false; AddInstruction(new(zone()) HCheckNonSmi(object)); SmallMapList* maps = prop->GetReceiverTypes(); @@ -6330,8 +6939,9 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, ASSERT(Map::IsValidElementsTransition( map->elements_kind(), transition_target.at(i)->elements_kind())); + HValue* context = environment()->LookupContext(); transition = new(zone()) HTransitionElementsKind( - object, map, transition_target.at(i)); + context, object, map, transition_target.at(i)); AddInstruction(transition); } else { type_todo[map->elements_kind()] = true; @@ -6365,7 +6975,6 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, HInstruction* elements_kind_instr = AddInstruction(new(zone()) HElementsKind(object)); - HCompareConstantEqAndBranch* elements_kind_branch = NULL; HInstruction* elements = AddInstruction(new(zone()) HLoadElements(object, checkspec)); HLoadExternalArrayPointer* external_elements = NULL; @@ -6389,15 +6998,16 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, && todo_external_array) { HInstruction* length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); - checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length)); + checked_key = AddBoundsCheck(key, length); external_elements = new(zone()) HLoadExternalArrayPointer(elements); AddInstruction(external_elements); } if (type_todo[elements_kind]) { HBasicBlock* if_true = graph()->CreateBasicBlock(); HBasicBlock* if_false = graph()->CreateBasicBlock(); - elements_kind_branch = new(zone()) HCompareConstantEqAndBranch( - elements_kind_instr, elements_kind, Token::EQ_STRICT); + HCompareConstantEqAndBranch* elements_kind_branch = + new(zone()) HCompareConstantEqAndBranch( + elements_kind_instr, elements_kind, Token::EQ_STRICT); elements_kind_branch->SetSuccessorAt(0, if_true); elements_kind_branch->SetSuccessorAt(1, if_false); current_block()->Finish(elements_kind_branch); @@ -6430,8 +7040,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, HInstruction* length; length = AddInstruction(new(zone()) HJSArrayLength(object, typecheck, HType::Smi())); - checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length, - ALLOW_SMI_KEY)); + checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY); access = AddInstruction(BuildFastElementAccess( elements, checked_key, val, elements_kind_branch, elements_kind, is_store)); @@ -6447,8 +7056,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, set_current_block(if_fastobject); length = AddInstruction(new(zone()) HFixedArrayBaseLength(elements)); - checked_key = AddInstruction(new(zone()) HBoundsCheck(key, length, - ALLOW_SMI_KEY)); + checked_key = AddBoundsCheck(key, length, ALLOW_SMI_KEY); access = AddInstruction(BuildFastElementAccess( elements, checked_key, val, elements_kind_branch, elements_kind, is_store)); @@ -6460,8 +7068,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, } } 
else { // External array elements. access = AddInstruction(BuildExternalArrayElementAccess( - external_elements, checked_key, val, elements_kind_branch, - elements_kind, is_store)); + external_elements, checked_key, val, + elements_kind_branch, elements_kind, is_store)); } *has_side_effects |= access->HasObservableSideEffects(); if (position != RelocInfo::kNoPosition) access->set_position(position); @@ -6481,14 +7089,15 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object, } -HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj, - HValue* key, - HValue* val, - Expression* expr, - BailoutId ast_id, - int position, - bool is_store, - bool* has_side_effects) { +HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess( + HValue* obj, + HValue* key, + HValue* val, + Expression* expr, + BailoutId ast_id, + int position, + bool is_store, + bool* has_side_effects) { ASSERT(!expr->IsPropertyName()); HInstruction* instr = NULL; if (expr->IsMonomorphic()) { @@ -6518,9 +7127,10 @@ HValue* HGraphBuilder::HandleKeyedElementAccess(HValue* obj, } -HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object, - HValue* key, - HValue* value) { +HInstruction* HOptimizedGraphBuilder::BuildStoreKeyedGeneric( + HValue* object, + HValue* key, + HValue* value) { HValue* context = environment()->LookupContext(); return new(zone()) HStoreKeyedGeneric( context, @@ -6531,7 +7141,7 @@ HInstruction* HGraphBuilder::BuildStoreKeyedGeneric(HValue* object, } -void HGraphBuilder::EnsureArgumentsArePushedForAccess() { +void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() { // Outermost function already has arguments on the stack. if (function_state()->outer() == NULL) return; @@ -6559,7 +7169,7 @@ void HGraphBuilder::EnsureArgumentsArePushedForAccess() { } -bool HGraphBuilder::TryArgumentsAccess(Property* expr) { +bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) { VariableProxy* proxy = expr->obj()->AsVariableProxy(); if (proxy == NULL) return false; if (!proxy->var()->IsStackAllocated()) return false; @@ -6570,7 +7180,7 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) { HInstruction* result = NULL; if (expr->key()->IsPropertyName()) { Handle<String> name = expr->key()->AsLiteral()->AsPropertyName(); - if (!name->IsEqualTo(CStrVector("length"))) return false; + if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false; if (function_state()->outer() == NULL) { HInstruction* elements = AddInstruction( @@ -6581,8 +7191,8 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) { int argument_count = environment()-> arguments_environment()->parameter_count() - 1; result = new(zone()) HConstant( - Handle<Object>(Smi::FromInt(argument_count)), - Representation::Integer32()); + Handle<Object>(Smi::FromInt(argument_count), isolate()), + Representation::Integer32()); } } else { Push(graph()->GetArgumentsObject()); @@ -6595,8 +7205,7 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) { new(zone()) HArgumentsElements(false)); HInstruction* length = AddInstruction( new(zone()) HArgumentsLength(elements)); - HInstruction* checked_key = - AddInstruction(new(zone()) HBoundsCheck(key, length)); + HInstruction* checked_key = AddBoundsCheck(key, length); result = new(zone()) HAccessArgumentsAt(elements, length, checked_key); } else { EnsureArgumentsArePushedForAccess(); @@ -6606,10 +7215,9 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) { int argument_count = environment()-> arguments_environment()->parameter_count() - 1; HInstruction* length 
= AddInstruction(new(zone()) HConstant( - Handle<Object>(Smi::FromInt(argument_count)), - Representation::Integer32())); - HInstruction* checked_key = - AddInstruction(new(zone()) HBoundsCheck(key, length)); + Handle<Object>(Smi::FromInt(argument_count), isolate()), + Representation::Integer32())); + HInstruction* checked_key = AddBoundsCheck(key, length); result = new(zone()) HAccessArgumentsAt(elements, length, checked_key); } } @@ -6618,7 +7226,7 @@ bool HGraphBuilder::TryArgumentsAccess(Property* expr) { } -void HGraphBuilder::VisitProperty(Property* expr) { +void HOptimizedGraphBuilder::VisitProperty(Property* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -6639,16 +7247,16 @@ void HGraphBuilder::VisitProperty(Property* expr) { HValue* string = Pop(); AddInstruction(new(zone()) HCheckNonSmi(string)); AddInstruction(HCheckInstanceType::NewIsString(string, zone())); - instr = new(zone()) HStringLength(string); + instr = HStringLength::New(zone(), string); } else if (expr->IsStringAccess()) { CHECK_ALIVE(VisitForValue(expr->key())); HValue* index = Pop(); HValue* string = Pop(); HValue* context = environment()->LookupContext(); - HStringCharCodeAt* char_code = + HInstruction* char_code = BuildStringCharCodeAt(context, string, index); AddInstruction(char_code); - instr = new(zone()) HStringCharFromCode(context, char_code); + instr = HStringCharFromCode::New(zone(), context, char_code); } else if (expr->IsFunctionPrototype()) { HValue* function = Pop(); @@ -6658,18 +7266,22 @@ void HGraphBuilder::VisitProperty(Property* expr) { } else if (expr->key()->IsPropertyName()) { Handle<String> name = expr->key()->AsLiteral()->AsPropertyName(); SmallMapList* types = expr->GetReceiverTypes(); + HValue* object = Top(); - bool monomorphic = expr->IsMonomorphic(); Handle<Map> map; + bool monomorphic = false; if (expr->IsMonomorphic()) { map = types->first(); - if (map->is_dictionary_map()) monomorphic = false; + monomorphic = !map->is_dictionary_map(); + } else if (object->HasMonomorphicJSObjectType()) { + map = object->GetMonomorphicJSObjectMap(); + monomorphic = !map->is_dictionary_map(); } if (monomorphic) { Handle<JSFunction> getter; Handle<JSObject> holder; if (LookupGetter(map, name, &getter, &holder)) { - AddCheckConstantFunction(holder, Top(), map, true); + AddCheckConstantFunction(holder, Top(), map); if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return; AddInstruction(new(zone()) HPushArgument(Pop())); instr = new(zone()) HCallConstantFunction(getter, 1); @@ -6695,10 +7307,10 @@ void HGraphBuilder::VisitProperty(Property* expr) { &has_side_effects); if (has_side_effects) { if (ast_context()->IsEffect()) { - AddSimulate(expr->id()); + AddSimulate(expr->id(), REMOVABLE_SIMULATE); } else { Push(load); - AddSimulate(expr->id()); + AddSimulate(expr->id(), REMOVABLE_SIMULATE); Drop(1); } } @@ -6709,22 +7321,25 @@ void HGraphBuilder::VisitProperty(Property* expr) { } -void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder, - HValue* receiver, - Handle<Map> receiver_map, - bool smi_and_map_check) { +void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder, + Handle<Map> receiver_map) { + if (!holder.is_null()) { + Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype())); + AddInstruction( + new(zone()) HCheckPrototypeMaps(prototype, holder, zone())); + } +} + + +void HOptimizedGraphBuilder::AddCheckConstantFunction( + Handle<JSObject> holder, + HValue* receiver, + 
Handle<Map> receiver_map) { // Constant functions have the nice property that the map will change if they // are overwritten. Therefore it is enough to check the map of the holder and // its prototypes. - if (smi_and_map_check) { - AddInstruction(new(zone()) HCheckNonSmi(receiver)); - AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map, - zone())); - } - if (!holder.is_null()) { - AddInstruction(new(zone()) HCheckPrototypeMaps( - Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder)); - } + AddCheckMapsWithTransitions(receiver, receiver_map); + AddCheckPrototypeMaps(holder, receiver_map); } @@ -6761,10 +7376,11 @@ static int CompareHotness(void const* a, void const* b) { } -void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, - HValue* receiver, - SmallMapList* types, - Handle<String> name) { +void HOptimizedGraphBuilder::HandlePolymorphicCallNamed( + Call* expr, + HValue* receiver, + SmallMapList* types, + Handle<String> name) { // TODO(ager): We should recognize when the prototype chains for different // maps are identical. In that case we can avoid repeatedly generating the // same prototype map checks. @@ -6772,11 +7388,25 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, HBasicBlock* join = NULL; FunctionSorter order[kMaxCallPolymorphism]; int ordered_functions = 0; + + Handle<Map> initial_string_map( + isolate()->native_context()->string_function()->initial_map()); + Handle<Map> string_marker_map( + JSObject::cast(initial_string_map->prototype())->map()); + Handle<Map> initial_number_map( + isolate()->native_context()->number_function()->initial_map()); + Handle<Map> number_marker_map( + JSObject::cast(initial_number_map->prototype())->map()); + Handle<Map> heap_number_map = isolate()->factory()->heap_number_map(); + + bool handle_smi = false; + for (int i = 0; i < types->length() && ordered_functions < kMaxCallPolymorphism; ++i) { Handle<Map> map = types->at(i); if (expr->ComputeTarget(map, name)) { + if (map.is_identical_to(number_marker_map)) handle_smi = true; order[ordered_functions++] = FunctionSorter(i, expr->target()->shared()->profiler_ticks(), @@ -6790,23 +7420,61 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, sizeof(order[0]), &CompareHotness); + HBasicBlock* number_block = NULL; + for (int fn = 0; fn < ordered_functions; ++fn) { int i = order[fn].index(); Handle<Map> map = types->at(i); if (fn == 0) { // Only needed once. 
- AddInstruction(new(zone()) HCheckNonSmi(receiver)); join = graph()->CreateBasicBlock(); + if (handle_smi) { + HBasicBlock* empty_smi_block = graph()->CreateBasicBlock(); + HBasicBlock* not_smi_block = graph()->CreateBasicBlock(); + number_block = graph()->CreateBasicBlock(); + HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(receiver); + smicheck->SetSuccessorAt(0, empty_smi_block); + smicheck->SetSuccessorAt(1, not_smi_block); + current_block()->Finish(smicheck); + empty_smi_block->Goto(number_block); + set_current_block(not_smi_block); + } else { + AddInstruction(new(zone()) HCheckNonSmi(receiver)); + } } HBasicBlock* if_true = graph()->CreateBasicBlock(); HBasicBlock* if_false = graph()->CreateBasicBlock(); - HCompareMap* compare = - new(zone()) HCompareMap(receiver, map, if_true, if_false); + HUnaryControlInstruction* compare; + + if (handle_smi && map.is_identical_to(number_marker_map)) { + compare = new(zone()) HCompareMap( + receiver, heap_number_map, if_true, if_false); + map = initial_number_map; + expr->set_number_check( + Handle<JSObject>(JSObject::cast(map->prototype()))); + } else if (map.is_identical_to(string_marker_map)) { + compare = new(zone()) HIsStringAndBranch(receiver); + compare->SetSuccessorAt(0, if_true); + compare->SetSuccessorAt(1, if_false); + map = initial_string_map; + expr->set_string_check( + Handle<JSObject>(JSObject::cast(map->prototype()))); + } else { + compare = new(zone()) HCompareMap(receiver, map, if_true, if_false); + expr->set_map_check(); + } + current_block()->Finish(compare); + if (expr->check_type() == NUMBER_CHECK) { + if_true->Goto(number_block); + if_true = number_block; + number_block->SetJoinId(expr->id()); + } set_current_block(if_true); + expr->ComputeTarget(map, name); - AddCheckConstantFunction(expr->holder(), receiver, map, false); + AddCheckPrototypeMaps(expr->holder(), map); if (FLAG_trace_inlining && FLAG_polymorphic_inlining) { Handle<JSFunction> caller = info()->closure(); SmartArrayPointer<char> caller_name = @@ -6866,9 +7534,9 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr, } -void HGraphBuilder::TraceInline(Handle<JSFunction> target, - Handle<JSFunction> caller, - const char* reason) { +void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target, + Handle<JSFunction> caller, + const char* reason) { if (FLAG_trace_inlining) { SmartArrayPointer<char> target_name = target->shared()->DebugName()->ToCString(); @@ -6887,7 +7555,7 @@ void HGraphBuilder::TraceInline(Handle<JSFunction> target, static const int kNotInlinable = 1000000000; -int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) { +int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) { if (!FLAG_use_inlining) return kNotInlinable; // Precondition: call is monomorphic and we have found a target with the @@ -6918,13 +7586,13 @@ int HGraphBuilder::InliningAstSize(Handle<JSFunction> target) { } -bool HGraphBuilder::TryInline(CallKind call_kind, - Handle<JSFunction> target, - int arguments_count, - HValue* implicit_return_value, - BailoutId ast_id, - BailoutId return_id, - InliningKind inlining_kind) { +bool HOptimizedGraphBuilder::TryInline(CallKind call_kind, + Handle<JSFunction> target, + int arguments_count, + HValue* implicit_return_value, + BailoutId ast_id, + BailoutId return_id, + InliningKind inlining_kind) { int nodes_added = InliningAstSize(target); if (nodes_added == kNotInlinable) return false; @@ -6935,8 +7603,6 @@ bool HGraphBuilder::TryInline(CallKind call_kind, return false; } - Handle<SharedFunctionInfo> 
target_shared(target->shared()); - #if !defined(V8_TARGET_ARCH_IA32) // Target must be able to use caller's context. CompilationInfo* outer_info = info(); @@ -6967,7 +7633,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind, for (FunctionState* state = function_state(); state != NULL; state = state->outer()) { - if (state->compilation_info()->closure()->shared() == *target_shared) { + if (*state->compilation_info()->closure() == *target) { TraceInline(target, caller, "target is recursive"); return false; } @@ -6982,6 +7648,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind, // Parse and allocate variables. CompilationInfo target_info(target, zone()); + Handle<SharedFunctionInfo> target_shared(target->shared()); if (!ParserApi::Parse(&target_info, kNoParsingFlags) || !Scope::Analyze(&target_info)) { if (target_info.isolate()->has_pending_exception()) { @@ -7049,7 +7716,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind, TraceInline(target, caller, "could not generate deoptimization info"); return false; } - if (target_shared->scope_info() == ScopeInfo::Empty()) { + if (target_shared->scope_info() == ScopeInfo::Empty(isolate())) { // The scope info might not have been set if a lazily compiled // function is inlined before being called for the first time. Handle<ScopeInfo> target_scope_info = @@ -7081,13 +7748,15 @@ bool HGraphBuilder::TryInline(CallKind call_kind, this, &target_info, &target_oracle, inlining_kind); HConstant* undefined = graph()->GetConstantUndefined(); + bool undefined_receiver = HEnvironment::UseUndefinedReceiver( + target, function, call_kind, inlining_kind); HEnvironment* inner_env = environment()->CopyForInlining(target, arguments_count, function, undefined, - call_kind, - function_state()->inlining_kind()); + function_state()->inlining_kind(), + undefined_receiver); #ifdef V8_TARGET_ARCH_IA32 // IA32 only, overwrite the caller's context in the deoptimization // environment with the correct one. @@ -7121,10 +7790,10 @@ bool HGraphBuilder::TryInline(CallKind call_kind, new(zone()) HEnterInlined(target, arguments_count, function, - call_kind, function_state()->inlining_kind(), function->scope()->arguments(), - arguments_values); + arguments_values, + undefined_receiver); function_state()->set_entry(enter_inlined); AddInstruction(enter_inlined); @@ -7152,9 +7821,8 @@ bool HGraphBuilder::TryInline(CallKind call_kind, inlined_count_ += nodes_added; ASSERT(unoptimized_code->kind() == Code::FUNCTION); - Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info()); Handle<TypeFeedbackInfo> type_info( - Handle<TypeFeedbackInfo>::cast(maybe_type_info)); + TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info())); graph()->update_type_change_checksum(type_info->own_type_change_checksum()); TraceInline(target, caller, NULL); @@ -7234,7 +7902,7 @@ bool HGraphBuilder::TryInline(CallKind call_kind, } -bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) { +bool HOptimizedGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) { // The function call we are inlining is a method call if the call // is a property call. 
CallKind call_kind = (expr->expression()->AsProperty() == NULL) @@ -7251,8 +7919,8 @@ bool HGraphBuilder::TryInlineCall(Call* expr, bool drop_extra) { } -bool HGraphBuilder::TryInlineConstruct(CallNew* expr, - HValue* implicit_return_value) { +bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr, + HValue* implicit_return_value) { return TryInline(CALL_AS_FUNCTION, expr->target(), expr->arguments()->length(), @@ -7263,8 +7931,8 @@ bool HGraphBuilder::TryInlineConstruct(CallNew* expr, } -bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter, - Property* prop) { +bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter, + Property* prop) { return TryInline(CALL_AS_METHOD, getter, 0, @@ -7275,9 +7943,9 @@ bool HGraphBuilder::TryInlineGetter(Handle<JSFunction> getter, } -bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter, - Assignment* assignment, - HValue* implicit_return_value) { +bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter, + Assignment* assignment, + HValue* implicit_return_value) { return TryInline(CALL_AS_METHOD, setter, 1, @@ -7288,11 +7956,29 @@ bool HGraphBuilder::TryInlineSetter(Handle<JSFunction> setter, } -bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) { +bool HOptimizedGraphBuilder::TryInlineApply(Handle<JSFunction> function, + Call* expr, + int arguments_count) { + return TryInline(CALL_AS_METHOD, + function, + arguments_count, + NULL, + expr->id(), + expr->ReturnId(), + NORMAL_RETURN); +} + + +bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, + bool drop_extra) { if (!expr->target()->shared()->HasBuiltinFunctionId()) return false; BuiltinFunctionId id = expr->target()->shared()->builtin_function_id(); switch (id) { + case kMathExp: + if (!FLAG_fast_math) break; + // Fall through if FLAG_fast_math. case kMathRound: + case kMathFloor: case kMathAbs: case kMathSqrt: case kMathLog: @@ -7303,8 +7989,8 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) { HValue* argument = Pop(); HValue* context = environment()->LookupContext(); Drop(1); // Receiver. - HUnaryMathOperation* op = - new(zone()) HUnaryMathOperation(context, argument, id); + HInstruction* op = + HUnaryMathOperation::New(zone(), context, argument, id); op->set_position(expr->position()); if (drop_extra) Drop(1); // Optionally drop the function. ast_context()->ReturnInstruction(op, expr->id()); @@ -7319,10 +8005,11 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) { } -bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, - HValue* receiver, - Handle<Map> receiver_map, - CheckType check_type) { +bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall( + Call* expr, + HValue* receiver, + Handle<Map> receiver_map, + CheckType check_type) { ASSERT(check_type != RECEIVER_MAP_CHECK || !receiver_map.is_null()); // Try to inline calls like Math.* as operations in the calling function. 
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false; @@ -7338,20 +8025,24 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, ASSERT(!expr->holder().is_null()); AddInstruction(new(zone()) HCheckPrototypeMaps( oracle()->GetPrototypeForPrimitiveCheck(STRING_CHECK), - expr->holder())); - HStringCharCodeAt* char_code = + expr->holder(), + zone())); + HInstruction* char_code = BuildStringCharCodeAt(context, string, index); if (id == kStringCharCodeAt) { ast_context()->ReturnInstruction(char_code, expr->id()); return true; } AddInstruction(char_code); - HStringCharFromCode* result = - new(zone()) HStringCharFromCode(context, char_code); + HInstruction* result = + HStringCharFromCode::New(zone(), context, char_code); ast_context()->ReturnInstruction(result, expr->id()); return true; } break; + case kMathExp: + if (!FLAG_fast_math) break; + // Fall through if FLAG_fast_math. case kMathRound: case kMathFloor: case kMathAbs: @@ -7361,12 +8052,12 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, case kMathCos: case kMathTan: if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) { - AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true); + AddCheckConstantFunction(expr->holder(), receiver, receiver_map); HValue* argument = Pop(); HValue* context = environment()->LookupContext(); Drop(1); // Receiver. - HUnaryMathOperation* op = - new(zone()) HUnaryMathOperation(context, argument, id); + HInstruction* op = + HUnaryMathOperation::New(zone(), context, argument, id); op->set_position(expr->position()); ast_context()->ReturnInstruction(op, expr->id()); return true; @@ -7374,7 +8065,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, break; case kMathPow: if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) { - AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true); + AddCheckConstantFunction(expr->holder(), receiver, receiver_map); HValue* right = Pop(); HValue* left = Pop(); Pop(); // Pop receiver. @@ -7385,30 +8076,31 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, double exponent = HConstant::cast(right)->DoubleValue(); if (exponent == 0.5) { result = - new(zone()) HUnaryMathOperation(context, left, kMathPowHalf); + HUnaryMathOperation::New(zone(), context, left, kMathPowHalf); } else if (exponent == -0.5) { HConstant* double_one = - new(zone()) HConstant(Handle<Object>(Smi::FromInt(1)), + new(zone()) HConstant(Handle<Object>(Smi::FromInt(1), + isolate()), Representation::Double()); AddInstruction(double_one); - HUnaryMathOperation* square_root = - new(zone()) HUnaryMathOperation(context, left, kMathPowHalf); - AddInstruction(square_root); + HInstruction* sqrt = + HUnaryMathOperation::New(zone(), context, left, kMathPowHalf); + AddInstruction(sqrt); // MathPowHalf doesn't have side effects so there's no need for // an environment simulation here. 
- ASSERT(!square_root->HasObservableSideEffects()); - result = new(zone()) HDiv(context, double_one, square_root); + ASSERT(!sqrt->HasObservableSideEffects()); + result = HDiv::New(zone(), context, double_one, sqrt); } else if (exponent == 2.0) { - result = new(zone()) HMul(context, left, left); + result = HMul::New(zone(), context, left, left); } } else if (right->IsConstant() && HConstant::cast(right)->HasInteger32Value() && HConstant::cast(right)->Integer32Value() == 2) { - result = new(zone()) HMul(context, left, left); + result = HMul::New(zone(), context, left, left); } if (result == NULL) { - result = new(zone()) HPower(left, right); + result = HPower::New(zone(), left, right); } ast_context()->ReturnInstruction(result, expr->id()); return true; @@ -7416,7 +8108,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, break; case kMathRandom: if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) { - AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true); + AddCheckConstantFunction(expr->holder(), receiver, receiver_map); Drop(1); // Receiver. HValue* context = environment()->LookupContext(); HGlobalObject* global_object = new(zone()) HGlobalObject(context); @@ -7429,14 +8121,15 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, case kMathMax: case kMathMin: if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) { - AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true); + AddCheckConstantFunction(expr->holder(), receiver, receiver_map); HValue* right = Pop(); HValue* left = Pop(); Drop(1); // Receiver. HValue* context = environment()->LookupContext(); HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin : HMathMinMax::kMathMax; - HMathMinMax* result = new(zone()) HMathMinMax(context, left, right, op); + HInstruction* result = + HMathMinMax::New(zone(), context, left, right, op); ast_context()->ReturnInstruction(result, expr->id()); return true; } @@ -7449,7 +8142,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr, } -bool HGraphBuilder::TryCallApply(Call* expr) { +bool HOptimizedGraphBuilder::TryCallApply(Call* expr) { Expression* callee = expr->expression(); Property* prop = callee->AsProperty(); ASSERT(prop != NULL); @@ -7478,7 +8171,7 @@ bool HGraphBuilder::TryCallApply(Call* expr) { VisitForValue(prop->obj()); if (HasStackOverflow() || current_block() == NULL) return true; HValue* function = Top(); - AddCheckConstantFunction(expr->holder(), function, function_map, true); + AddCheckConstantFunction(expr->holder(), function, function_map); Drop(1); VisitForValue(args->at(0)); @@ -7506,24 +8199,37 @@ bool HGraphBuilder::TryCallApply(Call* expr) { // TODO(mstarzinger): For now we just ensure arguments are pushed // right after HEnterInlined, but we could be smarter about this. 
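For reference, the constant-exponent reductions applied by the kMathPow handling above can be summarized as follows. This is a standalone sketch, not V8 code, and it glosses over the -0 and +/-Infinity corner cases that the real pow-half operation has to respect.

```cpp
#include <cmath>

// Strength reduction for Math.pow with a constant exponent.
double PowWithConstantExponent(double base, double exponent) {
  if (exponent == 0.5) return std::sqrt(base);         // Math.pow(x, 0.5)
  if (exponent == -0.5) return 1.0 / std::sqrt(base);  // Math.pow(x, -0.5)
  if (exponent == 2.0) return base * base;             // Math.pow(x, 2)
  return std::pow(base, exponent);                     // generic fallback
}
```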
EnsureArgumentsArePushedForAccess(); - HValue* context = environment()->LookupContext(); - - HValue* wrapped_receiver = - AddInstruction(new(zone()) HWrapReceiver(receiver, function)); - PushAndAdd(new(zone()) HPushArgument(wrapped_receiver)); + ASSERT_EQ(environment()->arguments_environment()->parameter_count(), + function_state()->entry()->arguments_values()->length()); + HEnterInlined* entry = function_state()->entry(); + ZoneList<HValue*>* arguments_values = entry->arguments_values(); + int arguments_count = arguments_values->length(); + PushAndAdd(new(zone()) HWrapReceiver(receiver, function)); + for (int i = 1; i < arguments_count; i++) { + Push(arguments_values->at(i)); + } - HEnvironment* arguments_env = environment()->arguments_environment(); + Handle<JSFunction> known_function; + if (function->IsConstant()) { + HConstant* constant_function = HConstant::cast(function); + known_function = Handle<JSFunction>::cast(constant_function->handle()); + int args_count = arguments_count - 1; // Excluding receiver. + if (TryInlineApply(known_function, expr, args_count)) return true; + } - int parameter_count = arguments_env->parameter_count(); - for (int i = 1; i < arguments_env->parameter_count(); i++) { - PushAndAdd(new(zone()) HPushArgument(arguments_env->Lookup(i))); + Drop(arguments_count - 1); + PushAndAdd(new(zone()) HPushArgument(Pop())); + for (int i = 1; i < arguments_count; i++) { + PushAndAdd(new(zone()) HPushArgument(arguments_values->at(i))); } + HValue* context = environment()->LookupContext(); HInvokeFunction* call = new(zone()) HInvokeFunction( context, function, - parameter_count); - Drop(parameter_count); + known_function, + arguments_count); + Drop(arguments_count); call->set_position(expr->position()); ast_context()->ReturnInstruction(call, expr->id()); return true; @@ -7531,7 +8237,56 @@ bool HGraphBuilder::TryCallApply(Call* expr) { } -void HGraphBuilder::VisitCall(Call* expr) { +// Checks if all maps in |types| are from the same family, i.e., are elements +// transitions of each other. Returns either NULL if they are not from the same +// family, or a Map* indicating the map with the first elements kind of the +// family that is in the list. +static Map* CheckSameElementsFamily(SmallMapList* types) { + if (types->length() <= 1) return NULL; + // Check if all maps belong to the same transition family. 
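As an aside before the implementation that follows: the family check can be pictured with the standalone sketch below (not V8 code). `FakeMap` is a simplified stand-in for a map, recording only its index in the fast elements-kind sequence and the next map on its transition chain; the real code additionally rejects non-fast kinds.

```cpp
#include <vector>

// Simplified stand-in for a map in an elements-kind transition chain.
struct FakeMap {
  int elements_kind_index;    // position in the fast elements-kind sequence
  const FakeMap* transition;  // next elements-kind transition, or nullptr
};

// Returns the map with the earliest elements kind if every map in |maps|
// lies on that map's transition chain; otherwise returns nullptr.
const FakeMap* SameFamilyRepresentative(
    const std::vector<const FakeMap*>& maps) {
  if (maps.size() <= 1) return nullptr;
  const FakeMap* first = maps.front();
  for (const FakeMap* map : maps) {
    if (map->elements_kind_index < first->elements_kind_index) first = map;
  }
  for (const FakeMap* map : maps) {
    const FakeMap* current = first;
    while (current != nullptr && current != map) current = current->transition;
    if (current != map) return nullptr;  // not reachable by transitions
  }
  return first;
}
```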
+ Map* kinds[kFastElementsKindCount]; + Map* first_map = *types->first(); + ElementsKind first_kind = first_map->elements_kind(); + if (!IsFastElementsKind(first_kind)) return NULL; + int first_index = GetSequenceIndexFromFastElementsKind(first_kind); + int last_index = first_index; + + for (int i = 0; i < kFastElementsKindCount; i++) kinds[i] = NULL; + + kinds[first_index] = first_map; + + for (int i = 1; i < types->length(); ++i) { + Map* map = *types->at(i); + ElementsKind elements_kind = map->elements_kind(); + if (!IsFastElementsKind(elements_kind)) return NULL; + int index = GetSequenceIndexFromFastElementsKind(elements_kind); + if (index < first_index) { + first_index = index; + } else if (index > last_index) { + last_index = index; + } else if (kinds[index] != map) { + return NULL; + } + kinds[index] = map; + } + + Map* current = kinds[first_index]; + for (int i = first_index + 1; i <= last_index; i++) { + Map* next = kinds[i]; + if (next != NULL) { + ElementsKind current_kind = next->elements_kind(); + if (next != current->LookupElementsTransitionMap(current_kind)) { + return NULL; + } + current = next; + } + } + + return kinds[first_index]; +} + + +void HOptimizedGraphBuilder::VisitCall(Call* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -7570,15 +8325,25 @@ void HGraphBuilder::VisitCall(Call* expr) { CHECK_ALIVE(VisitExpressions(expr->arguments())); Handle<String> name = prop->key()->AsLiteral()->AsPropertyName(); - SmallMapList* types = expr->GetReceiverTypes(); - HValue* receiver = - environment()->ExpressionStackAt(expr->arguments()->length()); - if (expr->IsMonomorphic()) { - Handle<Map> receiver_map = (types == NULL || types->is_empty()) + bool monomorphic = expr->IsMonomorphic(); + Handle<Map> receiver_map; + if (monomorphic) { + receiver_map = (types == NULL || types->is_empty()) ? 
Handle<Map>::null() : types->first(); + } else { + Map* family_map = CheckSameElementsFamily(types); + if (family_map != NULL) { + receiver_map = Handle<Map>(family_map); + monomorphic = expr->ComputeTarget(receiver_map, name); + } + } + + HValue* receiver = + environment()->ExpressionStackAt(expr->arguments()->length()); + if (monomorphic) { if (TryInlineBuiltinMethodCall(expr, receiver, receiver_map, @@ -7600,7 +8365,7 @@ void HGraphBuilder::VisitCall(Call* expr) { call = PreProcessCall( new(zone()) HCallNamed(context, name, argument_count)); } else { - AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true); + AddCheckConstantFunction(expr->holder(), receiver, receiver_map); if (TryInlineCall(expr)) return; call = PreProcessCall( @@ -7623,7 +8388,7 @@ void HGraphBuilder::VisitCall(Call* expr) { VariableProxy* proxy = expr->expression()->AsVariableProxy(); bool global_call = proxy != NULL && proxy->var()->IsUnallocated(); - if (proxy != NULL && proxy->var()->is_possibly_eval()) { + if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) { return Bailout("possible direct call to eval"); } @@ -7751,7 +8516,7 @@ static bool IsAllocationInlineable(Handle<JSFunction> constructor) { } -void HGraphBuilder::VisitCallNew(CallNew* expr) { +void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -7804,8 +8569,21 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) { CHECK_ALIVE(VisitArgument(expr->expression())); HValue* constructor = HPushArgument::cast(Top())->argument(); CHECK_ALIVE(VisitArgumentList(expr->arguments())); - HInstruction* call = - new(zone()) HCallNew(context, constructor, argument_count); + HCallNew* call; + if (FLAG_optimize_constructed_arrays && + !(expr->target().is_null()) && + *(expr->target()) == isolate()->global_context()->array_function()) { + Handle<Object> feedback = oracle()->GetInfo(expr->CallNewFeedbackId()); + ASSERT(feedback->IsSmi()); + Handle<JSGlobalPropertyCell> cell = + isolate()->factory()->NewJSGlobalPropertyCell(feedback); + AddInstruction(new(zone()) HCheckFunction(constructor, + Handle<JSFunction>(isolate()->global_context()->array_function()))); + call = new(zone()) HCallNewArray(context, constructor, argument_count, + cell); + } else { + call = new(zone()) HCallNew(context, constructor, argument_count); + } Drop(argument_count); call->set_position(expr->position()); return ast_context()->ReturnInstruction(call, expr->id()); @@ -7815,20 +8593,21 @@ void HGraphBuilder::VisitCallNew(CallNew* expr) { // Support for generating inlined runtime functions. -// Lookup table for generators for runtime calls that are generated inline. -// Elements of the table are member pointers to functions of HGraphBuilder. +// Lookup table for generators for runtime calls that are generated inline. +// Elements of the table are member pointers to functions of +// HOptimizedGraphBuilder. 
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \ - &HGraphBuilder::Generate##Name, + &HOptimizedGraphBuilder::Generate##Name, -const HGraphBuilder::InlineFunctionGenerator - HGraphBuilder::kInlineFunctionGenerators[] = { +const HOptimizedGraphBuilder::InlineFunctionGenerator + HOptimizedGraphBuilder::kInlineFunctionGenerators[] = { INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS) INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS) }; #undef INLINE_FUNCTION_GENERATOR_ADDRESS -void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) { +void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -7866,7 +8645,7 @@ void HGraphBuilder::VisitCallRuntime(CallRuntime* expr) { } -void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -7882,7 +8661,7 @@ void HGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) { } } -void HGraphBuilder::VisitDelete(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) { Property* prop = expr->expression()->AsProperty(); VariableProxy* proxy = expr->expression()->AsVariableProxy(); if (prop != NULL) { @@ -7917,13 +8696,13 @@ void HGraphBuilder::VisitDelete(UnaryOperation* expr) { } -void HGraphBuilder::VisitVoid(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) { CHECK_ALIVE(VisitForEffect(expr->expression())); return ast_context()->ReturnValue(graph()->GetConstantUndefined()); } -void HGraphBuilder::VisitTypeof(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) { CHECK_ALIVE(VisitForTypeOf(expr->expression())); HValue* value = Pop(); HValue* context = environment()->LookupContext(); @@ -7932,49 +8711,53 @@ void HGraphBuilder::VisitTypeof(UnaryOperation* expr) { } -void HGraphBuilder::VisitAdd(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitAdd(UnaryOperation* expr) { CHECK_ALIVE(VisitForValue(expr->expression())); HValue* value = Pop(); HValue* context = environment()->LookupContext(); HInstruction* instr = - new(zone()) HMul(context, value, graph_->GetConstant1()); + HMul::New(zone(), context, value, graph()->GetConstant1()); + if (instr->IsBinaryOperation()) { + // Since we don't have type feedback, we must be cautious/pessimistic. 
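A short note on the unary lowering above and just below: unary plus and minus are rewritten as multiplications against the graph's cached constants, which perform the same ToNumber coercion as the original operators. A minimal sketch, not V8 code:

```cpp
// VisitAdd:  +x  becomes  x * 1
double UnaryPlus(double x) { return x * 1.0; }

// VisitSub:  -x  becomes  x * (-1)
double UnaryMinus(double x) { return x * -1.0; }
```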
+ HBinaryOperation::cast(instr)->set_observed_input_representation( + Representation::Tagged(), Representation::Tagged()); + } return ast_context()->ReturnInstruction(instr, expr->id()); } -void HGraphBuilder::VisitSub(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitSub(UnaryOperation* expr) { CHECK_ALIVE(VisitForValue(expr->expression())); HValue* value = Pop(); HValue* context = environment()->LookupContext(); HInstruction* instr = - new(zone()) HMul(context, value, graph_->GetConstantMinus1()); + HMul::New(zone(), context, value, graph()->GetConstantMinus1()); TypeInfo info = oracle()->UnaryType(expr); + Representation rep = ToRepresentation(info); if (info.IsUninitialized()) { - AddInstruction(new(zone()) HSoftDeoptimize); - current_block()->MarkAsDeoptimizing(); + AddSoftDeoptimize(); info = TypeInfo::Unknown(); } - Representation rep = ToRepresentation(info); - TraceRepresentation(expr->op(), info, instr, rep); - instr->AssumeRepresentation(rep); + if (instr->IsBinaryOperation()) { + HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep); + } return ast_context()->ReturnInstruction(instr, expr->id()); } -void HGraphBuilder::VisitBitNot(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitBitNot(UnaryOperation* expr) { CHECK_ALIVE(VisitForValue(expr->expression())); HValue* value = Pop(); TypeInfo info = oracle()->UnaryType(expr); if (info.IsUninitialized()) { - AddInstruction(new(zone()) HSoftDeoptimize); - current_block()->MarkAsDeoptimizing(); + AddSoftDeoptimize(); } HInstruction* instr = new(zone()) HBitNot(value); return ast_context()->ReturnInstruction(instr, expr->id()); } -void HGraphBuilder::VisitNot(UnaryOperation* expr) { +void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) { if (ast_context()->IsTest()) { TestContext* context = TestContext::cast(ast_context()); VisitForControl(expr->expression(), @@ -8018,8 +8801,9 @@ void HGraphBuilder::VisitNot(UnaryOperation* expr) { } -HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input, - CountOperation* expr) { +HInstruction* HOptimizedGraphBuilder::BuildIncrement( + bool returns_original_input, + CountOperation* expr) { // The input to the count operation is on top of the expression stack. TypeInfo info = oracle()->IncrementType(expr); Representation rep = ToRepresentation(info); @@ -8041,18 +8825,21 @@ HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input, // to simulate the expression stack after this instruction. // Any later failures deopt to the load of the input or earlier. HConstant* delta = (expr->op() == Token::INC) - ? graph_->GetConstant1() - : graph_->GetConstantMinus1(); + ? graph()->GetConstant1() + : graph()->GetConstantMinus1(); HValue* context = environment()->LookupContext(); - HInstruction* instr = new(zone()) HAdd(context, Top(), delta); - TraceRepresentation(expr->op(), info, instr, rep); + HInstruction* instr = HAdd::New(zone(), context, Top(), delta); + // We can't insert a simulate here, because it would break deoptimization, + // so the HAdd must not have side effects, so we must freeze its + // representation. 
instr->AssumeRepresentation(rep); + instr->ClearAllSideEffects(); AddInstruction(instr); return instr; } -void HGraphBuilder::VisitCountOperation(CountOperation* expr) { +void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -8120,7 +8907,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { new(zone()) HStoreContextSlot(context, var->index(), mode, after); AddInstruction(instr); if (instr->HasObservableSideEffects()) { - AddSimulate(expr->AssignmentId()); + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); } break; } @@ -8136,7 +8923,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { if (prop->key()->IsPropertyName()) { // Named property. - if (returns_original_input) Push(graph_->GetConstantUndefined()); + if (returns_original_input) Push(graph()->GetConstantUndefined()); CHECK_ALIVE(VisitForValue(prop->obj())); HValue* object = Top(); @@ -8161,13 +8948,15 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { load = BuildLoadNamedGeneric(object, name, prop); } PushAndAdd(load); - if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId()); + if (load->HasObservableSideEffects()) { + AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE); + } after = BuildIncrement(returns_original_input, expr); input = Pop(); HInstruction* store; - if (!monomorphic) { + if (!monomorphic || map->is_observed()) { // If we don't know the monomorphic type, do a generic store. CHECK_ALIVE(store = BuildStoreNamedGeneric(object, name, after)); } else { @@ -8189,11 +8978,13 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { // necessary. environment()->SetExpressionStackAt(0, after); if (returns_original_input) environment()->SetExpressionStackAt(1, input); - if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId()); + if (store->HasObservableSideEffects()) { + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); + } } else { // Keyed property. - if (returns_original_input) Push(graph_->GetConstantUndefined()); + if (returns_original_input) Push(graph()->GetConstantUndefined()); CHECK_ALIVE(VisitForValue(prop->obj())); CHECK_ALIVE(VisitForValue(prop->key())); @@ -8206,7 +8997,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { false, // is_store &has_side_effects); Push(load); - if (has_side_effects) AddSimulate(prop->LoadId()); + if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE); after = BuildIncrement(returns_original_input, expr); input = Pop(); @@ -8224,7 +9015,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { environment()->SetExpressionStackAt(0, after); if (returns_original_input) environment()->SetExpressionStackAt(1, input); ASSERT(has_side_effects); // Stores always have side effects. 
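The stack juggling around BuildIncrement above exists because a count operation produces two values: the one written back and the one the expression evaluates to. A standalone sketch of that bookkeeping, not V8 code:

```cpp
// The incremented value is what gets stored back, but a postfix expression
// still yields the original input as its result.
struct CountResult {
  double stored_value;      // written back to the variable or property
  double expression_value;  // value of the ++/-- expression itself
};

CountResult CountOperation(double input, bool is_increment, bool is_postfix) {
  double after = input + (is_increment ? 1.0 : -1.0);
  return {after, is_postfix ? input : after};
}
```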
- AddSimulate(expr->AssignmentId()); + AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE); } } @@ -8233,101 +9024,165 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) { } -HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context, - HValue* string, - HValue* index) { +HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt( + HValue* context, + HValue* string, + HValue* index) { + if (string->IsConstant() && index->IsConstant()) { + HConstant* c_string = HConstant::cast(string); + HConstant* c_index = HConstant::cast(index); + if (c_string->HasStringValue() && c_index->HasNumberValue()) { + int32_t i = c_index->NumberValueAsInteger32(); + Handle<String> s = c_string->StringValue(); + if (i < 0 || i >= s->length()) { + return new(zone()) HConstant(OS::nan_value(), Representation::Double()); + } + return new(zone()) HConstant(s->Get(i), Representation::Integer32()); + } + } AddInstruction(new(zone()) HCheckNonSmi(string)); AddInstruction(HCheckInstanceType::NewIsString(string, zone())); - HStringLength* length = new(zone()) HStringLength(string); + HInstruction* length = HStringLength::New(zone(), string); AddInstruction(length); - HInstruction* checked_index = - AddInstruction(new(zone()) HBoundsCheck(index, length)); + HInstruction* checked_index = AddBoundsCheck(index, length); return new(zone()) HStringCharCodeAt(context, string, checked_index); } +// Checks if the given shift amounts have form: (sa) and (32 - sa). +static bool ShiftAmountsAllowReplaceByRotate(HValue* sa, + HValue* const32_minus_sa) { + if (!const32_minus_sa->IsSub()) return false; + HSub* sub = HSub::cast(const32_minus_sa); + if (sa != sub->right()) return false; + HValue* const32 = sub->left(); + if (!const32->IsConstant() || + HConstant::cast(const32)->Integer32Value() != 32) { + return false; + } + return (sub->right() == sa); +} + + +// Checks if the left and the right are shift instructions with the oposite +// directions that can be replaced by one rotate right instruction or not. +// Returns the operand and the shift amount for the rotate instruction in the +// former case. 
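The matcher described in the comment above and defined next looks for the classic rotate idiom at the source level. A standalone sketch, not V8 code, assuming 0 < sa < 32:

```cpp
#include <cstdint>

// The same value shifted right by sa and left by (32 - sa), OR-ed together,
// is a 32-bit rotate right by sa; the builder folds the two shifts and the
// OR into a single rotate instruction.
uint32_t RotateRight(uint32_t x, uint32_t sa) {
  // Assumes 0 < sa < 32. JavaScript shift counts are taken modulo 32, so the
  // sa == 0 case has to be reasoned about separately.
  return (x >> sa) | (x << (32 - sa));
}
```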
+bool HOptimizedGraphBuilder::MatchRotateRight(HValue* left, + HValue* right, + HValue** operand, + HValue** shift_amount) { + HShl* shl; + HShr* shr; + if (left->IsShl() && right->IsShr()) { + shl = HShl::cast(left); + shr = HShr::cast(right); + } else if (left->IsShr() && right->IsShl()) { + shl = HShl::cast(right); + shr = HShr::cast(left); + } else { + return false; + } + if (shl->left() != shr->left()) return false; + + if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) && + !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) { + return false; + } + *operand= shr->left(); + *shift_amount = shr->right(); + return true; +} + -HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr, - HValue* left, - HValue* right) { +bool CanBeZero(HValue *right) { + if (right->IsConstant()) { + HConstant* right_const = HConstant::cast(right); + if (right_const->HasInteger32Value() && + (right_const->Integer32Value() & 0x1f) != 0) { + return false; + } + } + return true; +} + + +HInstruction* HOptimizedGraphBuilder::BuildBinaryOperation( + BinaryOperation* expr, + HValue* left, + HValue* right) { HValue* context = environment()->LookupContext(); - TypeInfo info = oracle()->BinaryType(expr); - if (info.IsUninitialized()) { - AddInstruction(new(zone()) HSoftDeoptimize); - current_block()->MarkAsDeoptimizing(); - info = TypeInfo::Unknown(); + TypeInfo left_info, right_info, result_info, combined_info; + oracle()->BinaryType(expr, &left_info, &right_info, &result_info); + Representation left_rep = ToRepresentation(left_info); + Representation right_rep = ToRepresentation(right_info); + Representation result_rep = ToRepresentation(result_info); + if (left_info.IsUninitialized()) { + // Can't have initialized one but not the other. 
+ ASSERT(right_info.IsUninitialized()); + AddSoftDeoptimize(); + left_info = right_info = TypeInfo::Unknown(); } HInstruction* instr = NULL; switch (expr->op()) { case Token::ADD: - if (info.IsString()) { + if (left_info.IsString() && right_info.IsString()) { AddInstruction(new(zone()) HCheckNonSmi(left)); AddInstruction(HCheckInstanceType::NewIsString(left, zone())); AddInstruction(new(zone()) HCheckNonSmi(right)); AddInstruction(HCheckInstanceType::NewIsString(right, zone())); - instr = new(zone()) HStringAdd(context, left, right); + instr = HStringAdd::New(zone(), context, left, right); } else { - instr = HAdd::NewHAdd(zone(), context, left, right); + instr = HAdd::New(zone(), context, left, right); } break; case Token::SUB: - instr = HSub::NewHSub(zone(), context, left, right); + instr = HSub::New(zone(), context, left, right); break; case Token::MUL: - instr = HMul::NewHMul(zone(), context, left, right); + instr = HMul::New(zone(), context, left, right); break; case Token::MOD: - instr = HMod::NewHMod(zone(), context, left, right); + instr = HMod::New(zone(), context, left, right); break; case Token::DIV: - instr = HDiv::NewHDiv(zone(), context, left, right); + instr = HDiv::New(zone(), context, left, right); break; case Token::BIT_XOR: case Token::BIT_AND: - case Token::BIT_OR: - instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right); + instr = HBitwise::New(zone(), expr->op(), context, left, right); + break; + case Token::BIT_OR: { + HValue* operand, *shift_amount; + if (left_info.IsInteger32() && right_info.IsInteger32() && + MatchRotateRight(left, right, &operand, &shift_amount)) { + instr = new(zone()) HRor(context, operand, shift_amount); + } else { + instr = HBitwise::New(zone(), expr->op(), context, left, right); + } break; + } case Token::SAR: - instr = HSar::NewHSar(zone(), context, left, right); + instr = HSar::New(zone(), context, left, right); break; case Token::SHR: - instr = HShr::NewHShr(zone(), context, left, right); - if (FLAG_opt_safe_uint32_operations && instr->IsShr()) { - bool can_be_shift_by_zero = true; - if (right->IsConstant()) { - HConstant* right_const = HConstant::cast(right); - if (right_const->HasInteger32Value() && - (right_const->Integer32Value() & 0x1f) != 0) { - can_be_shift_by_zero = false; - } - } - - if (can_be_shift_by_zero) graph()->RecordUint32Instruction(instr); + instr = HShr::New(zone(), context, left, right); + if (FLAG_opt_safe_uint32_operations && instr->IsShr() && + CanBeZero(right)) { + graph()->RecordUint32Instruction(instr); } break; case Token::SHL: - instr = HShl::NewHShl(zone(), context, left, right); + instr = HShl::New(zone(), context, left, right); break; default: UNREACHABLE(); } - // If we hit an uninitialized binary op stub we will get type info - // for a smi operation. If one of the operands is a constant string - // do not generate code assuming it is a smi operation. - if (info.IsSmi() && - ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) || - (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) { - return instr; - } - Representation rep = ToRepresentation(info); - // We only generate either int32 or generic tagged bitwise operations. 
- if (instr->IsBitwiseBinaryOperation()) { - HBitwiseBinaryOperation::cast(instr)-> - InitializeObservedInputRepresentation(rep); - if (rep.IsDouble()) rep = Representation::Integer32(); + if (instr->IsBinaryOperation()) { + HBinaryOperation* binop = HBinaryOperation::cast(instr); + binop->set_observed_input_representation(left_rep, right_rep); + binop->initialize_output_representation(result_rep); } - TraceRepresentation(expr->op(), info, instr, rep); - instr->AssumeRepresentation(rep); return instr; } @@ -8340,13 +9195,15 @@ static bool IsClassOfTest(CompareOperation* expr) { Literal* literal = expr->right()->AsLiteral(); if (literal == NULL) return false; if (!literal->handle()->IsString()) return false; - if (!call->name()->IsEqualTo(CStrVector("_ClassOf"))) return false; + if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) { + return false; + } ASSERT(call->arguments()->length() == 1); return true; } -void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { +void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -8362,7 +9219,7 @@ void HGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) { } -void HGraphBuilder::VisitComma(BinaryOperation* expr) { +void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) { CHECK_ALIVE(VisitForEffect(expr->left())); // Visit the right subexpression in the same AST context as the entire // expression. @@ -8370,7 +9227,7 @@ void HGraphBuilder::VisitComma(BinaryOperation* expr) { } -void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { +void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { bool is_logical_and = expr->op() == Token::AND; if (ast_context()->IsTest()) { TestContext* context = TestContext::cast(ast_context()); @@ -8397,6 +9254,17 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { } else if (ast_context()->IsValue()) { CHECK_ALIVE(VisitForValue(expr->left())); ASSERT(current_block() != NULL); + HValue* left_value = Top(); + + if (left_value->IsConstant()) { + HConstant* left_constant = HConstant::cast(left_value); + if ((is_logical_and && left_constant->BooleanValue()) || + (!is_logical_and && !left_constant->BooleanValue())) { + Drop(1); // left_value. + CHECK_BAILOUT(VisitForValue(expr->right())); + } + return ast_context()->ReturnValue(Pop()); + } // We need an extra block to maintain edge-split form. HBasicBlock* empty_block = graph()->CreateBasicBlock(); @@ -8404,8 +9272,8 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { TypeFeedbackId test_id = expr->left()->test_id(); ToBooleanStub::Types expected(oracle()->ToBooleanTypes(test_id)); HBranch* test = is_logical_and - ? new(zone()) HBranch(Top(), eval_right, empty_block, expected) - : new(zone()) HBranch(Top(), empty_block, eval_right, expected); + ? 
new(zone()) HBranch(left_value, eval_right, empty_block, expected) + : new(zone()) HBranch(left_value, empty_block, eval_right, expected); current_block()->Finish(test); set_current_block(eval_right); @@ -8460,7 +9328,7 @@ void HGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) { } -void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) { +void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) { CHECK_ALIVE(VisitForValue(expr->left())); CHECK_ALIVE(VisitForValue(expr->right())); HValue* right = Pop(); @@ -8471,27 +9339,8 @@ void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) { } -void HGraphBuilder::TraceRepresentation(Token::Value op, - TypeInfo info, - HValue* value, - Representation rep) { - if (!FLAG_trace_representation) return; - // TODO(svenpanne) Under which circumstances are we actually not flexible? - // At first glance, this looks a bit weird... - bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation); - PrintF("Operation %s has type info %s, %schange representation assumption " - "for %s (ID %d) from %s to %s\n", - Token::Name(op), - info.ToString(), - flexible ? "" : " DO NOT ", - value->Mnemonic(), - graph_->GetMaximumValueID(), - value->representation().Mnemonic(), - rep.Mnemonic()); -} - - -Representation HGraphBuilder::ToRepresentation(TypeInfo info) { +Representation HOptimizedGraphBuilder::ToRepresentation(TypeInfo info) { + if (info.IsUninitialized()) return Representation::None(); if (info.IsSmi()) return Representation::Integer32(); if (info.IsInteger32()) return Representation::Integer32(); if (info.IsDouble()) return Representation::Double(); @@ -8500,9 +9349,9 @@ Representation HGraphBuilder::ToRepresentation(TypeInfo info) { } -void HGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr, - HTypeof* typeof_expr, - Handle<String> check) { +void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr, + HTypeof* typeof_expr, + Handle<String> check) { // Note: The HTypeof itself is removed during canonicalization, if possible. HValue* value = typeof_expr->value(); HTypeofIsAndBranch* instr = new(zone()) HTypeofIsAndBranch(value, check); @@ -8572,7 +9421,7 @@ static bool IsLiteralCompareBool(HValue* left, } -void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { +void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -8589,13 +9438,16 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { return ast_context()->ReturnControl(instr, expr->id()); } - TypeInfo type_info = oracle()->CompareType(expr); + TypeInfo left_type, right_type, overall_type_info; + oracle()->CompareType(expr, &left_type, &right_type, &overall_type_info); + Representation combined_rep = ToRepresentation(overall_type_info); + Representation left_rep = ToRepresentation(left_type); + Representation right_rep = ToRepresentation(right_type); // Check if this expression was ever executed according to type feedback. // Note that for the special typeof/null/undefined cases we get unknown here. 
- if (type_info.IsUninitialized()) { - AddInstruction(new(zone()) HSoftDeoptimize); - current_block()->MarkAsDeoptimizing(); - type_info = TypeInfo::Unknown(); + if (overall_type_info.IsUninitialized()) { + AddSoftDeoptimize(); + overall_type_info = left_type = right_type = TypeInfo::Unknown(); } CHECK_ALIVE(VisitForValue(expr->left())); @@ -8612,7 +9464,7 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { return HandleLiteralCompareTypeof(expr, typeof_expr, check); } HValue* sub_expr = NULL; - Factory* f = graph()->isolate()->factory(); + Factory* f = isolate()->factory(); if (IsLiteralCompareNil(left, op, right, f->undefined_value(), &sub_expr)) { return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue); } @@ -8667,17 +9519,15 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { HIn* result = new(zone()) HIn(context, left, right); result->set_position(expr->position()); return ast_context()->ReturnInstruction(result, expr->id()); - } else if (type_info.IsNonPrimitive()) { + } else if (overall_type_info.IsNonPrimitive()) { switch (op) { case Token::EQ: case Token::EQ_STRICT: { // Can we get away with map check and not instance type check? Handle<Map> map = oracle()->GetCompareMap(expr); if (!map.is_null()) { - AddInstruction(new(zone()) HCheckNonSmi(left)); - AddInstruction(HCheckMaps::NewWithTransitions(left, map, zone())); - AddInstruction(new(zone()) HCheckNonSmi(right)); - AddInstruction(HCheckMaps::NewWithTransitions(right, map, zone())); + AddCheckMapsWithTransitions(left, map); + AddCheckMapsWithTransitions(right, map); HCompareObjectEqAndBranch* result = new(zone()) HCompareObjectEqAndBranch(left, right); result->set_position(expr->position()); @@ -8696,37 +9546,37 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) { default: return Bailout("Unsupported non-primitive compare"); } - } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) && - (op == Token::EQ || op == Token::EQ_STRICT)) { + } else if (overall_type_info.IsInternalizedString() && + Token::IsEqualityOp(op)) { AddInstruction(new(zone()) HCheckNonSmi(left)); - AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone())); + AddInstruction(HCheckInstanceType::NewIsInternalizedString(left, zone())); AddInstruction(new(zone()) HCheckNonSmi(right)); - AddInstruction(HCheckInstanceType::NewIsSymbol(right, zone())); + AddInstruction(HCheckInstanceType::NewIsInternalizedString(right, zone())); HCompareObjectEqAndBranch* result = new(zone()) HCompareObjectEqAndBranch(left, right); result->set_position(expr->position()); return ast_context()->ReturnControl(result, expr->id()); } else { - Representation r = ToRepresentation(type_info); - if (r.IsTagged()) { + if (combined_rep.IsTagged() || combined_rep.IsNone()) { HCompareGeneric* result = new(zone()) HCompareGeneric(context, left, right, op); + result->set_observed_input_representation(left_rep, right_rep); result->set_position(expr->position()); return ast_context()->ReturnInstruction(result, expr->id()); } else { HCompareIDAndBranch* result = new(zone()) HCompareIDAndBranch(left, right, op); + result->set_observed_input_representation(left_rep, right_rep); result->set_position(expr->position()); - result->SetInputRepresentation(r); return ast_context()->ReturnControl(result, expr->id()); } } } -void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, - HValue* value, - NilValue nil) { +void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, + HValue* value, + NilValue nil) 
{ ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -8738,7 +9588,7 @@ void HGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr, } -HInstruction* HGraphBuilder::BuildThisFunction() { +HInstruction* HOptimizedGraphBuilder::BuildThisFunction() { // If we share optimized code between different closures, the // this-function is not a constant, except inside an inlined body. if (function_state()->outer() != NULL) { @@ -8751,7 +9601,7 @@ HInstruction* HGraphBuilder::BuildThisFunction() { } -void HGraphBuilder::VisitThisFunction(ThisFunction* expr) { +void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) { ASSERT(!HasStackOverflow()); ASSERT(current_block() != NULL); ASSERT(current_block()->HasPredecessor()); @@ -8760,7 +9610,8 @@ void HGraphBuilder::VisitThisFunction(ThisFunction* expr) { } -void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) { +void HOptimizedGraphBuilder::VisitDeclarations( + ZoneList<Declaration*>* declarations) { ASSERT(globals_.is_empty()); AstVisitor::VisitDeclarations(declarations); if (!globals_.is_empty()) { @@ -8778,7 +9629,8 @@ void HGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) { } -void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) { +void HOptimizedGraphBuilder::VisitVariableDeclaration( + VariableDeclaration* declaration) { VariableProxy* proxy = declaration->proxy(); VariableMode mode = declaration->mode(); Variable* variable = proxy->var(); @@ -8804,7 +9656,9 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) { HStoreContextSlot* store = new(zone()) HStoreContextSlot( context, variable->index(), HStoreContextSlot::kNoCheck, value); AddInstruction(store); - if (store->HasObservableSideEffects()) AddSimulate(proxy->id()); + if (store->HasObservableSideEffects()) { + AddSimulate(proxy->id(), REMOVABLE_SIMULATE); + } } break; case Variable::LOOKUP: @@ -8813,7 +9667,8 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) { } -void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) { +void HOptimizedGraphBuilder::VisitFunctionDeclaration( + FunctionDeclaration* declaration) { VariableProxy* proxy = declaration->proxy(); Variable* variable = proxy->var(); switch (variable->location()) { @@ -8840,7 +9695,9 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) { HStoreContextSlot* store = new(zone()) HStoreContextSlot( context, variable->index(), HStoreContextSlot::kNoCheck, value); AddInstruction(store); - if (store->HasObservableSideEffects()) AddSimulate(proxy->id()); + if (store->HasObservableSideEffects()) { + AddSimulate(proxy->id(), REMOVABLE_SIMULATE); + } break; } case Variable::LOOKUP: @@ -8849,44 +9706,52 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) { } -void HGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* declaration) { +void HOptimizedGraphBuilder::VisitModuleDeclaration( + ModuleDeclaration* declaration) { UNREACHABLE(); } -void HGraphBuilder::VisitImportDeclaration(ImportDeclaration* declaration) { +void HOptimizedGraphBuilder::VisitImportDeclaration( + ImportDeclaration* declaration) { UNREACHABLE(); } -void HGraphBuilder::VisitExportDeclaration(ExportDeclaration* declaration) { +void HOptimizedGraphBuilder::VisitExportDeclaration( + ExportDeclaration* declaration) { UNREACHABLE(); } -void HGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) { 
+void HOptimizedGraphBuilder::VisitModuleLiteral(ModuleLiteral* module) { UNREACHABLE(); } -void HGraphBuilder::VisitModuleVariable(ModuleVariable* module) { +void HOptimizedGraphBuilder::VisitModuleVariable(ModuleVariable* module) { UNREACHABLE(); } -void HGraphBuilder::VisitModulePath(ModulePath* module) { +void HOptimizedGraphBuilder::VisitModulePath(ModulePath* module) { UNREACHABLE(); } -void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) { +void HOptimizedGraphBuilder::VisitModuleUrl(ModuleUrl* module) { + UNREACHABLE(); +} + + +void HOptimizedGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) { UNREACHABLE(); } // Generators for inline runtime functions. // Support for types. -void HGraphBuilder::GenerateIsSmi(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8895,7 +9760,7 @@ void HGraphBuilder::GenerateIsSmi(CallRuntime* call) { } -void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8907,7 +9772,17 @@ void HGraphBuilder::GenerateIsSpecObject(CallRuntime* call) { } -void HGraphBuilder::GenerateIsFunction(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsSymbol(CallRuntime* call) { + ASSERT(call->arguments()->length() == 1); + CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); + HValue* value = Pop(); + HHasInstanceTypeAndBranch* result = + new(zone()) HHasInstanceTypeAndBranch(value, SYMBOL_TYPE); + return ast_context()->ReturnControl(result, call->id()); +} + + +void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8917,7 +9792,7 @@ void HGraphBuilder::GenerateIsFunction(CallRuntime* call) { } -void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8927,7 +9802,7 @@ void HGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) { } -void HGraphBuilder::GenerateIsArray(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8937,7 +9812,7 @@ void HGraphBuilder::GenerateIsArray(CallRuntime* call) { } -void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8947,7 +9822,7 @@ void HGraphBuilder::GenerateIsRegExp(CallRuntime* call) { } -void HGraphBuilder::GenerateIsObject(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8956,12 +9831,12 @@ void HGraphBuilder::GenerateIsObject(CallRuntime* call) { } -void HGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) { return 
Bailout("inlined runtime function: IsNonNegativeSmi"); } -void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -8971,7 +9846,7 @@ void HGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) { } -void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf( +void HOptimizedGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf( CallRuntime* call) { return Bailout( "inlined runtime function: IsStringWrapperSafeForDefaultValueOf"); @@ -8979,7 +9854,7 @@ void HGraphBuilder::GenerateIsStringWrapperSafeForDefaultValueOf( // Support for construct call checks. -void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) { ASSERT(call->arguments()->length() == 0); if (function_state()->outer() != NULL) { // We are generating graph for inlined function. @@ -8995,7 +9870,7 @@ void HGraphBuilder::GenerateIsConstructCall(CallRuntime* call) { // Support for arguments.length and arguments[?]. -void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) { // Our implementation of arguments (based on this stack frame or an // adapter below it) does not work for inlined functions. This runtime // function is blacklisted by AstNode::IsInlineable. @@ -9008,7 +9883,7 @@ void HGraphBuilder::GenerateArgumentsLength(CallRuntime* call) { } -void HGraphBuilder::GenerateArguments(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) { // Our implementation of arguments (based on this stack frame or an // adapter below it) does not work for inlined functions. This runtime // function is blacklisted by AstNode::IsInlineable. @@ -9019,8 +9894,7 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) { HInstruction* elements = AddInstruction( new(zone()) HArgumentsElements(false)); HInstruction* length = AddInstruction(new(zone()) HArgumentsLength(elements)); - HInstruction* checked_index = - AddInstruction(new(zone()) HBoundsCheck(index, length)); + HInstruction* checked_index = AddBoundsCheck(index, length); HAccessArgumentsAt* result = new(zone()) HAccessArgumentsAt(elements, length, checked_index); return ast_context()->ReturnInstruction(result, call->id()); @@ -9028,14 +9902,14 @@ void HGraphBuilder::GenerateArguments(CallRuntime* call) { // Support for accessing the class and value fields of an object. -void HGraphBuilder::GenerateClassOf(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateClassOf(CallRuntime* call) { // The special form detected by IsClassOfTest is detected before we get here // and does not cause a bailout. 
return Bailout("inlined runtime function: ClassOf"); } -void HGraphBuilder::GenerateValueOf(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -9044,7 +9918,7 @@ void HGraphBuilder::GenerateValueOf(CallRuntime* call) { } -void HGraphBuilder::GenerateDateField(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) { ASSERT(call->arguments()->length() == 2); ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral()); Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->handle())); @@ -9055,7 +9929,40 @@ void HGraphBuilder::GenerateDateField(CallRuntime* call) { } -void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar( + CallRuntime* call) { + ASSERT(call->arguments()->length() == 3); + CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); + CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); + CHECK_ALIVE(VisitForValue(call->arguments()->at(2))); + HValue* value = Pop(); + HValue* index = Pop(); + HValue* string = Pop(); + HSeqStringSetChar* result = new(zone()) HSeqStringSetChar( + String::ONE_BYTE_ENCODING, string, index, value); + return ast_context()->ReturnInstruction(result, call->id()); +} + + +void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar( + CallRuntime* call) { + ASSERT(call->arguments()->length() == 3); + CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); + CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); + CHECK_ALIVE(VisitForValue(call->arguments()->at(2))); + HValue* value = Pop(); + HValue* index = Pop(); + HValue* string = Pop(); + HValue* context = environment()->LookupContext(); + HInstruction* char_code = BuildStringCharCodeAt(context, string, index); + AddInstruction(char_code); + HSeqStringSetChar* result = new(zone()) HSeqStringSetChar( + String::TWO_BYTE_ENCODING, string, index, value); + return ast_context()->ReturnInstruction(result, call->id()); +} + + +void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) { ASSERT(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); @@ -9084,7 +9991,7 @@ void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) { // Create in-object property store to kValueOffset. set_current_block(if_js_value); - Handle<String> name = isolate()->factory()->undefined_symbol(); + Handle<String> name = isolate()->factory()->undefined_string(); AddInstruction(new(zone()) HStoreNamedField(object, name, value, @@ -9098,48 +10005,46 @@ void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) { // Fast support for charCodeAt(n). -void HGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) { ASSERT(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* index = Pop(); HValue* string = Pop(); HValue* context = environment()->LookupContext(); - HStringCharCodeAt* result = BuildStringCharCodeAt(context, string, index); + HInstruction* result = BuildStringCharCodeAt(context, string, index); return ast_context()->ReturnInstruction(result, call->id()); } // Fast support for string.charAt(n) and string[n]. 
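The new OneByteSeqStringSetChar and TwoByteSeqStringSetChar generators above both end in an HSeqStringSetChar instruction, distinguished only by the encoding (String::ONE_BYTE_ENCODING vs. TWO_BYTE_ENCODING) of the character store. A simplified sketch of the two store widths involved, with illustrative names and a plain buffer standing in for V8's sequential string representation:

#include <cstddef>
#include <cstdint>

enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

// Write one character into a sequential string backing store: one byte per
// character for the one-byte encoding, two bytes for the two-byte encoding.
void SeqStringSetChar(Encoding encoding, void* chars, size_t index, uint16_t value) {
  if (encoding == ONE_BYTE_ENCODING) {
    static_cast<uint8_t*>(chars)[index] = static_cast<uint8_t>(value);
  } else {
    static_cast<uint16_t*>(chars)[index] = value;
  }
}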
-void HGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* char_code = Pop(); HValue* context = environment()->LookupContext(); - HStringCharFromCode* result = - new(zone()) HStringCharFromCode(context, char_code); + HInstruction* result = HStringCharFromCode::New(zone(), context, char_code); return ast_context()->ReturnInstruction(result, call->id()); } // Fast support for string.charAt(n) and string[n]. -void HGraphBuilder::GenerateStringCharAt(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) { ASSERT(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* index = Pop(); HValue* string = Pop(); HValue* context = environment()->LookupContext(); - HStringCharCodeAt* char_code = BuildStringCharCodeAt(context, string, index); + HInstruction* char_code = BuildStringCharCodeAt(context, string, index); AddInstruction(char_code); - HStringCharFromCode* result = - new(zone()) HStringCharFromCode(context, char_code); + HInstruction* result = HStringCharFromCode::New(zone(), context, char_code); return ast_context()->ReturnInstruction(result, call->id()); } // Fast support for object equality testing. -void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) { ASSERT(call->arguments()->length() == 2); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); @@ -9151,14 +10056,14 @@ void HGraphBuilder::GenerateObjectEquals(CallRuntime* call) { } -void HGraphBuilder::GenerateLog(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateLog(CallRuntime* call) { // %_Log is ignored in optimized code. return ast_context()->ReturnValue(graph()->GetConstantUndefined()); } // Fast support for Math.random(). -void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) { HValue* context = environment()->LookupContext(); HGlobalObject* global_object = new(zone()) HGlobalObject(context); AddInstruction(global_object); @@ -9168,7 +10073,7 @@ void HGraphBuilder::GenerateRandomHeapNumber(CallRuntime* call) { // Fast support for StringAdd. -void HGraphBuilder::GenerateStringAdd(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) { ASSERT_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9179,7 +10084,7 @@ void HGraphBuilder::GenerateStringAdd(CallRuntime* call) { // Fast support for SubString. -void HGraphBuilder::GenerateSubString(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) { ASSERT_EQ(3, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9190,7 +10095,7 @@ void HGraphBuilder::GenerateSubString(CallRuntime* call) { // Fast support for StringCompare. 
-void HGraphBuilder::GenerateStringCompare(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) { ASSERT_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9202,7 +10107,7 @@ void HGraphBuilder::GenerateStringCompare(CallRuntime* call) { // Support for direct calls from JavaScript to native RegExp code. -void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) { ASSERT_EQ(4, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9213,7 +10118,7 @@ void HGraphBuilder::GenerateRegExpExec(CallRuntime* call) { // Construct a RegExp exec result with two in-object properties. -void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) { ASSERT_EQ(3, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9225,13 +10130,13 @@ void HGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) { // Support for fast native caches. -void HGraphBuilder::GenerateGetFromCache(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateGetFromCache(CallRuntime* call) { return Bailout("inlined runtime function: GetFromCache"); } // Fast support for number to string. -void HGraphBuilder::GenerateNumberToString(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9243,7 +10148,7 @@ void HGraphBuilder::GenerateNumberToString(CallRuntime* call) { // Fast call for custom callbacks. -void HGraphBuilder::GenerateCallFunction(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) { // 1 ~ The function to call is not itself an argument to the call. int arg_count = call->arguments()->length() - 1; ASSERT(arg_count >= 1); // There's always at least a receiver. @@ -9287,18 +10192,18 @@ void HGraphBuilder::GenerateCallFunction(CallRuntime* call) { // Fast call to math functions. 
-void HGraphBuilder::GenerateMathPow(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) { ASSERT_EQ(2, call->arguments()->length()); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); CHECK_ALIVE(VisitForValue(call->arguments()->at(1))); HValue* right = Pop(); HValue* left = Pop(); - HPower* result = new(zone()) HPower(left, right); + HInstruction* result = HPower::New(zone(), left, right); return ast_context()->ReturnInstruction(result, call->id()); } -void HGraphBuilder::GenerateMathSin(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateMathSin(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9310,7 +10215,7 @@ void HGraphBuilder::GenerateMathSin(CallRuntime* call) { } -void HGraphBuilder::GenerateMathCos(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateMathCos(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9322,7 +10227,7 @@ void HGraphBuilder::GenerateMathCos(CallRuntime* call) { } -void HGraphBuilder::GenerateMathTan(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateMathTan(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9334,7 +10239,7 @@ void HGraphBuilder::GenerateMathTan(CallRuntime* call) { } -void HGraphBuilder::GenerateMathLog(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) { ASSERT_EQ(1, call->arguments()->length()); CHECK_ALIVE(VisitArgumentList(call->arguments())); HValue* context = environment()->LookupContext(); @@ -9346,18 +10251,18 @@ void HGraphBuilder::GenerateMathLog(CallRuntime* call) { } -void HGraphBuilder::GenerateMathSqrt(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) { return Bailout("inlined runtime function: MathSqrt"); } // Check whether two RegExps are equivalent -void HGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateIsRegExpEquivalent(CallRuntime* call) { return Bailout("inlined runtime function: IsRegExpEquivalent"); } -void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) { ASSERT(call->arguments()->length() == 1); CHECK_ALIVE(VisitForValue(call->arguments()->at(0))); HValue* value = Pop(); @@ -9366,7 +10271,7 @@ void HGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) { } -void HGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) { +void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) { return Bailout("inlined runtime function: FastAsciiArrayJoin"); } @@ -9381,7 +10286,6 @@ HEnvironment::HEnvironment(HEnvironment* outer, Zone* zone) : closure_(closure), values_(0, zone), - assigned_variables_(4, zone), frame_type_(JS_FUNCTION), parameter_count_(0), specials_count_(1), @@ -9396,12 +10300,27 @@ HEnvironment::HEnvironment(HEnvironment* outer, } +HEnvironment::HEnvironment(Zone* zone, int parameter_count) + : values_(0, zone), + frame_type_(STUB), + parameter_count_(parameter_count), + specials_count_(1), + local_count_(0), + outer_(NULL), + entry_(NULL), + pop_count_(0), + push_count_(0), + ast_id_(BailoutId::None()), + zone_(zone) { + Initialize(parameter_count, 0, 0); +} + + 
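The added HEnvironment(Zone*, int parameter_count) constructor above sets up a minimal STUB-type frame: only the incoming parameters plus a single special slot, and no locals (Initialize(parameter_count, 0, 0) with specials_count_ of 1). A small sketch of the resulting slot count, with an illustrative helper name:

#include <cassert>

// Slot count of the STUB environment as set up in the diff:
// parameters, one special slot, zero locals.
int StubEnvironmentLength(int parameter_count) {
  const int specials_count = 1;
  const int local_count = 0;
  return parameter_count + specials_count + local_count;
}

int main() {
  assert(StubEnvironmentLength(3) == 4);  // 3 parameters + 1 special slot
  return 0;
}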
HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone) : values_(0, zone), - assigned_variables_(0, zone), frame_type_(JS_FUNCTION), parameter_count_(0), - specials_count_(1), + specials_count_(0), local_count_(0), outer_(NULL), entry_(NULL), @@ -9420,7 +10339,6 @@ HEnvironment::HEnvironment(HEnvironment* outer, Zone* zone) : closure_(closure), values_(arguments, zone), - assigned_variables_(0, zone), frame_type_(frame_type), parameter_count_(arguments), local_count_(0), @@ -9449,7 +10367,7 @@ void HEnvironment::Initialize(int parameter_count, void HEnvironment::Initialize(const HEnvironment* other) { closure_ = other->closure(); values_.AddAll(other->values_, zone()); - assigned_variables_.AddAll(other->assigned_variables_, zone()); + assigned_variables_.Union(other->assigned_variables_, zone()); frame_type_ = other->frame_type_; parameter_count_ = other->parameter_count_; local_count_ = other->local_count_; @@ -9457,6 +10375,7 @@ void HEnvironment::Initialize(const HEnvironment* other) { entry_ = other->entry_; pop_count_ = other->pop_count_; push_count_ = other->push_count_; + specials_count_ = other->specials_count_; ast_id_ = other->ast_id_; } @@ -9493,9 +10412,7 @@ void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) { void HEnvironment::Bind(int index, HValue* value) { ASSERT(value != NULL); - if (!assigned_variables_.Contains(index)) { - assigned_variables_.Add(index, zone()); - } + assigned_variables_.Add(index, zone()); values_[index] = value; } @@ -9578,8 +10495,8 @@ HEnvironment* HEnvironment::CopyForInlining( int arguments, FunctionLiteral* function, HConstant* undefined, - CallKind call_kind, - InliningKind inlining_kind) const { + InliningKind inlining_kind, + bool undefined_receiver) const { ASSERT(frame_type() == JS_FUNCTION); // Outer environment is a copy of this one without the arguments. @@ -9620,8 +10537,7 @@ HEnvironment* HEnvironment::CopyForInlining( // If the function we are inlining is a strict mode function or a // builtin function, pass undefined as the receiver for function // calls (instead of the global receiver). 
- if ((target->shared()->native() || !function->is_classic_mode()) && - call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN) { + if (undefined_receiver) { inner->SetValueAt(0, undefined); } inner->SetValueAt(arity + 1, LookupContext()); @@ -9663,21 +10579,33 @@ void HEnvironment::PrintToStd() { } -void HTracer::TraceCompilation(FunctionLiteral* function) { +void HTracer::TraceCompilation(CompilationInfo* info) { Tag tag(this, "compilation"); - Handle<String> name = function->debug_name(); - PrintStringProperty("name", *name->ToCString()); - PrintStringProperty("method", *name->ToCString()); + if (info->IsOptimizing()) { + Handle<String> name = info->function()->debug_name(); + PrintStringProperty("name", *name->ToCString()); + PrintStringProperty("method", *name->ToCString()); + } else { + CodeStub::Major major_key = info->code_stub()->MajorKey(); + PrintStringProperty("name", CodeStub::MajorName(major_key, false)); + PrintStringProperty("method", "stub"); + } PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis())); } void HTracer::TraceLithium(const char* name, LChunk* chunk) { + ASSERT(!FLAG_parallel_recompilation); + HandleDereferenceGuard allow_handle_deref(chunk->isolate(), + HandleDereferenceGuard::ALLOW); Trace(name, chunk->graph(), chunk); } void HTracer::TraceHydrogen(const char* name, HGraph* graph) { + ASSERT(!FLAG_parallel_recompilation); + HandleDereferenceGuard allow_handle_deref(graph->isolate(), + HandleDereferenceGuard::ALLOW); Trace(name, graph, NULL); } @@ -9873,12 +10801,13 @@ void HTracer::TraceLiveRange(LiveRange* range, const char* type, void HTracer::FlushToFile() { - AppendChars(filename_, *trace_.ToCString(), trace_.length(), false); + AppendChars(filename_.start(), *trace_.ToCString(), trace_.length(), false); trace_.Reset(); } void HStatistics::Initialize(CompilationInfo* info) { + if (info->shared_info().is_null()) return; source_size_ += info->shared_info()->SourceSize(); } @@ -9894,38 +10823,55 @@ void HStatistics::Print() { PrintF("%30s", names_[i]); double ms = static_cast<double>(timing_[i]) / 1000; double percent = static_cast<double>(timing_[i]) * 100 / sum; - PrintF(" - %7.3f ms / %4.1f %% ", ms, percent); + PrintF(" - %8.3f ms / %4.1f %% ", ms, percent); unsigned size = sizes_[i]; double size_percent = static_cast<double>(size) * 100 / total_size_; - PrintF(" %8u bytes / %4.1f %%\n", size, size_percent); - } + PrintF(" %9u bytes / %4.1f %%\n", size, size_percent); + } + + PrintF("----------------------------------------" + "---------------------------------------\n"); + int64_t total = create_graph_ + optimize_graph_ + generate_code_; + PrintF("%30s - %8.3f ms / %4.1f %% \n", + "Create graph", + static_cast<double>(create_graph_) / 1000, + static_cast<double>(create_graph_) * 100 / total); + PrintF("%30s - %8.3f ms / %4.1f %% \n", + "Optimize graph", + static_cast<double>(optimize_graph_) / 1000, + static_cast<double>(optimize_graph_) * 100 / total); + PrintF("%30s - %8.3f ms / %4.1f %% \n", + "Generate and install code", + static_cast<double>(generate_code_) / 1000, + static_cast<double>(generate_code_) * 100 / total); + PrintF("----------------------------------------" + "---------------------------------------\n"); + PrintF("%30s - %8.3f ms (%.1f times slower than full code gen)\n", + "Total", + static_cast<double>(total) / 1000, + static_cast<double>(total) / full_code_gen_); + double source_size_in_kb = static_cast<double>(source_size_) / 1024; double normalized_time = source_size_in_kb > 0 - ? 
(static_cast<double>(sum) / 1000) / source_size_in_kb + ? (static_cast<double>(total) / 1000) / source_size_in_kb : 0; - double normalized_bytes = source_size_in_kb > 0 - ? total_size_ / source_size_in_kb + double normalized_size_in_kb = source_size_in_kb > 0 + ? total_size_ / 1024 / source_size_in_kb : 0; - PrintF("%30s - %7.3f ms %7.3f bytes\n", "Sum", - normalized_time, normalized_bytes); - PrintF("---------------------------------------------------------------\n"); - PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n", - "Total", - static_cast<double>(total_) / 1000, - static_cast<double>(total_) / full_code_gen_); + PrintF("%30s - %8.3f ms %7.3f kB allocated\n", + "Average per kB source", + normalized_time, normalized_size_in_kb); } void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) { if (name == HPhase::kFullCodeGen) { full_code_gen_ += ticks; - } else if (name == HPhase::kTotal) { - total_ += ticks; } else { total_size_ += size; for (int i = 0; i < names_.length(); ++i) { - if (names_[i] == name) { + if (strcmp(names_[i], name) == 0) { timing_[i] += ticks; sizes_[i] += size; return; @@ -9939,13 +10885,34 @@ void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) { const char* const HPhase::kFullCodeGen = "Full code generator"; -const char* const HPhase::kTotal = "Total"; -void HPhase::Begin(const char* name, - HGraph* graph, - LChunk* chunk, - LAllocator* allocator) { +HPhase::HPhase(const char* name, Isolate* isolate) { + Init(isolate, name, NULL, NULL, NULL); +} + + +HPhase::HPhase(const char* name, HGraph* graph) { + Init(graph->isolate(), name, graph, NULL, NULL); +} + + +HPhase::HPhase(const char* name, LChunk* chunk) { + Init(chunk->isolate(), name, NULL, chunk, NULL); +} + + +HPhase::HPhase(const char* name, LAllocator* allocator) { + Init(allocator->isolate(), name, NULL, NULL, allocator); +} + + +void HPhase::Init(Isolate* isolate, + const char* name, + HGraph* graph, + LChunk* chunk, + LAllocator* allocator) { + isolate_ = isolate; name_ = name; graph_ = graph; chunk_ = chunk; @@ -9953,26 +10920,32 @@ void HPhase::Begin(const char* name, if (allocator != NULL && chunk_ == NULL) { chunk_ = allocator->chunk(); } - if (FLAG_hydrogen_stats) start_ = OS::Ticks(); - start_allocation_size_ = Zone::allocation_size_; + if (FLAG_hydrogen_stats) { + start_ticks_ = OS::Ticks(); + start_allocation_size_ = Zone::allocation_size_; + } } -void HPhase::End() const { +HPhase::~HPhase() { if (FLAG_hydrogen_stats) { - int64_t end = OS::Ticks(); + int64_t ticks = OS::Ticks() - start_ticks_; unsigned size = Zone::allocation_size_ - start_allocation_size_; - HStatistics::Instance()->SaveTiming(name_, end - start_, size); + isolate_->GetHStatistics()->SaveTiming(name_, ticks, size); } // Produce trace output if flag is set so that the first letter of the // phase name matches the command line parameter FLAG_trace_phase. 
if (FLAG_trace_hydrogen && OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL) { - if (graph_ != NULL) HTracer::Instance()->TraceHydrogen(name_, graph_); - if (chunk_ != NULL) HTracer::Instance()->TraceLithium(name_, chunk_); + if (graph_ != NULL) { + isolate_->GetHTracer()->TraceHydrogen(name_, graph_); + } + if (chunk_ != NULL) { + isolate_->GetHTracer()->TraceLithium(name_, chunk_); + } if (allocator_ != NULL) { - HTracer::Instance()->TraceLiveRanges(name_, allocator_); + isolate_->GetHTracer()->TraceLiveRanges(name_, allocator_); } } diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h index a0d81497f8..7f5326bcb5 100644 --- a/deps/v8/src/hydrogen.h +++ b/deps/v8/src/hydrogen.h @@ -61,6 +61,7 @@ class HBasicBlock: public ZoneObject { int block_id() const { return block_id_; } void set_block_id(int id) { block_id_ = id; } HGraph* graph() const { return graph_; } + Isolate* isolate() const; const ZoneList<HPhi*>* phis() const { return &phis_; } HInstruction* first() const { return first_; } HInstruction* last() const { return last_; } @@ -91,6 +92,8 @@ class HBasicBlock: public ZoneObject { void set_last_instruction_index(int index) { last_instruction_index_ = index; } + bool is_osr_entry() { return is_osr_entry_; } + void set_osr_entry() { is_osr_entry_ = true; } void AttachLoopInformation(); void DetachLoopInformation(); @@ -125,7 +128,10 @@ class HBasicBlock: public ZoneObject { void Goto(HBasicBlock* block, FunctionState* state = NULL); int PredecessorIndexOf(HBasicBlock* predecessor) const; - void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); } + void AddSimulate(BailoutId ast_id, + RemovableSimulate removable = FIXED_SIMULATE) { + AddInstruction(CreateSimulate(ast_id, removable)); + } void AssignCommonDominator(HBasicBlock* other); void AssignLoopSuccessorDominators(); @@ -166,7 +172,7 @@ class HBasicBlock: public ZoneObject { void RegisterPredecessor(HBasicBlock* pred); void AddDominatedBlock(HBasicBlock* block); - HSimulate* CreateSimulate(BailoutId ast_id); + HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable); HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses); int block_id_; @@ -190,6 +196,7 @@ class HBasicBlock: public ZoneObject { bool is_inline_return_target_; bool is_deoptimizing_; bool dominates_loop_successors_; + bool is_osr_entry_; }; @@ -244,7 +251,7 @@ class HGraph: public ZoneObject { public: explicit HGraph(CompilationInfo* info); - Isolate* isolate() { return isolate_; } + Isolate* isolate() const { return isolate_; } Zone* zone() const { return zone_; } CompilationInfo* info() const { return info_; } @@ -255,21 +262,25 @@ class HGraph: public ZoneObject { void InitializeInferredTypes(); void InsertTypeConversions(); + void MergeRemovableSimulates(); void InsertRepresentationChanges(); void MarkDeoptimizeOnUndefined(); void ComputeMinusZeroChecks(); void ComputeSafeUint32Operations(); + void GlobalValueNumbering(); bool ProcessArgumentsObject(); void EliminateRedundantPhis(); void EliminateUnreachablePhis(); void Canonicalize(); void OrderBlocks(); void AssignDominators(); - void ReplaceCheckedValues(); + void SetupInformativeDefinitions(); void EliminateRedundantBoundsChecks(); void DehoistSimpleArrayIndexComputations(); void DeadCodeElimination(); + void RestoreActualValues(); void PropagateDeoptimizingMark(); + void EliminateUnusedInstructions(); // Returns false if there are phi-uses of the arguments-object // which are not supported by the optimizing compiler. 
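HPhase above is reworked from explicit Begin()/End() calls into per-kind constructors plus a destructor that reports elapsed ticks and zone allocation to the per-isolate HStatistics. The underlying RAII pattern, sketched standalone with illustrative names (std::chrono stands in for OS::Ticks()):

#include <chrono>
#include <cstdio>

// A scoped phase timer: the constructor records the start time and the
// destructor reports how long the scope took, like HPhase::~HPhase above.
class ScopedPhase {
 public:
  explicit ScopedPhase(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedPhase() {
    auto elapsed = std::chrono::duration_cast<std::chrono::microseconds>(
        std::chrono::steady_clock::now() - start_).count();
    std::printf("%30s - %8.3f ms\n", name_, elapsed / 1000.0);
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

void OptimizeGraph() {
  ScopedPhase phase("Optimize graph");
  // ... the work being timed runs here; the report is printed on scope exit.
}

int main() { OptimizeGraph(); }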
@@ -285,11 +296,13 @@ class HGraph: public ZoneObject { undefined_constant_.set(constant); } HConstant* GetConstantUndefined() const { return undefined_constant_.get(); } + HConstant* GetConstant0(); HConstant* GetConstant1(); HConstant* GetConstantMinus1(); HConstant* GetConstantTrue(); HConstant* GetConstantFalse(); HConstant* GetConstantHole(); + HConstant* GetInvalidContext(); HBasicBlock* CreateBasicBlock(); HArgumentsObject* GetArgumentsObject() const { @@ -350,6 +363,14 @@ class HGraph: public ZoneObject { use_optimistic_licm_ = value; } + bool has_soft_deoptimize() { + return has_soft_deoptimize_; + } + + void set_has_soft_deoptimize(bool value) { + has_soft_deoptimize_ = value; + } + void MarkRecursive() { is_recursive_ = true; } @@ -366,12 +387,11 @@ class HGraph: public ZoneObject { } private: - HConstant* GetConstant(SetOncePointer<HConstant>* pointer, - Handle<Object> value); HConstant* GetConstantInt32(SetOncePointer<HConstant>* pointer, int32_t integer_value); void MarkAsDeoptimizingRecursively(HBasicBlock* block); + void NullifyUnreachableInstructions(); void InsertTypeConversions(HInstruction* instr); void PropagateMinusZeroChecks(HValue* value, BitVector* visited); void RecursivelyMarkPhiDeoptimizeOnUndefined(HPhi* phi); @@ -383,6 +403,8 @@ class HGraph: public ZoneObject { void InferTypes(ZoneList<HValue*>* worklist); void InitializeInferredTypes(int from_inclusive, int to_inclusive); void CheckForBackEdge(HBasicBlock* block, HBasicBlock* successor); + void SetupInformativeDefinitionsInBlock(HBasicBlock* block); + void SetupInformativeDefinitionsRecursively(HBasicBlock* block); void EliminateRedundantBoundsChecks(HBasicBlock* bb, BoundsCheckTable* table); Isolate* isolate_; @@ -394,11 +416,13 @@ class HGraph: public ZoneObject { ZoneList<HPhi*>* phi_list_; ZoneList<HInstruction*>* uint32_instructions_; SetOncePointer<HConstant> undefined_constant_; + SetOncePointer<HConstant> constant_0_; SetOncePointer<HConstant> constant_1_; SetOncePointer<HConstant> constant_minus1_; SetOncePointer<HConstant> constant_true_; SetOncePointer<HConstant> constant_false_; - SetOncePointer<HConstant> constant_hole_; + SetOncePointer<HConstant> constant_the_hole_; + SetOncePointer<HConstant> constant_invalid_context_; SetOncePointer<HArgumentsObject> arguments_object_; SetOncePointer<HBasicBlock> osr_loop_entry_; @@ -409,6 +433,7 @@ class HGraph: public ZoneObject { bool is_recursive_; bool use_optimistic_licm_; + bool has_soft_deoptimize_; int type_change_checksum_; DISALLOW_COPY_AND_ASSIGN(HGraph); @@ -424,7 +449,8 @@ enum FrameType { JS_CONSTRUCT, JS_GETTER, JS_SETTER, - ARGUMENTS_ADAPTOR + ARGUMENTS_ADAPTOR, + STUB }; @@ -435,6 +461,8 @@ class HEnvironment: public ZoneObject { Handle<JSFunction> closure, Zone* zone); + HEnvironment(Zone* zone, int parameter_count); + HEnvironment* arguments_environment() { return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this; } @@ -442,7 +470,7 @@ class HEnvironment: public ZoneObject { // Simple accessors. 
Handle<JSFunction> closure() const { return closure_; } const ZoneList<HValue*>* values() const { return &values_; } - const ZoneList<int>* assigned_variables() const { + const GrowableBitVector* assigned_variables() const { return &assigned_variables_; } FrameType frame_type() const { return frame_type_; } @@ -534,8 +562,16 @@ class HEnvironment: public ZoneObject { int arguments, FunctionLiteral* function, HConstant* undefined, - CallKind call_kind, - InliningKind inlining_kind) const; + InliningKind inlining_kind, + bool undefined_receiver) const; + + static bool UseUndefinedReceiver(Handle<JSFunction> closure, + FunctionLiteral* function, + CallKind call_kind, + InliningKind inlining_kind) { + return (closure->shared()->native() || !function->is_classic_mode()) && + call_kind == CALL_AS_FUNCTION && inlining_kind != CONSTRUCT_CALL_RETURN; + } HEnvironment* DiscardInlined(bool drop_extra) { HEnvironment* outer = outer_; @@ -549,7 +585,7 @@ class HEnvironment: public ZoneObject { void ClearHistory() { pop_count_ = 0; push_count_ = 0; - assigned_variables_.Rewind(0); + assigned_variables_.Clear(); } void SetValueAt(int index, HValue* value) { @@ -598,7 +634,7 @@ class HEnvironment: public ZoneObject { Handle<JSFunction> closure_; // Value array [parameters] [specials] [locals] [temporaries]. ZoneList<HValue*> values_; - ZoneList<int> assigned_variables_; + GrowableBitVector assigned_variables_; FrameType frame_type_; int parameter_count_; int specials_count_; @@ -612,7 +648,26 @@ class HEnvironment: public ZoneObject { }; -class HGraphBuilder; +class HInferRepresentation BASE_EMBEDDED { + public: + explicit HInferRepresentation(HGraph* graph) + : graph_(graph), + worklist_(8, graph->zone()), + in_worklist_(graph->GetMaximumValueID(), graph->zone()) { } + + void Analyze(); + void AddToWorklist(HValue* current); + + private: + Zone* zone() const { return graph_->zone(); } + + HGraph* graph_; + ZoneList<HValue*> worklist_; + BitVector in_worklist_; +}; + + +class HOptimizedGraphBuilder; enum ArgumentsAllowedFlag { ARGUMENTS_NOT_ALLOWED, @@ -648,10 +703,10 @@ class AstContext { bool is_for_typeof() { return for_typeof_; } protected: - AstContext(HGraphBuilder* owner, Expression::Context kind); + AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind); virtual ~AstContext(); - HGraphBuilder* owner() const { return owner_; } + HOptimizedGraphBuilder* owner() const { return owner_; } inline Zone* zone() const; @@ -662,7 +717,7 @@ class AstContext { #endif private: - HGraphBuilder* owner_; + HOptimizedGraphBuilder* owner_; Expression::Context kind_; AstContext* outer_; bool for_typeof_; @@ -671,7 +726,7 @@ class AstContext { class EffectContext: public AstContext { public: - explicit EffectContext(HGraphBuilder* owner) + explicit EffectContext(HOptimizedGraphBuilder* owner) : AstContext(owner, Expression::kEffect) { } virtual ~EffectContext(); @@ -684,7 +739,7 @@ class EffectContext: public AstContext { class ValueContext: public AstContext { public: - explicit ValueContext(HGraphBuilder* owner, ArgumentsAllowedFlag flag) + ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag) : AstContext(owner, Expression::kValue), flag_(flag) { } virtual ~ValueContext(); @@ -702,7 +757,7 @@ class ValueContext: public AstContext { class TestContext: public AstContext { public: - TestContext(HGraphBuilder* owner, + TestContext(HOptimizedGraphBuilder* owner, Expression* condition, TypeFeedbackOracle* oracle, HBasicBlock* if_true, @@ -742,7 +797,7 @@ class TestContext: public AstContext 
{ class FunctionState { public: - FunctionState(HGraphBuilder* owner, + FunctionState(HOptimizedGraphBuilder* owner, CompilationInfo* info, TypeFeedbackOracle* oracle, InliningKind inlining_kind); @@ -772,7 +827,7 @@ class FunctionState { bool arguments_pushed() { return arguments_elements() != NULL; } private: - HGraphBuilder* owner_; + HOptimizedGraphBuilder* owner_; CompilationInfo* compilation_info_; TypeFeedbackOracle* oracle_; @@ -804,7 +859,178 @@ class FunctionState { }; -class HGraphBuilder: public AstVisitor { +class HGraphBuilder { + public: + explicit HGraphBuilder(CompilationInfo* info) + : info_(info), graph_(NULL), current_block_(NULL) {} + virtual ~HGraphBuilder() {} + + HBasicBlock* current_block() const { return current_block_; } + void set_current_block(HBasicBlock* block) { current_block_ = block; } + HEnvironment* environment() const { + return current_block()->last_environment(); + } + Zone* zone() const { return info_->zone(); } + HGraph* graph() const { return graph_; } + Isolate* isolate() const { return graph_->isolate(); } + + HGraph* CreateGraph(); + + // Adding instructions. + HInstruction* AddInstruction(HInstruction* instr); + void AddSimulate(BailoutId id, + RemovableSimulate removable = FIXED_SIMULATE); + HBoundsCheck* AddBoundsCheck( + HValue* index, + HValue* length, + BoundsCheckKeyMode key_mode = DONT_ALLOW_SMI_KEY, + Representation r = Representation::None()); + + HReturn* AddReturn(HValue* value); + + protected: + virtual bool BuildGraph() = 0; + + HBasicBlock* CreateBasicBlock(HEnvironment* env); + HBasicBlock* CreateLoopHeaderBlock(); + + // Building common constructs + HInstruction* BuildExternalArrayElementAccess( + HValue* external_elements, + HValue* checked_key, + HValue* val, + HValue* dependency, + ElementsKind elements_kind, + bool is_store); + + HInstruction* BuildFastElementAccess( + HValue* elements, + HValue* checked_key, + HValue* val, + HValue* dependency, + ElementsKind elements_kind, + bool is_store); + + HInstruction* BuildUncheckedMonomorphicElementAccess( + HValue* object, + HValue* key, + HValue* val, + HCheckMaps* mapcheck, + bool is_js_array, + ElementsKind elements_kind, + bool is_store, + Representation checked_index_representation = Representation::None()); + + HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id); + HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id); + + class CheckBuilder { + public: + CheckBuilder(HGraphBuilder* builder, BailoutId id); + ~CheckBuilder() { + if (!finished_) End(); + } + + void CheckNotUndefined(HValue* value); + void CheckIntegerEq(HValue* left, HValue* right); + void End(); + + private: + Zone* zone() { return builder_->zone(); } + + HGraphBuilder* builder_; + bool finished_; + HBasicBlock* failure_block_; + HBasicBlock* merge_block_; + BailoutId id_; + }; + + class IfBuilder { + public: + IfBuilder(HGraphBuilder* builder, BailoutId id); + ~IfBuilder() { + if (!finished_) End(); + } + + HInstruction* BeginTrue( + HValue* left, + HValue* right, + Token::Value token, + Representation input_representation = Representation::Integer32()); + void BeginFalse(); + void End(); + + private: + Zone* zone() { return builder_->zone(); } + + HGraphBuilder* builder_; + bool finished_; + HBasicBlock* first_true_block_; + HBasicBlock* last_true_block_; + HBasicBlock* first_false_block_; + HBasicBlock* merge_block_; + BailoutId id_; + }; + + class LoopBuilder { + public: + enum Direction { + kPreIncrement, + kPostIncrement, + kPreDecrement, + kPostDecrement + }; + + 
LoopBuilder(HGraphBuilder* builder, + HValue* context, + Direction direction, + BailoutId id); + ~LoopBuilder() { + ASSERT(finished_); + } + + HValue* BeginBody( + HValue* initial, + HValue* terminating, + Token::Value token, + Representation input_representation = Representation::Integer32()); + void EndBody(); + + private: + Zone* zone() { return builder_->zone(); } + + HGraphBuilder* builder_; + HValue* context_; + HInstruction* increment_; + HPhi* phi_; + HBasicBlock* header_block_; + HBasicBlock* body_block_; + HBasicBlock* exit_block_; + Direction direction_; + BailoutId id_; + bool finished_; + }; + + HValue* BuildAllocateElements(HContext* context, + ElementsKind kind, + HValue* capacity); + + void BuildCopyElements(HContext* context, + HValue* from_elements, + ElementsKind from_elements_kind, + HValue* to_elements, + ElementsKind to_elements_kind, + HValue* length); + + private: + HGraphBuilder(); + CompilationInfo* info_; + HGraph* graph_; + HBasicBlock* current_block_; +}; + + +class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor { public: enum BreakType { BREAK, CONTINUE }; enum SwitchType { UNKNOWN_SWITCH, SMI_SWITCH, STRING_SWITCH }; @@ -840,7 +1066,8 @@ class HGraphBuilder: public AstVisitor { // structures mirroring BreakableStatement nesting. class BreakAndContinueScope BASE_EMBEDDED { public: - BreakAndContinueScope(BreakAndContinueInfo* info, HGraphBuilder* owner) + BreakAndContinueScope(BreakAndContinueInfo* info, + HOptimizedGraphBuilder* owner) : info_(info), owner_(owner), next_(owner->break_scope()) { owner->set_break_scope(this); } @@ -848,7 +1075,7 @@ class HGraphBuilder: public AstVisitor { ~BreakAndContinueScope() { owner_->set_break_scope(next_); } BreakAndContinueInfo* info() { return info_; } - HGraphBuilder* owner() { return owner_; } + HOptimizedGraphBuilder* owner() { return owner_; } BreakAndContinueScope* next() { return next_; } // Search the break stack for a break or continue target. @@ -856,30 +1083,21 @@ class HGraphBuilder: public AstVisitor { private: BreakAndContinueInfo* info_; - HGraphBuilder* owner_; + HOptimizedGraphBuilder* owner_; BreakAndContinueScope* next_; }; - HGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle); + HOptimizedGraphBuilder(CompilationInfo* info, TypeFeedbackOracle* oracle); - HGraph* CreateGraph(); + virtual bool BuildGraph(); // Simple accessors. - HGraph* graph() const { return graph_; } BreakAndContinueScope* break_scope() const { return break_scope_; } void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; } - HBasicBlock* current_block() const { return current_block_; } - void set_current_block(HBasicBlock* block) { current_block_ = block; } - HEnvironment* environment() const { - return current_block()->last_environment(); - } - bool inline_bailout() { return inline_bailout_; } - // Adding instructions. - HInstruction* AddInstruction(HInstruction* instr); - void AddSimulate(BailoutId ast_id); + void AddSoftDeoptimize(); // Bailout environment manipulation. void Push(HValue* value) { environment()->Push(value); } @@ -903,9 +1121,12 @@ class HGraphBuilder: public AstVisitor { void operator delete(void* pointer, Zone* zone) { } void operator delete(void* pointer) { } + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); + private: // Type of a member function that generates inline code for a native function. 
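The CheckBuilder and IfBuilder helpers introduced in the new HGraphBuilder base class above finish themselves from their destructors when End() was never called explicitly, so partially built control flow is always closed. A stripped-down sketch of that auto-finish idiom (illustrative only, not the real builder API):

#include <cstdio>

class IfBuilderSketch {
 public:
  void BeginTrue()  { std::printf("emit then-branch entry\n"); }
  void BeginFalse() { std::printf("emit else-branch entry\n"); }
  void End() {
    if (finished_) return;
    std::printf("emit merge block\n");  // runs exactly once
    finished_ = true;
  }
  ~IfBuilderSketch() {
    if (!finished_) End();  // close the construct if the caller forgot
  }

 private:
  bool finished_ = false;
};

int main() {
  IfBuilderSketch builder;
  builder.BeginTrue();
  builder.BeginFalse();
  // No explicit End(): the destructor emits the merge block.
}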
- typedef void (HGraphBuilder::*InlineFunctionGenerator)(CallRuntime* call); + typedef void (HOptimizedGraphBuilder::*InlineFunctionGenerator) + (CallRuntime* call); // Forward declarations for inner scope classes. class SubgraphScope; @@ -1024,10 +1245,6 @@ class HGraphBuilder: public AstVisitor { // to push them as outgoing parameters. template <class Instruction> HInstruction* PreProcessCall(Instruction* call); - void TraceRepresentation(Token::Value op, - TypeInfo info, - HValue* value, - Representation rep); static Representation ToRepresentation(TypeInfo info); void SetUpScope(Scope* scope); @@ -1037,9 +1254,6 @@ class HGraphBuilder: public AstVisitor { AST_NODE_LIST(DECLARE_VISIT) #undef DECLARE_VISIT - HBasicBlock* CreateBasicBlock(HEnvironment* env); - HBasicBlock* CreateLoopHeaderBlock(); - // Helpers for flow graph construction. enum GlobalPropertyAccess { kUseCell, @@ -1070,6 +1284,9 @@ class HGraphBuilder: public AstVisitor { bool TryInlineSetter(Handle<JSFunction> setter, Assignment* assignment, HValue* implicit_return_value); + bool TryInlineApply(Handle<JSFunction> function, + Call* expr, + int arguments_count); bool TryInlineBuiltinMethodCall(Call* expr, HValue* receiver, Handle<Map> receiver_map, @@ -1110,33 +1327,22 @@ class HGraphBuilder: public AstVisitor { HValue* value, NilValue nil); - HStringCharCodeAt* BuildStringCharCodeAt(HValue* context, - HValue* string, - HValue* index); + HInstruction* BuildStringCharCodeAt(HValue* context, + HValue* string, + HValue* index); HInstruction* BuildBinaryOperation(BinaryOperation* expr, HValue* left, HValue* right); HInstruction* BuildIncrement(bool returns_original_input, CountOperation* expr); - HInstruction* BuildFastElementAccess(HValue* elements, - HValue* checked_key, - HValue* val, - HValue* dependency, - ElementsKind elements_kind, - bool is_store); + HInstruction* BuildLoadKeyedGeneric(HValue* object, + HValue* key); HInstruction* TryBuildConsolidatedElementLoad(HValue* object, HValue* key, HValue* val, SmallMapList* maps); - HInstruction* BuildUncheckedMonomorphicElementAccess(HValue* object, - HValue* key, - HValue* val, - HCheckMaps* mapcheck, - Handle<Map> map, - bool is_store); - HInstruction* BuildMonomorphicElementAccess(HValue* object, HValue* key, HValue* val, @@ -1164,8 +1370,7 @@ class HGraphBuilder: public AstVisitor { HLoadNamedField* BuildLoadNamedField(HValue* object, Handle<Map> map, - LookupResult* result, - bool smi_and_map_check); + LookupResult* result); HInstruction* BuildLoadNamedGeneric(HValue* object, Handle<String> name, Property* expr); @@ -1177,21 +1382,15 @@ class HGraphBuilder: public AstVisitor { Handle<String> name, Property* expr, Handle<Map> map); - HInstruction* BuildLoadKeyedGeneric(HValue* object, HValue* key); - HInstruction* BuildExternalArrayElementAccess( - HValue* external_elements, - HValue* checked_key, - HValue* val, - HValue* dependency, - ElementsKind elements_kind, - bool is_store); + + void AddCheckMapsWithTransitions(HValue* object, + Handle<Map> map); HInstruction* BuildStoreNamedField(HValue* object, Handle<String> name, HValue* value, Handle<Map> map, - LookupResult* lookup, - bool smi_and_map_check); + LookupResult* lookup); HInstruction* BuildStoreNamedGeneric(HValue* object, Handle<String> name, HValue* value); @@ -1212,12 +1411,17 @@ class HGraphBuilder: public AstVisitor { HInstruction* BuildThisFunction(); + void AddCheckPrototypeMaps(Handle<JSObject> holder, + Handle<Map> receiver_map); + void AddCheckConstantFunction(Handle<JSObject> holder, HValue* receiver, - 
Handle<Map> receiver_map, - bool smi_and_map_check); + Handle<Map> receiver_map); - Zone* zone() const { return zone_; } + bool MatchRotateRight(HValue* left, + HValue* right, + HValue** operand, + HValue** shift_amount); // The translation state of the currently-being-translated function. FunctionState* function_state_; @@ -1232,20 +1436,16 @@ class HGraphBuilder: public AstVisitor { // A stack of breakable statements entered. BreakAndContinueScope* break_scope_; - HGraph* graph_; - HBasicBlock* current_block_; - int inlined_count_; ZoneList<Handle<Object> > globals_; - Zone* zone_; - bool inline_bailout_; friend class FunctionState; // Pushes and pops the state stack. friend class AstContext; // Pushes and pops the AST context stack. + friend class KeyedLoadFastElementStub; - DISALLOW_COPY_AND_ASSIGN(HGraphBuilder); + DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder); }; @@ -1338,31 +1538,36 @@ class HSideEffectMap BASE_EMBEDDED { class HStatistics: public Malloced { public: - void Initialize(CompilationInfo* info); - void Print(); - void SaveTiming(const char* name, int64_t ticks, unsigned size); - static HStatistics* Instance() { - static SetOncePointer<HStatistics> instance; - if (!instance.is_set()) { - instance.set(new HStatistics()); - } - return instance.get(); - } - - private: HStatistics() : timing_(5), names_(5), sizes_(5), - total_(0), + create_graph_(0), + optimize_graph_(0), + generate_code_(0), total_size_(0), full_code_gen_(0), source_size_(0) { } + void Initialize(CompilationInfo* info); + void Print(); + void SaveTiming(const char* name, int64_t ticks, unsigned size); + + void IncrementSubtotals(int64_t create_graph, + int64_t optimize_graph, + int64_t generate_code) { + create_graph_ += create_graph; + optimize_graph_ += optimize_graph; + generate_code_ += generate_code; + } + + private: List<int64_t> timing_; List<const char*> names_; List<unsigned> sizes_; - int64_t total_; + int64_t create_graph_; + int64_t optimize_graph_; + int64_t generate_code_; unsigned total_size_; int64_t full_code_gen_; double source_size_; @@ -1372,54 +1577,46 @@ class HStatistics: public Malloced { class HPhase BASE_EMBEDDED { public: static const char* const kFullCodeGen; - static const char* const kTotal; - explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); } - HPhase(const char* name, HGraph* graph) { - Begin(name, graph, NULL, NULL); - } - HPhase(const char* name, LChunk* chunk) { - Begin(name, NULL, chunk, NULL); - } - HPhase(const char* name, LAllocator* allocator) { - Begin(name, NULL, NULL, allocator); - } - - ~HPhase() { - End(); - } + HPhase(const char* name, Isolate* isolate); + HPhase(const char* name, HGraph* graph); + HPhase(const char* name, LChunk* chunk); + HPhase(const char* name, LAllocator* allocator); + ~HPhase(); private: - void Begin(const char* name, - HGraph* graph, - LChunk* chunk, - LAllocator* allocator); - void End() const; + void Init(Isolate* isolate, + const char* name, + HGraph* graph, + LChunk* chunk, + LAllocator* allocator); - int64_t start_; + Isolate* isolate_; const char* name_; HGraph* graph_; LChunk* chunk_; LAllocator* allocator_; + int64_t start_ticks_; unsigned start_allocation_size_; }; class HTracer: public Malloced { public: - void TraceCompilation(FunctionLiteral* function); + explicit HTracer(int isolate_id) + : trace_(&string_allocator_), indent_(0) { + OS::SNPrintF(filename_, + "hydrogen-%d-%d.cfg", + OS::GetCurrentProcessId(), + isolate_id); + WriteChars(filename_.start(), "", 0, false); + } + + void 
TraceCompilation(CompilationInfo* info); void TraceHydrogen(const char* name, HGraph* graph); void TraceLithium(const char* name, LChunk* chunk); void TraceLiveRanges(const char* name, LAllocator* allocator); - static HTracer* Instance() { - static SetOncePointer<HTracer> instance; - if (!instance.is_set()) { - instance.set(new HTracer("hydrogen.cfg")); - } - return instance.get(); - } - private: class Tag BASE_EMBEDDED { public: @@ -1444,11 +1641,6 @@ class HTracer: public Malloced { const char* name_; }; - explicit HTracer(const char* filename) - : filename_(filename), trace_(&string_allocator_), indent_(0) { - WriteChars(filename, "", 0, false); - } - void TraceLiveRange(LiveRange* range, const char* type, Zone* zone); void Trace(const char* name, HGraph* graph, LChunk* chunk); void FlushToFile(); @@ -1484,7 +1676,7 @@ class HTracer: public Malloced { } } - const char* filename_; + EmbeddedVector<char, 64> filename_; HeapStringAllocator string_allocator_; StringStream trace_; int indent_; diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h index 7fdf50c7a1..fbe2f242d6 100644 --- a/deps/v8/src/ia32/assembler-ia32-inl.h +++ b/deps/v8/src/ia32/assembler-ia32-inl.h @@ -46,12 +46,21 @@ namespace v8 { namespace internal { +static const byte kCallOpcode = 0xE8; + + // The modes possibly affected by apply must be in kApplyMask. void RelocInfo::apply(intptr_t delta) { - if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) { + if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) { int32_t* p = reinterpret_cast<int32_t*>(pc_); *p -= delta; // Relocate entry. CPU::FlushICache(p, sizeof(uint32_t)); + } else if (rmode_ == CODE_AGE_SEQUENCE) { + if (*pc_ == kCallOpcode) { + int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); + *p -= delta; // Relocate entry. + CPU::FlushICache(p, sizeof(uint32_t)); + } } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) { // Special handling of js_return when a break point is set (call // instruction has been inserted). 
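The new CODE_AGE_SEQUENCE branch above adjusts the rel32 operand of the kCallOpcode (0xE8) call at the start of aged code: when the code object moves by delta but the call target stays put, a pc-relative displacement has to shrink by that same delta, which is what "*p -= delta" does. A stand-alone sketch of that arithmetic on a plain byte buffer (illustrative only, not V8's RelocInfo):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const uint8_t kCallOpcode = 0xE8;  // ia32 near call with a rel32 operand

    // Target of an E8 call whose opcode byte sits at address `pc_address`:
    // the stored displacement is relative to the end of the 5-byte instruction.
    uint32_t CallTarget(const uint8_t* pc, uint32_t pc_address) {
      assert(pc[0] == kCallOpcode);
      int32_t disp;
      std::memcpy(&disp, pc + 1, sizeof(disp));
      return pc_address + 5 + disp;
    }

    // If the code containing the call moves by `delta` while the target stays
    // put, the displacement must shrink by the same delta.
    void RelocateCall(uint8_t* pc, int32_t delta) {
      assert(pc[0] == kCallOpcode);
      int32_t disp;
      std::memcpy(&disp, pc + 1, sizeof(disp));
      disp -= delta;
      std::memcpy(pc + 1, &disp, sizeof(disp));
    }

    int main() {
      uint8_t call_site[5] = { kCallOpcode };
      int32_t disp = 0x100;                    // original target = pc + 5 + 0x100
      std::memcpy(call_site + 1, &disp, sizeof(disp));
      std::printf("target before move: %#x\n", CallTarget(call_site, 0x1000));
      RelocateCall(call_site, 0x20);           // pretend the code moved up by 0x20
      std::printf("target after move:  %#x\n", CallTarget(call_site, 0x1020));
    }

Both prints give the same address, which is exactly the invariant apply() maintains.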
@@ -74,13 +83,13 @@ void RelocInfo::apply(intptr_t delta) { Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); return reinterpret_cast<Address>(pc_); @@ -94,7 +103,7 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { Assembler::set_target_address_at(pc_, target); - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( @@ -140,6 +149,19 @@ Address* RelocInfo::target_reference_address() { } +Address RelocInfo::target_runtime_entry(Assembler* origin) { + ASSERT(IsRuntimeEntry(rmode_)); + return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_)); +} + + +void RelocInfo::set_target_runtime_entry(Address target, + WriteBarrierMode mode) { + ASSERT(IsRuntimeEntry(rmode_)); + if (target_address() != target) set_target_address(target, mode); +} + + Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = Memory::Address_at(pc_); @@ -169,6 +191,21 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell, } +Code* RelocInfo::code_age_stub() { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + ASSERT(*pc_ == kCallOpcode); + return Code::GetCodeFromTargetAddress( + Assembler::target_address_at(pc_ + 1)); +} + + +void RelocInfo::set_code_age_stub(Code* stub) { + ASSERT(*pc_ == kCallOpcode); + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Assembler::set_target_address_at(pc_ + 1, stub->instruction_start()); +} + + Address RelocInfo::call_address() { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); @@ -206,7 +243,7 @@ Object** RelocInfo::call_object_address() { bool RelocInfo::IsPatchedReturnSequence() { - return *pc_ == 0xE8; + return *pc_ == kCallOpcode; } @@ -227,7 +264,9 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); CPU::FlushICache(pc_, sizeof(Address)); -#ifdef ENABLE_DEBUGGER_SUPPORT + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + visitor->VisitCodeAgeSequence(this); + #ifdef ENABLE_DEBUGGER_SUPPORT // TODO(isolates): Get a cached isolate below. 
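target_runtime_entry/set_target_runtime_entry above read and write the 32-bit word at the reloc site as an absolute address, in contrast to code targets, which keep the pc-relative form sketched after the previous hunk; the call/jmp/j emitters later in this file pick between the two encodings with IsRuntimeEntry. A minimal sketch of the absolute form (addresses are made up):

    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    // A RUNTIME_ENTRY operand on ia32 now stores the entry's absolute address,
    // so reading it back is just loading the 32-bit word at the operand.
    void SetRuntimeEntry(uint8_t* operand, uint32_t entry_address) {
      std::memcpy(operand, &entry_address, sizeof(entry_address));
    }

    uint32_t RuntimeEntry(const uint8_t* operand) {
      uint32_t entry;                  // what target_runtime_entry() hands back
      std::memcpy(&entry, operand, sizeof(entry));
      return entry;
    }

    int main() {
      uint8_t operand[4];
      SetRuntimeEntry(operand, 0xC0DE5000u);   // made-up runtime entry address
      std::printf("runtime entry: %#x\n", RuntimeEntry(operand));
    }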
} else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) || @@ -236,7 +275,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { Isolate::Current()->debug()->has_break_points()) { visitor->VisitDebugTarget(this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } } @@ -255,6 +294,8 @@ void RelocInfo::Visit(Heap* heap) { } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); CPU::FlushICache(pc_, sizeof(Address)); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + StaticVisitor::VisitCodeAgeSequence(heap, this); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -263,7 +304,7 @@ void RelocInfo::Visit(Heap* heap) { IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } } @@ -272,7 +313,7 @@ void RelocInfo::Visit(Heap* heap) { Immediate::Immediate(int x) { x_ = x; - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } @@ -298,20 +339,20 @@ Immediate::Immediate(Handle<Object> handle) { } else { // no relocation needed x_ = reinterpret_cast<intptr_t>(obj); - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } } Immediate::Immediate(Smi* value) { x_ = reinterpret_cast<intptr_t>(value); - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } Immediate::Immediate(Address addr) { x_ = reinterpret_cast<int32_t>(addr); - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } @@ -338,7 +379,7 @@ void Assembler::emit(Handle<Object> handle) { void Assembler::emit(uint32_t x, RelocInfo::Mode rmode, TypeFeedbackId id) { if (rmode == RelocInfo::CODE_TARGET && !id.IsNone()) { RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, id.ToInt()); - } else if (rmode != RelocInfo::NONE) { + } else if (!RelocInfo::IsNone(rmode)) { RecordRelocInfo(rmode); } emit(x); @@ -351,7 +392,7 @@ void Assembler::emit(const Immediate& x) { emit_code_relative_offset(label); return; } - if (x.rmode_ != RelocInfo::NONE) RecordRelocInfo(x.rmode_); + if (!RelocInfo::IsNone(x.rmode_)) RecordRelocInfo(x.rmode_); emit(x.x_); } @@ -368,7 +409,7 @@ void Assembler::emit_code_relative_offset(Label* label) { void Assembler::emit_w(const Immediate& x) { - ASSERT(x.rmode_ == RelocInfo::NONE); + ASSERT(RelocInfo::IsNone(x.rmode_)); uint16_t value = static_cast<uint16_t>(x.x_); reinterpret_cast<uint16_t*>(pc_)[0] = value; pc_ += sizeof(uint16_t); diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc index f291b05260..b48906e706 100644 --- a/deps/v8/src/ia32/assembler-ia32.cc +++ b/deps/v8/src/ia32/assembler-ia32.cc @@ -52,7 +52,40 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif uint64_t CpuFeatures::supported_ = 0; -uint64_t CpuFeatures::found_by_runtime_probing_ = 0; +uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; + + +ExternalReference ExternalReference::cpu_features() { + ASSERT(CpuFeatures::initialized_); + return ExternalReference(&CpuFeatures::supported_); +} + + +int IntelDoubleRegister::NumAllocatableRegisters() { + if (CpuFeatures::IsSupported(SSE2)) { + return XMMRegister::kNumAllocatableRegisters; + } else { + return X87TopOfStackRegister::kNumAllocatableRegisters; + } +} + + +int IntelDoubleRegister::NumRegisters() { + if (CpuFeatures::IsSupported(SSE2)) { + return 
XMMRegister::kNumRegisters; + } else { + return X87TopOfStackRegister::kNumRegisters; + } +} + + +const char* IntelDoubleRegister::AllocationIndexToString(int index) { + if (CpuFeatures::IsSupported(SSE2)) { + return XMMRegister::AllocationIndexToString(index); + } else { + return X87TopOfStackRegister::AllocationIndexToString(index); + } +} // The Probe method needs executable memory, so it uses Heap::CreateCode. @@ -113,7 +146,7 @@ void CpuFeatures::Probe() { __ bind(&cpuid); __ mov(eax, 1); supported_ = (1 << CPUID); - { Scope fscope(CPUID); + { CpuFeatureScope fscope(&assm, CPUID); __ cpuid(); } supported_ = 0; @@ -136,11 +169,10 @@ void CpuFeatures::Probe() { typedef uint64_t (*F0)(); F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address())); - supported_ = probe(); - found_by_runtime_probing_ = supported_; - uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); - supported_ |= os_guarantees; - found_by_runtime_probing_ &= ~os_guarantees; + uint64_t probed_features = probe(); + uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform(); + supported_ = probed_features | platform_features; + found_by_runtime_probing_only_ = probed_features & ~platform_features; delete memory; } @@ -169,7 +201,7 @@ void Displacement::init(Label* L, Type type) { const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY | 1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE | - 1 << RelocInfo::DEBUG_BREAK_SLOT; + 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE; bool RelocInfo::IsCodedSpecially() { @@ -209,7 +241,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { #endif // Patch the code. - patcher.masm()->call(target, RelocInfo::NONE); + patcher.masm()->call(target, RelocInfo::NONE32); // Check that the size of the code generated is as expected. ASSERT_EQ(kCallCodeSize, @@ -228,11 +260,11 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) { // [base + disp/r] - if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) { + if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) { // [base] set_modrm(0, base); if (base.is(esp)) set_sib(times_1, esp, base); - } else if (is_int8(disp) && rmode == RelocInfo::NONE) { + } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) { // [base + disp8] set_modrm(1, base); if (base.is(esp)) set_sib(times_1, esp, base); @@ -253,11 +285,11 @@ Operand::Operand(Register base, RelocInfo::Mode rmode) { ASSERT(!index.is(esp)); // illegal addressing mode // [base + index*scale + disp/r] - if (disp == 0 && rmode == RelocInfo::NONE && !base.is(ebp)) { + if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) { // [base + index*scale] set_modrm(0, esp); set_sib(scale, index, base); - } else if (is_int8(disp) && rmode == RelocInfo::NONE) { + } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) { // [base + index*scale + disp8] set_modrm(1, esp); set_sib(scale, index, base); @@ -312,48 +344,19 @@ Register Operand::reg() const { static void InitCoverageLog(); #endif -Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) - : AssemblerBase(arg_isolate), - positions_recorder_(this), - emit_debug_code_(FLAG_debug_code) { - if (buffer == NULL) { - // Do our own buffer management. 
- if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - - if (isolate()->assembler_spare_buffer() != NULL) { - buffer = isolate()->assembler_spare_buffer(); - isolate()->set_assembler_spare_buffer(NULL); - } - } - if (buffer == NULL) { - buffer_ = NewArray<byte>(buffer_size); - } else { - buffer_ = static_cast<byte*>(buffer); - } - buffer_size_ = buffer_size; - own_buffer_ = true; - } else { - // Use externally provided buffer instead. - ASSERT(buffer_size > 0); - buffer_ = static_cast<byte*>(buffer); - buffer_size_ = buffer_size; - own_buffer_ = false; - } - +Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) + : AssemblerBase(isolate, buffer, buffer_size), + positions_recorder_(this) { // Clear the buffer in debug mode unless it was provided by the // caller in which case we can't be sure it's okay to overwrite // existing code in it; see CodePatcher::CodePatcher(...). #ifdef DEBUG if (own_buffer_) { - memset(buffer_, 0xCC, buffer_size); // int3 + memset(buffer_, 0xCC, buffer_size_); // int3 } #endif - // Set up buffer pointers. - ASSERT(buffer_ != NULL); - pc_ = buffer_; - reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); #ifdef GENERATED_CODE_COVERAGE InitCoverageLog(); @@ -361,18 +364,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) } -Assembler::~Assembler() { - if (own_buffer_) { - if (isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } - } -} - - void Assembler::GetCode(CodeDesc* desc) { // Finalize code (at this point overflow() may be true, but the gap ensures // that we are still not overlapping instructions and relocation info). @@ -483,7 +474,7 @@ void Assembler::CodeTargetAlign() { void Assembler::cpuid() { - ASSERT(CpuFeatures::IsEnabled(CPUID)); + ASSERT(IsEnabled(CPUID)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0xA2); @@ -705,7 +696,7 @@ void Assembler::movzx_w(Register dst, const Operand& src) { void Assembler::cmov(Condition cc, Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(CMOV)); + ASSERT(IsEnabled(CMOV)); EnsureSpace ensure_space(this); // Opcode: 0f 40 + cc /r. EMIT(0x0F); @@ -1064,6 +1055,25 @@ void Assembler::rcr(Register dst, uint8_t imm8) { } } +void Assembler::ror(Register dst, uint8_t imm8) { + EnsureSpace ensure_space(this); + ASSERT(is_uint5(imm8)); // illegal shift count + if (imm8 == 1) { + EMIT(0xD1); + EMIT(0xC8 | dst.code()); + } else { + EMIT(0xC1); + EMIT(0xC8 | dst.code()); + EMIT(imm8); + } +} + +void Assembler::ror_cl(Register dst) { + EnsureSpace ensure_space(this); + EMIT(0xD3); + EMIT(0xC8 | dst.code()); +} + void Assembler::sar(Register dst, uint8_t imm8) { EnsureSpace ensure_space(this); @@ -1175,7 +1185,7 @@ void Assembler::test(Register reg, const Immediate& imm) { EnsureSpace ensure_space(this); // Only use test against byte for registers that have a byte // variant: eax, ebx, ecx, and edx. 
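The ror/ror_cl encodings added above (0xD1 /1 when the count is 1, 0xC1 /1 imm8 otherwise, 0xD3 /1 for a count in cl) back the MatchRotateRight hook added to HOptimizedGraphBuilder earlier in this diff: the shift/or idiom below is what gets recognized and collapsed into a single rotate. A portable sketch of that idiom (plain C++, not V8 code):

    #include <cstdint>
    #include <cstdio>

    // The pattern the graph builder looks for: (x >> s) | (x << (32 - s)).
    uint32_t RotateRightPattern(uint32_t x, uint32_t s) {
      s &= 31;                    // ia32 masks rotate counts to 5 bits
      if (s == 0) return x;       // avoid the undefined shift by 32
      return (x >> s) | (x << (32 - s));
    }

    int main() {
      std::printf("%08x\n", RotateRightPattern(0x80000001u, 1));   // c0000000
      std::printf("%08x\n", RotateRightPattern(0x12345678u, 8));   // 78123456
    }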
- if (imm.rmode_ == RelocInfo::NONE && + if (RelocInfo::IsNone(imm.rmode_) && is_uint8(imm.x_) && reg.is_byte_register()) { uint8_t imm8 = imm.x_; @@ -1295,7 +1305,7 @@ void Assembler::nop() { void Assembler::rdtsc() { - ASSERT(CpuFeatures::IsEnabled(RDTSC)); + ASSERT(IsEnabled(RDTSC)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x31); @@ -1415,7 +1425,11 @@ void Assembler::call(byte* entry, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); ASSERT(!RelocInfo::IsCodeTarget(rmode)); EMIT(0xE8); - emit(entry - (pc_ + sizeof(int32_t)), rmode); + if (RelocInfo::IsRuntimeEntry(rmode)) { + emit(reinterpret_cast<uint32_t>(entry), rmode); + } else { + emit(entry - (pc_ + sizeof(int32_t)), rmode); + } } @@ -1480,7 +1494,11 @@ void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); ASSERT(!RelocInfo::IsCodeTarget(rmode)); EMIT(0xE9); - emit(entry - (pc_ + sizeof(int32_t)), rmode); + if (RelocInfo::IsRuntimeEntry(rmode)) { + emit(reinterpret_cast<uint32_t>(entry), rmode); + } else { + emit(entry - (pc_ + sizeof(int32_t)), rmode); + } } @@ -1501,7 +1519,7 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) { void Assembler::j(Condition cc, Label* L, Label::Distance distance) { EnsureSpace ensure_space(this); - ASSERT(0 <= cc && cc < 16); + ASSERT(0 <= cc && static_cast<int>(cc) < 16); if (L->is_bound()) { const int short_size = 2; const int long_size = 6; @@ -1533,11 +1551,15 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) { void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) { EnsureSpace ensure_space(this); - ASSERT((0 <= cc) && (cc < 16)); + ASSERT((0 <= cc) && (static_cast<int>(cc) < 16)); // 0000 1111 1000 tttn #32-bit disp. EMIT(0x0F); EMIT(0x80 | cc); - emit(entry - (pc_ + sizeof(int32_t)), rmode); + if (RelocInfo::IsRuntimeEntry(rmode)) { + emit(reinterpret_cast<uint32_t>(entry), rmode); + } else { + emit(entry - (pc_ + sizeof(int32_t)), rmode); + } } @@ -1649,7 +1671,7 @@ void Assembler::fistp_s(const Operand& adr) { void Assembler::fisttp_s(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); EMIT(0xDB); emit_operand(ecx, adr); @@ -1657,7 +1679,7 @@ void Assembler::fisttp_s(const Operand& adr) { void Assembler::fisttp_d(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); EMIT(0xDD); emit_operand(ecx, adr); @@ -1919,7 +1941,7 @@ void Assembler::setcc(Condition cc, Register reg) { void Assembler::cvttss2si(Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -1929,7 +1951,7 @@ void Assembler::cvttss2si(Register dst, const Operand& src) { void Assembler::cvttsd2si(Register dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1939,7 +1961,7 @@ void Assembler::cvttsd2si(Register dst, const Operand& src) { void Assembler::cvtsd2si(Register dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1949,7 +1971,7 @@ void Assembler::cvtsd2si(Register dst, XMMRegister src) { void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1959,7 
+1981,7 @@ void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -1969,7 +1991,7 @@ void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) { void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1979,7 +2001,17 @@ void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) { void Assembler::addsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); + EnsureSpace ensure_space(this); + EMIT(0xF2); + EMIT(0x0F); + EMIT(0x58); + emit_sse_operand(dst, src); +} + + +void Assembler::addsd(XMMRegister dst, const Operand& src) { + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1989,7 +2021,17 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) { void Assembler::mulsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); + EnsureSpace ensure_space(this); + EMIT(0xF2); + EMIT(0x0F); + EMIT(0x59); + emit_sse_operand(dst, src); +} + + +void Assembler::mulsd(XMMRegister dst, const Operand& src) { + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -1999,7 +2041,7 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) { void Assembler::subsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2009,7 +2051,7 @@ void Assembler::subsd(XMMRegister dst, XMMRegister src) { void Assembler::divsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2019,7 +2061,7 @@ void Assembler::divsd(XMMRegister dst, XMMRegister src) { void Assembler::xorpd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2064,7 +2106,7 @@ void Assembler::orpd(XMMRegister dst, XMMRegister src) { void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2074,7 +2116,7 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) { void Assembler::ucomisd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2084,7 +2126,7 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) { void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2096,7 +2138,7 @@ void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) { } void Assembler::movmskpd(Register dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2105,8 +2147,17 @@ void Assembler::movmskpd(Register dst, XMMRegister src) { } +void Assembler::movmskps(Register dst, XMMRegister src) { + ASSERT(IsEnabled(SSE2)); + EnsureSpace ensure_space(this); + 
EMIT(0x0F); + EMIT(0x50); + emit_sse_operand(dst, src); +} + + void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2116,7 +2167,7 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) { void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2127,7 +2178,7 @@ void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) { void Assembler::movaps(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x28); @@ -2136,7 +2187,7 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) { void Assembler::movdqa(const Operand& dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2146,7 +2197,7 @@ void Assembler::movdqa(const Operand& dst, XMMRegister src) { void Assembler::movdqa(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2156,7 +2207,7 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) { void Assembler::movdqu(const Operand& dst, XMMRegister src ) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2166,7 +2217,7 @@ void Assembler::movdqu(const Operand& dst, XMMRegister src ) { void Assembler::movdqu(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2176,7 +2227,7 @@ void Assembler::movdqu(XMMRegister dst, const Operand& src) { void Assembler::movntdqa(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2187,7 +2238,7 @@ void Assembler::movntdqa(XMMRegister dst, const Operand& src) { void Assembler::movntdq(const Operand& dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2201,7 +2252,8 @@ void Assembler::prefetch(const Operand& src, int level) { EnsureSpace ensure_space(this); EMIT(0x0F); EMIT(0x18); - XMMRegister code = { level }; // Emit hint number in Reg position of RegR/M. + // Emit hint number in Reg position of RegR/M. 
+ XMMRegister code = XMMRegister::from_code(level); emit_sse_operand(code, src); } @@ -2219,7 +2271,7 @@ void Assembler::movdbl(const Operand& dst, XMMRegister src) { void Assembler::movsd(const Operand& dst, XMMRegister src ) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); // double EMIT(0x0F); @@ -2229,7 +2281,7 @@ void Assembler::movsd(const Operand& dst, XMMRegister src ) { void Assembler::movsd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); // double EMIT(0x0F); @@ -2239,7 +2291,7 @@ void Assembler::movsd(XMMRegister dst, const Operand& src) { void Assembler::movsd(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF2); EMIT(0x0F); @@ -2249,7 +2301,7 @@ void Assembler::movsd(XMMRegister dst, XMMRegister src) { void Assembler::movss(const Operand& dst, XMMRegister src ) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); // float EMIT(0x0F); @@ -2259,7 +2311,7 @@ void Assembler::movss(const Operand& dst, XMMRegister src ) { void Assembler::movss(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); // float EMIT(0x0F); @@ -2269,7 +2321,7 @@ void Assembler::movss(XMMRegister dst, const Operand& src) { void Assembler::movss(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0xF3); EMIT(0x0F); @@ -2279,7 +2331,7 @@ void Assembler::movss(XMMRegister dst, XMMRegister src) { void Assembler::movd(XMMRegister dst, const Operand& src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2289,7 +2341,7 @@ void Assembler::movd(XMMRegister dst, const Operand& src) { void Assembler::movd(const Operand& dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2312,7 +2364,7 @@ void Assembler::extractps(Register dst, XMMRegister src, byte imm8) { void Assembler::pand(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2322,7 +2374,7 @@ void Assembler::pand(XMMRegister dst, XMMRegister src) { void Assembler::pxor(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2332,7 +2384,7 @@ void Assembler::pxor(XMMRegister dst, XMMRegister src) { void Assembler::por(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2342,7 +2394,7 @@ void Assembler::por(XMMRegister dst, XMMRegister src) { void Assembler::ptest(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2353,7 +2405,7 @@ void Assembler::ptest(XMMRegister dst, XMMRegister src) { void Assembler::psllq(XMMRegister reg, int8_t shift) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); 
EMIT(0x0F); @@ -2364,7 +2416,7 @@ void Assembler::psllq(XMMRegister reg, int8_t shift) { void Assembler::psllq(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2374,7 +2426,7 @@ void Assembler::psllq(XMMRegister dst, XMMRegister src) { void Assembler::psrlq(XMMRegister reg, int8_t shift) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2385,7 +2437,7 @@ void Assembler::psrlq(XMMRegister reg, int8_t shift) { void Assembler::psrlq(XMMRegister dst, XMMRegister src) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2394,8 +2446,8 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) { } -void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) { - ASSERT(CpuFeatures::IsEnabled(SSE2)); +void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) { + ASSERT(IsEnabled(SSE2)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2406,7 +2458,7 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) { void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2418,7 +2470,7 @@ void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) { void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); EMIT(0x66); EMIT(0x0F); @@ -2446,7 +2498,7 @@ void Assembler::emit_sse_operand(Register dst, XMMRegister src) { void Assembler::Print() { - Disassembler::Decode(stdout, buffer_, pc_); + Disassembler::Decode(isolate(), stdout, buffer_, pc_); } @@ -2524,10 +2576,7 @@ void Assembler::GrowBuffer() { // Relocate runtime entries. for (RelocIterator it(desc); !it.done(); it.next()) { RelocInfo::Mode rmode = it.rinfo()->rmode(); - if (rmode == RelocInfo::RUNTIME_ENTRY) { - int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc()); - *p -= pc_delta; // relocate entry - } else if (rmode == RelocInfo::INTERNAL_REFERENCE) { + if (rmode == RelocInfo::INTERNAL_REFERENCE) { int32_t* p = reinterpret_cast<int32_t*>(it.rinfo()->pc()); if (*p != 0) { // 0 means uninitialized. *p += pc_delta; @@ -2579,7 +2628,7 @@ void Assembler::emit_operand(Register reg, const Operand& adr) { pc_ += length; // Emit relocation information if necessary. - if (length >= sizeof(int32_t) && adr.rmode_ != RelocInfo::NONE) { + if (length >= sizeof(int32_t) && !RelocInfo::IsNone(adr.rmode_)) { pc_ -= sizeof(int32_t); // pc_ must be *at* disp32 RecordRelocInfo(adr.rmode_); pc_ += sizeof(int32_t); @@ -2608,7 +2657,7 @@ void Assembler::dd(uint32_t data) { void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - ASSERT(rmode != RelocInfo::NONE); + ASSERT(!RelocInfo::IsNone(rmode)); // Don't record external references unless the heap will be serialized. if (rmode == RelocInfo::EXTERNAL_REFERENCE) { #ifdef DEBUG diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h index b0f4651d1b..a3da9af433 100644 --- a/deps/v8/src/ia32/assembler-ia32.h +++ b/deps/v8/src/ia32/assembler-ia32.h @@ -65,7 +65,10 @@ namespace internal { // and best performance in optimized code. 
// struct Register { - static const int kNumAllocatableRegisters = 6; + static const int kMaxNumAllocatableRegisters = 6; + static int NumAllocatableRegisters() { + return kMaxNumAllocatableRegisters; + } static const int kNumRegisters = 8; static inline const char* AllocationIndexToString(int index); @@ -119,7 +122,7 @@ const Register no_reg = { kRegister_no_reg_Code }; inline const char* Register::AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); // This is the mapping of allocation indices to registers. const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" }; return kNames[index]; @@ -133,22 +136,70 @@ inline int Register::ToAllocationIndex(Register reg) { inline Register Register::FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); return (index >= 4) ? from_code(index + 2) : from_code(index); } -struct XMMRegister { - static const int kNumAllocatableRegisters = 7; - static const int kNumRegisters = 8; +struct IntelDoubleRegister { + static const int kMaxNumRegisters = 8; + static const int kMaxNumAllocatableRegisters = 7; + static int NumAllocatableRegisters(); + static int NumRegisters(); + static const char* AllocationIndexToString(int index); - static int ToAllocationIndex(XMMRegister reg) { + static int ToAllocationIndex(IntelDoubleRegister reg) { ASSERT(reg.code() != 0); return reg.code() - 1; } + static IntelDoubleRegister FromAllocationIndex(int index) { + ASSERT(index >= 0 && index < NumAllocatableRegisters()); + return from_code(index + 1); + } + + static IntelDoubleRegister from_code(int code) { + IntelDoubleRegister result = { code }; + return result; + } + + bool is_valid() const { + return 0 <= code_ && code_ < NumRegisters(); + } + int code() const { + ASSERT(is_valid()); + return code_; + } + + int code_; +}; + + +const IntelDoubleRegister double_register_0 = { 0 }; +const IntelDoubleRegister double_register_1 = { 1 }; +const IntelDoubleRegister double_register_2 = { 2 }; +const IntelDoubleRegister double_register_3 = { 3 }; +const IntelDoubleRegister double_register_4 = { 4 }; +const IntelDoubleRegister double_register_5 = { 5 }; +const IntelDoubleRegister double_register_6 = { 6 }; +const IntelDoubleRegister double_register_7 = { 7 }; + + +struct XMMRegister : IntelDoubleRegister { + static const int kNumAllocatableRegisters = 7; + static const int kNumRegisters = 8; + + static XMMRegister from_code(int code) { + STATIC_ASSERT(sizeof(XMMRegister) == sizeof(IntelDoubleRegister)); + XMMRegister result; + result.code_ = code; + return result; + } + + bool is(XMMRegister reg) const { return code_ == reg.code_; } + static XMMRegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < NumAllocatableRegisters()); return from_code(index + 1); } @@ -165,34 +216,46 @@ struct XMMRegister { }; return names[index]; } +}; - static XMMRegister from_code(int code) { - XMMRegister r = { code }; - return r; + +#define xmm0 (static_cast<const XMMRegister&>(double_register_0)) +#define xmm1 (static_cast<const XMMRegister&>(double_register_1)) +#define xmm2 (static_cast<const XMMRegister&>(double_register_2)) +#define xmm3 (static_cast<const XMMRegister&>(double_register_3)) +#define xmm4 (static_cast<const XMMRegister&>(double_register_4)) +#define xmm5 (static_cast<const 
XMMRegister&>(double_register_5)) +#define xmm6 (static_cast<const XMMRegister&>(double_register_6)) +#define xmm7 (static_cast<const XMMRegister&>(double_register_7)) + + +struct X87TopOfStackRegister : IntelDoubleRegister { + static const int kNumAllocatableRegisters = 1; + static const int kNumRegisters = 1; + + bool is(X87TopOfStackRegister reg) const { + return code_ == reg.code_; } - bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } - bool is(XMMRegister reg) const { return code_ == reg.code_; } - int code() const { - ASSERT(is_valid()); - return code_; + static const char* AllocationIndexToString(int index) { + ASSERT(index >= 0 && index < kNumAllocatableRegisters); + const char* const names[] = { + "st0", + }; + return names[index]; } - int code_; + static int ToAllocationIndex(X87TopOfStackRegister reg) { + ASSERT(reg.code() == 0); + return 0; + } }; - -const XMMRegister xmm0 = { 0 }; -const XMMRegister xmm1 = { 1 }; -const XMMRegister xmm2 = { 2 }; -const XMMRegister xmm3 = { 3 }; -const XMMRegister xmm4 = { 4 }; -const XMMRegister xmm5 = { 5 }; -const XMMRegister xmm6 = { 6 }; -const XMMRegister xmm7 = { 7 }; +#define x87tos \ + static_cast<const X87TopOfStackRegister&>(double_register_0) -typedef XMMRegister DoubleRegister; +typedef IntelDoubleRegister DoubleRegister; enum Condition { @@ -275,12 +338,12 @@ class Immediate BASE_EMBEDDED { return Immediate(label); } - bool is_zero() const { return x_ == 0 && rmode_ == RelocInfo::NONE; } + bool is_zero() const { return x_ == 0 && RelocInfo::IsNone(rmode_); } bool is_int8() const { - return -128 <= x_ && x_ < 128 && rmode_ == RelocInfo::NONE; + return -128 <= x_ && x_ < 128 && RelocInfo::IsNone(rmode_); } bool is_int16() const { - return -32768 <= x_ && x_ < 32768 && rmode_ == RelocInfo::NONE; + return -32768 <= x_ && x_ < 32768 && RelocInfo::IsNone(rmode_); } private: @@ -320,20 +383,20 @@ class Operand BASE_EMBEDDED { // [base + disp/r] explicit Operand(Register base, int32_t disp, - RelocInfo::Mode rmode = RelocInfo::NONE); + RelocInfo::Mode rmode = RelocInfo::NONE32); // [base + index*scale + disp/r] explicit Operand(Register base, Register index, ScaleFactor scale, int32_t disp, - RelocInfo::Mode rmode = RelocInfo::NONE); + RelocInfo::Mode rmode = RelocInfo::NONE32); // [index*scale + disp/r] explicit Operand(Register index, ScaleFactor scale, int32_t disp, - RelocInfo::Mode rmode = RelocInfo::NONE); + RelocInfo::Mode rmode = RelocInfo::NONE32); static Operand StaticVariable(const ExternalReference& ext) { return Operand(reinterpret_cast<int32_t>(ext.address()), @@ -442,10 +505,10 @@ class Displacement BASE_EMBEDDED { // CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. +// Supported features must be enabled by a CpuFeatureScope before use. // Example: -// if (CpuFeatures::IsSupported(SSE2)) { -// CpuFeatures::Scope fscope(SSE2); +// if (assembler->IsSupported(SSE2)) { +// CpuFeatureScope fscope(assembler, SSE2); // // Generate SSE2 floating point code. // } else { // // Generate standard x87 floating point code. @@ -467,88 +530,25 @@ class CpuFeatures : public AllStatic { return (supported_ & (static_cast<uint64_t>(1) << f)) != 0; } -#ifdef DEBUG - // Check whether a feature is currently enabled. 
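CpuFeatures now splits what the CPUID probe reported from what the OS already guarantees: Probe() (earlier in this diff) sets supported_ = probed | platform and found_by_runtime_probing_only_ = probed & ~platform, and the IsSafeForSnapshot predicate just below rejects features that only probing found while the serializer is active. The same logic as a stand-alone sketch (feature numbering and the serializer flag are illustrative):

    #include <cstdint>
    #include <cstdio>

    enum CpuFeature { SSE2 = 0, SSE3 = 1, SSE4_1 = 2, CMOV = 3 };

    uint64_t supported = 0;
    uint64_t found_by_runtime_probing_only = 0;
    bool serializer_enabled = true;   // pretend we are building a snapshot

    void Probe(uint64_t probed_features, uint64_t platform_features) {
      supported = probed_features | platform_features;
      found_by_runtime_probing_only = probed_features & ~platform_features;
    }

    bool IsSupported(CpuFeature f) {
      return (supported & (uint64_t{1} << f)) != 0;
    }

    bool IsFoundByRuntimeProbingOnly(CpuFeature f) {
      return (found_by_runtime_probing_only & (uint64_t{1} << f)) != 0;
    }

    bool IsSafeForSnapshot(CpuFeature f) {
      return IsSupported(f) &&
             (!serializer_enabled || !IsFoundByRuntimeProbingOnly(f));
    }

    int main() {
      // Probe reports SSE2+SSE3+CMOV; the platform only guarantees SSE2+CMOV.
      Probe((1 << SSE2) | (1 << SSE3) | (1 << CMOV), (1 << SSE2) | (1 << CMOV));
      std::printf("SSE3 supported: %d, snapshot-safe: %d\n",
                  IsSupported(SSE3), IsSafeForSnapshot(SSE3));   // 1, 0
      std::printf("SSE2 supported: %d, snapshot-safe: %d\n",
                  IsSupported(SSE2), IsSafeForSnapshot(SSE2));   // 1, 1
    }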
- static bool IsEnabled(CpuFeature f) { + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - uint64_t enabled = isolate->enabled_cpu_features(); - return (enabled & (static_cast<uint64_t>(1) << f)) != 0; + return (found_by_runtime_probing_only_ & + (static_cast<uint64_t>(1) << f)) != 0; } -#endif - - // Enable a specified feature within a scope. - class Scope BASE_EMBEDDED { -#ifdef DEBUG - public: - explicit Scope(CpuFeature f) { - uint64_t mask = static_cast<uint64_t>(1) << f; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = isolate_->enabled_cpu_features(); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - uint64_t old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; - - class TryForceFeatureScope BASE_EMBEDDED { - public: - explicit TryForceFeatureScope(CpuFeature f) - : old_supported_(CpuFeatures::supported_) { - if (CanForce()) { - CpuFeatures::supported_ |= (static_cast<uint64_t>(1) << f); - } - } - - ~TryForceFeatureScope() { - if (CanForce()) { - CpuFeatures::supported_ = old_supported_; - } - } - - private: - static bool CanForce() { - // It's only safe to temporarily force support of CPU features - // when there's only a single isolate, which is guaranteed when - // the serializer is enabled. - return Serializer::enabled(); - } - - const uint64_t old_supported_; - }; + static bool IsSafeForSnapshot(CpuFeature f) { + return (IsSupported(f) && + (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); + } private: #ifdef DEBUG static bool initialized_; #endif static uint64_t supported_; - static uint64_t found_by_runtime_probing_; + static uint64_t found_by_runtime_probing_only_; + friend class ExternalReference; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -582,15 +582,7 @@ class Assembler : public AssemblerBase { // upon destruction of the assembler. // TODO(vitalyr): the assembler does not need an isolate. Assembler(Isolate* isolate, void* buffer, int buffer_size); - ~Assembler(); - - // Overrides the default provided by FLAG_debug_code. - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - - // Avoids using instructions that vary in size in unpredictable ways between - // the snapshot and the running VM. This is needed by the full compiler so - // that it can recompile code with debug support and fix the PC. - void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + virtual ~Assembler() { } // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. 
GetCode() is idempotent; it returns the same result if no other @@ -817,6 +809,8 @@ class Assembler : public AssemblerBase { void rcl(Register dst, uint8_t imm8); void rcr(Register dst, uint8_t imm8); + void ror(Register dst, uint8_t imm8); + void ror_cl(Register dst); void sar(Register dst, uint8_t imm8); void sar_cl(Register dst); @@ -996,8 +990,10 @@ class Assembler : public AssemblerBase { void cvtsd2ss(XMMRegister dst, XMMRegister src); void addsd(XMMRegister dst, XMMRegister src); + void addsd(XMMRegister dst, const Operand& src); void subsd(XMMRegister dst, XMMRegister src); void mulsd(XMMRegister dst, XMMRegister src); + void mulsd(XMMRegister dst, const Operand& src); void divsd(XMMRegister dst, XMMRegister src); void xorpd(XMMRegister dst, XMMRegister src); void xorps(XMMRegister dst, XMMRegister src); @@ -1019,6 +1015,7 @@ class Assembler : public AssemblerBase { void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode); void movmskpd(Register dst, XMMRegister src); + void movmskps(Register dst, XMMRegister src); void cmpltsd(XMMRegister dst, XMMRegister src); void pcmpeqd(XMMRegister dst, XMMRegister src); @@ -1054,7 +1051,7 @@ class Assembler : public AssemblerBase { void psllq(XMMRegister dst, XMMRegister src); void psrlq(XMMRegister reg, int8_t shift); void psrlq(XMMRegister dst, XMMRegister src); - void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle); + void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle); void pextrd(Register dst, XMMRegister src, int8_t offset) { pextrd(Operand(dst), src, offset); } @@ -1097,8 +1094,6 @@ class Assembler : public AssemblerBase { void db(uint8_t data); void dd(uint32_t data); - int pc_offset() const { return pc_ - buffer_; } - // Check if there is less than kGap bytes available in the buffer. // If this is the case, we need to grow the buffer before emitting // an instruction or relocation information. @@ -1117,15 +1112,11 @@ class Assembler : public AssemblerBase { // Avoid overflows for displacements etc. static const int kMaximalBufferSize = 512*MB; - static const int kMinimalBufferSize = 4*KB; byte byte_at(int pos) { return buffer_[pos]; } void set_byte_at(int pos, byte value) { buffer_[pos] = value; } protected: - bool emit_debug_code() const { return emit_debug_code_; } - bool predictable_code_size() const { return predictable_code_size_ ; } - void movsd(XMMRegister dst, const Operand& src); void movsd(const Operand& dst, XMMRegister src); @@ -1186,22 +1177,10 @@ class Assembler : public AssemblerBase { friend class CodePatcher; friend class EnsureSpace; - // Code buffer: - // The buffer into which code and relocation info are generated. - byte* buffer_; - int buffer_size_; - // True if the assembler owns the buffer, false if buffer is external. - bool own_buffer_; - // code generation - byte* pc_; // the program counter; moves forward RelocInfoWriter reloc_info_writer; PositionsRecorder positions_recorder_; - - bool emit_debug_code_; - bool predictable_code_size_; - friend class PositionsRecorder; }; diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc index 9bc15e9098..afb1c030ed 100644 --- a/deps/v8/src/ia32/builtins-ia32.cc +++ b/deps/v8/src/ia32/builtins-ia32.cc @@ -87,6 +87,32 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) { } +void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push a copy of the function. + __ push(edi); + // Push call kind information. 
+ __ push(ecx); + + __ push(edi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kInstallRecompiledCode, 1); + + // Restore call kind information. + __ pop(ecx); + // Restore receiver. + __ pop(edi); + + // Tear down internal frame. + } + + // Do a tail-call of the compiled function. + __ lea(eax, FieldOperand(eax, Code::kHeaderSize)); + __ jmp(eax); +} + + void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -257,6 +283,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ AllocateInNewSpace(FixedArray::kHeaderSize, times_pointer_size, edx, + REGISTER_VALUE_IS_INT32, edi, ecx, no_reg, @@ -382,6 +409,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); __ j(above_equal, &exit); + // Symbols are "objects". + __ CmpInstanceType(ecx, SYMBOL_TYPE); + __ j(equal, &exit); + // Throw away the result of the constructor invocation and use the // on-stack receiver as the result. __ bind(&use_receiver); @@ -538,6 +569,61 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { } +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { + // For now, we are relying on the fact that make_code_young doesn't do any + // garbage collection which allows us to save/restore the registers without + // worrying about which of them contain pointers. We also don't build an + // internal frame to make the code faster, since we shouldn't have to do stack + // crawls in MakeCodeYoung. This seems a bit fragile. + + // Re-execute the code that was patched back to the young age when + // the stub returns. + __ sub(Operand(esp, 0), Immediate(5)); + __ pushad(); + __ mov(eax, Operand(esp, 8 * kPointerSize)); + { + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(1, ebx); + __ mov(Operand(esp, 0), eax); + __ CallCFunction( + ExternalReference::get_make_code_young_function(masm->isolate()), 1); + } + __ popad(); + __ ret(0); +} + +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} \ +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR + + +void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve registers across notification, this is important for compiled + // stubs that tail call the runtime on deopts passing their parameters in + // registers. + __ pushad(); + __ CallRuntime(Runtime::kNotifyStubFailure, 0); + __ popad(); + // Tear down internal frame. + } + + __ pop(MemOperand(esp, 0)); // Ignore state offset + __ ret(0); // Return to IC Miss stub, continuation still on stack. +} + + static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { { @@ -583,6 +669,8 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) { void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { // TODO(kasperl): Do we need to save/restore the XMM registers too? + // TODO(mvstanton): We should save these regs, do this in a future + // checkin. 
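GenerateMakeCodeYoungAgainCommon plus the DEFINE_CODE_AGE_BUILTIN_GENERATOR macro above use the usual X-macro trick: CODE_AGE_LIST(V) invokes V once per code age, so applying the generator macro to the list stamps out an even- and odd-marking builtin for every age. A minimal stand-alone sketch of the pattern (the list entries here are placeholders, not the real code-age names):

    #include <cstdio>

    // A tiny X-macro list in the spirit of CODE_AGE_LIST(V).
    #define AGE_LIST(V) \
      V(Young)          \
      V(Middling)       \
      V(Old)

    // Applying a generator macro to the list defines one function per entry.
    #define DEFINE_MAKE_YOUNG(C) \
      void Make##C##CodeYoungAgain() { std::puts("make " #C " code young again"); }
    AGE_LIST(DEFINE_MAKE_YOUNG)
    #undef DEFINE_MAKE_YOUNG

    int main() {
    #define CALL(C) Make##C##CodeYoungAgain();
      AGE_LIST(CALL)
    #undef CALL
    }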
// For now, we are relying on the fact that Runtime::NotifyOSR // doesn't do any garbage collection which allows us to save/restore @@ -945,12 +1033,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, if (initial_capacity > 0) { size += FixedArray::SizeFor(initial_capacity); } - __ AllocateInNewSpace(size, - result, - scratch2, - scratch3, - gc_required, - TAG_OBJECT); + __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT); // Allocated the JSArray. Now initialize the fields except for the elements // array. @@ -1047,8 +1130,9 @@ static void AllocateJSArray(MacroAssembler* masm, // requested elements. STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); __ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize, - times_half_pointer_size, // array_size is a smi. + times_pointer_size, array_size, + REGISTER_VALUE_IS_SMI, result, elements_array_end, scratch, @@ -1418,34 +1502,66 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) { void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : argc + // -- ebx : type info cell // -- edi : constructor // -- esp[0] : return address // -- esp[4] : last argument // ----------------------------------- - Label generic_constructor; - if (FLAG_debug_code) { // The array construct code is only set for the global and natives // builtin Array functions which always have maps. // Initial map for the builtin Array function should be a map. - __ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); + __ mov(ecx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset)); // Will both indicate a NULL and a Smi. - __ test(ebx, Immediate(kSmiTagMask)); + __ test(ecx, Immediate(kSmiTagMask)); __ Assert(not_zero, "Unexpected initial map for Array function"); - __ CmpObjectType(ebx, MAP_TYPE, ecx); + __ CmpObjectType(ecx, MAP_TYPE, ecx); __ Assert(equal, "Unexpected initial map for Array function"); - } - // Run the native code for the Array function called as constructor. - ArrayNativeCode(masm, true, &generic_constructor); + if (FLAG_optimize_constructed_arrays) { + // We should either have undefined in ebx or a valid jsglobalpropertycell + Label okay_here; + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), masm->isolate()); + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ cmp(ebx, Immediate(undefined_sentinel)); + __ j(equal, &okay_here); + __ cmp(FieldOperand(ebx, 0), Immediate(global_property_cell_map)); + __ Assert(equal, "Expected property cell in register ebx"); + __ bind(&okay_here); + } + } - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. 
- __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); + if (FLAG_optimize_constructed_arrays) { + Label not_zero_case, not_one_case; + __ test(eax, eax); + __ j(not_zero, ¬_zero_case); + ArrayNoArgumentConstructorStub no_argument_stub; + __ TailCallStub(&no_argument_stub); + + __ bind(¬_zero_case); + __ cmp(eax, 1); + __ j(greater, ¬_one_case); + ArraySingleArgumentConstructorStub single_argument_stub; + __ TailCallStub(&single_argument_stub); + + __ bind(¬_one_case); + ArrayNArgumentsConstructorStub n_argument_stub; + __ TailCallStub(&n_argument_stub); + } else { + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, true, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ jmp(generic_construct_stub, RelocInfo::CODE_TARGET); + } } @@ -1497,12 +1613,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Allocate a JSValue and put the tagged pointer into eax. Label gc_required; - __ AllocateInNewSpace(JSValue::kSize, - eax, // Result. - ecx, // New allocation top (we ignore it). - no_reg, - &gc_required, - TAG_OBJECT); + __ Allocate(JSValue::kSize, + eax, // Result. + ecx, // New allocation top (we ignore it). + no_reg, + &gc_required, + TAG_OBJECT); // Set the map. __ LoadGlobalFunctionInitialMap(edi, ecx); @@ -1699,12 +1815,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) { void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - CpuFeatures::TryForceFeatureScope scope(SSE2); - if (!CpuFeatures::IsSupported(SSE2) && FLAG_debug_code) { - __ Abort("Unreachable code: Cannot optimize without SSE2 support."); - return; - } - // Get the loop depth of the stack guard check. This is recorded in // a test(eax, depth) instruction right after the call. 
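Under FLAG_optimize_constructed_arrays, the Array constructor builtin above no longer always falls through to the generic construct stub; it tail-calls one of three specialized stubs chosen purely by argument count (the test eax,eax / cmp eax,1 ladder). The same dispatch in plain C++ (the functions are stand-ins for the real code stubs):

    #include <cstdio>

    // Stand-ins for ArrayNoArgumentConstructorStub / Single / N-arguments.
    void NoArgumentStub()     { std::puts("new Array()        -> no-argument stub"); }
    void SingleArgumentStub() { std::puts("new Array(len)     -> single-argument stub"); }
    void NArgumentsStub()     { std::puts("new Array(a, b, c) -> n-arguments stub"); }

    // Mirrors the test/jump ladder generated by Generate_ArrayConstructCode.
    void DispatchArrayConstructor(int argc) {
      if (argc == 0) {
        NoArgumentStub();
      } else if (argc == 1) {
        SingleArgumentStub();
      } else {
        NArgumentsStub();
      }
    }

    int main() {
      DispatchArrayConstructor(0);
      DispatchArrayConstructor(1);
      DispatchArrayConstructor(3);
    }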
Label stack_check; diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc index 1d23c7e5d2..88207c4907 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.cc +++ b/deps/v8/src/ia32/code-stubs-ia32.cc @@ -34,12 +34,88 @@ #include "isolate.h" #include "jsregexp.h" #include "regexp-macro-assembler.h" +#include "runtime.h" #include "stub-cache.h" #include "codegen.h" +#include "runtime.h" namespace v8 { namespace internal { + +void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { eax, ebx, ecx, edx }; + descriptor->register_param_count_ = 4; + descriptor->register_params_ = registers; + descriptor->stack_parameter_count_ = NULL; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; +} + + +void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { edx, ecx }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->stack_parameter_count_ = NULL; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); +} + + +void TransitionElementsKindStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { eax, ebx }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; +} + + +static void InitializeArrayConstructorDescriptor(Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + // register state + // edi -- constructor function + // ebx -- type info cell with elements kind + // eax -- number of arguments to the constructor function + static Register registers[] = { edi, ebx }; + descriptor->register_param_count_ = 2; + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &eax; + descriptor->register_params_ = registers; + descriptor->extra_expression_stack_count_ = 1; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ArrayConstructor_StubFailure); +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + #define __ ACCESS_MASM(masm) void ToNumberStub::Generate(MacroAssembler* masm) { @@ -69,7 +145,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { Counters* counters = masm->isolate()->counters(); Label gc; - __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); + __ Allocate(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT); __ IncrementCounter(counters->fast_new_closure_total(), 1); @@ -197,8 +273,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. 
Label gc; int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, - eax, ebx, ecx, &gc, TAG_OBJECT); + __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize, + eax, ebx, ecx, &gc, TAG_OBJECT); // Get the function from the stack. __ mov(ecx, Operand(esp, 1 * kPointerSize)); @@ -245,8 +321,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace(FixedArray::SizeFor(length), - eax, ebx, ecx, &gc, TAG_OBJECT); + __ Allocate(FixedArray::SizeFor(length), eax, ebx, ecx, &gc, TAG_OBJECT); // Get the function or sentinel from the stack. __ mov(ecx, Operand(esp, 1 * kPointerSize)); @@ -311,6 +386,7 @@ static void GenerateFastCloneShallowArrayCommon( MacroAssembler* masm, int length, FastCloneShallowArrayStub::Mode mode, + AllocationSiteMode allocation_site_mode, Label* fail) { // Registers on entry: // @@ -324,11 +400,27 @@ static void GenerateFastCloneShallowArrayCommon( ? FixedDoubleArray::SizeFor(length) : FixedArray::SizeFor(length); } - int size = JSArray::kSize + elements_size; + int size = JSArray::kSize; + int allocation_info_start = size; + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + size += AllocationSiteInfo::kSize; + } + size += elements_size; // Allocate both the JS array and the elements array in one big // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT); + AllocationFlags flags = TAG_OBJECT; + if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) { + flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags); + } + __ Allocate(size, eax, ebx, edx, fail, flags); + + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ mov(FieldOperand(eax, allocation_info_start), + Immediate(Handle<Map>(masm->isolate()->heap()-> + allocation_site_info_map()))); + __ mov(FieldOperand(eax, allocation_info_start + kPointerSize), ecx); + } // Copy the JS array part. for (int i = 0; i < JSArray::kSize; i += kPointerSize) { @@ -342,7 +434,11 @@ static void GenerateFastCloneShallowArrayCommon( // Get hold of the elements array of the boilerplate and setup the // elements pointer in the resulting object. __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset)); - __ lea(edx, Operand(eax, JSArray::kSize)); + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ lea(edx, Operand(eax, JSArray::kSize + AllocationSiteInfo::kSize)); + } else { + __ lea(edx, Operand(eax, JSArray::kSize)); + } __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx); // Copy the elements array. 
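For orientation, a sketch of what the single combined allocation in GenerateFastCloneShallowArrayCommon above contains when allocation sites are tracked (offsets are symbolic and purely illustrative; the field order follows the stores in the hunk):

// [0, JSArray::kSize)                                     copy of the boilerplate JSArray
// [JSArray::kSize, JSArray::kSize + AllocationSiteInfo::kSize)
//     word 0: allocation_site_info_map
//     word 1: the boilerplate array (ecx), recorded as the site payload
// [JSArray::kSize + AllocationSiteInfo::kSize, size)      copy of the elements array
//
// The elements pointer written into the clone skips the AllocationSiteInfo block,
// which is why the lea above adds AllocationSiteInfo::kSize in the tracking case.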
@@ -397,15 +493,17 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset)); __ CheckMap(ebx, factory->fixed_cow_array_map(), &check_fast_elements, DONT_DO_SMI_CHECK); - GenerateFastCloneShallowArrayCommon(masm, 0, - COPY_ON_WRITE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS, + allocation_site_mode_, + &slow_case); __ ret(3 * kPointerSize); __ bind(&check_fast_elements); __ CheckMap(ebx, factory->fixed_array_map(), &double_elements, DONT_DO_SMI_CHECK); - GenerateFastCloneShallowArrayCommon(masm, length_, - CLONE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS, + allocation_site_mode_, + &slow_case); __ ret(3 * kPointerSize); __ bind(&double_elements); @@ -434,7 +532,10 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ pop(ecx); } - GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, mode, + allocation_site_mode_, + &slow_case); + // Return and remove the on-stack parameters. __ ret(3 * kPointerSize); @@ -443,52 +544,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { } -void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [esp + kPointerSize]: object literal flags. - // [esp + (2 * kPointerSize)]: constant properties. - // [esp + (3 * kPointerSize)]: literal index. - // [esp + (4 * kPointerSize)]: literals array. - - // Load boilerplate object into ecx and check if we need to create a - // boilerplate. - Label slow_case; - __ mov(ecx, Operand(esp, 4 * kPointerSize)); - __ mov(eax, Operand(esp, 3 * kPointerSize)); - STATIC_ASSERT(kPointerSize == 4); - STATIC_ASSERT(kSmiTagSize == 1); - STATIC_ASSERT(kSmiTag == 0); - __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size, - FixedArray::kHeaderSize)); - Factory* factory = masm->isolate()->factory(); - __ cmp(ecx, factory->undefined_value()); - __ j(equal, &slow_case); - - // Check that the boilerplate contains only fast properties and we can - // statically determine the instance size. - int size = JSObject::kHeaderSize + length_ * kPointerSize; - __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset)); - __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset)); - __ cmp(eax, Immediate(size >> kPointerSizeLog2)); - __ j(not_equal, &slow_case); - - // Allocate the JS object and copy header together with all in-object - // properties from the boilerplate. - __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT); - for (int i = 0; i < size; i += kPointerSize) { - __ mov(ebx, FieldOperand(ecx, i)); - __ mov(FieldOperand(eax, i), ebx); - } - - // Return and remove the on-stack parameters. - __ ret(4 * kPointerSize); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1); -} - - // The stub expects its argument on the stack and returns its result in tos_: // zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { @@ -597,7 +652,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // restore them. 
__ pushad(); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); @@ -614,7 +669,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); __ movdbl(reg, Operand(esp, i * kDoubleSize)); @@ -696,25 +751,14 @@ class FloatingPointHelper : public AllStatic { Label* non_float, Register scratch); - // Checks that the two floating point numbers on top of the FPU stack - // have int32 values. - static void CheckFloatOperandsAreInt32(MacroAssembler* masm, - Label* non_int32); - // Takes the operands in edx and eax and loads them as integers in eax // and ecx. static void LoadUnknownsAsIntegers(MacroAssembler* masm, bool use_sse3, + BinaryOpIC::TypeInfo left_type, + BinaryOpIC::TypeInfo right_type, Label* operand_conversion_failure); - // Must only be called after LoadUnknownsAsIntegers. Assumes that the - // operands are pushed on the stack, and that their conversions to int32 - // are in eax and ecx. Checks that the original numbers were in the int32 - // range. - static void CheckLoadedIntegersWereInt32(MacroAssembler* masm, - bool use_sse3, - Label* not_int32); - // Assumes that operands are smis or heap numbers and loads them // into xmm0 and xmm1. Operands are in edx and eax. // Leaves operands unchanged. @@ -735,6 +779,15 @@ class FloatingPointHelper : public AllStatic { static void CheckSSE2OperandsAreInt32(MacroAssembler* masm, Label* non_int32, Register scratch); + + // Checks that |operand| has an int32 value. If |int32_result| is different + // from |scratch|, it will contain that int32 value. + static void CheckSSE2OperandIsInt32(MacroAssembler* masm, + Label* non_int32, + XMMRegister operand, + Register int32_result, + Register scratch, + XMMRegister xmm_scratch); }; @@ -755,11 +808,20 @@ static void IntegerConvert(MacroAssembler* masm, // Get exponent alone in scratch2. __ mov(scratch2, scratch); __ and_(scratch2, HeapNumber::kExponentMask); + __ shr(scratch2, HeapNumber::kExponentShift); + __ sub(scratch2, Immediate(HeapNumber::kExponentBias)); + // Load ecx with zero. We use this either for the final shift or + // for the answer. + __ xor_(ecx, ecx); + // If the exponent is above 83, the number contains no significant + // bits in the range 0..2^31, so the result is zero. + static const uint32_t kResultIsZeroExponent = 83; + __ cmp(scratch2, Immediate(kResultIsZeroExponent)); + __ j(above, &done); if (use_sse3) { - CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm, SSE3); // Check whether the exponent is too big for a 64 bit signed integer. - static const uint32_t kTooBigExponent = - (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift; + static const uint32_t kTooBigExponent = 63; __ cmp(scratch2, Immediate(kTooBigExponent)); __ j(greater_equal, conversion_failure); // Load x87 register with heap number. @@ -771,15 +833,11 @@ static void IntegerConvert(MacroAssembler* masm, __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx. __ add(esp, Immediate(sizeof(uint64_t))); // Nolint. } else { - // Load ecx with zero. 
We use this either for the final shift or - // for the answer. - __ xor_(ecx, ecx); // Check whether the exponent matches a 32 bit signed int that cannot be // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the // exponent is 30 (biased). This is the exponent that we are fastest at and // also the highest exponent we can handle here. - const uint32_t non_smi_exponent = - (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; + const uint32_t non_smi_exponent = 30; __ cmp(scratch2, Immediate(non_smi_exponent)); // If we have a match of the int32-but-not-Smi exponent then skip some // logic. @@ -791,8 +849,7 @@ static void IntegerConvert(MacroAssembler* masm, { // Handle a big exponent. The only reason we have this code is that the // >>> operator has a tendency to generate numbers with an exponent of 31. - const uint32_t big_non_smi_exponent = - (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift; + const uint32_t big_non_smi_exponent = 31; __ cmp(scratch2, Immediate(big_non_smi_exponent)); __ j(not_equal, conversion_failure); // We have the big exponent, typically from >>>. This means the number is @@ -821,19 +878,8 @@ static void IntegerConvert(MacroAssembler* masm, } __ bind(&normal_exponent); - // Exponent word in scratch, exponent part of exponent word in scratch2. - // Zero in ecx. - // We know the exponent is smaller than 30 (biased). If it is less than - // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. - // it rounds to zero. - const uint32_t zero_exponent = - (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift; - __ sub(scratch2, Immediate(zero_exponent)); - // ecx already has a Smi zero. - __ j(less, &done, Label::kNear); - - // We have a shifted exponent between 0 and 30 in scratch2. - __ shr(scratch2, HeapNumber::kExponentShift); + // Exponent word in scratch, exponent in scratch2. Zero in ecx. + // We know that 0 <= exponent < 30. __ mov(ecx, Immediate(30)); __ sub(ecx, scratch2); @@ -868,8 +914,20 @@ static void IntegerConvert(MacroAssembler* masm, __ jmp(&done, Label::kNear); __ bind(&negative); __ sub(ecx, scratch2); - __ bind(&done); } + __ bind(&done); +} + + +// Uses SSE2 to convert the heap number in |source| to an integer. Jumps to +// |conversion_failure| if the heap number did not contain an int32 value. +// Result is in ecx. Trashes ebx, xmm0, and xmm1. +static void ConvertHeapNumberToInt32(MacroAssembler* masm, + Register source, + Label* conversion_failure) { + __ movdbl(xmm0, FieldOperand(source, HeapNumber::kValueOffset)); + FloatingPointHelper::CheckSSE2OperandIsInt32( + masm, conversion_failure, xmm0, ecx, ebx, xmm1); } @@ -896,8 +954,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) { case UnaryOpIC::SMI: GenerateSmiStub(masm); break; - case UnaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); + case UnaryOpIC::NUMBER: + GenerateNumberStub(masm); break; case UnaryOpIC::GENERIC: GenerateGenericStub(masm); @@ -1001,13 +1059,13 @@ void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) { // TODO(svenpanne): Use virtual functions instead of switch. 
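The rewritten IntegerConvert above now works on the unbiased exponent directly: above 83 the result is zero, and the x87 and shift-based paths below handle the remaining ranges. A scalar sketch of the truncation being implemented (ECMAScript ToInt32-style: keep the low 32 bits of the integer part); the helper name is made up and this is illustration, not the V8 implementation:

#include <cmath>
#include <cstdint>

static int32_t TruncateDoubleToInt32(double d) {
  if (!std::isfinite(d)) return 0;                       // NaN and +/-Infinity map to 0
  double magnitude = std::trunc(std::fabs(d));           // drop the fraction
  uint32_t low =
      static_cast<uint32_t>(std::fmod(magnitude, 4294967296.0));  // modulo 2^32
  uint32_t bits = d < 0 ? 0u - low : low;                // reapply the sign, still mod 2^32
  return static_cast<int32_t>(bits);                     // two's-complement reinterpretation
}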
-void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) { switch (op_) { case Token::SUB: - GenerateHeapNumberStubSub(masm); + GenerateNumberStubSub(masm); break; case Token::BIT_NOT: - GenerateHeapNumberStubBitNot(masm); + GenerateNumberStubBitNot(masm); break; default: UNREACHABLE(); @@ -1015,7 +1073,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) { Label non_smi, undo, slow, call_builtin; GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear); __ bind(&non_smi); @@ -1029,7 +1087,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubBitNot( +void UnaryOpStub::GenerateNumberStubBitNot( MacroAssembler* masm) { Label non_smi, slow; GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear); @@ -1124,7 +1182,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, __ bind(&heapnumber_allocated); } if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ecx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -1192,16 +1250,17 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { } +void BinaryOpStub::Initialize() { + platform_specific_bit_ = CpuFeatures::IsSupported(SSE3); +} + + void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { __ pop(ecx); // Save return address. __ push(edx); __ push(eax); // Left and right arguments are now on top. - // Push this stub's key. Although the operation and the type info are - // encoded into the key, the encoding is opaque, so push them too. __ push(Immediate(Smi::FromInt(MinorKey()))); - __ push(Immediate(Smi::FromInt(op_))); - __ push(Immediate(Smi::FromInt(operands_type_))); __ push(ecx); // Push return address. @@ -1210,7 +1269,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 5, + 3, 1); } @@ -1220,11 +1279,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) { __ pop(ecx); // Save return address. // Left and right arguments are already on top of the stack. - // Push this stub's key. Although the operation and the type info are - // encoded into the key, the encoding is opaque, so push them too. __ push(Immediate(Smi::FromInt(MinorKey()))); - __ push(Immediate(Smi::FromInt(op_))); - __ push(Immediate(Smi::FromInt(operands_type_))); __ push(ecx); // Push return address. @@ -1233,73 +1288,22 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) { __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 5, + 3, 1); } -void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. 
- AllowStubCallsScope allow_stub_calls(masm, true); - - switch (operands_type_) { - case BinaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case BinaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case BinaryOpIC::INT32: - GenerateInt32Stub(masm); - break; - case BinaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); - break; - case BinaryOpIC::ODDBALL: - GenerateOddballStub(masm); - break; - case BinaryOpIC::BOTH_STRING: - GenerateBothStringStub(masm); - break; - case BinaryOpIC::STRING: - GenerateStringStub(masm); - break; - case BinaryOpIC::GENERIC: - GenerateGeneric(masm); - break; - default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::PrintName(StringStream* stream) { - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - stream->Add("BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); -} - - -void BinaryOpStub::GenerateSmiCode( +static void BinaryOpStub_GenerateSmiCode( MacroAssembler* masm, Label* slow, - SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { + BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, + Token::Value op) { // 1. Move arguments into edx, eax except for DIV and MOD, which need the // dividend in eax and edx free for the division. Use eax, ebx for those. Comment load_comment(masm, "-- Load arguments"); Register left = edx; Register right = eax; - if (op_ == Token::DIV || op_ == Token::MOD) { + if (op == Token::DIV || op == Token::MOD) { left = eax; right = ebx; __ mov(ebx, eax); @@ -1312,7 +1316,7 @@ void BinaryOpStub::GenerateSmiCode( Label not_smis; Register combined = ecx; ASSERT(!left.is(combined) && !right.is(combined)); - switch (op_) { + switch (op) { case Token::BIT_OR: // Perform the operation into eax and smi check the result. Preserve // eax in case the result is not a smi. @@ -1356,7 +1360,7 @@ void BinaryOpStub::GenerateSmiCode( // eax and check the result if necessary. Comment perform_smi(masm, "-- Perform smi operation"); Label use_fp_on_smis; - switch (op_) { + switch (op) { case Token::BIT_OR: // Nothing to do. break; @@ -1490,7 +1494,7 @@ void BinaryOpStub::GenerateSmiCode( } // 5. Emit return of result in eax. Some operations have registers pushed. - switch (op_) { + switch (op) { case Token::ADD: case Token::SUB: case Token::MUL: @@ -1513,9 +1517,9 @@ void BinaryOpStub::GenerateSmiCode( // 6. For some operations emit inline code to perform floating point // operations on known smis (e.g., if the result of the operation // overflowed the smi range). - if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) { + if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) { __ bind(&use_fp_on_smis); - switch (op_) { + switch (op) { // Undo the effects of some operations, and some register moves. case Token::SHL: // The arguments are saved on the stack, and only used from there. 
@@ -1543,8 +1547,8 @@ void BinaryOpStub::GenerateSmiCode( } __ jmp(&not_smis); } else { - ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS); - switch (op_) { + ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS); + switch (op) { case Token::SHL: case Token::SHR: { Comment perform_float(masm, "-- Perform float operation on smis"); @@ -1555,15 +1559,15 @@ void BinaryOpStub::GenerateSmiCode( // Store the result in the HeapNumber and return. // It's OK to overwrite the arguments on the stack because we // are about to return. - if (op_ == Token::SHR) { + if (op == Token::SHR) { __ mov(Operand(esp, 1 * kPointerSize), left); __ mov(Operand(esp, 2 * kPointerSize), Immediate(0)); __ fild_d(Operand(esp, 1 * kPointerSize)); __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); } else { - ASSERT_EQ(Token::SHL, op_); + ASSERT_EQ(Token::SHL, op); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, left); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -1583,7 +1587,7 @@ void BinaryOpStub::GenerateSmiCode( Comment perform_float(masm, "-- Perform float operation on smis"); __ bind(&use_fp_on_smis); // Restore arguments to edx, eax. - switch (op_) { + switch (op) { case Token::ADD: // Revert right = right + left. __ sub(right, left); @@ -1607,9 +1611,9 @@ void BinaryOpStub::GenerateSmiCode( } __ AllocateHeapNumber(ecx, ebx, no_reg, slow); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); FloatingPointHelper::LoadSSE2Smis(masm, ebx); - switch (op_) { + switch (op) { case Token::ADD: __ addsd(xmm0, xmm1); break; case Token::SUB: __ subsd(xmm0, xmm1); break; case Token::MUL: __ mulsd(xmm0, xmm1); break; @@ -1619,7 +1623,7 @@ void BinaryOpStub::GenerateSmiCode( __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); } else { // SSE2 not available, use FPU. FloatingPointHelper::LoadFloatSmis(masm, ebx); - switch (op_) { + switch (op) { case Token::ADD: __ faddp(1); break; case Token::SUB: __ fsubp(1); break; case Token::MUL: __ fmulp(1); break; @@ -1642,7 +1646,7 @@ void BinaryOpStub::GenerateSmiCode( // edx and eax. Comment done_comment(masm, "-- Enter non-smi code"); __ bind(&not_smis); - switch (op_) { + switch (op) { case Token::BIT_OR: case Token::SHL: case Token::SAR: @@ -1689,9 +1693,11 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { - GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_); } else { - GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); } __ bind(&call_runtime); switch (op_) { @@ -1716,19 +1722,9 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { } -void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - // Try to add arguments as strings, otherwise, transition to the generic - // BinaryOpIC type.
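A short aside on the smi fast path used by BinaryOpStub_GenerateSmiCode above: on ia32 a smi is the 31-bit value shifted left by one (tag bit 0), so adding two tagged smis directly yields the tagged sum, and signed overflow is exactly the case that falls through to use_fp_on_smis. Minimal sketch, not V8 code (__builtin_add_overflow is a GCC/Clang builtin):

#include <cstdint>

// Returns false when the stub would take the use_fp_on_smis / heap-number path.
static bool TrySmiAdd(int32_t left_tagged, int32_t right_tagged, int32_t* result_tagged) {
  return !__builtin_add_overflow(left_tagged, right_tagged, result_tagged);
}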
- GenerateAddStrings(masm); - GenerateTypeTransition(masm); -} - - void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { Label call_runtime; - ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); + ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); ASSERT(op_ == Token::ADD); // If both arguments are strings, call the string add stub. // Otherwise, do a transition. @@ -1756,6 +1752,11 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { } +static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Label* alloc_failure, + OverwriteMode mode); + + // Input: // edx: left operand (tagged) // eax: right operand (tagged) @@ -1763,7 +1764,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { // eax: result (tagged) void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label call_runtime; - ASSERT(operands_type_ == BinaryOpIC::INT32); + ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); // Floating point case. switch (op_) { @@ -1775,7 +1776,19 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label not_floats; Label not_int32; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); + // It could be that only SMIs have been seen at either the left + // or the right operand. For precise type feedback, patch the IC + // again if this changes. + // In theory, we would need the same check in the non-SSE2 case, + // but since we don't support Crankshaft on such hardware we can + // afford not to care about precise type feedback. + if (left_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(edx, &not_int32); + } + if (right_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(eax, &not_int32); + } FloatingPointHelper::LoadSSE2Operands(masm, &not_floats); FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx); if (op_ == Token::MOD) { @@ -1791,14 +1804,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } // Check result type if it is currently Int32. if (result_type_ <= BinaryOpIC::INT32) { - __ cvttsd2si(ecx, Operand(xmm0)); - __ cvtsi2sd(xmm2, ecx); - __ pcmpeqd(xmm2, xmm0); - __ movmskpd(ecx, xmm2); - __ test(ecx, Immediate(1)); - __ j(zero, &not_int32); + FloatingPointHelper::CheckSSE2OperandIsInt32( + masm, &not_int32, xmm0, ecx, ecx, xmm2); } - GenerateHeapResultAllocation(masm, &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); __ ret(0); } @@ -1808,7 +1817,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { masm, ecx, FloatingPointHelper::ARGS_IN_REGISTERS); - FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32); if (op_ == Token::MOD) { // The operands are now on the FPU stack, but we don't need them.
__ fstp(0); @@ -1824,7 +1832,8 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { default: UNREACHABLE(); } Label after_alloc_failure; - GenerateHeapResultAllocation(masm, &after_alloc_failure); + BinaryOpStub_GenerateHeapResultAllocation( + masm, &after_alloc_failure, mode_); __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); __ ret(0); __ bind(&after_alloc_failure); @@ -1849,11 +1858,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label not_floats; Label not_int32; Label non_smi_result; - FloatingPointHelper::LoadUnknownsAsIntegers(masm, - use_sse3_, - &not_floats); - FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_, - &not_int32); + bool use_sse3 = platform_specific_bit_; + FloatingPointHelper::LoadUnknownsAsIntegers( + masm, use_sse3, left_type_, right_type_, &not_floats); switch (op_) { case Token::BIT_OR: __ or_(eax, ecx); break; case Token::BIT_AND: __ and_(eax, ecx); break; @@ -1900,7 +1907,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } // Store the result in the HeapNumber and return. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -1925,44 +1932,24 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { switch (op_) { case Token::ADD: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; case Token::SUB: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; case Token::MUL: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; case Token::DIV: GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); break; case Token::MOD: - break; + return; // Handled above. case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); break; default: UNREACHABLE(); } + GenerateCallRuntime(masm); } @@ -1995,11 +1982,11 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { } __ bind(&done); - GenerateHeapNumberStub(masm); + GenerateNumberStub(masm); } -void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { +void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { Label call_runtime; // Floating point case. @@ -2010,8 +1997,29 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { case Token::DIV: { Label not_floats; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); + + // It could be that only SMIs have been seen at either the left + // or the right operand. For precise type feedback, patch the IC + // again if this changes. + // In theory, we would need the same check in the non-SSE2 case, + // but since we don't support Crankshaft on such hardware we can + // afford not to care about precise type feedback.
+ if (left_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(edx, &not_floats); + } + if (right_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(eax, &not_floats); + } FloatingPointHelper::LoadSSE2Operands(masm, &not_floats); + if (left_type_ == BinaryOpIC::INT32) { + FloatingPointHelper::CheckSSE2OperandIsInt32( + masm, &not_floats, xmm0, ecx, ecx, xmm2); + } + if (right_type_ == BinaryOpIC::INT32) { + FloatingPointHelper::CheckSSE2OperandIsInt32( + masm, &not_floats, xmm1, ecx, ecx, xmm2); + } switch (op_) { case Token::ADD: __ addsd(xmm0, xmm1); break; @@ -2020,7 +2028,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { case Token::DIV: __ divsd(xmm0, xmm1); break; default: UNREACHABLE(); } - GenerateHeapResultAllocation(masm, &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); __ ret(0); } else { // SSE2 not available, use FPU. @@ -2037,7 +2045,8 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { default: UNREACHABLE(); } Label after_alloc_failure; - GenerateHeapResultAllocation(masm, &after_alloc_failure); + BinaryOpStub_GenerateHeapResultAllocation( + masm, &after_alloc_failure, mode_); __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); __ ret(0); __ bind(&after_alloc_failure); @@ -2063,9 +2072,12 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { GenerateRegisterArgsPush(masm); Label not_floats; Label non_smi_result; - FloatingPointHelper::LoadUnknownsAsIntegers(masm, - use_sse3_, - &not_floats); + // We do not check the input arguments here, as any value is + // unconditionally truncated to an int32 anyway. To get the + // right optimized code, int32 type feedback is just right. + bool use_sse3 = platform_specific_bit_; + FloatingPointHelper::LoadUnknownsAsIntegers( + masm, use_sse3, left_type_, right_type_, &not_floats); switch (op_) { case Token::BIT_OR: __ or_(eax, ecx); break; case Token::BIT_AND: __ and_(eax, ecx); break; @@ -2112,7 +2124,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { } // Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -2136,46 +2148,23 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { switch (op_) { case Token::ADD: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; case Token::SUB: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; case Token::MUL: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; case Token::DIV: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; case Token::MOD: GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); break; case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); break; default: UNREACHABLE(); } + GenerateCallRuntime(masm); } @@ -2204,7 +2193,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { UNREACHABLE(); } - GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); // Floating point case. switch (op_) { @@ -2214,7 +2204,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { case Token::DIV: { Label not_floats; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); FloatingPointHelper::LoadSSE2Operands(masm, &not_floats); switch (op_) { @@ -2224,7 +2214,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { case Token::DIV: __ divsd(xmm0, xmm1); break; default: UNREACHABLE(); } - GenerateHeapResultAllocation(masm, &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); __ ret(0); } else { // SSE2 not available, use FPU. @@ -2241,7 +2231,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { default: UNREACHABLE(); } Label after_alloc_failure; - GenerateHeapResultAllocation(masm, &after_alloc_failure); + BinaryOpStub_GenerateHeapResultAllocation( + masm, &after_alloc_failure, mode_); __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); __ ret(0); __ bind(&after_alloc_failure); @@ -2262,8 +2253,11 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { case Token::SHL: case Token::SHR: { Label non_smi_result; + bool use_sse3 = platform_specific_bit_; FloatingPointHelper::LoadUnknownsAsIntegers(masm, - use_sse3_, + use_sse3, + BinaryOpIC::GENERIC, + BinaryOpIC::GENERIC, &call_runtime); switch (op_) { case Token::BIT_OR: __ or_(eax, ecx); break; @@ -2311,7 +2305,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } // Store the result in the HeapNumber and return.
if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); } else { @@ -2330,48 +2324,26 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { // result. __ bind(&call_runtime); switch (op_) { - case Token::ADD: { + case Token::ADD: GenerateAddStrings(masm); - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - } + // Fall through. case Token::SUB: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; case Token::MUL: - GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; case Token::DIV: GenerateRegisterArgsPush(masm); - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); break; case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); break; default: UNREACHABLE(); } + GenerateCallRuntime(masm); } @@ -2407,11 +2379,10 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { } -void BinaryOpStub::GenerateHeapResultAllocation( - MacroAssembler* masm, - Label* alloc_failure) { +static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Label* alloc_failure, + OverwriteMode mode) { Label skip_allocation; - OverwriteMode mode = mode_; switch (mode) { case OVERWRITE_LEFT: { // If the argument in edx is already an object, we skip the @@ -2504,8 +2475,9 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ bind(&loaded); } else { // UNTAGGED. + CpuFeatureScope scope(masm, SSE2); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope sse4_scope(SSE4_1); + CpuFeatureScope sse4_scope(masm, SSE4_1); __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx. } else { __ pshufd(xmm0, xmm1, 0x1); @@ -2576,6 +2548,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { __ fstp(0); __ ret(kPointerSize); } else { // UNTAGGED. + CpuFeatureScope scope(masm, SSE2); __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); } @@ -2588,6 +2561,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { if (tagged) { __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack); } else { // UNTAGGED. + CpuFeatureScope scope(masm, SSE2); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); __ sub(esp, Immediate(kDoubleSize)); __ movdbl(Operand(esp, 0), xmm1); @@ -2602,6 +2576,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { if (tagged) { __ ret(kPointerSize); } else { // UNTAGGED. + CpuFeatureScope scope(masm, SSE2); __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); __ Ret(); @@ -2634,6 +2609,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { ExternalReference(RuntimeFunction(), masm->isolate()); __ TailCallExternalReference(runtime, 1, 1); } else { // UNTAGGED. 
+ CpuFeatureScope scope(masm, SSE2); __ bind(&runtime_call_clear_stack); __ bind(&runtime_call); __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache); @@ -2762,16 +2738,24 @@ void TranscendentalCacheStub::GenerateOperation( // Input: edx, eax are the left and right objects of a bit op. // Output: eax, ecx are left and right integers for a bit op. -void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, - bool use_sse3, - Label* conversion_failure) { +// Warning: can clobber inputs even when it jumps to |conversion_failure|! +void FloatingPointHelper::LoadUnknownsAsIntegers( + MacroAssembler* masm, + bool use_sse3, + BinaryOpIC::TypeInfo left_type, + BinaryOpIC::TypeInfo right_type, + Label* conversion_failure) { // Check float operands. Label arg1_is_object, check_undefined_arg1; Label arg2_is_object, check_undefined_arg2; Label load_arg2, done; // Test if arg1 is a Smi. - __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear); + if (left_type == BinaryOpIC::SMI) { + __ JumpIfNotSmi(edx, conversion_failure); + } else { + __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear); + } __ SmiUntag(edx); __ jmp(&load_arg2); @@ -2790,14 +2774,23 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, __ j(not_equal, &check_undefined_arg1); // Get the untagged integer version of the edx heap number in ecx. - IntegerConvert(masm, edx, use_sse3, conversion_failure); + if (left_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope use_sse2(masm, SSE2); + ConvertHeapNumberToInt32(masm, edx, conversion_failure); + } else { + IntegerConvert(masm, edx, use_sse3, conversion_failure); + } __ mov(edx, ecx); // Here edx has the untagged integer, eax has a Smi or a heap number. __ bind(&load_arg2); // Test if arg2 is a Smi. - __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear); + if (right_type == BinaryOpIC::SMI) { + __ JumpIfNotSmi(eax, conversion_failure); + } else { + __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear); + } __ SmiUntag(eax); __ mov(ecx, eax); @@ -2814,18 +2807,17 @@ void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm, __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); __ cmp(ebx, factory->heap_number_map()); __ j(not_equal, &check_undefined_arg2); - // Get the untagged integer version of the eax heap number in ecx. 
- IntegerConvert(masm, eax, use_sse3, conversion_failure); - __ bind(&done); - __ mov(eax, edx); -} + if (right_type == BinaryOpIC::INT32 && CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope use_sse2(masm, SSE2); + ConvertHeapNumberToInt32(masm, eax, conversion_failure); + } else { + IntegerConvert(masm, eax, use_sse3, conversion_failure); + } -void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm, - bool use_sse3, - Label* not_int32) { - return; + __ bind(&done); + __ mov(eax, edx); } @@ -2923,16 +2915,25 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm, void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm, Label* non_int32, Register scratch) { - __ cvttsd2si(scratch, Operand(xmm0)); - __ cvtsi2sd(xmm2, scratch); - __ ucomisd(xmm0, xmm2); - __ j(not_zero, non_int32); - __ j(carry, non_int32); - __ cvttsd2si(scratch, Operand(xmm1)); - __ cvtsi2sd(xmm2, scratch); - __ ucomisd(xmm1, xmm2); + CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, scratch, xmm2); + CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, scratch, xmm2); +} + + +void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm, + Label* non_int32, + XMMRegister operand, + Register int32_result, + Register scratch, + XMMRegister xmm_scratch) { + __ cvttsd2si(int32_result, Operand(operand)); + __ cvtsi2sd(xmm_scratch, int32_result); + __ pcmpeqd(xmm_scratch, operand); + __ movmskps(scratch, xmm_scratch); + // Two least significant bits should be both set. + __ not_(scratch); + __ test(scratch, Immediate(3)); __ j(not_zero, non_int32); - __ j(carry, non_int32); } @@ -3016,14 +3017,8 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, } -void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm, - Label* non_int32) { - return; -} - - void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); Factory* factory = masm->isolate()->factory(); const Register exponent = eax; const Register base = edx; @@ -3187,10 +3182,10 @@ void MathPowStub::Generate(MacroAssembler* masm) { // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1 __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X) __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X) - __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X) + __ faddp(1); // 2^(X-rnd(X)), rnd(X) // FSCALE calculates st(0) * 2^st(1) __ fscale(); // 2^X, rnd(X) - __ fstp(1); + __ fstp(1); // 2^X // Bail out to runtime in case of exceptions in the status word. __ fnstsw_ax(); __ test_b(eax, 0x5F); // We check for all but precision exception. 
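The new CheckSSE2OperandIsInt32 above is a round-trip test built from cvttsd2si, cvtsi2sd, pcmpeqd and movmskps. A scalar equivalent, offered only as a hedged sketch (the range guard stands in for cvttsd2si's 0x80000000 overflow result):

#include <cstdint>
#include <cstring>

static bool IsInt32Double(double d) {
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // out of range, or NaN
  int32_t i = static_cast<int32_t>(d);             // cvttsd2si: truncate to int32
  double back = static_cast<double>(i);            // cvtsi2sd: convert back
  return std::memcmp(&back, &d, sizeof d) == 0;    // pcmpeqd-style bit-exact compare
}

// Note that the bitwise compare also rejects -0.0, matching the stub's behaviour.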
@@ -3290,6 +3285,134 @@ void MathPowStub::Generate(MacroAssembler* masm) { } +void ArrayLengthStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- ecx : name + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + Label miss; + + if (kind() == Code::KEYED_LOAD_IC) { + __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string())); + __ j(not_equal, &miss); + } + + StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void FunctionPrototypeStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- ecx : name + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + Label miss; + + if (kind() == Code::KEYED_LOAD_IC) { + __ cmp(ecx, Immediate(masm->isolate()->factory()->prototype_string())); + __ j(not_equal, &miss); + } + + StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StringLengthStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- ecx : name + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + Label miss; + + if (kind() == Code::KEYED_LOAD_IC) { + __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string())); + __ j(not_equal, &miss); + } + + StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss, + support_wrapper_); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StoreArrayLengthStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- eax : value + // -- ecx : name + // -- edx : receiver + // -- esp[0] : return address + // ----------------------------------- + // + // This accepts as a receiver anything JSArray::SetElementsLength accepts + // (currently anything except for external arrays which means anything with + // elements of FixedArray type). Value must be a number, but only smis are + // accepted as the most common case. + + Label miss; + + Register receiver = edx; + Register value = eax; + Register scratch = ebx; + + if (kind() == Code::KEYED_STORE_IC) { + __ cmp(ecx, Immediate(masm->isolate()->factory()->length_string())); + __ j(not_equal, &miss); + } + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Check that the object is a JS array. + __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); + __ j(not_equal, &miss); + + // Check that elements are FixedArray. + // We rely on StoreIC_ArrayLength below to deal with all types of + // fast elements (including COW). + __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset)); + __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch); + __ j(not_equal, &miss); + + // Check that the array has fast properties, otherwise the length + // property might have been redefined. + __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset)); + __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset), + Heap::kHashTableMapRootIndex); + __ j(equal, &miss); + + // Check that value is a smi. + __ JumpIfNotSmi(value, &miss); + + // Prepare tail call to StoreIC_ArrayLength. 
+ __ pop(scratch); + __ push(receiver); + __ push(value); + __ push(scratch); // return address + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); + __ TailCallExternalReference(ref, 2, 1); + + __ bind(&miss); + + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void LoadFieldStub::Generate(MacroAssembler* masm) { + StubCompiler::DoGenerateFastPropertyLoad(masm, eax, reg_, inobject_, index_); + __ ret(0); +} + + void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The key is in edx and the parameter count is in eax. @@ -3714,7 +3837,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { static const int kSubjectOffset = 3 * kPointerSize; static const int kJSRegExpOffset = 4 * kPointerSize; - Label runtime, invoke_regexp; + Label runtime; + Factory* factory = masm->isolate()->factory(); // Ensure that a RegExp stack is allocated. ExternalReference address_of_regexp_stack_memory_address = @@ -3732,6 +3856,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ JumpIfSmi(eax, &runtime); __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx); __ j(not_equal, &runtime); + // Check that the RegExp has been compiled (data contains a fixed array). __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset)); if (FLAG_debug_code) { @@ -3750,156 +3875,124 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // ecx: RegExp data (FixedArray) // Check that the number of captures fit in the static offsets vector buffer. __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. This - // uses the asumption that smis are 2 * their untagged value. + // Check (number_of_captures + 1) * 2 <= offsets vector size + // Or number_of_captures * 2 <= offsets vector size - 2 + // Multiplying by 2 comes for free since edx is smi-tagged. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ add(edx, Immediate(2)); // edx was a smi. - // Check that the static offsets vector buffer is large enough. - __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize); + STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); + __ cmp(edx, Isolate::kJSRegexpStaticOffsetsVectorSize - 2); __ j(above, &runtime); - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the second argument is a string. - __ mov(eax, Operand(esp, kSubjectOffset)); - __ JumpIfSmi(eax, &runtime); - Condition is_string = masm->IsObjectStringType(eax, ebx, ebx); - __ j(NegateCondition(is_string), &runtime); - // Get the length of the string to ebx. - __ mov(ebx, FieldOperand(eax, String::kLengthOffset)); - - // ebx: Length of subject string as a smi - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the third argument is a positive smi less than the subject - // string length. A negative value will be greater (unsigned comparison). - __ mov(eax, Operand(esp, kPreviousIndexOffset)); - __ JumpIfNotSmi(eax, &runtime); - __ cmp(eax, ebx); - __ j(above_equal, &runtime); - - // ecx: RegExp data (FixedArray) - // edx: Number of capture registers - // Check that the fourth object is a JSArray object. - __ mov(eax, Operand(esp, kLastMatchInfoOffset)); - __ JumpIfSmi(eax, &runtime); - __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); - __ j(not_equal, &runtime); - // Check that the JSArray is in fast case. 
- __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); - __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); - Factory* factory = masm->isolate()->factory(); - __ cmp(eax, factory->fixed_array_map()); - __ j(not_equal, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. - __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); - __ SmiUntag(eax); - __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead)); - __ cmp(edx, eax); - __ j(greater, &runtime); - // Reset offset for possibly sliced string. __ Set(edi, Immediate(0)); - // ecx: RegExp data (FixedArray) - Check the representation and encoding of the subject string. - Label seq_ascii_string, seq_two_byte_string, check_code; __ mov(eax, Operand(esp, kSubjectOffset)); + __ JumpIfSmi(eax, &runtime); + __ mov(edx, eax); // Make a copy of the original subject string. __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); - // First check for flat two byte string. + + // eax: subject string + // edx: subject string + // ebx: subject string instance type + // ecx: RegExp data (FixedArray) + // Handle subject string according to its encoding and representation: + // (1) Sequential two byte? If yes, go to (9). + // (2) Sequential one byte? If yes, go to (6). + // (3) Anything but sequential or cons? If yes, go to (7). + // (4) Cons string. If the string is flat, replace subject with first string. + // Otherwise bailout. + // (5a) Is subject sequential two byte? If yes, go to (9). + // (5b) Is subject external? If yes, go to (8). + // (6) One byte sequential. Load regexp code for one byte. + // (E) Carry on. + /// [...] + + // Deferred code at the end of the stub: + // (7) Not a long external string? If yes, go to (10). + // (8) External string. Make it, offset-wise, look like a sequential string. + // (8a) Is the external string one byte? If yes, go to (6). + // (9) Two byte sequential. Load regexp code for one byte. Go to (E). + // (10) Short external string or not a string? If yes, bail out to runtime. + // (11) Sliced string. Replace subject with parent. Go to (5a). + + Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */, + external_string /* 8 */, check_underlying /* 5a */, + not_seq_nor_cons /* 7 */, check_code /* E */, + not_long_external /* 10 */; + + // (1) Sequential two byte? If yes, go to (9). __ and_(ebx, kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask | kShortExternalStringMask); STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string, Label::kNear); - // Any other flat string must be a flat ASCII string. None of the following - // string type tests will succeed if subject is not a string or a short - // external string. + __ j(zero, &seq_two_byte_string); // Go to (9). + + // (2) Sequential one byte? If yes, go to (6). + // Any other sequential string must be one byte. __ and_(ebx, Immediate(kIsNotStringMask | kStringRepresentationMask | kShortExternalStringMask)); - __ j(zero, &seq_ascii_string, Label::kNear); - - // ebx: whether subject is a string and if yes, its string representation - // Check for flat cons string or sliced string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string.
Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - // In the case of a sliced string its offset has to be taken into account. - Label cons_string, external_string, check_encoding; + __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6). + + // (3) Anything but sequential or cons? If yes, go to (7). + // We check whether the subject string is a cons, since sequential strings + // have already been covered. STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); __ cmp(ebx, Immediate(kExternalStringTag)); - __ j(less, &cons_string); - __ j(equal, &external_string); + __ j(greater_equal, ¬_seq_nor_cons); // Go to (7). - // Catch non-string subject or short external string. - STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); - __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag)); - __ j(not_zero, &runtime); - - // String is sliced. - __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset)); - __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset)); - // edi: offset of sliced string, smi-tagged. - // eax: parent string. - __ jmp(&check_encoding, Label::kNear); - // String is a cons string, check whether it is flat. - __ bind(&cons_string); + // (4) Cons string. Check that it's flat. + // Replace subject with first string and reload instance type. __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string()); __ j(not_equal, &runtime); __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset)); - __ bind(&check_encoding); + __ bind(&check_underlying); __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); - // eax: first part of cons string or parent of sliced string. - // ebx: map of first part of cons string or map of parent of sliced string. - // Is first part of cons or parent of slice a flat two byte string? - __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), - kStringRepresentationMask | kStringEncodingMask); + __ mov(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); + + // (5a) Is subject sequential two byte? If yes, go to (9). + __ test_b(ebx, kStringRepresentationMask | kStringEncodingMask); STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string, Label::kNear); - // Any other flat string must be sequential ASCII or external. - __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset), - kStringRepresentationMask); - __ j(not_zero, &external_string); - - __ bind(&seq_ascii_string); - // eax: subject string (flat ASCII) + __ j(zero, &seq_two_byte_string); // Go to (9). + // (5b) Is subject external? If yes, go to (8). + __ test_b(ebx, kStringRepresentationMask); + // The underlying external string is never a short external string. + STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + __ j(not_zero, &external_string); // Go to (8). + + // eax: sequential subject string (or look-alike, external string) + // edx: original subject string // ecx: RegExp data (FixedArray) + // (6) One byte sequential. Load regexp code for one byte. + __ bind(&seq_one_byte_string); + // Load previous index and check range before edx is overwritten. 
We have + // to use edx instead of eax here because it might have been only made to + // look like a sequential string when it actually is an external string. + __ mov(ebx, Operand(esp, kPreviousIndexOffset)); + __ JumpIfNotSmi(ebx, &runtime); + __ cmp(ebx, FieldOperand(edx, String::kLengthOffset)); + __ j(above_equal, &runtime); __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset)); - __ Set(ecx, Immediate(1)); // Type is ASCII. - __ jmp(&check_code, Label::kNear); - - __ bind(&seq_two_byte_string); - // eax: subject string (flat two byte) - // ecx: RegExp data (FixedArray) - __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); - __ Set(ecx, Immediate(0)); // Type is two byte. + __ Set(ecx, Immediate(1)); // Type is one byte. + // (E) Carry on. String handling is done. __ bind(&check_code); + // edx: irregexp code // Check that the irregexp code has been generated for the actual string // encoding. If it has, the field contains a code object otherwise it contains // a smi (code flushing support). __ JumpIfSmi(edx, &runtime); // eax: subject string + // ebx: previous index (smi) // edx: code // ecx: encoding of subject string (1 if ASCII, 0 if two_byte); - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ mov(ebx, Operand(esp, kPreviousIndexOffset)); - __ SmiUntag(ebx); // Previous index from smi. - - // eax: subject string - // ebx: previous index - // edx: code - // ecx: encoding of subject string (1 if ASCII 0 if two_byte); // All checks done. Now push arguments for native regexp code. Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->regexp_entry_native(), 1); @@ -3930,6 +4023,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { masm->isolate()))); // Argument 2: Previous index. + __ SmiUntag(ebx); __ mov(Operand(esp, 1 * kPointerSize), ebx); // Argument 1: Original subject string. @@ -3960,9 +4054,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ test(ecx, ecx); __ j(zero, &setup_two_byte, Label::kNear); __ SmiUntag(esi); - __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize)); + __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize)); __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4. - __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize)); + __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize)); __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3. __ jmp(&setup_rest, Label::kNear); @@ -4039,8 +4133,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // edx: Number of capture registers // Load last_match_info which is still known to be a fast case JSArray. + // Check that the fourth object is a JSArray object. __ mov(eax, Operand(esp, kLastMatchInfoOffset)); + __ JumpIfSmi(eax, &runtime); + __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx); + __ j(not_equal, &runtime); + // Check that the JSArray is in fast case. __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset)); + __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset)); + __ cmp(eax, factory->fixed_array_map()); + __ j(not_equal, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. 
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset)); + __ SmiUntag(eax); + __ sub(eax, Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmp(edx, eax); + __ j(greater, &runtime); // ebx: last_match_info backing store (FixedArray) // edx: number of capture registers @@ -4050,13 +4159,14 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ SmiUntag(edx); // Number of capture registers back from smi. // Store last subject and last input. __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(ecx, eax); __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax); __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi, kDontSaveFPRegs); - __ mov(eax, Operand(esp, kSubjectOffset)); + __ mov(eax, ecx); __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax); __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, @@ -4094,10 +4204,19 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ mov(eax, Operand(esp, kLastMatchInfoOffset)); __ ret(4 * kPointerSize); - // External string. Short external strings have already been ruled out. - // eax: subject string (expected to be external) - // ebx: scratch + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + + // Deferred code for string handling. + // (7) Not a long external string? If yes, go to (10). + __ bind(&not_seq_nor_cons); + // Compare flags are still set from (3). + __ j(greater, &not_long_external, Label::kNear); // Go to (10). + + // (8) External string. Short external strings have been ruled out. __ bind(&external_string); + // Reload instance type. __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset)); if (FLAG_debug_code) { @@ -4108,16 +4227,41 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { } __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); STATIC_ASSERT(kTwoByteStringTag == 0); + // (8a) Is the external string one byte? If yes, go to (6). __ test_b(ebx, kStringEncodingMask); - __ j(not_zero, &seq_ascii_string); - __ jmp(&seq_two_byte_string); + __ j(not_zero, &seq_one_byte_string); // Goto (6). - // Do the runtime call to execute the regexp. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + // eax: sequential subject string (or look-alike, external string) + // edx: original subject string + // ecx: RegExp data (FixedArray) + // (9) Two byte sequential. Load regexp code for one byte. Go to (E). + __ bind(&seq_two_byte_string); + // Load previous index and check range before edx is overwritten. We have + // to use edx instead of eax here because it might have been only made to + // look like a sequential string when it actually is an external string. + __ mov(ebx, Operand(esp, kPreviousIndexOffset)); + __ JumpIfNotSmi(ebx, &runtime); + __ cmp(ebx, FieldOperand(edx, String::kLengthOffset)); + __ j(above_equal, &runtime); + __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset)); + __ Set(ecx, Immediate(0)); // Type is two byte. + __ jmp(&check_code); // Go to (E). + + // (10) Not a string or a short external string? If yes, bail out to runtime.
+ __ bind(&not_long_external); + // Catch non-string subject or short external string. + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag)); + __ j(not_zero, &runtime); + + // (11) Sliced string. Replace subject with parent. Go to (5a). + // Load offset into edi and replace subject string with parent. + __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset)); + __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset)); + __ jmp(&check_underlying); // Go to (5a). #endif // V8_INTERPRETED_REGEXP } @@ -4137,8 +4281,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { // JSArray: [Map][empty properties][Elements][Length-smi][index][input] // Elements: [Map][Length][..elements..] __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize, - times_half_pointer_size, - ebx, // In: Number of elements (times 2, being a smi) + times_pointer_size, + ebx, // In: Number of elements as a smi + REGISTER_VALUE_IS_SMI, eax, // Out: Start of allocation (tagged). ecx, // Out: End of allocation. edx, // Scratch register @@ -4261,7 +4406,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, FixedArray::kHeaderSize)); __ JumpIfSmi(probe, not_found); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm, SSE2); __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset)); __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset)); __ ucomisd(xmm0, xmm1); @@ -4321,30 +4466,59 @@ static int NegativeComparisonResult(Condition cc) { return (cc == greater || cc == greater_equal) ? LESS : GREATER; } -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); +static void CheckInputType(MacroAssembler* masm, + Register input, + CompareIC::State expected, + Label* fail) { + Label ok; + if (expected == CompareIC::SMI) { + __ JumpIfNotSmi(input, fail); + } else if (expected == CompareIC::NUMBER) { + __ JumpIfSmi(input, &ok); + __ cmp(FieldOperand(input, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->heap_number_map())); + __ j(not_equal, fail); + } + // We could be strict about internalized/non-internalized here, but as long as + // hydrogen doesn't care, the stub doesn't have to care either. + __ bind(&ok); +} + + +static void BranchIfNotInternalizedString(MacroAssembler* masm, + Label* label, + Register object, + Register scratch) { + __ JumpIfSmi(object, label); + __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); + __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); + __ and_(scratch, kIsInternalizedMask | kIsNotStringMask); + __ cmp(scratch, kInternalizedTag | kStringTag); + __ j(not_equal, label); +} + + +void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { Label check_unequal_objects; + Condition cc = GetCondition(); - // Compare two smis if required. - if (include_smi_compare_) { - Label non_smi, smi_done; - __ mov(ecx, edx); - __ or_(ecx, eax); - __ JumpIfNotSmi(ecx, &non_smi, Label::kNear); - __ sub(edx, eax); // Return on the result of the subtraction. - __ j(no_overflow, &smi_done, Label::kNear); - __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
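
The RegExpConstructResultStub change above swaps times_half_pointer_size for times_pointer_size plus a REGISTER_VALUE_IS_SMI hint. The arithmetic is equivalent because a smi-tagged count stores twice the untagged value (the old comment's "times 2, being a smi"); presumably the allocator now untags the register itself. A tiny self-checking illustration of that equivalence:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kPointerSize = 4;   // ia32
  const int32_t n = 7;              // untagged element count
  const int32_t smi = n << 1;       // smi encoding: value shifted up past the tag bit
  assert(smi * (kPointerSize / 2) == n * kPointerSize);   // old: scale the smi by half a pointer
  assert((smi >> 1) * kPointerSize == n * kPointerSize);  // new: untag, then scale by a full pointer
  return 0;
}
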
- __ bind(&smi_done); - __ mov(eax, edx); - __ ret(0); - __ bind(&non_smi); - } else if (FLAG_debug_code) { - __ mov(ecx, edx); - __ or_(ecx, eax); - __ test(ecx, Immediate(kSmiTagMask)); - __ Assert(not_zero, "Unexpected smi operands."); - } + Label miss; + CheckInputType(masm, edx, left_, &miss); + CheckInputType(masm, eax, right_, &miss); + + // Compare two smis. + Label non_smi, smi_done; + __ mov(ecx, edx); + __ or_(ecx, eax); + __ JumpIfNotSmi(ecx, &non_smi, Label::kNear); + __ sub(edx, eax); // Return on the result of the subtraction. + __ j(no_overflow, &smi_done, Label::kNear); + __ not_(edx); // Correct sign in case of overflow. edx is never 0 here. + __ bind(&smi_done); + __ mov(eax, edx); + __ ret(0); + __ bind(&non_smi); // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. @@ -4356,67 +4530,61 @@ void CompareStub::Generate(MacroAssembler* masm) { __ cmp(eax, edx); __ j(not_equal, &not_identical); - if (cc_ != equal) { + if (cc != equal) { // Check for undefined. undefined OP undefined is false even though // undefined == undefined. Label check_for_nan; __ cmp(edx, masm->isolate()->factory()->undefined_value()); __ j(not_equal, &check_for_nan, Label::kNear); - __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); + __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); __ ret(0); __ bind(&check_for_nan); } // Test for NaN. Sadly, we can't just compare to factory->nan_value(), // so we do the second best thing - test it ourselves. - // Note: if cc_ != equal, never_nan_nan_ is not used. - if (never_nan_nan_ && (cc_ == equal)) { - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); + Label heap_number; + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + Immediate(masm->isolate()->factory()->heap_number_map())); + __ j(equal, &heap_number, Label::kNear); + if (cc != equal) { + // Call runtime on identical JSObjects. Otherwise return equal. + __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); + __ j(above_equal, &not_identical); + } + __ Set(eax, Immediate(Smi::FromInt(EQUAL))); + __ ret(0); + + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if + // it's not NaN. + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // We only accept QNaNs, which have bit 51 set. + // Read top bits of double representation (second word of value). + + // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., + // all bits in the mask are set. We only need to check the word + // that contains the exponent and high bit of the mantissa. + STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0); + __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); + __ Set(eax, Immediate(0)); + // Shift value and mask so kQuietNaNHighBitsMask applies to topmost + // bits. + __ add(edx, edx); + __ cmp(edx, kQuietNaNHighBitsMask << 1); + if (cc == equal) { + STATIC_ASSERT(EQUAL != 1); + __ setcc(above_equal, eax); + __ ret(0); } else { - Label heap_number; - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), - Immediate(masm->isolate()->factory()->heap_number_map())); - __ j(equal, &heap_number, Label::kNear); - if (cc_ != equal) { - // Call runtime on identical JSObjects. Otherwise return equal.
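
The rewritten identical-operands path above classifies a heap number as NaN by inspecting only the high word of its IEEE-754 representation: all exponent bits set plus the top (quiet) mantissa bit. A portable C++ sketch of the same predicate, written against the standard double layout rather than V8's kQuietNaNHighBitsMask constant:

#include <cstdint>
#include <cstring>

// True when the double is a quiet NaN: exponent bits 52..62 all set and
// mantissa bit 51 set, which only requires looking at the high 32 bits.
bool HighWordIsQuietNaN(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  const uint32_t high = static_cast<uint32_t>(bits >> 32);
  const uint32_t kQuietNaNHighBits = 0x7FF80000u;  // exponent mask | quiet bit
  return (high & kQuietNaNHighBits) == kQuietNaNHighBits;
}
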
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx); - __ j(above_equal, &not_identical); - } + Label nan; + __ j(above_equal, &nan, Label::kNear); __ Set(eax, Immediate(Smi::FromInt(EQUAL))); __ ret(0); - - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if - // it's not NaN. - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // We only accept QNaNs, which have bit 51 set. - // Read top bits of double representation (second word of value). - - // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e., - // all bits in the mask are set. We only need to check the word - // that contains the exponent and high bit of the mantissa. - STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0); - __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset)); - __ Set(eax, Immediate(0)); - // Shift value and mask so kQuietNaNHighBitsMask applies to topmost - // bits. - __ add(edx, edx); - __ cmp(edx, kQuietNaNHighBitsMask << 1); - if (cc_ == equal) { - STATIC_ASSERT(EQUAL != 1); - __ setcc(above_equal, eax); - __ ret(0); - } else { - Label nan; - __ j(above_equal, &nan, Label::kNear); - __ Set(eax, Immediate(Smi::FromInt(EQUAL))); - __ ret(0); - __ bind(&nan); - __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); - __ ret(0); - } + __ bind(&nan); + __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); + __ ret(0); } __ bind(&not_identical); @@ -4424,7 +4592,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // Strict equality can quickly decide whether objects are equal. // Non-strict object equality is slower, so it is handled later in the stub. - if (cc_ == equal && strict_) { + if (cc == equal && strict()) { Label slow; // Fallthrough label. Label not_smis; // If we're doing a strict equality comparison, we don't have to do
+ __ mov(eax, 0); // equal + __ mov(ecx, Immediate(Smi::FromInt(1))); + __ cmov(above, eax, ecx); + __ mov(ecx, Immediate(Smi::FromInt(-1))); + __ cmov(below, eax, ecx); + __ ret(0); + } else { + FloatingPointHelper::CheckFloatOperands( + masm, &non_number_comparison, ebx); + FloatingPointHelper::LoadFloatOperand(masm, eax); + FloatingPointHelper::LoadFloatOperand(masm, edx); + __ FCmp(); - Label below_label, above_label; - // Return a result of -1, 0, or 1, based on EFLAGS. - __ j(below, &below_label, Label::kNear); - __ j(above, &above_label, Label::kNear); + // Don't base result on EFLAGS when a NaN is involved. + __ j(parity_even, &unordered, Label::kNear); - __ Set(eax, Immediate(0)); - __ ret(0); + Label below_label, above_label; + // Return a result of -1, 0, or 1, based on EFLAGS. + __ j(below, &below_label, Label::kNear); + __ j(above, &above_label, Label::kNear); - __ bind(&below_label); - __ mov(eax, Immediate(Smi::FromInt(-1))); - __ ret(0); + __ Set(eax, Immediate(0)); + __ ret(0); - __ bind(&above_label); - __ mov(eax, Immediate(Smi::FromInt(1))); - __ ret(0); - } + __ bind(&below_label); + __ mov(eax, Immediate(Smi::FromInt(-1))); + __ ret(0); - // If one of the numbers was NaN, then the result is always false. - // The cc is never not-equal. - __ bind(&unordered); - ASSERT(cc_ != not_equal); - if (cc_ == less || cc_ == less_equal) { - __ mov(eax, Immediate(Smi::FromInt(1))); - } else { - __ mov(eax, Immediate(Smi::FromInt(-1))); - } + __ bind(&above_label); + __ mov(eax, Immediate(Smi::FromInt(1))); __ ret(0); + } - // The number comparison code did not provide a valid result. - __ bind(&non_number_comparison); + // If one of the numbers was NaN, then the result is always false. + // The cc is never not-equal. + __ bind(&unordered); + ASSERT(cc != not_equal); + if (cc == less || cc == less_equal) { + __ mov(eax, Immediate(Smi::FromInt(1))); + } else { + __ mov(eax, Immediate(Smi::FromInt(-1))); } + __ ret(0); + + // The number comparison code did not provide a valid result. + __ bind(&non_number_comparison); - // Fast negative check for symbol-to-symbol equality. + // Fast negative check for internalized-to-internalized equality. Label check_for_strings; - if (cc_ == equal) { - BranchIfNonSymbol(masm, &check_for_strings, eax, ecx); - BranchIfNonSymbol(masm, &check_for_strings, edx, ecx); + if (cc == equal) { + BranchIfNotInternalizedString(masm, &check_for_strings, eax, ecx); + BranchIfNotInternalizedString(masm, &check_for_strings, edx, ecx); // We've already checked for object identity, so if both operands - // are symbols they aren't equal. Register eax already holds a + // are internalized they aren't equal. Register eax already holds a // non-zero value, which indicates not equal, so just return. __ ret(0); } @@ -4574,7 +4740,7 @@ void CompareStub::Generate(MacroAssembler* masm) { &check_unequal_objects); // Inline comparison of ASCII strings. - if (cc_ == equal) { + if (cc == equal) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, edx, eax, @@ -4593,7 +4759,7 @@ void CompareStub::Generate(MacroAssembler* masm) { #endif __ bind(&check_unequal_objects); - if (cc_ == equal && !strict_) { + if (cc == equal && !strict()) { // Non-strict equality. Objects are unequal if // they are both JSObjects and not undetectable, // and their pointers are different. @@ -4637,11 +4803,11 @@ void CompareStub::Generate(MacroAssembler* masm) { // Figure out which native to call and setup the arguments. Builtins::JavaScript builtin; - if (cc_ == equal) { - builtin = strict_ ? 
Builtins::STRICT_EQUALS : Builtins::EQUALS; + if (cc == equal) { + builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { builtin = Builtins::COMPARE; - __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_)))); + __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc)))); } // Restore return address on the stack. @@ -4650,19 +4816,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ InvokeBuiltin(builtin, JUMP_FUNCTION); -} - -void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch) { - __ JumpIfSmi(object, label); - __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset)); - __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); - __ and_(scratch, kIsSymbolMask | kIsNotStringMask); - __ cmp(scratch, kSymbolTag | kStringTag); - __ j(not_equal, label); + __ bind(&miss); + GenerateMiss(masm); } @@ -4676,12 +4832,13 @@ void InterruptStub::Generate(MacroAssembler* masm) { } -static void GenerateRecordCallTarget(MacroAssembler* masm) { +static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. // ebx : cache cell for call target // edi : the function to call + ASSERT(!FLAG_optimize_constructed_arrays); Isolate* isolate = masm->isolate(); Label initialize, done; @@ -4714,6 +4871,82 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { } +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // ebx : cache cell for call target + // edi : the function to call + ASSERT(FLAG_optimize_constructed_arrays); + Isolate* isolate = masm->isolate(); + Label initialize, done, miss, megamorphic, not_array_function; + + // Load the cache state into ecx. + __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ cmp(ecx, edi); + __ j(equal, &done); + __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate))); + __ j(equal, &done); + + // Special handling of the Array() function, which caches not only the + // monomorphic Array function but the initial ElementsKind with special + // sentinels + Handle<Object> terminal_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + LAST_FAST_ELEMENTS_KIND); + __ cmp(ecx, Immediate(terminal_kind_sentinel)); + __ j(above, &miss); + // Load the global or builtins object from the current context + __ LoadGlobalContext(ecx); + // Make sure the function is the Array() function + __ cmp(edi, Operand(ecx, + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); + __ j(not_equal, &megamorphic); + __ jmp(&done); + + __ bind(&miss); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate))); + __ j(equal, &initialize); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. 
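
GenerateRecordCallTarget above implements a small state machine over the call target cell: uninitialized goes monomorphic on the first callee, monomorphic goes megamorphic on a mismatch, and megamorphic is sticky. A compact C++ model of those transitions (the Array-constructor ElementsKind sentinels that the new version also tracks are omitted here; the types are stand-ins, not V8 classes):

#include <variant>

struct Uninitialized {};
struct Megamorphic {};
struct Monomorphic { const void* target; };  // stands in for a specific JSFunction
using CacheState = std::variant<Uninitialized, Monomorphic, Megamorphic>;

CacheState RecordCallTarget(CacheState state, const void* callee) {
  if (std::holds_alternative<Megamorphic>(state)) return state;  // sticky
  if (const Monomorphic* mono = std::get_if<Monomorphic>(&state)) {
    // A hit keeps the state; a miss degrades the cell to megamorphic.
    return mono->target == callee ? state : CacheState{Megamorphic{}};
  }
  return CacheState{Monomorphic{callee}};  // first call seen: go monomorphic
}
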
+ __ bind(&megamorphic); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate))); + __ jmp(&done, Label::kNear); + + // An uninitialized cache is patched with the function or sentinel to + // indicate the ElementsKind if function is the Array constructor. + __ bind(&initialize); + __ LoadGlobalContext(ecx); + // Make sure the function is the Array() function + __ cmp(edi, Operand(ecx, + Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); + __ j(not_equal, &not_array_function); + + // The target function is the Array constructor, install a sentinel value in + // the constructor's type info cell that will track the initial ElementsKind + // that should be used for the array when its constructed. + Handle<Object> initial_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + GetInitialFastElementsKind()); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), + Immediate(initial_kind_sentinel)); + __ jmp(&done); + + __ bind(&not_array_function); + __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi); + // No need for a write barrier here - cells are rescanned. + + __ bind(&done); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { // ebx : cache cell for call target // edi : the function to call @@ -4745,7 +4978,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Fast-case: Just invoke the function. @@ -4818,14 +5055,20 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Jump to the function-specific construct stub. - __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); - __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset)); - __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize)); - __ jmp(ebx); + Register jmp_reg = FLAG_optimize_constructed_arrays ? ecx : ebx; + __ mov(jmp_reg, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); + __ mov(jmp_reg, FieldOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize)); + __ jmp(jmp_reg); // edi: called object // eax: number of arguments @@ -4860,29 +5103,49 @@ bool CEntryStub::IsPregenerated() { } -void CodeStub::GenerateStubsAheadOfTime() { - CEntryStub::GenerateAheadOfTime(); - StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); +void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { + CEntryStub::GenerateAheadOfTime(isolate); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); // It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); } -void CodeStub::GenerateFPStubs() { - CEntryStub save_doubles(1, kSaveFPRegs); - Handle<Code> code = save_doubles.GetCode(); - code->set_is_pregenerated(true); - code->GetIsolate()->set_fp_stubs_generated(true); +void CodeStub::GenerateFPStubs(Isolate* isolate) { + if (CpuFeatures::IsSupported(SSE2)) { + CEntryStub save_doubles(1, kSaveFPRegs); + // Stubs might already be in the snapshot, detect that and don't regenerate, + // which would lead to code stub initialization state being messed up. + Code* save_doubles_code; + if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { + save_doubles_code = *(save_doubles.GetCode(isolate)); + } + save_doubles_code->set_is_pregenerated(true); + isolate->set_fp_stubs_generated(true); + } } -void CEntryStub::GenerateAheadOfTime() { +void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { CEntryStub stub(1, kDontSaveFPRegs); - Handle<Code> code = stub.GetCode(); + Handle<Code> code = stub.GetCode(isolate); code->set_is_pregenerated(true); } +static void JumpIfOOM(MacroAssembler* masm, + Register value, + Register scratch, + Label* oom_label) { + __ mov(scratch, value); + STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); + STATIC_ASSERT(kFailureTag == 3); + __ and_(scratch, 0xf); + __ cmp(scratch, 0xf); + __ j(equal, oom_label); +} + + void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -4980,8 +5243,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ j(zero, &retry, Label::kNear); // Special handling of out of memory exceptions. - __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException())); - __ j(equal, throw_out_of_memory_exception); + JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. __ mov(eax, Operand::StaticVariable(pending_exception_address)); @@ -5063,7 +5325,10 @@ void CEntryStub::Generate(MacroAssembler* masm) { // Set pending exception and eax to out of memory exception. ExternalReference pending_exception(Isolate::kPendingExceptionAddress, isolate); - __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException())); + Label already_have_failure; + JumpIfOOM(masm, eax, ecx, &already_have_failure); + __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException(0x1))); + __ bind(&already_have_failure); __ mov(Operand::StaticVariable(pending_exception), eax); // Fall through to the next label. @@ -5407,44 +5672,6 @@ Register InstanceofStub::left() { return eax; } Register InstanceofStub::right() { return edx; } -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. - ASSERT(static_cast<unsigned>(cc_) < (1 << 12)); - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - return ConditionField::encode(static_cast<unsigned>(cc_)) - | RegisterField::encode(false) // lhs_ and rhs_ are not used - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_) - | IncludeSmiCompareField::encode(include_smi_compare_); -} - - -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. 
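
The new JumpIfOOM helper above recognizes an out-of-memory failure by its low four bits. Given the STATIC_ASSERTed values (failure tag 3, OUT_OF_MEMORY_EXCEPTION type 3) and assuming the tag occupies the low two bits with the type stored just above it, the predicate reduces to a single nibble test; a sketch:

#include <cstdint>

// The two 3s are confirmed by the STATIC_ASSERTs above; the 2-bit width of the
// failure tag is an assumption made for this illustration.
constexpr uint32_t kFailureTag = 3;
constexpr uint32_t kFailureTagSize = 2;
constexpr uint32_t kOutOfMemoryException = 3;

constexpr bool IsOutOfMemoryFailure(uint32_t failure_word) {
  return (failure_word & 0xf) ==
         ((kOutOfMemoryException << kFailureTagSize) | kFailureTag);
}

static_assert(IsOutOfMemoryFailure(0xf), "low nibble 0b1111 marks OOM");
static_assert(!IsOutOfMemoryFailure(0x3), "a plain failure tag is not OOM");
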
-void CompareStub::PrintName(StringStream* stream) { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - const char* cc_name; - switch (cc_) { - case less: cc_name = "LT"; break; - case greater: cc_name = "GT"; break; - case less_equal: cc_name = "LE"; break; - case greater_equal: cc_name = "GE"; break; - case equal: cc_name = "EQ"; break; - case not_equal: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - bool is_equality = cc_ == equal || cc_ == not_equal; - stream->Add("CompareStub_%s", cc_name); - if (strict_ && is_equality) stream->Add("_STRICT"); - if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); - if (!include_number_compare_) stream->Add("_NO_NUMBER"); - if (!include_smi_compare_) stream->Add("_NO_SMI"); -} - - // ------------------------------------------------------------------------- // StringCharCodeAtGenerator @@ -5544,10 +5771,10 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { // Fast case of Heap::LookupSingleCharacterStringFromCode. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); __ test(code_, Immediate(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + ((~String::kMaxOneByteCharCode) << kSmiTagSize))); __ j(not_zero, &slow_case_); Factory* factory = masm->isolate()->factory(); @@ -5584,23 +5811,6 @@ void StringCharFromCodeGenerator::GenerateSlow( } -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, - const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - void StringAddStub::Generate(MacroAssembler* masm) { Label call_runtime, call_builtin; Builtins::JavaScript builtin_id = Builtins::ADD; @@ -5670,8 +5880,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength); // Handle exceptionally long strings in the runtime system. __ j(overflow, &call_runtime); - // Use the symbol table when adding two one character strings, as it - // helps later optimizations to return a symbol here. + // Use the string table when adding two one character strings, as it + // helps later optimizations to return an internalized string here. __ cmp(ebx, Immediate(Smi::FromInt(2))); __ j(not_equal, &longer_than_two); @@ -5679,13 +5889,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime); // Get the two characters forming the new string. - __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); - __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); + __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize)); + __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize)); - // Try to lookup two character string in symbol table. If it is not found + // Try to lookup two character string in string table. If it is not found // just allocate a new one. 
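
StringCharFromCodeGenerator's fast path above folds the smi check and the character-code range check into a single test: because kMaxOneByteCharCode + 1 is a power of two, any disallowed bit can be caught with one mask. A sketch under assumed values (smi tag in the low bit with the payload shifted up by one, and kMaxOneByteCharCode of 0xff):

#include <cstdint>

constexpr uint32_t kSmiTagSize = 1;
constexpr uint32_t kSmiTagMask = (1u << kSmiTagSize) - 1;
constexpr uint32_t kMaxOneByteCharCode = 0xff;  // assumed value

// Passes only for smi-tagged values whose payload fits in one byte.
constexpr bool IsOneByteCharCodeSmi(uint32_t tagged_value) {
  return (tagged_value &
          (kSmiTagMask | (~kMaxOneByteCharCode << kSmiTagSize))) == 0;
}

static_assert(IsOneByteCharCodeSmi(0xff << kSmiTagSize), "0xff is in range");
static_assert(!IsOneByteCharCodeSmi(0x100 << kSmiTagSize), "0x100 is out of range");
static_assert(!IsOneByteCharCodeSmi((0x41 << kSmiTagSize) | 1), "non-smis fail");
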
Label make_two_character_string, make_two_character_string_no_reload; - StringHelper::GenerateTwoCharacterSymbolTableProbe( + StringHelper::GenerateTwoCharacterStringTableProbe( masm, ebx, ecx, eax, edx, edi, &make_two_character_string_no_reload, &make_two_character_string); __ IncrementCounter(counters->string_add_native(), 1); @@ -5697,8 +5907,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. // Get the two characters forming the new string. - __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize)); - __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize)); + __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize)); + __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize)); __ bind(&make_two_character_string_no_reload); __ IncrementCounter(counters->string_add_make_two_char(), 1); __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime); @@ -5706,7 +5916,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ shl(ecx, kBitsPerByte); __ or_(ebx, ecx); // Set the characters in the new string. - __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx); + __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx); __ IncrementCounter(counters->string_add_native(), 1); __ ret(2 * kPointerSize); @@ -5723,7 +5933,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset)); __ and_(ecx, edi); - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ test(ecx, Immediate(kStringEncodingMask)); __ j(zero, &non_ascii); @@ -5751,9 +5961,9 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset)); __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset)); __ xor_(edi, ecx); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ and_(edi, kAsciiStringTag | kAsciiDataHintTag); - __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag); + STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0); + __ and_(edi, kOneByteStringTag | kAsciiDataHintTag); + __ cmp(edi, kOneByteStringTag | kAsciiDataHintTag); __ j(equal, &ascii_data); // Allocate a two byte cons string. 
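
For the two-character fast case above, the stub packs both character codes into one register and writes them with a single 16-bit mov_w at the start of the new string's payload. A plain C++ rendering of that packing (little-endian, so the first character lands in the low byte):

#include <cstdint>

void StoreTwoOneByteChars(uint8_t* payload, uint8_t first, uint8_t second) {
  const uint16_t packed = static_cast<uint16_t>(first) |
                          (static_cast<uint16_t>(second) << 8);
  payload[0] = static_cast<uint8_t>(packed);       // == first
  payload[1] = static_cast<uint8_t>(packed >> 8);  // == second
}
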
__ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime); @@ -5780,10 +5990,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ test_b(ecx, kShortExternalStringMask); __ j(not_zero, &call_runtime); __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ jmp(&first_prepared, Label::kNear); __ bind(&first_is_sequential); - __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ bind(&first_prepared); __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); @@ -5801,10 +6011,10 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ test_b(edi, kShortExternalStringMask); __ j(not_zero, &call_runtime); __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); __ jmp(&second_prepared, Label::kNear); __ bind(&second_is_sequential); - __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ bind(&second_prepared); // Push the addresses of both strings' first characters onto the stack. @@ -5825,7 +6035,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // eax: result string __ mov(ecx, eax); // Locate first character of result. - __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // Load first argument's length and first character location. Account for // values currently on the stack when fetching arguments from it. __ mov(edx, Operand(esp, 4 * kPointerSize)); @@ -6031,7 +6241,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, } -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, +void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -6043,7 +6253,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register scratch = scratch3; // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. + // different hash algorithm. Don't try to look for these in the string table. Label not_array_index; __ mov(scratch, c1); __ sub(scratch, Immediate(static_cast<int>('0'))); @@ -6069,47 +6279,47 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string. - // Load the symbol table. - Register symbol_table = c2; + // Load the string table. + Register string_table = c2; ExternalReference roots_array_start = ExternalReference::roots_array_start(masm->isolate()); - __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex)); - __ mov(symbol_table, + __ mov(scratch, Immediate(Heap::kStringTableRootIndex)); + __ mov(string_table, Operand::StaticArray(scratch, times_pointer_size, roots_array_start)); - // Calculate capacity mask from the symbol table capacity. + // Calculate capacity mask from the string table capacity. 
Register mask = scratch2; - __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); + __ mov(mask, FieldOperand(string_table, StringTable::kCapacityOffset)); __ SmiUntag(mask); __ sub(mask, Immediate(1)); // Registers // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string - // symbol_table: symbol table + // string_table: string table // mask: capacity mask // scratch: - - // Perform a number of probes in the symbol table. + // Perform a number of probes in the string table. static const int kProbes = 4; - Label found_in_symbol_table; + Label found_in_string_table; Label next_probe[kProbes], next_probe_pop_mask[kProbes]; Register candidate = scratch; // Scratch register contains candidate. for (int i = 0; i < kProbes; i++) { - // Calculate entry in symbol table. + // Calculate entry in string table. __ mov(scratch, hash); if (i > 0) { - __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i))); + __ add(scratch, Immediate(StringTable::GetProbeOffset(i))); } __ and_(scratch, mask); - // Load the entry from the symbol table. - STATIC_ASSERT(SymbolTable::kEntrySize == 1); + // Load the entry from the string table. + STATIC_ASSERT(StringTable::kEntrySize == 1); __ mov(candidate, - FieldOperand(symbol_table, + FieldOperand(string_table, scratch, times_pointer_size, - SymbolTable::kElementsStartOffset)); + StringTable::kElementsStartOffset)); // If entry is undefined no string with this hash can be found. Factory* factory = masm->isolate()->factory(); @@ -6135,10 +6345,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, temp, temp, &next_probe_pop_mask[i]); // Check if the two characters match. - __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); + __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize)); __ and_(temp, 0x0000ffff); __ cmp(chars, temp); - __ j(equal, &found_in_symbol_table); + __ j(equal, &found_in_string_table); __ bind(&next_probe_pop_mask[i]); __ pop(mask); __ bind(&next_probe[i]); @@ -6149,7 +6359,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Scratch register contains result when we fall through to here. Register result = candidate; - __ bind(&found_in_symbol_table); + __ bind(&found_in_string_table); __ pop(mask); // Pop saved mask from the stack. if (!result.is(eax)) { __ mov(eax, result); } @@ -6267,6 +6477,10 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ ret(3 * kPointerSize); __ bind(&not_original_string); + Label single_char; + __ cmp(ecx, Immediate(Smi::FromInt(1))); + __ j(equal, &single_char); + // eax: string // ebx: instance type // ecx: sub string length (smi) @@ -6324,7 +6538,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // string's encoding is wrong because we always have to recheck encoding of // the newly created string's parent anyways due to externalized strings. Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ test(ebx, Immediate(kStringEncodingMask)); __ j(zero, &two_byte_slice, Label::kNear); @@ -6363,7 +6577,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ j(not_zero, &runtime); __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string.
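
GenerateTwoCharacterStringTableProbe above performs a bounded number of inline probes into the string table, whose capacity is a power of two so wrapping is a simple mask. A simplified C++ model: here each slot holds just the 16-bit character pair (the stub compares real string objects), and the probe-offset formula is a stand-in for StringTable::GetProbeOffset:

#include <cstdint>
#include <optional>
#include <vector>

constexpr uint32_t kUndefined = 0xffffffff;  // marks an empty slot in this model

std::optional<size_t> ProbeTwoCharTable(const std::vector<uint32_t>& table,
                                        uint32_t hash, uint32_t two_chars) {
  const uint32_t mask = static_cast<uint32_t>(table.size()) - 1;  // power-of-two capacity
  const int kProbes = 4;  // same inline probe count as the stub
  for (int i = 0; i < kProbes; ++i) {
    // Quadratically growing probe offset; stand-in for StringTable::GetProbeOffset(i).
    const uint32_t index = (hash + static_cast<uint32_t>(i * (i + 1) / 2)) & mask;
    const uint32_t candidate = table[index];
    if (candidate == kUndefined) return std::nullopt;  // definitely not interned
    if (candidate == two_chars) return index;          // found the internalized string
  }
  return std::nullopt;  // give up; the stub then allocates a fresh string
}
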
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); __ bind(&sequential_string); @@ -6371,7 +6585,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ push(edx); __ push(edi); __ SmiUntag(ecx); - STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); + STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); __ test_b(ebx, kStringEncodingMask); __ j(zero, &two_byte_sequential); @@ -6383,12 +6597,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ mov(edx, esi); // esi used by following code. // Locate first character of result. __ mov(edi, eax); - __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // Load string argument and locate character of sub string start. __ pop(esi); __ pop(ebx); __ SmiUntag(ebx); - __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize)); + __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize)); // eax: result string // ecx: result length @@ -6437,6 +6651,17 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Just jump to runtime to create the sub string. __ bind(&runtime); __ TailCallRuntime(Runtime::kSubString, 3, 1); + + __ bind(&single_char); + // eax: string + // ebx: instance type + // ecx: sub string length (smi) + // edx: from index (smi) + StringCharAtGenerator generator( + eax, edx, ecx, eax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ ret(3 * kPointerSize); + generator.SkipSlow(masm, &runtime); } @@ -6513,7 +6738,8 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, // Compare lengths - strings up to min-length are equal. __ bind(&compare_lengths); __ test(length_delta, length_delta); - __ j(not_zero, &result_not_equal, Label::kNear); + Label length_not_equal; + __ j(not_zero, &length_not_equal, Label::kNear); // Result is EQUAL. STATIC_ASSERT(EQUAL == 0); @@ -6522,8 +6748,13 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, __ ret(0); Label result_greater; - __ bind(&result_not_equal); + Label result_less; + __ bind(&length_not_equal); __ j(greater, &result_greater, Label::kNear); + __ jmp(&result_less, Label::kNear); + __ bind(&result_not_equal); + __ j(above, &result_greater, Label::kNear); + __ bind(&result_less); // Result is LESS. __ Set(eax, Immediate(Smi::FromInt(LESS))); @@ -6549,9 +6780,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // doesn't need an additional compare. 
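
The new length_not_equal/result_not_equal split in GenerateCompareFlatAsciiStrings appears to separate two different orderings: a length delta is a signed quantity (hence j greater), while a byte difference coming out of the character loop must be ordered unsigned (hence j above). The distinction rendered in plain C++:

#include <cstdint>

int CompareResultFromLengthDelta(int32_t length_delta) {
  if (length_delta == 0) return 0;   // EQUAL
  return length_delta > 0 ? 1 : -1;  // signed: GREATER / LESS
}

int CompareResultFromCharDiff(uint8_t left_char, uint8_t right_char) {
  // Character data is unsigned bytes, so e.g. 0x80 must order above 0x7f.
  if (left_char == right_char) return 0;
  return left_char > right_char ? 1 : -1;
}
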
__ SmiUntag(length); __ lea(left, - FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize)); + FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize)); __ lea(right, - FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize)); + FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize)); __ neg(length); Register index = length; // index = -length; @@ -6606,7 +6837,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMIS); + ASSERT(state_ == CompareIC::SMI); Label miss; __ mov(ecx, edx); __ or_(ecx, eax); @@ -6631,32 +6862,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { } -void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBERS); +void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; Label miss; - __ mov(ecx, edx); - __ and_(ecx, eax); - __ JumpIfSmi(ecx, &generic_stub, Label::kNear); - __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx); - __ j(not_equal, &maybe_undefined1, Label::kNear); - __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx); - __ j(not_equal, &maybe_undefined2, Label::kNear); + if (left_ == CompareIC::SMI) { + __ JumpIfNotSmi(edx, &miss); + } + if (right_ == CompareIC::SMI) { + __ JumpIfNotSmi(eax, &miss); + } // Inlining the double comparison and falling back to the general compare - // stub if NaN is involved or SS2 or CMOV is unsupported. + // stub if NaN is involved or SSE2 or CMOV is unsupported. if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) { - CpuFeatures::Scope scope1(SSE2); - CpuFeatures::Scope scope2(CMOV); + CpuFeatureScope scope1(masm, SSE2); + CpuFeatureScope scope2(masm, CMOV); - // Load left and right operand - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(eax, &right_smi, Label::kNear); + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + masm->isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined1, Label::kNear); __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset)); + __ jmp(&left, Label::kNear); + __ bind(&right_smi); + __ mov(ecx, eax); // Can't clobber eax because we can still jump away. + __ SmiUntag(ecx); + __ cvtsi2sd(xmm1, ecx); + + __ bind(&left); + __ JumpIfSmi(edx, &left_smi, Label::kNear); + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + masm->isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined2, Label::kNear); + __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset)); + __ jmp(&done); + __ bind(&left_smi); + __ mov(ecx, edx); // Can't clobber edx because we can still jump away. + __ SmiUntag(ecx); + __ cvtsi2sd(xmm0, ecx); - // Compare operands + __ bind(&done); + // Compare operands. __ ucomisd(xmm0, xmm1); // Don't base result on EFLAGS when a NaN is involved. 
@@ -6670,17 +6922,30 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { __ mov(ecx, Immediate(Smi::FromInt(-1))); __ cmov(below, eax, ecx); __ ret(0); + } else { + __ mov(ecx, edx); + __ and_(ecx, eax); + __ JumpIfSmi(ecx, &generic_stub, Label::kNear); + + __ cmp(FieldOperand(eax, HeapObject::kMapOffset), + masm->isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined1, Label::kNear); + __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + masm->isolate()->factory()->heap_number_map()); + __ j(not_equal, &maybe_undefined2, Label::kNear); } __ bind(&unordered); - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS); __ bind(&generic_stub); - __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); + ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + CompareIC::GENERIC); + __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value())); __ j(not_equal, &miss); + __ JumpIfSmi(edx, &unordered); __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx); __ j(not_equal, &maybe_undefined2, Label::kNear); __ jmp(&unordered); @@ -6697,8 +6962,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } -void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOLS); +void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::INTERNALIZED_STRING); ASSERT(GetCondition() == equal); // Registers containing left and right operands respectively. @@ -6714,17 +6979,74 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { __ and_(tmp1, right); __ JumpIfSmi(tmp1, &miss, Label::kNear); - // Check that both operands are symbols. + // Check that both operands are internalized strings. __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset)); __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset)); __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ and_(tmp1, tmp2); - __ test(tmp1, Immediate(kIsSymbolMask)); + __ test(tmp1, Immediate(kIsInternalizedMask)); __ j(zero, &miss, Label::kNear); - // Symbols are compared by identity. + // Internalized strings are compared by identity. + Label done; + __ cmp(left, right); + // Make sure eax is non-zero. At this point input operands are + // guaranteed to be non-zero. + ASSERT(right.is(eax)); + __ j(not_equal, &done, Label::kNear); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Set(eax, Immediate(Smi::FromInt(EQUAL))); + __ bind(&done); + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::UNIQUE_NAME); + ASSERT(GetCondition() == equal); + + // Registers containing left and right operands respectively. + Register left = edx; + Register right = eax; + Register tmp1 = ecx; + Register tmp2 = ebx; + + // Check that both operands are heap objects. + Label miss; + __ mov(tmp1, left); + STATIC_ASSERT(kSmiTag == 0); + __ and_(tmp1, right); + __ JumpIfSmi(tmp1, &miss, Label::kNear); + + // Check that both operands are unique names. This leaves the instance + // types loaded in tmp1 and tmp2. 
+ STATIC_ASSERT(kInternalizedTag != 0); + __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset)); + __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset)); + __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + + Label succeed1; + __ test(tmp1, Immediate(kIsInternalizedMask)); + __ j(not_zero, &succeed1); + __ cmpb(tmp1, static_cast<uint8_t>(SYMBOL_TYPE)); + __ j(not_equal, &miss); + __ bind(&succeed1); + + Label succeed2; + __ test(tmp2, Immediate(kIsInternalizedMask)); + __ j(not_zero, &succeed2); + __ cmpb(tmp2, static_cast<uint8_t>(SYMBOL_TYPE)); + __ j(not_equal, &miss); + __ bind(&succeed2); + + // Unique names are compared by identity. Label done; __ cmp(left, right); // Make sure eax is non-zero. At this point input operands are @@ -6743,7 +7065,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRINGS); + ASSERT(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -6785,14 +7107,14 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // Handle not identical strings. __ bind(&not_same); - // Check that both strings are symbols. If they are, we're done + // Check that both strings are internalized. If they are, we're done // because we already know they are not identical. But in the case of // non-equality compare, we still need to determine the order. if (equality) { Label do_compare; - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ and_(tmp1, tmp2); - __ test(tmp1, Immediate(kIsSymbolMask)); + __ test(tmp1, Immediate(kIsInternalizedMask)); __ j(zero, &do_compare, Label::kNear); // Make sure eax is non-zero. At this point input operands are // guaranteed to be non-zero. @@ -6832,7 +7154,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECTS); + ASSERT(state_ == CompareIC::OBJECT); Label miss; __ mov(ecx, edx); __ and_(ecx, eax); @@ -6900,14 +7222,14 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { // the property. This function may return false negatives, so miss_label // must always call a backup property check that is complete. // This function is safe to call if the receiver has fast properties. -// Name must be a symbol and receiver must be a heap object. -void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register properties, - Handle<String> name, - Register r0) { - ASSERT(name->IsSymbol()); +// Name must be a unique name and receiver must be a heap object. +void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register properties, + Handle<Name> name, + Register r0) { + ASSERT(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the @@ -6922,10 +7244,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ dec(index); __ and_(index, Immediate(Smi::FromInt(name->Hash() + - StringDictionary::GetProbeOffset(i)))); + NameDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
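
Both GenerateUniqueNames above and the dictionary lookups below use the same two-armed test: an operand counts as a "unique name" if its instance type carries the internalized bit or it is exactly SYMBOL_TYPE. A one-line C++ predicate with illustrative bit and type values (the real ones are in objects.h):

#include <cstdint>

constexpr uint8_t kIsInternalizedMask = 0x10;   // illustrative bit
constexpr uint8_t kSymbolInstanceType = 0x80;   // stand-in for SYMBOL_TYPE

constexpr bool IsUniqueName(uint8_t instance_type) {
  return (instance_type & kIsInternalizedMask) != 0 ||
         instance_type == kSymbolInstanceType;
}
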
Register entity_name = r0; // Having undefined at this place means the name is not contained. @@ -6936,26 +7258,26 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ j(equal, done); // Stop if found the property. - __ cmp(entity_name, Handle<String>(name)); + __ cmp(entity_name, Handle<Name>(name)); __ j(equal, miss); - Label the_hole; + Label good; // Check for the hole and skip. __ cmp(entity_name, masm->isolate()->factory()->the_hole_value()); - __ j(equal, &the_hole, Label::kNear); + __ j(equal, &good, Label::kNear); - // Check if the entry name is not a symbol. + // Check if the entry name is not a unique name. __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset)); __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset), - kIsSymbolMask); - __ j(zero, miss); - __ bind(&the_hole); + kIsInternalizedMask); + __ j(not_zero, &good); + __ cmpb(FieldOperand(entity_name, Map::kInstanceTypeOffset), + static_cast<uint8_t>(SYMBOL_TYPE)); + __ j(not_equal, miss); + __ bind(&good); } - StringDictionaryLookupStub stub(properties, - r0, - r0, - StringDictionaryLookupStub::NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP); __ push(Immediate(Handle<Object>(name))); __ push(Immediate(name->Hash())); __ CallStub(&stub); @@ -6965,23 +7287,23 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, } -// Probe the string dictionary in the |elements| register. Jump to the +// Probe the name dictionary in the |elements| register. Jump to the // |done| label if a property with the given name is found leaving the // index into the dictionary in |r0|. Jump to the |miss| label // otherwise. -void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register r0, - Register r1) { +void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register r0, + Register r1) { ASSERT(!elements.is(r0)); ASSERT(!elements.is(r1)); ASSERT(!name.is(r0)); ASSERT(!name.is(r1)); - __ AssertString(name); + __ AssertName(name); __ mov(r1, FieldOperand(elements, kCapacityOffset)); __ shr(r1, kSmiTagSize); // convert smi to int @@ -6992,15 +7314,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, // cover ~93% of loads from dictionaries. for (int i = 0; i < kInlinedProbes; i++) { // Compute the masked index: (hash + i + i * i) & mask. - __ mov(r0, FieldOperand(name, String::kHashFieldOffset)); - __ shr(r0, String::kHashShift); + __ mov(r0, FieldOperand(name, Name::kHashFieldOffset)); + __ shr(r0, Name::kHashShift); if (i > 0) { - __ add(r0, Immediate(StringDictionary::GetProbeOffset(i))); + __ add(r0, Immediate(NameDictionary::GetProbeOffset(i))); } __ and_(r0, r1); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3 // Check if the key is identical to the name. 
@@ -7011,13 +7333,10 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ j(equal, done); } - StringDictionaryLookupStub stub(elements, - r1, - r0, - POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(elements, r1, r0, POSITIVE_LOOKUP); __ push(name); - __ mov(r0, FieldOperand(name, String::kHashFieldOffset)); - __ shr(r0, String::kHashShift); + __ mov(r0, FieldOperand(name, Name::kHashFieldOffset)); + __ shr(r0, Name::kHashShift); __ push(r0); __ CallStub(&stub); @@ -7027,7 +7346,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, } -void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { +void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // This stub overrides SometimesSetsUpAFrame() to return false. That means // we cannot call anything that could cause a GC from this stub. // Stack frame on entry: @@ -7035,7 +7354,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // esp[1 * kPointerSize]: key's hash. // esp[2 * kPointerSize]: key. // Registers: - // dictionary_: StringDictionary to probe. + // dictionary_: NameDictionary to probe. // result_: used as scratch. // index_: will hold an index of entry if lookup is successful. // might alias with result_. @@ -7060,12 +7379,12 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // Compute the masked index: (hash + i + i * i) & mask. __ mov(scratch, Operand(esp, 2 * kPointerSize)); if (i > 0) { - __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i))); + __ add(scratch, Immediate(NameDictionary::GetProbeOffset(i))); } __ and_(scratch, Operand(esp, 0)); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. // Having undefined at this place means the name is not contained. @@ -7082,15 +7401,20 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { __ j(equal, &in_dictionary); if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { - // If we hit a non symbol key during negative lookup - // we have to bailout as this key might be equal to the + // If we hit a key that is not a unique name during negative + // lookup we have to bailout as this key might be equal to the // key we are looking for. - // Check if the entry name is not a symbol. + // Check if the entry name is not a unique name. 
+ Label cont; __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset), - kIsSymbolMask); - __ j(zero, &maybe_in_dictionary); + kIsInternalizedMask); + __ j(not_zero, &cont); + __ cmpb(FieldOperand(scratch, Map::kInstanceTypeOffset), + static_cast<uint8_t>(SYMBOL_TYPE)); + __ j(not_equal, &maybe_in_dictionary); + __ bind(&cont); } } @@ -7179,19 +7503,18 @@ bool RecordWriteStub::IsPregenerated() { } -void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { - StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode()->set_is_pregenerated(true); - - CpuFeatures::TryForceFeatureScope scope(SSE2); - if (CpuFeatures::IsSupported(SSE2)) { +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { + StoreBufferOverflowStub stub(kDontSaveFPRegs); + stub.GetCode(isolate)->set_is_pregenerated(true); + if (CpuFeatures::IsSafeForSnapshot(SSE2)) { StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode()->set_is_pregenerated(true); + stub2.GetCode(isolate)->set_is_pregenerated(true); } } -void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; !entry->object.is(no_reg); entry++) { @@ -7200,7 +7523,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { entry->address, entry->action, kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); + stub.GetCode(isolate)->set_is_pregenerated(true); } } @@ -7297,13 +7620,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { int argument_count = 3; __ PrepareCallCFunction(argument_count, regs_.scratch0()); __ mov(Operand(esp, 0 * kPointerSize), regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. - } else { - ASSERT(mode == INCREMENTAL); - __ mov(regs_.scratch0(), Operand(regs_.address(), 0)); - __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value. - } + __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot. __ mov(Operand(esp, 2 * kPointerSize), Immediate(ExternalReference::isolate_address())); @@ -7498,6 +7815,22 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { } +void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { + ASSERT(!Serializer::enabled()); + bool save_fp_regs = CpuFeatures::IsSupported(SSE2); + CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs); + __ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + int parameter_count_offset = + StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; + __ mov(ebx, MemOperand(ebp, parameter_count_offset)); + masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); + __ pop(ecx); + __ lea(esp, MemOperand(esp, ebx, times_pointer_size, + extra_expression_stack_count_ * kPointerSize)); + __ jmp(ecx); // Return to IC Miss stub, continuation still on stack. +} + + void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (entry_hook_ != NULL) { ProfileEntryHookStub stub; @@ -7521,7 +7854,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { // Call the entry hook. int32_t hook_location = reinterpret_cast<int32_t>(&entry_hook_); - __ call(Operand(hook_location, RelocInfo::NONE)); + __ call(Operand(hook_location, RelocInfo::NONE32)); __ add(esp, Immediate(2 * kPointerSize)); // Restore ecx. 
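
Taken together, the inline probes in GenerateNegativeLookup and in the full Generate stub make the same per-slot decision that the comments above spell out: an undefined slot proves the name is absent, a matching key or a key that is not a unique name forces a bailout to the slow path, and the hole or any other unique name simply means "keep probing". A self-contained sketch of that decision (the KeyKind model is illustrative; the generated code reads the same facts from the entry's map and instance type):

    // Illustrative model of one negative-lookup probe.
    enum KeyKind { UNDEFINED_SLOT, THE_HOLE, INTERNALIZED_STRING, SYMBOL, OTHER };
    enum ProbeResult { ABSENT, BAIL_OUT, KEEP_PROBING };

    static ProbeResult NegativeLookupProbe(KeyKind key, bool key_equals_name) {
      if (key == UNDEFINED_SLOT) return ABSENT;   // empty slot: name is not in the dictionary
      if (key_equals_name) return BAIL_OUT;       // the property exists: fast negative answer fails
      if (key == THE_HOLE) return KEEP_PROBING;   // deleted entry: skip it
      // A key that is not a unique name (internalized string or Symbol) might
      // still compare equal to the probed name, so give up on the fast path.
      if (key != INTERNALIZED_STRING && key != SYMBOL) return BAIL_OUT;
      return KEEP_PROBING;                        // some other unique name: try the next probe
    }

The widened check (internalized bit OR SYMBOL_TYPE) mirrors the patch-wide rename from "symbol" to "unique name"/"internalized string" throughout these stubs.
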
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h index 803a711de9..c2ae5f0fae 100644 --- a/deps/v8/src/ia32/code-stubs-ia32.h +++ b/deps/v8/src/ia32/code-stubs-ia32.h @@ -38,7 +38,7 @@ namespace internal { // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. -class TranscendentalCacheStub: public CodeStub { +class TranscendentalCacheStub: public PlatformCodeStub { public: enum ArgumentType { TAGGED = 0, @@ -61,15 +61,17 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { +class StoreBufferOverflowStub: public PlatformCodeStub { public: explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) - : save_doubles_(save_fp) { } + : save_doubles_(save_fp) { + ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || save_fp == kDontSaveFPRegs); + } void Generate(MacroAssembler* masm); virtual bool IsPregenerated() { return true; } - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } private: @@ -80,7 +82,7 @@ class StoreBufferOverflowStub: public CodeStub { }; -class UnaryOpStub: public CodeStub { +class UnaryOpStub: public PlatformCodeStub { public: UnaryOpStub(Token::Value op, UnaryOverwriteMode mode, @@ -131,9 +133,9 @@ class UnaryOpStub: public CodeStub { Label::Distance non_smi_near = Label::kFar); void GenerateSmiCodeUndo(MacroAssembler* masm); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateHeapNumberStubSub(MacroAssembler* masm); - void GenerateHeapNumberStubBitNot(MacroAssembler* masm); + void GenerateNumberStub(MacroAssembler* masm); + void GenerateNumberStubSub(MacroAssembler* masm); + void GenerateNumberStubBitNot(MacroAssembler* masm); void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); @@ -154,96 +156,6 @@ class UnaryOpStub: public CodeStub { }; -class BinaryOpStub: public CodeStub { - public: - BinaryOpStub(Token::Value op, OverwriteMode mode) - : op_(op), - mode_(mode), - operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED) { - use_sse3_ = CpuFeatures::IsSupported(SSE3); - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - BinaryOpStub( - int key, - BinaryOpIC::TypeInfo operands_type, - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - use_sse3_(SSE3Bits::decode(key)), - operands_type_(operands_type), - result_type_(result_type) { } - - private: - enum SmiCodeGenerateHeapNumberResults { - ALLOW_HEAPNUMBER_RESULTS, - NO_HEAPNUMBER_RESULTS - }; - - Token::Value op_; - OverwriteMode mode_; - bool use_sse3_; - - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo operands_type_; - BinaryOpIC::TypeInfo result_type_; - - virtual void PrintName(StringStream* stream); - - // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM. 
- class ModeBits: public BitField<OverwriteMode, 0, 2> {}; - class OpBits: public BitField<Token::Value, 2, 7> {}; - class SSE3Bits: public BitField<bool, 9, 1> {}; - class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; - class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; - - Major MajorKey() { return BinaryOp; } - int MinorKey() { - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | SSE3Bits::encode(use_sse3_) - | OperandTypeInfoBits::encode(operands_type_) - | ResultTypeInfoBits::encode(result_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateGeneric(MacroAssembler* masm); - void GenerateSmiCode(MacroAssembler* masm, - Label* slow, - SmiCodeGenerateHeapNumberResults heapnumber_results); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateUninitializedStub(MacroAssembler* masm); - void GenerateSmiStub(MacroAssembler* masm); - void GenerateInt32Stub(MacroAssembler* masm); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateOddballStub(MacroAssembler* masm); - void GenerateStringStub(MacroAssembler* masm); - void GenerateBothStringStub(MacroAssembler* masm); - void GenerateGenericStub(MacroAssembler* masm); - void GenerateAddStrings(MacroAssembler* masm); - - void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(operands_type_); - } - - virtual void FinishCode(Handle<Code> code) { - code->set_binary_op_type(operands_type_); - code->set_binary_op_result_type(result_type_); - } - - friend class CodeGenerator; -}; - - class StringHelper : public AllStatic { public: // Generate code for copying characters using a simple loop. This should only @@ -267,15 +179,15 @@ class StringHelper : public AllStatic { Register scratch, // Neither of above. bool ascii); - // Probe the symbol table for a two character string. If the string + // Probe the string table for a two character string. If the string // requires non-standard hashing a jump to the label not_probed is // performed and registers c1 and c2 are preserved. In all other // cases they are clobbered. If the string is not found by probing a // jump to the label not_found is performed. This jump does not - // guarantee that the string is not in the symbol table. If the + // guarantee that the string is not in the string table. If the // string is found the code falls through with the string in // register eax. 
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -315,7 +227,7 @@ enum StringAddFlags { }; -class StringAddStub: public CodeStub { +class StringAddStub: public PlatformCodeStub { public: explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} @@ -337,7 +249,7 @@ class StringAddStub: public CodeStub { }; -class SubStringStub: public CodeStub { +class SubStringStub: public PlatformCodeStub { public: SubStringStub() {} @@ -349,7 +261,7 @@ class SubStringStub: public CodeStub { }; -class StringCompareStub: public CodeStub { +class StringCompareStub: public PlatformCodeStub { public: StringCompareStub() { } @@ -385,7 +297,7 @@ class StringCompareStub: public CodeStub { }; -class NumberToStringStub: public CodeStub { +class NumberToStringStub: public PlatformCodeStub { public: NumberToStringStub() { } @@ -410,14 +322,14 @@ class NumberToStringStub: public CodeStub { }; -class StringDictionaryLookupStub: public CodeStub { +class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - StringDictionaryLookupStub(Register dictionary, - Register result, - Register index, - LookupMode mode) + NameDictionaryLookupStub(Register dictionary, + Register result, + Register index, + LookupMode mode) : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { } void Generate(MacroAssembler* masm); @@ -426,7 +338,7 @@ class StringDictionaryLookupStub: public CodeStub { Label* miss, Label* done, Register properties, - Handle<String> name, + Handle<Name> name, Register r0); static void GeneratePositiveLookup(MacroAssembler* masm, @@ -444,14 +356,14 @@ class StringDictionaryLookupStub: public CodeStub { static const int kTotalProbes = 20; static const int kCapacityOffset = - StringDictionary::kHeaderSize + - StringDictionary::kCapacityIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kCapacityIndex * kPointerSize; static const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryLookup; } + Major MajorKey() { return NameDictionaryLookup; } int MinorKey() { return DictionaryBits::encode(dictionary_.code()) | @@ -472,7 +384,7 @@ class StringDictionaryLookupStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { +class RecordWriteStub: public PlatformCodeStub { public: RecordWriteStub(Register object, Register value, @@ -487,6 +399,7 @@ class RecordWriteStub: public CodeStub { regs_(object, // An input reg. address, // An input reg. value) { // One scratch reg. + ASSERT(CpuFeatures::IsSafeForSnapshot(SSE2) || fp_mode == kDontSaveFPRegs); } enum Mode { @@ -496,7 +409,7 @@ class RecordWriteStub: public CodeStub { }; virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8. 
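
The stub key plumbing visible in this header works the same way before and after the rename: each stub packs its compile-time configuration into a minor key via BitField-style encoders (ModeBits/OpBits/SSE3Bits in the deleted BinaryOpStub, DictionaryBits and friends in NameDictionaryLookupStub), so each distinct configuration maps to its own generated code object. A minimal sketch of that pattern, not V8's actual BitField template:

    #include <stdint.h>

    // Each parameter owns a fixed bit range of the 32-bit minor key.
    template <typename T, int kShift, int kSize>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << kShift) & kMask;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> kShift);
      }
    };

    // E.g. a 2-bit mode in bits 0..1 and a 7-bit token in bits 2..8, as in the
    // removed "RRRTTTSOOOOOOOMM" encoding comment:
    typedef BitFieldSketch<int, 0, 2> ModeBitsSketch;
    typedef BitFieldSketch<int, 2, 7> OpBitsSketch;

    static inline uint32_t MakeMinorKey(int op, int mode) {
      return OpBitsSketch::encode(op) | ModeBitsSketch::encode(mode);
    }
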
@@ -630,7 +543,7 @@ class RecordWriteStub: public CodeStub { if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax); if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx); if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); masm->sub(esp, Immediate(kDoubleSize * (XMMRegister::kNumRegisters - 1))); // Save all XMM registers except XMM0. @@ -644,7 +557,7 @@ class RecordWriteStub: public CodeStub { inline void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) { if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); // Restore all XMM registers except XMM0. for (int i = XMMRegister::kNumRegisters - 1; i > 0; i--) { XMMRegister reg = XMMRegister::from_code(i); @@ -675,7 +588,7 @@ class RecordWriteStub: public CodeStub { Register GetRegThatIsNotEcxOr(Register r1, Register r2, Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { Register candidate = Register::FromAllocationIndex(i); if (candidate.is(ecx)) continue; if (candidate.is(r1)) continue; diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc index eb6868729b..b3fce81a32 100644 --- a/deps/v8/src/ia32/codegen-ia32.cc +++ b/deps/v8/src/ia32/codegen-ia32.cc @@ -94,7 +94,45 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { CodeDesc desc; masm.GetCode(&desc); - ASSERT(desc.reloc_size == 0); + ASSERT(!RelocInfo::RequiresRelocation(desc)); + + CPU::FlushICache(buffer, actual_size); + OS::ProtectCode(buffer, actual_size); + return FUNCTION_CAST<UnaryMathFunction>(buffer); +} + + +UnaryMathFunction CreateExpFunction() { + if (!CpuFeatures::IsSupported(SSE2)) return &exp; + if (!FLAG_fast_math) return &exp; + size_t actual_size; + byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &exp; + ExternalReference::InitializeMathExpData(); + + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + // esp[1 * kPointerSize]: raw double input + // esp[0 * kPointerSize]: return address + { + CpuFeatureScope use_sse2(&masm, SSE2); + XMMRegister input = xmm1; + XMMRegister result = xmm2; + __ movdbl(input, Operand(esp, 1 * kPointerSize)); + __ push(eax); + __ push(ebx); + + MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx); + + __ pop(ebx); + __ pop(eax); + __ movdbl(Operand(esp, 1 * kPointerSize), result); + __ fld_d(Operand(esp, 1 * kPointerSize)); + __ Ret(); + } + + CodeDesc desc; + masm.GetCode(&desc); + ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); OS::ProtectCode(buffer, actual_size); @@ -116,7 +154,7 @@ UnaryMathFunction CreateSqrtFunction() { // esp[0 * kPointerSize]: return address // Move double input into registers. 
{ - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(&masm, SSE2); __ movdbl(xmm0, Operand(esp, 1 * kPointerSize)); __ sqrtsd(xmm0, xmm0); __ movdbl(Operand(esp, 1 * kPointerSize), xmm0); @@ -127,7 +165,7 @@ UnaryMathFunction CreateSqrtFunction() { CodeDesc desc; masm.GetCode(&desc); - ASSERT(desc.reloc_size == 0); + ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); OS::ProtectCode(buffer, actual_size); @@ -176,7 +214,7 @@ OS::MemCopyFunction CreateMemCopyFunction() { __ bind(&ok); } if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope enable(SSE2); + CpuFeatureScope enable(&masm, SSE2); __ push(edi); __ push(esi); stack_offset += 2 * kPointerSize; @@ -337,7 +375,7 @@ OS::MemCopyFunction CreateMemCopyFunction() { CodeDesc desc; masm.GetCode(&desc); - ASSERT(desc.reloc_size == 0); + ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); OS::ProtectCode(buffer, actual_size); @@ -351,8 +389,10 @@ OS::MemCopyFunction CreateMemCopyFunction() { #define __ ACCESS_MASM(masm) + void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm) { + MacroAssembler* masm, AllocationSiteMode mode, + Label* allocation_site_info_found) { // ----------- S t a t e ------------- // -- eax : value // -- ebx : target map @@ -360,6 +400,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- + if (mode == TRACK_ALLOCATION_SITE) { + ASSERT(allocation_site_info_found != NULL); + __ TestJSArrayForAllocationSiteInfo(edx, edi); + __ j(equal, allocation_site_info_found); + } + // Set transitioned map. __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx); __ RecordWriteField(edx, @@ -373,7 +419,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- eax : value // -- ebx : target map @@ -383,6 +429,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // ----------------------------------- Label loop, entry, convert_hole, gc_required, only_change_map; + if (mode == TRACK_ALLOCATION_SITE) { + __ TestJSArrayForAllocationSiteInfo(edx, edi); + __ j(equal, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); @@ -397,24 +448,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Allocate new FixedDoubleArray. 
// edx: receiver // edi: length of source FixedArray (smi-tagged) - __ lea(esi, Operand(edi, - times_4, - FixedDoubleArray::kHeaderSize + kPointerSize)); - __ AllocateInNewSpace(esi, eax, ebx, no_reg, &gc_required, TAG_OBJECT); - - Label aligned, aligned_done; - __ test(eax, Immediate(kDoubleAlignmentMask - kHeapObjectTag)); - __ j(zero, &aligned, Label::kNear); - __ mov(FieldOperand(eax, 0), - Immediate(masm->isolate()->factory()->one_pointer_filler_map())); - __ add(eax, Immediate(kPointerSize)); - __ jmp(&aligned_done); - - __ bind(&aligned); - __ mov(Operand(eax, esi, times_1, -kPointerSize-1), - Immediate(masm->isolate()->factory()->one_pointer_filler_map())); - - __ bind(&aligned_done); + AllocationFlags flags = + static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT); + __ AllocateInNewSpace(FixedDoubleArray::kHeaderSize, times_8, + edi, REGISTER_VALUE_IS_SMI, + eax, ebx, no_reg, &gc_required, flags); // eax: destination FixedDoubleArray // edi: number of elements @@ -441,7 +479,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( ExternalReference::address_of_the_hole_nan(); XMMRegister the_hole_nan = xmm1; if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ movdbl(the_hole_nan, Operand::StaticVariable(canonical_the_hole_nan_reference)); } @@ -466,7 +504,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Normal smi, convert it to double and store. __ SmiUntag(ebx); if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm, SSE2); __ cvtsi2sd(xmm0, ebx); __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), xmm0); @@ -487,7 +525,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( } if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); __ movdbl(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize), the_hole_nan); } else { @@ -521,7 +559,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- eax : value // -- ebx : target map @@ -531,6 +569,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // ----------------------------------- Label loop, entry, convert_hole, gc_required, only_change_map, success; + if (mode == TRACK_ALLOCATION_SITE) { + __ TestJSArrayForAllocationSiteInfo(edx, edi); + __ j(equal, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); @@ -592,7 +635,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ AllocateHeapNumber(edx, esi, no_reg, &gc_required); // edx: new heap number if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm, SSE2); __ movdbl(xmm0, FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize)); __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); @@ -732,7 +775,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, // Dispatch on the encoding: ASCII or two-byte. 
Label ascii; __ bind(&seq_string); - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ test(result, Immediate(kStringEncodingMask)); __ j(not_zero, &ascii, Label::kNear); @@ -751,12 +794,174 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ movzx_b(result, FieldOperand(string, index, times_1, - SeqAsciiString::kHeaderSize)); + SeqOneByteString::kHeaderSize)); + __ bind(&done); +} + + +void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, + String::Encoding encoding, + Register string, + Register index, + Register value) { + if (FLAG_debug_code) { + __ test(index, Immediate(kSmiTagMask)); + __ Check(zero, "Non-smi index"); + __ test(value, Immediate(kSmiTagMask)); + __ Check(zero, "Non-smi value"); + + __ cmp(index, FieldOperand(string, String::kLengthOffset)); + __ Check(less, "Index is too large"); + + __ cmp(index, Immediate(Smi::FromInt(0))); + __ Check(greater_equal, "Index is negative"); + + __ push(value); + __ mov(value, FieldOperand(string, HeapObject::kMapOffset)); + __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset)); + + __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type)); + __ Check(equal, "Unexpected string type"); + __ pop(value); + } + + __ SmiUntag(value); + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + if (encoding == String::ONE_BYTE_ENCODING) { + __ SmiUntag(index); + __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize), + value); + } else { + // No need to untag a smi for two-byte addressing. 
+ __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize), + value); + } +} + + +static Operand ExpConstant(int index) { + return Operand::StaticVariable(ExternalReference::math_exp_constants(index)); +} + + +void MathExpGenerator::EmitMathExp(MacroAssembler* masm, + XMMRegister input, + XMMRegister result, + XMMRegister double_scratch, + Register temp1, + Register temp2) { + ASSERT(!input.is(double_scratch)); + ASSERT(!input.is(result)); + ASSERT(!result.is(double_scratch)); + ASSERT(!temp1.is(temp2)); + ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + + Label done; + + __ movdbl(double_scratch, ExpConstant(0)); + __ xorpd(result, result); + __ ucomisd(double_scratch, input); + __ j(above_equal, &done); + __ ucomisd(input, ExpConstant(1)); + __ movdbl(result, ExpConstant(2)); + __ j(above_equal, &done); + __ movdbl(double_scratch, ExpConstant(3)); + __ movdbl(result, ExpConstant(4)); + __ mulsd(double_scratch, input); + __ addsd(double_scratch, result); + __ movd(temp2, double_scratch); + __ subsd(double_scratch, result); + __ movdbl(result, ExpConstant(6)); + __ mulsd(double_scratch, ExpConstant(5)); + __ subsd(double_scratch, input); + __ subsd(result, double_scratch); + __ movsd(input, double_scratch); + __ mulsd(input, double_scratch); + __ mulsd(result, input); + __ mov(temp1, temp2); + __ mulsd(result, ExpConstant(7)); + __ subsd(result, double_scratch); + __ add(temp1, Immediate(0x1ff800)); + __ addsd(result, ExpConstant(8)); + __ and_(temp2, Immediate(0x7ff)); + __ shr(temp1, 11); + __ shl(temp1, 20); + __ movd(input, temp1); + __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01 + __ movdbl(double_scratch, Operand::StaticArray( + temp2, times_8, ExternalReference::math_exp_log_table())); + __ por(input, double_scratch); + __ mulsd(result, input); __ bind(&done); } #undef __ +static const int kNoCodeAgeSequenceLength = 5; + +static byte* GetNoCodeAgeSequence(uint32_t* length) { + static bool initialized = false; + static byte sequence[kNoCodeAgeSequenceLength]; + *length = kNoCodeAgeSequenceLength; + if (!initialized) { + // The sequence of instructions that is patched out for aging code is the + // following boilerplate stack-building prologue that is found both in + // FUNCTION and OPTIMIZED_FUNCTION code: + CodePatcher patcher(sequence, kNoCodeAgeSequenceLength); + patcher.masm()->push(ebp); + patcher.masm()->mov(ebp, esp); + patcher.masm()->push(esi); + patcher.masm()->push(edi); + initialized = true; + } + return sequence; +} + + +bool Code::IsYoungSequence(byte* sequence) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + bool result = (!memcmp(sequence, young_sequence, young_length)); + ASSERT(result || *sequence == kCallOpcode); + return result; +} + + +void Code::GetCodeAgeAndParity(byte* sequence, Age* age, + MarkingParity* parity) { + if (IsYoungSequence(sequence)) { + *age = kNoAge; + *parity = NO_MARKING_PARITY; + } else { + sequence++; // Skip the kCallOpcode byte + Address target_address = sequence + *reinterpret_cast<int*>(sequence) + + Assembler::kCallTargetAddressOffset; + Code* stub = GetCodeFromTargetAddress(target_address); + GetCodeAgeAndParity(stub, age, parity); + } +} + + +void Code::PatchPlatformCodeAge(byte* sequence, + Code::Age age, + MarkingParity parity) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + if (age == kNoAge) { + memcpy(sequence, young_sequence, young_length); + CPU::FlushICache(sequence, 
young_length); + } else { + Code* stub = GetCodeAgeStub(age, parity); + CodePatcher patcher(sequence, young_length); + patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32); + } +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h index f4ab0b50f6..5137274145 100644 --- a/deps/v8/src/ia32/codegen-ia32.h +++ b/deps/v8/src/ia32/codegen-ia32.h @@ -88,6 +88,20 @@ class StringCharLoadGenerator : public AllStatic { DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator); }; + +class MathExpGenerator : public AllStatic { + public: + static void EmitMathExp(MacroAssembler* masm, + XMMRegister input, + XMMRegister result, + XMMRegister double_scratch, + Register temp1, + Register temp2); + + private: + DISALLOW_COPY_AND_ASSIGN(MathExpGenerator); +}; + } } // namespace v8::internal #endif // V8_IA32_CODEGEN_IA32_H_ diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc index 99ad5225bc..8cdcf9965f 100644 --- a/deps/v8/src/ia32/deoptimizer-ia32.cc +++ b/deps/v8/src/ia32/deoptimizer-ia32.cc @@ -114,17 +114,19 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { } -void Deoptimizer::DeoptimizeFunction(JSFunction* function) { - if (!function->IsOptimized()) return; +void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList( + JSFunction* function) { + Isolate* isolate = function->GetIsolate(); + HandleScope scope(isolate); + AssertNoAllocation no_allocation; + + ASSERT(function->IsOptimized()); + ASSERT(function->FunctionsInFunctionListShareSameCode()); // The optimized code is going to be patched, so we cannot use it // any more. Play safe and reset the whole cache. function->shared()->ClearOptimizedCodeMap(); - Isolate* isolate = function->GetIsolate(); - HandleScope scope(isolate); - AssertNoAllocation no_allocation; - // Get the optimized code. Code* code = function->code(); Address code_start_address = code->instruction_start(); @@ -155,8 +157,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // Patch lazy deoptimization entry. Address call_address = code_start_address + deopt_data->Pc(i)->value(); CodePatcher patcher(call_address, patch_size()); - Address deopt_entry = GetDeoptimizationEntry(i, LAZY); - patcher.masm()->call(deopt_entry, RelocInfo::NONE); + Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); + patcher.masm()->call(deopt_entry, RelocInfo::NONE32); // We use RUNTIME_ENTRY for deoptimization bailouts. RelocInfo rinfo(call_address + 1, // 1 after the call opcode. RelocInfo::RUNTIME_ENTRY, @@ -210,8 +212,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { static const byte kJnsInstruction = 0x79; static const byte kJnsOffset = 0x13; -static const byte kJaeInstruction = 0x73; -static const byte kJaeOffset = 0x07; static const byte kCallInstruction = 0xe8; static const byte kNopByteOne = 0x66; static const byte kNopByteTwo = 0x90; @@ -224,31 +224,26 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, Address call_target_address = pc_after - kIntSize; ASSERT_EQ(check_code->entry(), Assembler::target_address_at(call_target_address)); - // The stack check code matches the pattern: + // The back edge bookkeeping code matches the pattern: // - // cmp esp, <limit> - // jae ok + // sub <profiling_counter>, <delta> + // jns ok // call <stack guard> // test eax, <loop nesting depth> // ok: ... 
// // We will patch away the branch so the code is: // - // cmp esp, <limit> ;; Not changed + // sub <profiling_counter>, <delta> ;; Not changed // nop // nop // call <on-stack replacment> // test eax, <loop nesting depth> // ok: - if (FLAG_count_based_interrupts) { - ASSERT_EQ(kJnsInstruction, *(call_target_address - 3)); - ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); - } else { - ASSERT_EQ(kJaeInstruction, *(call_target_address - 3)); - ASSERT_EQ(kJaeOffset, *(call_target_address - 2)); - } - ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); + ASSERT_EQ(kJnsInstruction, *(call_target_address - 3)); + ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); + ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); *(call_target_address - 3) = kNopByteOne; *(call_target_address - 2) = kNopByteTwo; Assembler::set_target_address_at(call_target_address, @@ -272,13 +267,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code, ASSERT_EQ(kNopByteOne, *(call_target_address - 3)); ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); - if (FLAG_count_based_interrupts) { - *(call_target_address - 3) = kJnsInstruction; - *(call_target_address - 2) = kJnsOffset; - } else { - *(call_target_address - 3) = kJaeInstruction; - *(call_target_address - 2) = kJaeOffset; - } + *(call_target_address - 3) = kJnsInstruction; + *(call_target_address - 2) = kJnsOffset; Assembler::set_target_address_at(call_target_address, check_code->entry()); @@ -307,7 +297,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) { void Deoptimizer::DoComputeOsrOutputFrame() { DeoptimizationInputData* data = DeoptimizationInputData::cast( - optimized_code_->deoptimization_data()); + compiled_code_->deoptimization_data()); unsigned ast_id = data->OsrAstId()->value(); // TODO(kasperl): This should not be the bailout_id_. It should be // the ast id. Confusing. @@ -344,7 +334,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned input_frame_size = input_->GetFrameSize(); ASSERT(fixed_size + height_in_bytes == input_frame_size); - unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize; + unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize; unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value(); unsigned outgoing_size = outgoing_height * kPointerSize; unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size; @@ -455,7 +445,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned pc_offset = data->OsrPcOffset()->value(); uint32_t pc = reinterpret_cast<uint32_t>( - optimized_code_->entry() + pc_offset); + compiled_code_->entry() + pc_offset); output_[0]->SetPc(pc); } Code* continuation = @@ -473,334 +463,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() { } -void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, - int frame_index) { - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. 
- FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); - - // Arguments adaptor can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - uint32_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // A marker value is used in place of the context. - output_offset -= kPointerSize; - intptr_t context = reinterpret_cast<intptr_t>( - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - output_frame->SetFrameSlot(output_offset, context); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n", - top_address + output_offset, output_offset, context); - } - - // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. 
- output_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - ASSERT(0 == output_offset); - - Builtins* builtins = isolate_->builtins(); - Code* adaptor_trampoline = - builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); - uint32_t pc = reinterpret_cast<uint32_t>( - adaptor_trampoline->instruction_start() + - isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); - output_frame->SetPc(pc); -} - - -void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, - int frame_index) { - Builtins* builtins = isolate_->builtins(); - Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric); - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating construct stub => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = 7 * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::CONSTRUCT); - - // Construct stub can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - uint32_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. 
- output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n", - top_address + output_offset, output_offset, value); - } - - // The output frame reflects a JSConstructStubGeneric frame. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(construct_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. - output_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - // The newly allocated object was passed as receiver in the artificial - // constructor stub environment created by HEnvironment::CopyForInlining(). - output_offset -= kPointerSize; - value = output_frame->GetFrameSlot(output_frame_size - kPointerSize); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n", - top_address + output_offset, output_offset, value); - } - - ASSERT(0 == output_offset); - - uint32_t pc = reinterpret_cast<uint32_t>( - construct_stub->instruction_start() + - isolate_->heap()->construct_stub_deopt_pc_offset()->value()); - output_frame->SetPc(pc); -} - - -void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, - int frame_index, - bool is_setter_stub_frame) { - JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next())); - // The receiver (and the implicit return value, if any) are expected in - // registers by the LoadIC/StoreIC, so they don't belong to the output stack - // frame. This means that we have to use a height of 0. - unsigned height = 0; - unsigned height_in_bytes = height * kPointerSize; - const char* kind = is_setter_stub_frame ? "setter" : "getter"; - if (FLAG_trace_deopt) { - PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes); - } - - // We need 1 stack entry for the return address + 4 stack entries from - // StackFrame::INTERNAL (FP, context, frame type, code object, see - // MacroAssembler::EnterFrame). For a setter stub frame we need one additional - // entry for the implicit return value, see - // StoreStubCompiler::CompileStoreViaSetter. - unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0); - unsigned fixed_frame_size = fixed_frame_entries * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, accessor); - output_frame->SetFrameType(StackFrame::INTERNAL); - - // A frame for an accessor stub can not be the topmost or bottommost one. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous frame's top and - // this frame's size. 
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - unsigned output_offset = output_frame_size; - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; function (%s sentinel)\n", - top_address + output_offset, output_offset, value, kind); - } - - // Get Code object from accessor stub. - output_offset -= kPointerSize; - Builtins::Name name = is_setter_stub_frame ? - Builtins::kStoreIC_Setter_ForDeopt : - Builtins::kLoadIC_Getter_ForDeopt; - Code* accessor_stub = isolate_->builtins()->builtin(name); - value = reinterpret_cast<intptr_t>(accessor_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Skip receiver. - Translation::Opcode opcode = - static_cast<Translation::Opcode>(iterator->Next()); - iterator->Skip(Translation::NumberOfOperandsFor(opcode)); - - if (is_setter_stub_frame) { - // The implicit return value was part of the artificial setter stub - // environment. - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - ASSERT(0 == output_offset); - - Smi* offset = is_setter_stub_frame ? 
- isolate_->heap()->setter_stub_deopt_pc_offset() : - isolate_->heap()->getter_stub_deopt_pc_offset(); - intptr_t pc = reinterpret_cast<intptr_t>( - accessor_stub->instruction_start() + offset->value()); - output_frame->SetPc(pc); -} - - void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, int frame_index) { BailoutId node_id = BailoutId(iterator->Next()); @@ -815,7 +477,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, } unsigned height = iterator->Next(); unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" translating "); function->PrintName(); PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes); @@ -893,7 +555,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, value = output_[frame_index - 1]->GetPc(); } output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", top_address + output_offset, output_offset, value); } @@ -916,7 +578,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, fp_value); output_frame->SetFp(fp_value); if (is_topmost) output_frame->SetRegister(ebp.code(), fp_value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", fp_value, output_offset, value); } @@ -936,7 +598,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, output_frame->SetFrameSlot(output_offset, value); output_frame->SetContext(value); if (is_topmost) output_frame->SetRegister(esi.code(), value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", top_address + output_offset, output_offset, value); } @@ -949,7 +611,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, // input frame. 
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", top_address + output_offset, output_offset, value); } @@ -997,7 +659,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } input_->SetRegister(esp.code(), reinterpret_cast<intptr_t>(frame->sp())); input_->SetRegister(ebp.code(), reinterpret_cast<intptr_t>(frame->fp())); - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { input_->SetDoubleRegister(i, 0.0); } @@ -1008,11 +670,31 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } +void Deoptimizer::SetPlatformCompiledStubRegisters( + FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { + intptr_t handler = + reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); + int params = descriptor->register_param_count_; + if (descriptor->stack_parameter_count_ != NULL) { + params++; + } + output_frame->SetRegister(eax.code(), params); + output_frame->SetRegister(ebx.code(), handler); +} + + +void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { + for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { + double double_value = input_->GetDoubleRegister(i); + output_frame->SetDoubleRegister(i, double_value); + } +} + + #define __ masm()-> void Deoptimizer::EntryGenerator::Generate() { GeneratePrologue(); - CpuFeatures::Scope scope(SSE2); Isolate* isolate = masm()->isolate(); @@ -1022,10 +704,13 @@ void Deoptimizer::EntryGenerator::Generate() { const int kDoubleRegsSize = kDoubleSize * XMMRegister::kNumAllocatableRegisters; __ sub(esp, Immediate(kDoubleRegsSize)); - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { - XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); - int offset = i * kDoubleSize; - __ movdbl(Operand(esp, offset), xmm_reg); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { + XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); + int offset = i * kDoubleSize; + __ movdbl(Operand(esp, offset), xmm_reg); + } } __ pushad(); @@ -1073,15 +758,23 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(Operand(ebx, offset)); } - // Fill in the double input registers. int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize; - __ movdbl(xmm0, Operand(esp, src_offset)); - __ movdbl(Operand(ebx, dst_offset), xmm0); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + // Fill in the double input registers. + for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize; + __ movdbl(xmm0, Operand(esp, src_offset)); + __ movdbl(Operand(ebx, dst_offset), xmm0); + } } + // Clear FPU all exceptions. + // TODO(ulan): Find out why the TOP register is not zero here in some cases, + // and check that the generated code never deoptimizes with unbalanced stack. + __ fnclex(); + // Remove the bailout id and the double registers from the stack. 
if (type() == EAGER) { __ add(esp, Immediate(kDoubleRegsSize + kPointerSize)); @@ -1098,10 +791,13 @@ void Deoptimizer::EntryGenerator::Generate() { // limit and copy the contents of the activation frame to the input // frame description. __ lea(edx, Operand(ebx, FrameDescription::frame_content_offset())); + Label pop_loop_header; + __ jmp(&pop_loop_header); Label pop_loop; __ bind(&pop_loop); __ pop(Operand(edx, 0)); __ add(edx, Immediate(sizeof(uint32_t))); + __ bind(&pop_loop_header); __ cmp(ecx, esp); __ j(not_equal, &pop_loop); @@ -1139,27 +835,33 @@ void Deoptimizer::EntryGenerator::Generate() { } // Replace the current frame with the output frames. - Label outer_push_loop, inner_push_loop; + Label outer_push_loop, inner_push_loop, + outer_loop_header, inner_loop_header; // Outer loop state: eax = current FrameDescription**, edx = one past the // last FrameDescription**. __ mov(edx, Operand(eax, Deoptimizer::output_count_offset())); __ mov(eax, Operand(eax, Deoptimizer::output_offset())); __ lea(edx, Operand(eax, edx, times_4, 0)); + __ jmp(&outer_loop_header); __ bind(&outer_push_loop); // Inner loop state: ebx = current FrameDescription*, ecx = loop index. __ mov(ebx, Operand(eax, 0)); __ mov(ecx, Operand(ebx, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); __ bind(&inner_push_loop); __ sub(ecx, Immediate(sizeof(uint32_t))); __ push(Operand(ebx, ecx, times_1, FrameDescription::frame_content_offset())); + __ bind(&inner_loop_header); __ test(ecx, ecx); __ j(not_zero, &inner_push_loop); __ add(eax, Immediate(kPointerSize)); + __ bind(&outer_loop_header); __ cmp(eax, edx); __ j(below, &outer_push_loop); - // In case of OSR, we have to restore the XMM registers. - if (type() == OSR) { + // In case of OSR or a failed STUB, we have to restore the XMM registers. + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int src_offset = i * kDoubleSize + double_regs_offset; diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc index 75b46bd478..9eb0d292c7 100644 --- a/deps/v8/src/ia32/disasm-ia32.cc +++ b/deps/v8/src/ia32/disasm-ia32.cc @@ -869,6 +869,7 @@ static const char* F0Mnem(byte f0byte) { case 0xAF: return "imul"; case 0xA5: return "shld"; case 0xAD: return "shrd"; + case 0xAC: return "shrd"; // 3-operand version. 
case 0xAB: return "bts"; default: return NULL; } @@ -1039,6 +1040,14 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer, NameOfXMMRegister(regop), NameOfXMMRegister(rm)); data++; + } else if (f0byte == 0x50) { + data += 2; + int mod, regop, rm; + get_modrm(*data, &mod, ®op, &rm); + AppendToBuffer("movmskps %s,%s", + NameOfCPURegister(regop), + NameOfXMMRegister(rm)); + data++; } else if ((f0byte & 0xF0) == 0x80) { data += JumpConditional(data, branch_hint); } else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 || diff --git a/deps/v8/src/ia32/frames-ia32.cc b/deps/v8/src/ia32/frames-ia32.cc index dd44f0ee5f..4932fa387e 100644 --- a/deps/v8/src/ia32/frames-ia32.cc +++ b/deps/v8/src/ia32/frames-ia32.cc @@ -29,6 +29,9 @@ #if defined(V8_TARGET_ARCH_IA32) +#include "assembler.h" +#include "assembler-ia32.h" +#include "assembler-ia32-inl.h" #include "frames-inl.h" namespace v8 { @@ -40,6 +43,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) { } +Register StubFailureTrampolineFrame::fp_register() { return ebp; } +Register StubFailureTrampolineFrame::context_register() { return esi; } + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h index 18915e2e3c..c59b1664a1 100644 --- a/deps/v8/src/ia32/frames-ia32.h +++ b/deps/v8/src/ia32/frames-ia32.h @@ -97,22 +97,6 @@ class ExitFrameConstants : public AllStatic { }; -class StandardFrameConstants : public AllStatic { - public: - // Fixed part of the frame consists of return address, caller fp, - // context and function. - // StandardFrame::IterateExpressions assumes that kContextOffset is the last - // object pointer. - static const int kFixedFrameSize = 4 * kPointerSize; - static const int kExpressionsOffset = -3 * kPointerSize; - static const int kMarkerOffset = -2 * kPointerSize; - static const int kContextOffset = -1 * kPointerSize; - static const int kCallerFPOffset = 0 * kPointerSize; - static const int kCallerPCOffset = +1 * kPointerSize; - static const int kCallerSPOffset = +2 * kPointerSize; -}; - - class JavaScriptFrameConstants : public AllStatic { public: // FP-relative. @@ -130,14 +114,30 @@ class JavaScriptFrameConstants : public AllStatic { class ArgumentsAdaptorFrameConstants : public AllStatic { public: + // FP-relative. static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + static const int kFrameSize = StandardFrameConstants::kFixedFrameSize + kPointerSize; }; +class ConstructFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kImplicitReceiverOffset = -5 * kPointerSize; + static const int kConstructorOffset = kMinInt; + static const int kLengthOffset = -4 * kPointerSize; + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize; +}; + + class InternalFrameConstants : public AllStatic { public: + // FP-relative. 
static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; }; diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc index 406537d2d3..ebc3a2bd5c 100644 --- a/deps/v8/src/ia32/full-codegen-ia32.cc +++ b/deps/v8/src/ia32/full-codegen-ia32.cc @@ -119,7 +119,7 @@ void FullCodeGenerator::Generate() { handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell( - Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget))); + Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); Comment cmnt(masm_, "[ function compiled by full code generator"); @@ -127,7 +127,7 @@ void FullCodeGenerator::Generate() { #ifdef DEBUG if (strlen(FLAG_stop_at) > 0 && - info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { + info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { __ int3(); } #endif @@ -156,6 +156,7 @@ void FullCodeGenerator::Generate() { // the frame (that is done below). FrameScope frame_scope(masm_, StackFrame::MANUAL); + info->set_prologue_offset(masm_->pc_offset()); __ push(ebp); // Caller's frame pointer. __ mov(ebp, esp); __ push(esi); // Callee's context. @@ -328,39 +329,27 @@ void FullCodeGenerator::EmitProfilingCounterReset() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, - Label* back_edge_target) { - Comment cmnt(masm_, "[ Stack check"); +void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target) { + Comment cmnt(masm_, "[ Back edge bookkeeping"); Label ok; - if (FLAG_count_based_interrupts) { - int weight = 1; - if (FLAG_weighted_back_edges) { - ASSERT(back_edge_target->is_bound()); - int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); - weight = Min(kMaxBackEdgeWeight, - Max(1, distance / kBackEdgeDistanceUnit)); - } - EmitProfilingCounterDecrement(weight); - __ j(positive, &ok, Label::kNear); - InterruptStub stub; - __ CallStub(&stub); - } else { - // Count based interrupts happen often enough when they are enabled - // that the additional stack checks are not necessary (they would - // only check for interrupts). - ExternalReference stack_limit = - ExternalReference::address_of_stack_limit(isolate()); - __ cmp(esp, Operand::StaticVariable(stack_limit)); - __ j(above_equal, &ok, Label::kNear); - StackCheckStub stub; - __ CallStub(&stub); + int weight = 1; + if (FLAG_weighted_back_edges) { + ASSERT(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kBackEdgeDistanceUnit)); } + EmitProfilingCounterDecrement(weight); + __ j(positive, &ok, Label::kNear); + InterruptStub stub; + __ CallStub(&stub); // Record a mapping of this PC offset to the OSR id. This is used to find // the AST id from the unoptimized code in order to use it as a key into // the deoptimization input data found in the optimized code. - RecordStackCheck(stmt->OsrEntryId()); + RecordBackEdge(stmt->OsrEntryId()); // Loop stack checks can be patched to perform on-stack replacement. 
In // order to decide whether or not to perform OSR we embed the loop depth @@ -369,9 +358,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, ASSERT(loop_depth() > 0); __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); - if (FLAG_count_based_interrupts) { - EmitProfilingCounterReset(); - } + EmitProfilingCounterReset(); __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); @@ -754,8 +741,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { - // The variable in the declaration always resides in the current function - // context. + // The variable in the declaration always resides in the current context. ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (generate_debug_code_) { // Check that we're not inside a with or catch context. @@ -884,33 +870,32 @@ void FullCodeGenerator::VisitFunctionDeclaration( void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { - VariableProxy* proxy = declaration->proxy(); - Variable* variable = proxy->var(); - Handle<JSModule> instance = declaration->module()->interface()->Instance(); - ASSERT(!instance.is_null()); + Variable* variable = declaration->proxy()->var(); + ASSERT(variable->location() == Variable::CONTEXT); + ASSERT(variable->interface()->IsFrozen()); - switch (variable->location()) { - case Variable::UNALLOCATED: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - globals_->Add(variable->name(), zone()); - globals_->Add(instance, zone()); - Visit(declaration->module()); - break; - } + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); - case Variable::CONTEXT: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - EmitDebugCheckDeclarationContext(variable); - __ mov(ContextOperand(esi, variable->index()), Immediate(instance)); - Visit(declaration->module()); - break; - } + // Load instance object. + __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope())); + __ mov(eax, ContextOperand(eax, variable->interface()->Index())); + __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX)); - case Variable::PARAMETER: - case Variable::LOCAL: - case Variable::LOOKUP: - UNREACHABLE(); - } + // Assign it. + __ mov(ContextOperand(esi, variable->index()), eax); + // We know that we have written a module, which is not a smi. + __ RecordWriteContextSlot(esi, + Context::SlotOffset(variable->index()), + eax, + ecx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); + + // Traverse into body. + Visit(declaration->module()); } @@ -945,13 +930,21 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) { void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { // Call the runtime to declare the globals. __ push(esi); // The context is the first argument. - __ push(Immediate(pairs)); - __ push(Immediate(Smi::FromInt(DeclareGlobalsFlags()))); + __ Push(pairs); + __ Push(Smi::FromInt(DeclareGlobalsFlags())); __ CallRuntime(Runtime::kDeclareGlobals, 3); // Return value is ignored. } +void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { + // Call the runtime to declare the modules. + __ Push(descriptions); + __ CallRuntime(Runtime::kDeclareModules, 1); + // Return value is ignored. 
+} + + void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Comment cmnt(masm_, "[ SwitchStatement"); Breakable nested_statement(this, stmt); @@ -1002,7 +995,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Record position before stub call for type feedback. SetSourcePosition(clause->position()); - Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); __ test(eax, eax); @@ -1119,7 +1112,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell( Handle<Object>( - Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker))); + Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker), + isolate())); RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell); __ LoadHeapObject(ebx, cell); __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), @@ -1193,7 +1187,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(loop_statement.continue_label()); __ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1))); - EmitStackCheck(stmt, &loop); + EmitBackEdgeBookkeeping(stmt, &loop); __ jmp(&loop); // Remove the pointers stored on the stack. @@ -1346,9 +1340,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ mov(eax, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == CONST || - local->mode() == CONST_HARMONY || - local->mode() == LET) { + if (local->mode() == LET || + local->mode() == CONST || + local->mode() == CONST_HARMONY) { __ cmp(eax, isolate()->factory()->the_hole_value()); __ j(not_equal, done); if (local->mode() == CONST) { @@ -1495,7 +1489,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -1533,24 +1527,34 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) { void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Comment cmnt(masm_, "[ ObjectLiteral"); Handle<FixedArray> constant_properties = expr->constant_properties(); - __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); - __ push(FieldOperand(edi, JSFunction::kLiteralsOffset)); - __ push(Immediate(Smi::FromInt(expr->literal_index()))); - __ push(Immediate(constant_properties)); int flags = expr->fast_elements() ? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags; flags |= expr->has_function() ? 
ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; - __ push(Immediate(Smi::FromInt(flags))); int properties_count = constant_properties->length() / 2; if (expr->depth() > 1) { + __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ push(FieldOperand(edi, JSFunction::kLiteralsOffset)); + __ push(Immediate(Smi::FromInt(expr->literal_index()))); + __ push(Immediate(constant_properties)); + __ push(Immediate(Smi::FromInt(flags))); __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (flags != ObjectLiteral::kFastElements || + } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ push(FieldOperand(edi, JSFunction::kLiteralsOffset)); + __ push(Immediate(Smi::FromInt(expr->literal_index()))); + __ push(Immediate(constant_properties)); + __ push(Immediate(Smi::FromInt(flags))); __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); } else { + __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset)); + __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset)); + __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index()))); + __ mov(ecx, Immediate(constant_properties)); + __ mov(edx, Immediate(Smi::FromInt(flags))); FastCloneShallowObjectStub stub(properties_count); __ CallStub(&stub); } @@ -1582,7 +1586,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { ASSERT(!CompileTimeValue::IsCompileTimeValue(value)); // Fall through. case ObjectLiteral::Property::COMPUTED: - if (key->handle()->IsSymbol()) { + if (key->handle()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); __ mov(ecx, Immediate(key->handle())); @@ -1597,8 +1601,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { } break; } - // Fall through. - case ObjectLiteral::Property::PROTOTYPE: __ push(Operand(esp, 0)); // Duplicate receiver. VisitForStackValue(key); VisitForStackValue(value); @@ -1609,6 +1611,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { __ Drop(3); } break; + case ObjectLiteral::Property::PROTOTYPE: + __ push(Operand(esp, 0)); // Duplicate receiver. + VisitForStackValue(value); + if (property->emit_store()) { + __ CallRuntime(Runtime::kSetPrototype, 2); + } else { + __ Drop(2); + } + break; case ObjectLiteral::Property::GETTER: accessor_table.lookup(key)->second->getter = value; break; @@ -1671,6 +1682,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1); FastCloneShallowArrayStub stub( FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, + DONT_TRACK_ALLOCATION_SITE, length); __ CallStub(&stub); } else if (expr->depth() > 1) { @@ -1680,12 +1692,19 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else { ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); + FastCloneShallowArrayStub::Mode mode = + FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; + AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites + ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE; + // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot // change, so it's possible to specialize the stub in advance. - FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements - ? 
FastCloneShallowArrayStub::CLONE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); + if (has_constant_fast_elements) { + mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; + allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; + } + + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); __ CallStub(&stub); } @@ -1890,7 +1909,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ bind(&stub_call); __ mov(eax, ecx); BinaryOpStub stub(op, mode); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done, Label::kNear); @@ -1976,7 +1995,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, __ pop(edx); BinaryOpStub stub(op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(eax); @@ -1984,7 +2003,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, void FullCodeGenerator::EmitAssignment(Expression* expr) { - // Invalid left-hand sides are rewritten to have a 'throw + // Invalid left-hand sides are rewritten by the parser to have a 'throw // ReferenceError' on the left-hand side. if (!expr->IsValidLeftHandSide()) { VisitForEffect(expr); @@ -2325,7 +2344,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { VariableProxy* proxy = callee->AsVariableProxy(); Property* property = callee->AsProperty(); - if (proxy != NULL && proxy->var()->is_possibly_eval()) { + if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) { // In a call to eval, we first call %ResolvePossiblyDirectEval to // resolve the function we need to call and the receiver of the call. // Then we call the resolved function using the given arguments. @@ -2467,7 +2486,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { __ mov(ebx, cell); CallConstructStub stub(RECORD_CALL_TARGET); - __ call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + __ call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(eax); } @@ -2548,6 +2567,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { + // TODO(rossberg): incorporate symbols. ZoneList<Expression*>* args = expr->arguments(); ASSERT(args->length() == 1); @@ -2622,7 +2642,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ cmp(ecx, FACTORY->hash_table_map()); __ j(equal, if_false); - // Look for valueOf symbol in the descriptor array, and indicate false if + // Look for valueOf string in the descriptor array, and indicate false if // found. Since we omit an enumeration index check, if it is added via a // transition that shares its descriptor array, this is a false positive. Label entry, loop, done; @@ -2644,11 +2664,11 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( // Calculate location of the first key name. __ add(ebx, Immediate(DescriptorArray::kFirstOffset)); // Loop through all the keys in the descriptor array. If one of these is the - // symbol valueOf the result is false. + // internalized string "valueOf" the result is false. 
__ jmp(&entry); __ bind(&loop); __ mov(edx, FieldOperand(ebx, 0)); - __ cmp(edx, FACTORY->value_of_symbol()); + __ cmp(edx, FACTORY->value_of_string()); __ j(equal, if_false); __ add(ebx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize)); __ bind(&entry); @@ -2683,6 +2703,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( } +void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(eax, if_false); + __ CmpObjectType(eax, SYMBOL_TYPE, ebx); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT(args->length() == 1); @@ -2885,12 +2927,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { // Functions have class 'Function'. __ bind(&function); - __ mov(eax, isolate()->factory()->function_class_symbol()); + __ mov(eax, isolate()->factory()->function_class_string()); __ jmp(&done); // Objects with a non-function constructor have class 'Object'. __ bind(&non_function_constructor); - __ mov(eax, isolate()->factory()->Object_symbol()); + __ mov(eax, isolate()->factory()->Object_string()); __ jmp(&done); // Non-JS objects have class null. @@ -2952,7 +2994,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). // This is implemented on both SSE2 and FPU. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(masm(), SSE2); __ mov(ebx, Immediate(0x49800000)); // 1.0 x 2^20 as single. __ movd(xmm1, ebx); __ movd(xmm0, eax); @@ -3067,6 +3109,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } +void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(ecx); + __ pop(ebx); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::ONE_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx); + context()->Plug(eax); +} + + +void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(ecx); + __ pop(ebx); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::TWO_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx); + context()->Plug(eax); +} + + void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. 
ZoneList<Expression*>* args = expr->arguments(); @@ -3573,10 +3647,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); __ and_(scratch, Immediate( kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask)); - __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag); + __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag); __ j(not_equal, &bailout); __ add(string_length, - FieldOperand(string, SeqAsciiString::kLengthOffset)); + FieldOperand(string, SeqOneByteString::kLengthOffset)); __ j(overflow, &bailout); __ add(index, Immediate(1)); __ cmp(index, array_length); @@ -3612,7 +3686,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Add (separator length times array_length) - separator length // to string_length. __ mov(scratch, separator_operand); - __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset)); + __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset)); __ sub(string_length, scratch); // May be negative, temporarily. __ imul(scratch, array_length_operand); __ j(overflow, &bailout); @@ -3626,11 +3700,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ AllocateAsciiString(result_pos, string_length, scratch, index, string, &bailout); __ mov(result_operand, result_pos); - __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize)); + __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize)); __ mov(string, separator_operand); - __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset), + __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset), Immediate(Smi::FromInt(1))); __ j(equal, &one_char_separator); __ j(greater, &long_separator); @@ -3655,7 +3729,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { FieldOperand(string, String::kLengthOffset)); __ shr(string_length, 1); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); __ add(index, Immediate(1)); __ bind(&loop_1_condition); @@ -3668,7 +3742,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // One-character separator case __ bind(&one_char_separator); // Replace separator with its ASCII character value. 
- __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize)); + __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize)); __ mov_b(separator_operand, scratch); __ Set(index, Immediate(0)); @@ -3696,7 +3770,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { FieldOperand(string, String::kLengthOffset)); __ shr(string_length, 1); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); __ add(index, Immediate(1)); @@ -3725,7 +3799,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { FieldOperand(string, String::kLengthOffset)); __ shr(string_length, 1); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); __ bind(&loop_3_entry); @@ -3737,7 +3811,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { FieldOperand(string, String::kLengthOffset)); __ shr(string_length, 1); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(string, result_pos, string_length, scratch); __ add(index, Immediate(1)); @@ -3945,7 +4019,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, // accumulator register eax. VisitForAccumulatorValue(expr->expression()); SetSourcePosition(expr->position()); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->UnaryOperationFeedbackId()); context()->Plug(eax); } @@ -4067,7 +4141,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { __ mov(edx, eax); __ mov(eax, Immediate(Smi::FromInt(1))); BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId()); + CallIC(stub.GetCode(isolate()), + RelocInfo::CODE_TARGET, + expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4186,12 +4262,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_symbol())) { + if (check->Equals(isolate()->heap()->number_string())) { __ JumpIfSmi(eax, if_true); __ cmp(FieldOperand(eax, HeapObject::kMapOffset), isolate()->factory()->heap_number_map()); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_symbol())) { + } else if (check->Equals(isolate()->heap()->string_string())) { __ JumpIfSmi(eax, if_false); __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edx); __ j(above_equal, if_false); @@ -4199,16 +4275,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ test_b(FieldOperand(edx, Map::kBitFieldOffset), 1 << Map::kIsUndetectable); Split(zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_symbol())) { + } else if (check->Equals(isolate()->heap()->boolean_string())) { __ cmp(eax, isolate()->factory()->true_value()); __ j(equal, if_true); __ cmp(eax, isolate()->factory()->false_value()); Split(equal, if_true, if_false, fall_through); } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_symbol())) { + check->Equals(isolate()->heap()->null_string())) { __ cmp(eax, isolate()->factory()->null_value()); Split(equal, if_true, if_false, fall_through); - } else 
if (check->Equals(isolate()->heap()->undefined_symbol())) { + } else if (check->Equals(isolate()->heap()->undefined_string())) { __ cmp(eax, isolate()->factory()->undefined_value()); __ j(equal, if_true); __ JumpIfSmi(eax, if_false); @@ -4217,19 +4293,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ movzx_b(ecx, FieldOperand(edx, Map::kBitFieldOffset)); __ test(ecx, Immediate(1 << Map::kIsUndetectable)); Split(not_zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_symbol())) { + } else if (check->Equals(isolate()->heap()->function_string())) { __ JumpIfSmi(eax, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(eax, JS_FUNCTION_TYPE, edx); __ j(equal, if_true); __ CmpInstanceType(edx, JS_FUNCTION_PROXY_TYPE); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_symbol())) { + } else if (check->Equals(isolate()->heap()->object_string())) { __ JumpIfSmi(eax, if_false); if (!FLAG_harmony_typeof) { __ cmp(eax, isolate()->factory()->null_value()); __ j(equal, if_true); } + if (FLAG_harmony_symbols) { + __ CmpObjectType(eax, SYMBOL_TYPE, edx); + __ j(equal, if_true); + } __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx); __ j(below, if_false); __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); @@ -4286,29 +4366,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); - Condition cc = no_condition; - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - cc = equal; - break; - case Token::LT: - cc = less; - break; - case Token::GT: - cc = greater; - break; - case Token::LTE: - cc = less_equal; - break; - case Token::GTE: - cc = greater_equal; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } + Condition cc = CompareIC::ComputeCondition(op); __ pop(edx); bool inline_smi_code = ShouldInlineSmiCase(op); @@ -4325,7 +4383,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { // Record position and call the compare IC. SetSourcePosition(expr->position()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); patch_site.EmitPatchInfo(); diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc index dae3bbd63b..964db0eb1b 100644 --- a/deps/v8/src/ia32/ic-ia32.cc +++ b/deps/v8/src/ia32/ic-ia32.cc @@ -60,11 +60,11 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // Generated code falls through if the receiver is a regular non-global // JS object with slow properties and no interceptors. -static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register r0, - Register r1, - Label* miss) { +static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, + Register receiver, + Register r0, + Register r1, + Label* miss) { // Register usage: // receiver: holds the receiver on entry and is unchanged. // r0: used to hold receiver instance type. @@ -100,7 +100,7 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, // storage. This function may fail to load a property even though it is // in the dictionary, so code at miss_label must always call a backup // property load that is complete. 
This function is safe to call if -// name is not a symbol, and will jump to the miss_label in that +// name is not internalized, and will jump to the miss_label in that // case. The generated code assumes that the receiver has slow // properties, is not a global object and does not have interceptors. static void GenerateDictionaryLoad(MacroAssembler* masm, @@ -127,21 +127,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - r0, - r1); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss_label, + &done, + elements, + name, + r0, + r1); // If probing finds an entry in the dictionary, r0 contains the // index into the dictionary. Check that the value is a normal // property. __ bind(&done); const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag), Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize)); @@ -157,7 +157,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // storage. This function may fail to store a property eventhough it // is in the dictionary, so code at miss_label must always call a // backup property store that is complete. This function is safe to -// call if name is not a symbol, and will jump to the miss_label in +// call if name is not internalized, and will jump to the miss_label in // that case. The generated code assumes that the receiver has slow // properties, is not a global object and does not have interceptors. static void GenerateDictionaryStore(MacroAssembler* masm, @@ -182,21 +182,21 @@ static void GenerateDictionaryStore(MacroAssembler* masm, // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - r0, - r1); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss_label, + &done, + elements, + name, + r0, + r1); // If probing finds an entry in the dictionary, r0 contains the // index into the dictionary. Check that the value is a normal // property that is not read only. 
__ bind(&done); const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kTypeAndReadOnlyMask = (PropertyDetails::TypeField::kMask | @@ -216,50 +216,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -void LoadIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadArrayLength(masm, edx, eax, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateStringLength(MacroAssembler* masm, - bool support_wrappers) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadStringLength(masm, edx, eax, ebx, &miss, - support_wrappers); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - // Checks the receiver for special cases (value type, slow case bits). // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, @@ -336,31 +292,36 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, } -// Checks whether a key is an array index string or a symbol string. -// Falls through if the key is a symbol. -static void GenerateKeyStringCheck(MacroAssembler* masm, - Register key, - Register map, - Register hash, - Label* index_string, - Label* not_symbol) { +// Checks whether a key is an array index string or a unique name. +// Falls through if the key is a unique name. +static void GenerateKeyNameCheck(MacroAssembler* masm, + Register key, + Register map, + Register hash, + Label* index_string, + Label* not_unique) { // Register use: // key - holds the key and is unchanged. Assumed to be non-smi. // Scratch registers: // map - used to hold the map of the key. // hash - used to hold the hash of the key. - __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map); - __ j(above_equal, not_symbol); + Label unique; + __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map); + __ j(above, not_unique); + STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); + __ j(equal, &unique); // Is the string an array index, with cached numeric value? - __ mov(hash, FieldOperand(key, String::kHashFieldOffset)); - __ test(hash, Immediate(String::kContainsCachedArrayIndexMask)); + __ mov(hash, FieldOperand(key, Name::kHashFieldOffset)); + __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask)); __ j(zero, index_string); - // Is the string a symbol? - STATIC_ASSERT(kSymbolTag != 0); - __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsSymbolMask); - __ j(zero, not_symbol); + // Is the string internalized? 
+ STATIC_ASSERT(kInternalizedTag != 0); + __ test_b(FieldOperand(map, Map::kInstanceTypeOffset), kIsInternalizedMask); + __ j(zero, not_unique); + + __ bind(&unique); } @@ -447,11 +408,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - Label slow, check_string, index_smi, index_string, property_array_property; + Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; // Check that the key is a smi. - __ JumpIfNotSmi(ecx, &check_string); + __ JumpIfNotSmi(ecx, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from // where a numeric string is converted to a smi. @@ -502,8 +463,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ IncrementCounter(counters->keyed_load_generic_slow(), 1); GenerateRuntimeGetProperty(masm); - __ bind(&check_string); - GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow); + __ bind(&check_name); + GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow); GenerateKeyedLoadReceiverCheck( masm, edx, eax, Map::kHasNamedInterceptor, &slow); @@ -528,7 +489,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ xor_(eax, edi); __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask); - // Load the key (consisting of map and symbol) from the cache and + // Load the key (consisting of map and internalized string) from the cache and // check for match. Label load_in_object_property; static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; @@ -612,7 +573,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ IncrementCounter(counters->keyed_load_generic_symbol(), 1); __ ret(0); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(ebx, ecx); // Now jump to the place where smi keys are handled. 
__ jmp(&index_smi); @@ -647,7 +608,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { char_at_generator.GenerateSlow(masm, call_helper); __ bind(&miss); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -689,7 +650,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { __ TailCallExternalReference(ref, 2, 1); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -714,7 +675,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ mov(eax, unmapped_location); __ Ret(); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -743,7 +704,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs); __ Ret(); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -835,7 +796,9 @@ static void KeyedStoreGenerateGenericHelper( ebx, edi, slow); - ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); @@ -846,7 +809,9 @@ static void KeyedStoreGenerateGenericHelper( ebx, edi, slow); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); + mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, + slow); __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -860,7 +825,8 @@ static void KeyedStoreGenerateGenericHelper( ebx, edi, slow); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow); + mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -1055,7 +1021,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) { // Get the receiver of the function from the stack; 1 ~ return address. __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); - GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss); + GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss); // eax: elements // Search the dictionary placing the result in edi. @@ -1171,11 +1137,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ mov(edx, Operand(esp, (argc + 1) * kPointerSize)); Label do_call, slow_call, slow_load, slow_reload_receiver; - Label check_number_dictionary, check_string, lookup_monomorphic_cache; - Label index_smi, index_string; + Label check_number_dictionary, check_name, lookup_monomorphic_cache; + Label index_smi, index_name; // Check that the key is a smi. - __ JumpIfNotSmi(ecx, &check_string); + __ JumpIfNotSmi(ecx, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from @@ -1234,10 +1200,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ mov(edi, eax); __ jmp(&do_call); - __ bind(&check_string); - GenerateKeyStringCheck(masm, ecx, eax, ebx, &index_string, &slow_call); + __ bind(&check_name); + GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow_call); - // The key is known to be a symbol. + // The key is known to be a unique name. 
// If the receiver is a regular JS object with slow properties then do // a quick inline probe of the receiver's dictionary. // Otherwise do the monomorphic cache probe. @@ -1263,14 +1229,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ bind(&slow_call); // This branch is taken if: // - the receiver requires boxing or access check, - // - the key is neither smi nor symbol, + // - the key is neither smi nor a unique name, // - the value loaded is not a function, // - there is hope that the runtime will create a monomorphic call stub // that will get fetched next time. __ IncrementCounter(counters->keyed_call_generic_slow(), 1); GenerateMiss(masm, argc); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(ebx, ecx); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); @@ -1315,10 +1281,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // -- esp[(argc + 1) * 4] : receiver // ----------------------------------- - // Check if the name is a string. + // Check if the name is really a name. Label miss; __ JumpIfSmi(ecx, &miss); - Condition cond = masm->IsObjectStringType(ecx, eax, eax); + Condition cond = masm->IsObjectNameType(ecx, eax, eax); __ j(NegateCondition(cond), &miss); CallICBase::GenerateNormal(masm, argc); __ bind(&miss); @@ -1334,9 +1300,11 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // ----------------------------------- // Probe the stub cache. - Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC); - Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, edx, ecx, ebx, - eax); + Code::Flags flags = Code::ComputeFlags( + Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::NORMAL, Code::LOAD_IC); + Isolate::Current()->stub_cache()->GenerateProbe( + masm, flags, edx, ecx, ebx, eax); // Cache miss: Jump to runtime. GenerateMiss(masm); @@ -1351,7 +1319,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { // ----------------------------------- Label miss; - GenerateStringDictionaryReceiverCheck(masm, edx, eax, ebx, &miss); + GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss); // eax: elements // Search the dictionary placing the result in eax. @@ -1385,7 +1353,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { } -void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ----------- S t a t e ------------- // -- ecx : key // -- edx : receiver @@ -1400,7 +1368,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { __ push(ebx); // return address // Perform tail call to the entry. - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), masm->isolate()) : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate()); @@ -1465,65 +1433,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { } -void StoreIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - // - // This accepts as a receiver anything JSArray::SetElementsLength accepts - // (currently anything except for external arrays which means anything with - // elements of FixedArray type). Value must be a number, but only smis are - // accepted as the most common case. 
- - Label miss; - - Register receiver = edx; - Register value = eax; - Register scratch = ebx; - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); - - // Check that the object is a JS array. - __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); - __ j(not_equal, &miss); - - // Check that elements are FixedArray. - // We rely on StoreIC_ArrayLength below to deal with all types of - // fast elements (including COW). - __ mov(scratch, FieldOperand(receiver, JSArray::kElementsOffset)); - __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch); - __ j(not_equal, &miss); - - // Check that the array has fast properties, otherwise the length - // property might have been redefined. - __ mov(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset)); - __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset), - Heap::kHashTableMapRootIndex); - __ j(equal, &miss); - - // Check that value is a smi. - __ JumpIfNotSmi(value, &miss); - - // Prepare tail call to StoreIC_ArrayLength. - __ pop(scratch); - __ push(receiver); - __ push(value); - __ push(scratch); // return address - - ExternalReference ref = - ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate()); - __ TailCallExternalReference(ref, 2, 1); - - __ bind(&miss); - - GenerateMiss(masm); -} - - void StoreIC::GenerateNormal(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- eax : value @@ -1534,7 +1443,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { Label miss, restore_miss; - GenerateStringDictionaryReceiverCheck(masm, edx, ebx, edi, &miss); + GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss); // A lot of registers are needed for storing to slow case // objects. Push and restore receiver but rely on @@ -1598,7 +1507,7 @@ void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, } -void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key @@ -1613,7 +1522,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { __ push(ebx); // Do tail-call to runtime routine. - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric), masm->isolate()) : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); @@ -1650,7 +1559,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in eax. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail); __ mov(eax, edx); __ Ret(); __ bind(&fail); @@ -1676,7 +1587,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject( // Must return the modified receiver in eax. 
if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail); __ mov(eax, edx); __ Ret(); __ bind(&fail); @@ -1715,7 +1628,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) { } -static bool HasInlinedSmiCode(Address address) { +bool CompareIC::HasInlinedSmiCode(Address address) { // The address of the instruction following the call. Address test_instruction_address = address + Assembler::kCallTargetAddressOffset; @@ -1726,40 +1639,6 @@ static bool HasInlinedSmiCode(Address address) { } -void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { - HandleScope scope; - Handle<Code> rewritten; - State previous_state = GetState(); - - State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y); - if (state == GENERIC) { - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS); - rewritten = stub.GetCode(); - } else { - ICCompareStub stub(op_, state); - if (state == KNOWN_OBJECTS) { - stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map())); - } - rewritten = stub.GetCode(); - } - set_target(*rewritten); - -#ifdef DEBUG - if (FLAG_trace_ic) { - PrintF("[CompareIC (%s->%s)#%s]\n", - GetStateName(previous_state), - GetStateName(state), - Token::Name(op_)); - } -#endif - - // Activate inlined smi code. - if (previous_state == UNINITIALIZED) { - PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); - } -} - - void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // The address of the instruction following the call. Address test_instruction_address = diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc index 32c66a05f5..205781b9d9 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.cc +++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc @@ -30,6 +30,7 @@ #if defined(V8_TARGET_ARCH_IA32) #include "ia32/lithium-codegen-ia32.h" +#include "ic.h" #include "code-stubs.h" #include "deoptimizer.h" #include "stub-cache.h" @@ -39,6 +40,12 @@ namespace v8 { namespace internal { +static SaveFPRegsMode GetSaveFPRegsMode() { + // We don't need to save floating point regs when generating the snapshot + return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs; +} + + // When invoking builtins, we need to record the safepoint in the middle of // the invoke instruction sequence generated by the macro assembler. class SafepointGenerator : public CallWrapper { @@ -70,22 +77,23 @@ bool LCodeGen::GenerateCode() { HPhase phase("Z_Code generation", chunk()); ASSERT(is_unused()); status_ = GENERATING; - CpuFeatures::Scope scope(SSE2); - - CodeStub::GenerateFPStubs(); // Open a frame scope to indicate that there is a frame on the stack. The // MANUAL indicates that the scope shouldn't actually generate code to set up // the frame (that is done in GeneratePrologue). 
FrameScope frame_scope(masm_, StackFrame::MANUAL); - dynamic_frame_alignment_ = (chunk()->num_double_slots() > 2 && - !chunk()->graph()->is_recursive()) || - !info()->osr_ast_id().IsNone(); + support_aligned_spilled_doubles_ = info()->IsOptimizing(); + + dynamic_frame_alignment_ = info()->IsOptimizing() && + ((chunk()->num_double_slots() > 2 && + !chunk()->graph()->is_recursive()) || + !info()->osr_ast_id().IsNone()); return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && + GenerateJumpTable() && GenerateSafepointTable(); } @@ -94,8 +102,17 @@ void LCodeGen::FinishCode(Handle<Code> code) { ASSERT(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); + if (FLAG_weak_embedded_maps_in_optimized_code) { + RegisterDependentCodeForEmbeddedMaps(code); + } PopulateDeoptimizationData(code); - Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); + if (!info()->IsStub()) { + Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code); + } + for (int i = 0 ; i < prototype_maps_.length(); i++) { + prototype_maps_.at(i)->AddDependentCode( + DependentCode::kPrototypeCheckGroup, code); + } } @@ -126,112 +143,144 @@ void LCodeGen::Comment(const char* format, ...) { bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); - ProfileEntryHookStub::MaybeCallEntryHook(masm_); + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); #ifdef DEBUG - if (strlen(FLAG_stop_at) > 0 && - info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { - __ int3(); - } + if (strlen(FLAG_stop_at) > 0 && + info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ int3(); + } #endif - // Strict mode functions and builtins need to replace the receiver - // with undefined when called as functions (without an explicit - // receiver object). ecx is zero for method calls and non-zero for - // function calls. - if (!info_->is_classic_mode() || info_->is_native()) { - Label ok; - __ test(ecx, Operand(ecx)); - __ j(zero, &ok, Label::kNear); - // +1 for return address. - int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; - __ mov(Operand(esp, receiver_offset), - Immediate(isolate()->factory()->undefined_value())); - __ bind(&ok); - } + // Strict mode functions and builtins need to replace the receiver + // with undefined when called as functions (without an explicit + // receiver object). ecx is zero for method calls and non-zero for + // function calls. + if (!info_->is_classic_mode() || info_->is_native()) { + Label ok; + __ test(ecx, Operand(ecx)); + __ j(zero, &ok, Label::kNear); + // +1 for return address. + int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; + __ mov(Operand(esp, receiver_offset), + Immediate(isolate()->factory()->undefined_value())); + __ bind(&ok); + } + if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) { + // Move state of dynamic frame alignment into edx. + __ mov(edx, Immediate(kNoAlignmentPadding)); + + Label do_not_pad, align_loop; + STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); + // Align esp + 4 to a multiple of 2 * kPointerSize. + __ test(esp, Immediate(kPointerSize)); + __ j(not_zero, &do_not_pad, Label::kNear); + __ push(Immediate(0)); + __ mov(ebx, esp); + __ mov(edx, Immediate(kAlignmentPaddingPushed)); + // Copy arguments, receiver, and return address. 
+ __ mov(ecx, Immediate(scope()->num_parameters() + 2)); + + __ bind(&align_loop); + __ mov(eax, Operand(ebx, 1 * kPointerSize)); + __ mov(Operand(ebx, 0), eax); + __ add(Operand(ebx), Immediate(kPointerSize)); + __ dec(ecx); + __ j(not_zero, &align_loop, Label::kNear); + __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); + __ bind(&do_not_pad); + } + } - if (dynamic_frame_alignment_) { - // Move state of dynamic frame alignment into edx. - __ mov(edx, Immediate(kNoAlignmentPadding)); + info()->set_prologue_offset(masm_->pc_offset()); + if (NeedsEagerFrame()) { + ASSERT(!frame_is_built_); + frame_is_built_ = true; + __ push(ebp); // Caller's frame pointer. + __ mov(ebp, esp); + __ push(esi); // Callee's context. + if (info()->IsStub()) { + __ push(Immediate(Smi::FromInt(StackFrame::STUB))); + } else { + __ push(edi); // Callee's JS function. + } + } - Label do_not_pad, align_loop; - STATIC_ASSERT(kDoubleSize == 2 * kPointerSize); - // Align esp + 4 to a multiple of 2 * kPointerSize. - __ test(esp, Immediate(kPointerSize)); - __ j(not_zero, &do_not_pad, Label::kNear); - __ push(Immediate(0)); - __ mov(ebx, esp); - __ mov(edx, Immediate(kAlignmentPaddingPushed)); - // Copy arguments, receiver, and return address. - __ mov(ecx, Immediate(scope()->num_parameters() + 2)); - - __ bind(&align_loop); - __ mov(eax, Operand(ebx, 1 * kPointerSize)); - __ mov(Operand(ebx, 0), eax); - __ add(Operand(ebx), Immediate(kPointerSize)); - __ dec(ecx); - __ j(not_zero, &align_loop, Label::kNear); - __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue)); - __ bind(&do_not_pad); - } - - __ push(ebp); // Caller's frame pointer. - __ mov(ebp, esp); - __ push(esi); // Callee's context. - __ push(edi); // Callee's JS function. - - if (dynamic_frame_alignment_ && FLAG_debug_code) { + if (info()->IsOptimizing() && + dynamic_frame_alignment_ && + FLAG_debug_code) { __ test(esp, Immediate(kPointerSize)); __ Assert(zero, "frame is expected to be aligned"); } // Reserve space for the stack slots needed by the code. int slots = GetStackSlotCount(); - ASSERT_GE(slots, 1); - if (slots == 1) { - if (dynamic_frame_alignment_) { - __ push(edx); - } else { - __ push(Immediate(kNoAlignmentPadding)); - } - } else { - if (FLAG_debug_code) { - __ mov(Operand(eax), Immediate(slots)); - Label loop; - __ bind(&loop); - __ push(Immediate(kSlotsZapValue)); - __ dec(eax); - __ j(not_zero, &loop); + ASSERT(slots != 0 || !info()->IsOptimizing()); + if (slots > 0) { + if (slots == 1) { + if (dynamic_frame_alignment_) { + __ push(edx); + } else { + __ push(Immediate(kNoAlignmentPadding)); + } } else { - __ sub(Operand(esp), Immediate(slots * kPointerSize)); - #ifdef _MSC_VER - // On windows, you may not access the stack more than one page below - // the most recently mapped page. To make the allocated area randomly - // accessible, we write to each page in turn (the value is irrelevant). - const int kPageSize = 4 * KB; - for (int offset = slots * kPointerSize - kPageSize; - offset > 0; - offset -= kPageSize) { - __ mov(Operand(esp, offset), eax); + if (FLAG_debug_code) { + __ sub(Operand(esp), Immediate(slots * kPointerSize)); + __ push(eax); + __ mov(Operand(eax), Immediate(slots)); + Label loop; + __ bind(&loop); + __ mov(MemOperand(esp, eax, times_4, 0), + Immediate(kSlotsZapValue)); + __ dec(eax); + __ j(not_zero, &loop); + __ pop(eax); + } else { + __ sub(Operand(esp), Immediate(slots * kPointerSize)); +#ifdef _MSC_VER + // On windows, you may not access the stack more than one page below + // the most recently mapped page. 
To make the allocated area randomly + // accessible, we write to each page in turn (the value is irrelevant). + const int kPageSize = 4 * KB; + for (int offset = slots * kPointerSize - kPageSize; + offset > 0; + offset -= kPageSize) { + __ mov(Operand(esp, offset), eax); + } +#endif + } + + if (support_aligned_spilled_doubles_) { + Comment(";;; Store dynamic frame alignment tag for spilled doubles"); + // Store dynamic frame alignment state in the first local. + int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset; + if (dynamic_frame_alignment_) { + __ mov(Operand(ebp, offset), edx); + } else { + __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding)); + } } - #endif } - // Store dynamic frame alignment state in the first local. - if (dynamic_frame_alignment_) { - __ mov(Operand(ebp, - JavaScriptFrameConstants::kDynamicAlignmentStateOffset), - edx); - } else { - __ mov(Operand(ebp, - JavaScriptFrameConstants::kDynamicAlignmentStateOffset), - Immediate(kNoAlignmentPadding)); + if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { + Comment(";;; Save clobbered callee double registers"); + CpuFeatureScope scope(masm(), SSE2); + int count = 0; + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + while (!save_iterator.Done()) { + __ movdbl(MemOperand(esp, count * kDoubleSize), + XMMRegister::FromAllocationIndex(save_iterator.Current())); + save_iterator.Advance(); + count++; + } } } // Possibly allocate a local context. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); // Argument to NewContext is the function, which is still in edi. @@ -271,7 +320,7 @@ bool LCodeGen::GeneratePrologue() { } // Trace the call. - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { // We have not executed any compiled code yet, so esi still holds the // incoming context. __ CallRuntime(Runtime::kTraceEnter, 0); @@ -293,7 +342,30 @@ bool LCodeGen::GenerateBody() { } if (emit_instructions) { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + if (FLAG_code_comments) { + HValue* hydrogen = instr->hydrogen_value(); + if (hydrogen != NULL) { + if (hydrogen->IsChange()) { + HValue* changed_value = HChange::cast(hydrogen)->value(); + int use_id = 0; + const char* use_mnemo = "dead"; + if (hydrogen->UseCount() >= 1) { + HValue* use_value = hydrogen->uses().value(); + use_id = use_value->id(); + use_mnemo = use_value->Mnemonic(); + } + Comment(";;; @%d: %s. <of #%d %s for #%d %s>", + current_instruction_, instr->Mnemonic(), + changed_value->id(), changed_value->Mnemonic(), + use_id, use_mnemo); + } else { + Comment(";;; @%d: %s. <#%d>", current_instruction_, + instr->Mnemonic(), hydrogen->id()); + } + } else { + Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + } + } instr->CompileToNative(this); } } @@ -302,16 +374,111 @@ bool LCodeGen::GenerateBody() { } +bool LCodeGen::GenerateJumpTable() { + Label needs_frame_not_call; + Label needs_frame_is_call; + for (int i = 0; i < jump_table_.length(); i++) { + __ bind(&jump_table_[i].label); + Address entry = jump_table_[i].address; + bool is_lazy_deopt = jump_table_[i].is_lazy_deopt; + Deoptimizer::BailoutType type = + is_lazy_deopt ? 
Deoptimizer::LAZY : Deoptimizer::EAGER; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + Comment(";;; jump table entry %d.", i); + } else { + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + } + if (jump_table_[i].needs_frame) { + __ push(Immediate(ExternalReference::ForDeoptEntry(entry))); + if (is_lazy_deopt) { + if (needs_frame_is_call.is_bound()) { + __ jmp(&needs_frame_is_call); + } else { + __ bind(&needs_frame_is_call); + __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ push(Immediate(Smi::FromInt(StackFrame::STUB))); + // Push a PC inside the function so that the deopt code can find where + // the deopt comes from. It doesn't have to be the precise return + // address of a "calling" LAZY deopt, it only has to be somewhere + // inside the code body. + Label push_approx_pc; + __ call(&push_approx_pc); + __ bind(&push_approx_pc); + // Push the continuation which was stashed where the ebp should + // be. Replace it with the saved ebp. + __ push(MemOperand(esp, 3 * kPointerSize)); + __ mov(MemOperand(esp, 4 * kPointerSize), ebp); + __ lea(ebp, MemOperand(esp, 4 * kPointerSize)); + __ ret(0); // Call the continuation without clobbering registers. + } + } else { + if (needs_frame_not_call.is_bound()) { + __ jmp(&needs_frame_not_call); + } else { + __ bind(&needs_frame_not_call); + __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset)); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ push(Immediate(Smi::FromInt(StackFrame::STUB))); + // Push the continuation which was stashed where the ebp should + // be. Replace it with the saved ebp. + __ push(MemOperand(esp, 2 * kPointerSize)); + __ mov(MemOperand(esp, 3 * kPointerSize), ebp); + __ lea(ebp, MemOperand(esp, 3 * kPointerSize)); + __ ret(0); // Call the continuation without clobbering registers. + } + } + } else { + if (is_lazy_deopt) { + __ call(entry, RelocInfo::RUNTIME_ENTRY); + } else { + __ jmp(entry, RelocInfo::RUNTIME_ENTRY); + } + } + } + return !is_aborted(); +} + + bool LCodeGen::GenerateDeferredCode() { ASSERT(is_generating()); if (deferred_.length() > 0) { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred build frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(!frame_is_built_); + ASSERT(info()->IsStub()); + frame_is_built_ = true; + // Build the frame in such a way that esi isn't trashed. + __ push(ebp); // Caller's frame pointer.
+ __ push(Operand(ebp, StandardFrameConstants::kContextOffset)); + __ push(Immediate(Smi::FromInt(StackFrame::STUB))); + __ lea(ebp, Operand(esp, 2 * kPointerSize)); + } Comment(";;; Deferred code @%d: %s.", code->instruction_index(), code->instr()->Mnemonic()); code->Generate(); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred destroy frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(frame_is_built_); + frame_is_built_ = false; + __ mov(esp, ebp); + __ pop(ebp); + } __ jmp(code->exit()); } } @@ -325,6 +492,15 @@ bool LCodeGen::GenerateDeferredCode() { bool LCodeGen::GenerateSafepointTable() { ASSERT(is_done()); + if (!info()->IsStub()) { + // For lazy deoptimization we need space to patch a call after every call. + // Ensure there is always space for such patching, even if the code ends + // in a call. + int target_offset = masm()->pc_offset() + Deoptimizer::patch_size(); + while (masm()->pc_offset() < target_offset) { + masm()->nop(); + } + } safepoints_.Emit(masm(), GetStackSlotCount()); return !is_aborted(); } @@ -340,6 +516,11 @@ XMMRegister LCodeGen::ToDoubleRegister(int index) const { } +bool LCodeGen::IsX87TopOfStack(LOperand* op) const { + return op->IsDoubleRegister(); +} + + Register LCodeGen::ToRegister(LOperand* op) const { ASSERT(op->IsRegister()); return ToRegister(op->index()); @@ -354,8 +535,6 @@ XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const { int LCodeGen::ToInteger32(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); - ASSERT(constant->HasInteger32Value()); return constant->Integer32Value(); } @@ -383,30 +562,20 @@ Operand LCodeGen::ToOperand(LOperand* op) const { if (op->IsRegister()) return Operand(ToRegister(op)); if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op)); ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); - int index = op->index(); - if (index >= 0) { - // Local or spill slot. Skip the frame pointer, function, and - // context in the fixed part of the frame. - return Operand(ebp, -(index + 3) * kPointerSize); - } else { - // Incoming parameter. Skip the return address. - return Operand(ebp, -(index - 1) * kPointerSize); - } + return Operand(ebp, StackSlotOffset(op->index())); } Operand LCodeGen::HighOperand(LOperand* op) { ASSERT(op->IsDoubleStackSlot()); - int index = op->index(); - int offset = (index >= 0) ? index + 3 : index - 1; - return Operand(ebp, -offset * kPointerSize); + return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize); } void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation, - int* arguments_index, - int* arguments_count) { + int* pushed_arguments_index, + int* pushed_arguments_count) { if (environment == NULL) return; // The translation includes one command per value in the environment. @@ -418,14 +587,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, // arguments index points to the first element of a sequence of tagged // values on the stack that represent the arguments. This needs to be // kept in sync with the LArgumentsElements implementation. 
- *arguments_index = -environment->parameter_count(); - *arguments_count = environment->parameter_count(); + *pushed_arguments_index = -environment->parameter_count(); + *pushed_arguments_count = environment->parameter_count(); WriteTranslation(environment->outer(), translation, - arguments_index, - arguments_count); - int closure_id = *info()->closure() != *environment->closure() + pushed_arguments_index, + pushed_arguments_count); + bool has_closure_id = !info()->closure().is_null() && + *info()->closure() != *environment->closure(); + int closure_id = has_closure_id ? DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; switch (environment->frame_type()) { @@ -448,16 +619,28 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, case ARGUMENTS_ADAPTOR: translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); break; + case STUB: + translation->BeginCompiledStubFrame(); + break; + default: + UNREACHABLE(); } // Inlined frames which push their arguments cause the index to be - // bumped and another stack area to be used for materialization. - if (environment->entry() != NULL && - environment->entry()->arguments_pushed()) { - *arguments_index = *arguments_index < 0 - ? GetStackSlotCount() - : *arguments_index + *arguments_count; - *arguments_count = environment->entry()->arguments_count() + 1; + // bumped and another stack area to be used for materialization, + // otherwise actual argument values are unknown for inlined frames. + bool arguments_known = true; + int arguments_index = *pushed_arguments_index; + int arguments_count = *pushed_arguments_count; + if (environment->entry() != NULL) { + arguments_known = environment->entry()->arguments_pushed(); + arguments_index = arguments_index < 0 + ? GetStackSlotCount() : arguments_index + arguments_count; + arguments_count = environment->entry()->arguments_count() + 1; + if (environment->entry()->arguments_pushed()) { + *pushed_arguments_index = arguments_index; + *pushed_arguments_count = arguments_count; + } } for (int i = 0; i < translation_size; ++i) { @@ -472,8 +655,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_registers()[value->index()], environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } else if ( value->IsDoubleRegister() && environment->spilled_double_registers()[value->index()] != NULL) { @@ -483,8 +667,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_double_registers()[value->index()], false, false, - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -492,8 +677,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, value, environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -502,13 +688,15 @@ void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count) { if (op == NULL) { // TODO(twuerthinger): Introduce marker operands to indicate that this value // is not present and must be reconstructed from the deoptimizer. Currently // this is only used for the arguments object. 
- translation->StoreArgumentsObject(arguments_index, arguments_count); + translation->StoreArgumentsObject( + arguments_known, arguments_index, arguments_count); } else if (op->IsStackSlot()) { if (is_tagged) { translation->StoreStackSlot(op->index()); @@ -582,13 +770,12 @@ void LCodeGen::CallRuntime(const Runtime::Function* fun, __ CallRuntime(fun, argc); RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); + + ASSERT(info()->is_calling()); } -void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, - int argc, - LInstruction* instr, - LOperand* context) { +void LCodeGen::LoadContextFromDeferred(LOperand* context) { if (context->IsRegister()) { if (!ToRegister(context).is(esi)) { __ mov(esi, ToRegister(context)); @@ -602,10 +789,19 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, } else { UNREACHABLE(); } +} + +void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, + int argc, + LInstruction* instr, + LOperand* context) { + LoadContextFromDeferred(context); __ CallRuntimeSaveDoubles(id); RecordSafepointWithRegisters( instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); + + ASSERT(info()->is_calling()); } @@ -651,7 +847,12 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); ASSERT(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); + ASSERT(info()->IsOptimizing() || info()->IsStub()); + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? Deoptimizer::LAZY + : Deoptimizer::EAGER; + Address entry = + Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { Abort("bailout was not prepared"); return; @@ -685,20 +886,64 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { __ popfd(); } - if (cc == no_condition) { - if (FLAG_trap_on_deopt) __ int3(); - __ jmp(entry, RelocInfo::RUNTIME_ENTRY); - } else { - if (FLAG_trap_on_deopt) { - Label done; + if (FLAG_trap_on_deopt) { + Label done; + if (cc != no_condition) { __ j(NegateCondition(cc), &done, Label::kNear); - __ int3(); + } + __ int3(); + __ bind(&done); + } + + ASSERT(info()->IsStub() || frame_is_built_); + bool needs_lazy_deopt = info()->IsStub(); + if (cc == no_condition && frame_is_built_) { + if (needs_lazy_deopt) { + __ call(entry, RelocInfo::RUNTIME_ENTRY); + } else { __ jmp(entry, RelocInfo::RUNTIME_ENTRY); - __ bind(&done); + } + } else { + // We often have several deopts to the same entry, reuse the last + // jump entry if this is the case. 
+ if (jump_table_.is_empty() || + jump_table_.last().address != entry || + jump_table_.last().needs_frame != !frame_is_built_ || + jump_table_.last().is_lazy_deopt != needs_lazy_deopt) { + JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); + jump_table_.Add(table_entry, zone()); + } + if (cc == no_condition) { + __ jmp(&jump_table_.last().label); } else { - __ j(cc, entry, RelocInfo::RUNTIME_ENTRY); + __ j(cc, &jump_table_.last().label); + } + } +} + + +void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { + ZoneList<Handle<Map> > maps(1, zone()); + int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { + RelocInfo::Mode mode = it.rinfo()->rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT && + it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); + if (map->CanTransition()) { + maps.Add(map, zone()); + } } } +#ifdef VERIFY_HEAP + // This disables verification of weak embedded maps after full GC. + // AddDependentCode can cause a GC, which would observe the state where + // this code is not yet in the depended code lists of the embedded maps. + NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; +#endif + for (int i = 0; i < maps.length(); i++) { + maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); + } } @@ -708,7 +953,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<DeoptimizationInputData> data = factory()->NewDeoptimizationInputData(length, TENURED); - Handle<ByteArray> translations = translations_.CreateByteArray(); + Handle<ByteArray> translations = + translations_.CreateByteArray(isolate()->factory()); data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); @@ -862,38 +1108,38 @@ void LCodeGen::DoCallStub(LCallStub* instr) { switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpConstructResult: { RegExpConstructResultStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::RegExpExec: { RegExpExecStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { SubStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::NumberToString: { NumberToStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringAdd: { StringAddStub stub(NO_STRING_ADD_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { StringCompareStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::TranscendentalCache: { TranscendentalCacheStub stub(instr->transcendental_type(), TranscendentalCacheStub::TAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } default: @@ -987,6 +1233,17 @@ void LCodeGen::DoModI(LModI* instr) { // Slow case, using idiv instruction. 
__ bind(&slow); + + // Check for (kMinInt % -1). + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + Label left_not_min_int; + __ cmp(left_reg, kMinInt); + __ j(not_zero, &left_not_min_int, Label::kNear); + __ cmp(right_reg, -1); + DeoptimizeIf(zero, instr->environment()); + __ bind(&left_not_min_int); + } + // Sign extend to edx. __ cdq(); @@ -1020,6 +1277,43 @@ void LCodeGen::DoModI(LModI* instr) { void LCodeGen::DoDivI(LDivI* instr) { + if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) { + Register dividend = ToRegister(instr->left()); + int32_t divisor = + HConstant::cast(instr->hydrogen()->right())->Integer32Value(); + int32_t test_value = 0; + int32_t power = 0; + + if (divisor > 0) { + test_value = divisor - 1; + power = WhichPowerOf2(divisor); + } else { + // Check for (0 / -x) that will produce negative zero. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ test(dividend, Operand(dividend)); + DeoptimizeIf(zero, instr->environment()); + } + // Check for (kMinInt / -1). + if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + __ cmp(dividend, kMinInt); + DeoptimizeIf(zero, instr->environment()); + } + test_value = - divisor - 1; + power = WhichPowerOf2(-divisor); + } + + if (test_value != 0) { + // Deoptimize if remainder is not 0. + __ test(dividend, Immediate(test_value)); + DeoptimizeIf(not_zero, instr->environment()); + __ sar(dividend, power); + } + + if (divisor < 0) __ neg(dividend); + + return; + } + LOperand* right = instr->right(); ASSERT(ToRegister(instr->result()).is(eax)); ASSERT(ToRegister(instr->left()).is(eax)); @@ -1030,13 +1324,13 @@ void LCodeGen::DoDivI(LDivI* instr) { // Check for x / 0. Register right_reg = ToRegister(right); - if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { + if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { __ test(right_reg, ToOperand(right)); DeoptimizeIf(zero, instr->environment()); } // Check for (0 / -x) that will produce negative zero. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) { Label left_not_zero; __ test(left_reg, Operand(left_reg)); __ j(not_zero, &left_not_zero, Label::kNear); @@ -1045,8 +1339,8 @@ void LCodeGen::DoDivI(LDivI* instr) { __ bind(&left_not_zero); } - // Check for (-kMinInt / -1). - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + // Check for (kMinInt / -1). + if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) { Label left_not_min_int; __ cmp(left_reg, kMinInt); __ j(not_zero, &left_not_min_int, Label::kNear); @@ -1059,9 +1353,19 @@ void LCodeGen::DoDivI(LDivI* instr) { __ cdq(); __ idiv(right_reg); - // Deoptimize if remainder is not 0. - __ test(edx, Operand(edx)); - DeoptimizeIf(not_zero, instr->environment()); + if (!instr->is_flooring()) { + // Deoptimize if remainder is not 0. 
+ __ test(edx, Operand(edx)); + DeoptimizeIf(not_zero, instr->environment()); + } else { + Label done; + __ test(edx, edx); + __ j(zero, &done, Label::kNear); + __ xor_(edx, right_reg); + __ sar(edx, 31); + __ add(eax, edx); + __ bind(&done); + } } @@ -1300,6 +1604,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) { ASSERT(ToRegister(right).is(ecx)); switch (instr->op()) { + case Token::ROR: + __ ror_cl(ToRegister(left)); + if (instr->can_deopt()) { + __ test(ToRegister(left), Immediate(0x80000000)); + DeoptimizeIf(not_zero, instr->environment()); + } + break; case Token::SAR: __ sar_cl(ToRegister(left)); break; @@ -1321,6 +1632,14 @@ void LCodeGen::DoShiftI(LShiftI* instr) { int value = ToInteger32(LConstantOperand::cast(right)); uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); switch (instr->op()) { + case Token::ROR: + if (shift_count == 0 && instr->can_deopt()) { + __ test(ToRegister(left), Immediate(0x80000000)); + DeoptimizeIf(not_zero, instr->environment()); + } else { + __ ror(ToRegister(left), shift_count); + } + break; case Token::SAR: if (shift_count != 0) { __ sar(ToRegister(left), shift_count); @@ -1383,7 +1702,8 @@ void LCodeGen::DoConstantD(LConstantD* instr) { int32_t lower = static_cast<int32_t>(int_val); int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt)); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope scope(SSE4_1); + CpuFeatureScope scope1(masm(), SSE2); + CpuFeatureScope scope2(masm(), SSE4_1); if (lower != 0) { __ Set(temp, Immediate(lower)); __ movd(res, Operand(temp)); @@ -1395,6 +1715,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) { __ pinsrd(res, Operand(temp), 1); } } else { + CpuFeatureScope scope(masm(), SSE2); __ Set(temp, Immediate(upper)); __ movd(res, Operand(temp)); __ psllq(res, 32); @@ -1511,6 +1832,15 @@ void LCodeGen::DoDateField(LDateField* instr) { } +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + SeqStringSetCharGenerator::Generate(masm(), + instr->encoding(), + ToRegister(instr->string()), + ToRegister(instr->index()), + ToRegister(instr->value())); +} + + void LCodeGen::DoBitNotI(LBitNotI* instr) { LOperand* input = instr->value(); ASSERT(input->Equals(instr->result())); @@ -1548,6 +1878,7 @@ void LCodeGen::DoAddI(LAddI* instr) { void LCodeGen::DoMathMinMax(LMathMinMax* instr) { + CpuFeatureScope scope(masm(), SSE2); LOperand* left = instr->left(); LOperand* right = instr->right(); ASSERT(left->Equals(instr->result())); @@ -1609,6 +1940,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { + CpuFeatureScope scope(masm(), SSE2); XMMRegister left = ToDoubleRegister(instr->left()); XMMRegister right = ToDoubleRegister(instr->right()); XMMRegister result = ToDoubleRegister(instr->result()); @@ -1619,8 +1951,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { __ addsd(left, right); break; case Token::SUB: - __ subsd(left, right); - break; + __ subsd(left, right); + break; case Token::MUL: __ mulsd(left, right); break; @@ -1658,7 +1990,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->result()).is(eax)); BinaryOpStub stub(instr->op(), NO_OVERWRITE); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ nop(); // Signals no inlined code. 
} @@ -1693,6 +2025,7 @@ void LCodeGen::EmitBranch(int left_block, int right_block, Condition cc) { void LCodeGen::DoBranch(LBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); + CpuFeatureScope scope(masm(), SSE2); Representation r = instr->hydrogen()->value()->representation(); if (r.IsInteger32()) { @@ -1789,9 +2122,8 @@ void LCodeGen::DoBranch(LBranch* instr) { __ cmp(FieldOperand(reg, HeapObject::kMapOffset), factory()->heap_number_map()); __ j(not_equal, ¬_heap_number, Label::kNear); - __ fldz(); - __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset)); - __ FCmp(); + __ xorps(xmm0, xmm0); + __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset)); __ j(zero, false_label); __ jmp(true_label); __ bind(¬_heap_number); @@ -1863,6 +2195,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { + CpuFeatureScope scope(masm(), SSE2); // Don't base result on EFLAGS when a NaN is involved. Instead // jump to the false block. __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right)); @@ -2059,7 +2392,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); Condition condition = ComputeCompareCondition(op); @@ -2142,7 +2475,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, ASSERT(!temp.is(temp2)); __ JumpIfSmi(input, is_false); - if (class_name->IsEqualTo(CStrVector("Function"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { // Assuming the following assertions, we can use the same compares to test // for both being a function type and being in the object type range. STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); @@ -2172,7 +2505,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ mov(temp, FieldOperand(temp, Map::kConstructorOffset)); // Objects with a non-function constructor have class 'Object'. __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2); - if (class_name->IsEqualTo(CStrVector("Object"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { __ j(not_equal, is_true); } else { __ j(not_equal, is_false); @@ -2183,12 +2516,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); __ mov(temp, FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is a symbol because it's a literal. - // The name in the constructor is a symbol because of the way the context is - // booted. This routine isn't expected to work for random API-created + // The class name we are testing against is internalized since it's a literal. + // The name in the constructor is internalized because of the way the context + // is booted. This routine isn't expected to work for random API-created // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are symbols it is sufficient to use an identity - // comparison. + // syntax. Since both sides are internalized it is sufficient to use an + // identity comparison. __ cmp(temp, class_name); // End with the answer in the z flag. 
} @@ -2227,7 +2560,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { // Object and function are in fixed registers defined by the stub. ASSERT(ToRegister(instr->context()).is(esi)); InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); Label true_value, done; __ test(eax, Operand(eax)); @@ -2328,7 +2661,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta; __ mov(temp, Immediate(delta)); __ StoreToSafepointRegisterSlot(temp, temp); - CallCodeGeneric(stub.GetCode(), + CallCodeGeneric(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -2342,10 +2675,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, } +void LCodeGen::DoInstanceSize(LInstanceSize* instr) { + Register object = ToRegister(instr->object()); + Register result = ToRegister(instr->result()); + __ mov(result, FieldOperand(object, HeapObject::kMapOffset)); + __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset)); +} + + void LCodeGen::DoCmpT(LCmpT* instr) { Token::Value op = instr->op(); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); Condition condition = ComputeCompareCondition(op); @@ -2360,8 +2701,43 @@ void LCodeGen::DoCmpT(LCmpT* instr) { } +void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) { + int extra_value_count = dynamic_frame_alignment ? 2 : 1; + + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + if (dynamic_frame_alignment && FLAG_debug_code) { + __ cmp(Operand(esp, + (parameter_count + extra_value_count) * kPointerSize), + Immediate(kAlignmentZapValue)); + __ Assert(equal, "expected alignment marker"); + } + __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx); + } else { + Register reg = ToRegister(instr->parameter_count()); + Register return_addr_reg = reg.is(ecx) ? ebx : ecx; + if (dynamic_frame_alignment && FLAG_debug_code) { + ASSERT(extra_value_count == 2); + __ cmp(Operand(esp, reg, times_pointer_size, + extra_value_count * kPointerSize), + Immediate(kAlignmentZapValue)); + __ Assert(equal, "expected alignment marker"); + } + + // emit code to restore stack based on instr->parameter_count() + __ pop(return_addr_reg); // save return address + if (dynamic_frame_alignment) { + __ inc(reg); // 1 more for alignment + } + __ shl(reg, kPointerSizeLog2); + __ add(esp, reg); + __ jmp(return_addr_reg); + } +} + + void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { // Preserve the return value on the stack and rely on the runtime call // to return the value in the same register. 
We're leaving the code // managed by the register allocator and tearing down the frame, it's @@ -2370,26 +2746,38 @@ void LCodeGen::DoReturn(LReturn* instr) { __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset)); __ CallRuntime(Runtime::kTraceExit, 1); } + if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) { + ASSERT(NeedsEagerFrame()); + CpuFeatureScope scope(masm(), SSE2); + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + int count = 0; + while (!save_iterator.Done()) { + __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(esp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } + } if (dynamic_frame_alignment_) { // Fetch the state of the dynamic frame alignment. __ mov(edx, Operand(ebp, JavaScriptFrameConstants::kDynamicAlignmentStateOffset)); } - __ mov(esp, ebp); - __ pop(ebp); + if (NeedsEagerFrame()) { + __ mov(esp, ebp); + __ pop(ebp); + } if (dynamic_frame_alignment_) { Label no_padding; __ cmp(edx, Immediate(kNoAlignmentPadding)); __ j(equal, &no_padding); - if (FLAG_debug_code) { - __ cmp(Operand(esp, (GetParameterCount() + 2) * kPointerSize), - Immediate(kAlignmentZapValue)); - __ Assert(equal, "expected alignment marker"); - } - __ Ret((GetParameterCount() + 2) * kPointerSize, ecx); + + EmitReturn(instr, true); __ bind(&no_padding); } - __ Ret((GetParameterCount() + 1) * kPointerSize, ecx); + + EmitReturn(instr, false); } @@ -2494,7 +2882,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { offset, value, temp, - kSaveFPRegs, + GetSaveFPRegsMode(), EMIT_REMEMBERED_SET, check_needed); } @@ -2749,95 +3137,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { } -void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { - Register result = ToRegister(instr->result()); - - // Load the result. - __ mov(result, - BuildFastArrayOperand(instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_ELEMENTS, - FixedArray::kHeaderSize - kHeapObjectTag, - instr->additional_index())); - - // Check for the hole value. 
- if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - __ test(result, Immediate(kSmiTagMask)); - DeoptimizeIf(not_equal, instr->environment()); - } else { - __ cmp(result, factory()->the_hole_value()); - DeoptimizeIf(equal, instr->environment()); - } - } -} - - -void LCodeGen::DoLoadKeyedFastDoubleElement( - LLoadKeyedFastDoubleElement* instr) { - XMMRegister result = ToDoubleRegister(instr->result()); - - if (instr->hydrogen()->RequiresHoleCheck()) { - int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + - sizeof(kHoleNanLower32); - Operand hole_check_operand = BuildFastArrayOperand( - instr->elements(), instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - offset, - instr->additional_index()); - __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); - DeoptimizeIf(equal, instr->environment()); - } - - Operand double_load_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); - __ movdbl(result, double_load_operand); -} - - -Operand LCodeGen::BuildFastArrayOperand( - LOperand* elements_pointer, - LOperand* key, - Representation key_representation, - ElementsKind elements_kind, - uint32_t offset, - uint32_t additional_index) { - Register elements_pointer_reg = ToRegister(elements_pointer); - int shift_size = ElementsKindToShiftSize(elements_kind); - // Even though the HLoad/StoreKeyedFastElement instructions force the input - // representation for the key to be an integer, the input gets replaced during - // bound check elimination with the index argument to the bounds check, which - // can be tagged, so that case must be handled here, too. 
- if (key_representation.IsTagged() && (shift_size >= 1)) { - shift_size -= kSmiTagSize; - } - if (key->IsConstantOperand()) { - int constant_value = ToInteger32(LConstantOperand::cast(key)); - if (constant_value & 0xF0000000) { - Abort("array index constant value too big"); - } - return Operand(elements_pointer_reg, - ((constant_value + additional_index) << shift_size) - + offset); - } else { - ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); - return Operand(elements_pointer_reg, - ToRegister(key), - scale_factor, - offset + (additional_index << shift_size)); - } -} - - -void LCodeGen::DoLoadKeyedSpecializedArrayElement( - LLoadKeyedSpecializedArrayElement* instr) { +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); LOperand* key = instr->key(); if (!key->IsConstantOperand() && @@ -2846,18 +3146,30 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( __ SmiUntag(ToRegister(key)); } Operand operand(BuildFastArrayOperand( - instr->external_pointer(), + instr->elements(), key, instr->hydrogen()->key()->representation(), elements_kind, 0, instr->additional_index())); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - XMMRegister result(ToDoubleRegister(instr->result())); - __ movss(result, operand); - __ cvtss2sd(result, result); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister result(ToDoubleRegister(instr->result())); + __ movss(result, operand); + __ cvtss2sd(result, result); + } else { + __ fld_s(operand); + HandleX87FPReturnValue(instr); + } } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ movdbl(ToDoubleRegister(instr->result()), operand); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + __ movdbl(ToDoubleRegister(instr->result()), operand); + } else { + __ fld_d(operand); + HandleX87FPReturnValue(instr); + } } else { Register result(ToRegister(instr->result())); switch (elements_kind) { @@ -2901,6 +3213,128 @@ void LCodeGen::DoLoadKeyedSpecializedArrayElement( } +void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) { + if (IsX87TopOfStack(instr->result())) { + // Return value is already on stack. If the value has no uses, then + // pop it off the FP stack. Otherwise, make sure that there are enough + // copies of the value on the stack to feed all of the usages, e.g. + // when the following instruction uses the return value in multiple + // inputs. 
+ int count = instr->hydrogen_value()->UseCount(); + if (count == 0) { + __ fstp(0); + } else { + count--; + ASSERT(count <= 7); + while (count-- > 0) { + __ fld(0); + } + } + } else { + __ fstp_d(ToOperand(instr->result())); + } +} + + +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { + if (instr->hydrogen()->RequiresHoleCheck()) { + int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag + + sizeof(kHoleNanLower32); + Operand hole_check_operand = BuildFastArrayOperand( + instr->elements(), instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + offset, + instr->additional_index()); + __ cmp(hole_check_operand, Immediate(kHoleNanUpper32)); + DeoptimizeIf(equal, instr->environment()); + } + + Operand double_load_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + FixedDoubleArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister result = ToDoubleRegister(instr->result()); + __ movdbl(result, double_load_operand); + } else { + __ fld_d(double_load_operand); + HandleX87FPReturnValue(instr); + } +} + + +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { + Register result = ToRegister(instr->result()); + + // Load the result. + __ mov(result, + BuildFastArrayOperand(instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_ELEMENTS, + FixedArray::kHeaderSize - kHeapObjectTag, + instr->additional_index())); + + // Check for the hole value. + if (instr->hydrogen()->RequiresHoleCheck()) { + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + __ test(result, Immediate(kSmiTagMask)); + DeoptimizeIf(not_equal, instr->environment()); + } else { + __ cmp(result, factory()->the_hole_value()); + DeoptimizeIf(equal, instr->environment()); + } + } +} + + +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_external()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); + } else { + DoLoadKeyedFixedArray(instr); + } +} + + +Operand LCodeGen::BuildFastArrayOperand( + LOperand* elements_pointer, + LOperand* key, + Representation key_representation, + ElementsKind elements_kind, + uint32_t offset, + uint32_t additional_index) { + Register elements_pointer_reg = ToRegister(elements_pointer); + int shift_size = ElementsKindToShiftSize(elements_kind); + if (key->IsConstantOperand()) { + int constant_value = ToInteger32(LConstantOperand::cast(key)); + if (constant_value & 0xF0000000) { + Abort("array index constant value too big"); + } + return Operand(elements_pointer_reg, + ((constant_value + additional_index) << shift_size) + + offset); + } else { + // Take the tag bit into account while computing the shift size. 
+ if (key_representation.IsTagged() && (shift_size >= 1)) { + shift_size -= kSmiTagSize; + } + ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size); + return Operand(elements_pointer_reg, + ToRegister(key), + scale_factor, + offset + (additional_index << shift_size)); + } +} + + void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { ASSERT(ToRegister(instr->context()).is(esi)); ASSERT(ToRegister(instr->object()).is(edx)); @@ -3071,7 +3505,12 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) { void LCodeGen::DoContext(LContext* instr) { Register result = ToRegister(instr->result()); - __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); + if (info()->IsOptimizing()) { + __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset)); + } else { + // If there is no frame, the context must be in esi. + ASSERT(result.is(esi)); + } } @@ -3243,6 +3682,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { ASSERT(instr->value()->Equals(instr->result())); Representation r = instr->hydrogen()->value()->representation(); + CpuFeatureScope scope(masm(), SSE2); if (r.IsDouble()) { XMMRegister scratch = xmm0; XMMRegister input_reg = ToDoubleRegister(instr->value()); @@ -3264,12 +3704,13 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = xmm0; Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope scope(SSE4_1); + CpuFeatureScope scope(masm(), SSE4_1); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Deoptimize on negative zero. Label non_zero; @@ -3327,45 +3768,61 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { } } -void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { - XMMRegister xmm_scratch = xmm0; +void LCodeGen::DoMathRound(LMathRound* instr) { + CpuFeatureScope scope(masm(), SSE2); Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); - - Label below_half, done; - // xmm_scratch = 0.5 + XMMRegister xmm_scratch = xmm0; + XMMRegister input_temp = ToDoubleRegister(instr->temp()); ExternalReference one_half = ExternalReference::address_of_one_half(); + ExternalReference minus_one_half = + ExternalReference::address_of_minus_one_half(); + + Label done, round_to_zero, below_one_half, do_not_compensate; __ movdbl(xmm_scratch, Operand::StaticVariable(one_half)); __ ucomisd(xmm_scratch, input_reg); - __ j(above, &below_half); - // xmm_scratch = input + 0.5 - __ addsd(xmm_scratch, input_reg); + __ j(above, &below_one_half); - // Compute Math.floor(value + 0.5). - // Use truncating instruction (OK because input is positive). + // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). + __ addsd(xmm_scratch, input_reg); __ cvttsd2si(output_reg, Operand(xmm_scratch)); - // Overflow is signalled with minint. __ cmp(output_reg, 0x80000000u); + __ RecordComment("D2I conversion overflow"); DeoptimizeIf(equal, instr->environment()); __ jmp(&done); - __ bind(&below_half); + __ bind(&below_one_half); + __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half)); + __ ucomisd(xmm_scratch, input_reg); + __ j(below_equal, &round_to_zero); + + // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then + // compare and compensate. + __ movsd(input_temp, input_reg); // Do not alter input_reg. 
+ __ subsd(input_temp, xmm_scratch); + __ cvttsd2si(output_reg, Operand(input_temp)); + // Catch minint due to overflow, and to prevent overflow when compensating. + __ cmp(output_reg, 0x80000000u); + __ RecordComment("D2I conversion overflow"); + DeoptimizeIf(equal, instr->environment()); + __ cvtsi2sd(xmm_scratch, output_reg); + __ ucomisd(xmm_scratch, input_temp); + __ j(equal, &done); + __ sub(output_reg, Immediate(1)); + // No overflow because we already ruled out minint. + __ jmp(&done); + + __ bind(&round_to_zero); // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if // we can ignore the difference between a result of -0 and +0. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // If the sign is positive, we return +0. __ movmskpd(output_reg, input_reg); __ test(output_reg, Immediate(1)); + __ RecordComment("Minus zero"); DeoptimizeIf(not_zero, instr->environment()); - } else { - // If the input is >= -0.5, we return +0. - __ mov(output_reg, Immediate(0xBF000000)); - __ movd(xmm_scratch, Operand(output_reg)); - __ cvtss2sd(xmm_scratch, xmm_scratch); - __ ucomisd(input_reg, xmm_scratch); - DeoptimizeIf(below, instr->environment()); } __ Set(output_reg, Immediate(0)); __ bind(&done); @@ -3373,6 +3830,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(instr->value()); ASSERT(ToDoubleRegister(instr->result()).is(input_reg)); __ sqrtsd(input_reg, input_reg); @@ -3380,6 +3838,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { + CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_scratch = xmm0; XMMRegister input_reg = ToDoubleRegister(instr->value()); Register scratch = ToRegister(instr->temp()); @@ -3456,6 +3915,7 @@ void LCodeGen::DoRandom(LRandom* instr) { DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr); + CpuFeatureScope scope(masm(), SSE2); // Having marked this instruction as a call we can use any // registers. 
ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); @@ -3523,6 +3983,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), SSE2); ASSERT(instr->value()->Equals(instr->result())); XMMRegister input_reg = ToDoubleRegister(instr->value()); Label positive, done, zero; @@ -3553,11 +4014,22 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { } +void LCodeGen::DoMathExp(LMathExp* instr) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister input = ToDoubleRegister(instr->value()); + XMMRegister result = ToDoubleRegister(instr->result()); + Register temp1 = ToRegister(instr->temp1()); + Register temp2 = ToRegister(instr->temp2()); + + MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); +} + + void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); TranscendentalCacheStub stub(TranscendentalCache::TAN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3565,7 +4037,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); TranscendentalCacheStub stub(TranscendentalCache::COS, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3573,7 +4045,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); TranscendentalCacheStub stub(TranscendentalCache::SIN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3585,9 +4057,6 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) { case kMathFloor: DoMathFloor(instr); break; - case kMathRound: - DoMathRound(instr); - break; case kMathSqrt: DoMathSqrt(instr); break; @@ -3664,7 +4133,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { int arity = instr->arity(); CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3696,9 +4165,29 @@ void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->constructor()).is(edi)); ASSERT(ToRegister(instr->result()).is(eax)); + if (FLAG_optimize_constructed_arrays) { + // No cell in ebx for construct type feedback in optimized code + Handle<Object> undefined_value(isolate()->heap()->undefined_value(), + isolate()); + __ mov(ebx, Immediate(undefined_value)); + } CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ Set(eax, Immediate(instr->arity())); - CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); +} + + +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + ASSERT(ToRegister(instr->context()).is(esi)); + ASSERT(ToRegister(instr->constructor()).is(edi)); + ASSERT(ToRegister(instr->result()).is(eax)); + ASSERT(FLAG_optimize_constructed_arrays); + + __ mov(ebx, instr->hydrogen()->property_cell()); + Handle<Code> array_construct_code = + isolate()->builtins()->ArrayConstructCode(); + __ Set(eax, Immediate(instr->arity())); + CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); } @@ -3707,6 +4196,13 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) { } 
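[Editor's note] The DoMathRound rewrite above replaces the old floor(value + 0.5) sequence with a three-range scheme built around CVTTSD2SI, which can only truncate toward zero. A minimal standalone C++ sketch of that scheme follows; MathRoundSketch is a hypothetical helper name, and it assumes the input fits in int32 so the 0x80000000 overflow deopt paths in the generated code can be ignored.

#include <cstdint>
#include <cstdio>

// Hypothetical sketch of the Math.round lowering above (not V8 code).
static int32_t MathRoundSketch(double x) {
  if (x >= 0.5) {
    // trunc(x + 0.5) == floor(x + 0.5) because x + 0.5 >= 1.
    return static_cast<int32_t>(x + 0.5);            // cvttsd2si
  }
  if (x >= -0.5) {
    // [-0.5, 0.5) rounds to 0; the generated code deopts on -0 when the
    // sign of the result matters.
    return 0;
  }
  // x < -0.5: truncation toward zero acts as ceil here, so compensate
  // when x + 0.5 is not already an integer to obtain floor(x + 0.5).
  double shifted = x + 0.5;                          // x - (-0.5)
  int32_t result = static_cast<int32_t>(shifted);    // cvttsd2si == ceil(shifted)
  if (static_cast<double>(result) != shifted) result -= 1;  // floor(shifted)
  return result;
}

int main() {
  printf("%d %d %d\n", MathRoundSketch(2.5),    // 3
                       MathRoundSketch(-2.5),   // -2
                       MathRoundSketch(-2.6));  // -3
  return 0;
}

The compensation step exists because truncation toward zero behaves as ceil for negative values, while Math.round requires floor(x + 0.5) on that range; the cvtsi2sd/ucomisd pair in the emitted code performs the same "was it exact?" check as the comparison in the sketch.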
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { + Register result = ToRegister(instr->result()); + Register base = ToRegister(instr->base_object()); + __ lea(result, Operand(base, instr->offset())); +} + + void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register object = ToRegister(instr->object()); Register value = ToRegister(instr->value()); @@ -3725,7 +4221,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { HeapObject::kMapOffset, temp_map, temp, - kSaveFPRegs, + GetSaveFPRegsMode(), OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); } @@ -3744,7 +4240,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { offset, value, temp, - kSaveFPRegs, + GetSaveFPRegsMode(), EMIT_REMEMBERED_SET, check_needed); } @@ -3759,7 +4255,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { offset, value, object, - kSaveFPRegs, + GetSaveFPRegsMode(), EMIT_REMEMBERED_SET, check_needed); } @@ -3780,27 +4276,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { } -void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand) { - if (value->representation().IsTagged() && !value->type().IsSmi()) { - if (operand->IsRegister()) { - __ test(ToRegister(operand), Immediate(kSmiTagMask)); - } else { - __ test(ToOperand(operand), Immediate(kSmiTagMask)); - } - DeoptimizeIf(not_zero, environment); - } -} - - void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->length(), - instr->length()); - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->index(), - instr->index()); + if (instr->hydrogen()->skip_check()) return; + if (instr->index()->IsConstantOperand()) { int constant_index = ToInteger32(LConstantOperand::cast(instr->index())); @@ -3818,8 +4296,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { } -void LCodeGen::DoStoreKeyedSpecializedArrayElement( - LStoreKeyedSpecializedArrayElement* instr) { +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); LOperand* key = instr->key(); if (!key->IsConstantOperand() && @@ -3828,16 +4305,18 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( __ SmiUntag(ToRegister(key)); } Operand operand(BuildFastArrayOperand( - instr->external_pointer(), + instr->elements(), key, instr->hydrogen()->key()->representation(), elements_kind, 0, instr->additional_index())); if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + CpuFeatureScope scope(masm(), SSE2); __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value())); __ movss(operand, xmm0); } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + CpuFeatureScope scope(masm(), SSE2); __ movdbl(operand, ToDoubleRegister(instr->value())); } else { Register value = ToRegister(instr->value()); @@ -3872,13 +4351,40 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( } -void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister value = ToDoubleRegister(instr->value()); + + if (instr->NeedsCanonicalization()) { + Label have_value; + + __ ucomisd(value, value); + __ j(parity_odd, &have_value); // NaN. 
+ + ExternalReference canonical_nan_reference = + ExternalReference::address_of_canonical_non_hole_nan(); + __ movdbl(value, Operand::StaticVariable(canonical_nan_reference)); + __ bind(&have_value); + } + + Operand double_store_operand = BuildFastArrayOperand( + instr->elements(), + instr->key(), + instr->hydrogen()->key()->representation(), + FAST_DOUBLE_ELEMENTS, + FixedDoubleArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); + __ movdbl(double_store_operand, value); +} + + +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->object()); + Register elements = ToRegister(instr->elements()); Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; Operand operand = BuildFastArrayOperand( - instr->object(), + instr->elements(), instr->key(), instr->hydrogen()->key()->representation(), FAST_ELEMENTS, @@ -3896,37 +4402,22 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { __ RecordWrite(elements, key, value, - kSaveFPRegs, + GetSaveFPRegsMode(), EMIT_REMEMBERED_SET, check_needed); } } -void LCodeGen::DoStoreKeyedFastDoubleElement( - LStoreKeyedFastDoubleElement* instr) { - XMMRegister value = ToDoubleRegister(instr->value()); - - if (instr->NeedsCanonicalization()) { - Label have_value; - - __ ucomisd(value, value); - __ j(parity_odd, &have_value); // NaN. - - ExternalReference canonical_nan_reference = - ExternalReference::address_of_canonical_non_hole_nan(); - __ movdbl(value, Operand::StaticVariable(canonical_nan_reference)); - __ bind(&have_value); +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + // By cases...external, fast-double, fast + if (instr->is_external()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); } - - Operand double_store_operand = BuildFastArrayOperand( - instr->elements(), - instr->key(), - instr->hydrogen()->key()->representation(), - FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); - __ movdbl(double_store_operand, value); } @@ -3943,14 +4434,21 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { } +void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { + Register object = ToRegister(instr->object()); + Register temp = ToRegister(instr->temp()); + __ TestJSArrayForAllocationSiteInfo(object, temp); + DeoptimizeIf(equal, instr->environment()); +} + + void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { Register object_reg = ToRegister(instr->object()); - Register new_map_reg = ToRegister(instr->new_map_temp()); Handle<Map> from_map = instr->original_map(); Handle<Map> to_map = instr->transitioned_map(); - ElementsKind from_kind = from_map->elements_kind(); - ElementsKind to_kind = to_map->elements_kind(); + ElementsKind from_kind = instr->from_kind(); + ElementsKind to_kind = instr->to_kind(); Label not_applicable; bool is_simple_map_transition = @@ -3960,7 +4458,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); __ j(not_equal, ¬_applicable, branch_distance); if (is_simple_map_transition) { - Register object_reg = ToRegister(instr->object()); + Register new_map_reg = ToRegister(instr->new_map_temp()); Handle<Map> map = instr->hydrogen()->transitioned_map(); __ 
mov(FieldOperand(object_reg, HeapObject::kMapOffset), Immediate(map)); @@ -3969,8 +4467,23 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { __ RecordWriteForMap(object_reg, to_map, new_map_reg, ToRegister(instr->temp()), kDontSaveFPRegs); + } else if (FLAG_compiled_transitions) { + PushSafepointRegistersScope scope(this); + if (!object_reg.is(eax)) { + __ push(object_reg); + } + LoadContextFromDeferred(instr->context()); + if (!object_reg.is(eax)) { + __ pop(eax); + } + __ mov(ebx, to_map); + TransitionElementsKindStub stub(from_kind, to_kind); + __ CallStub(&stub); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); } else if (IsFastSmiElementsKind(from_kind) && IsFastDoubleElementsKind(to_kind)) { + Register new_map_reg = ToRegister(instr->new_map_temp()); __ mov(new_map_reg, to_map); Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(edx)); @@ -3980,6 +4493,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { RelocInfo::CODE_TARGET, instr); } else if (IsFastDoubleElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) { + Register new_map_reg = ToRegister(instr->new_map_temp()); __ mov(new_map_reg, to_map); Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(edx)); @@ -4067,7 +4581,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { Register result = ToRegister(instr->result()); ASSERT(!char_code.is(result)); - __ cmp(char_code, String::kMaxAsciiCharCode); + __ cmp(char_code, String::kMaxOneByteCharCode); __ j(above, deferred->entry()); __ Set(result, Immediate(factory()->single_character_string_cache())); __ mov(result, FieldOperand(result, @@ -4107,20 +4621,26 @@ void LCodeGen::DoStringAdd(LStringAdd* instr) { EmitPushTaggedOperand(instr->left()); EmitPushTaggedOperand(instr->right()); StringAddStub stub(NO_STRING_CHECK_IN_STUB); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { - LOperand* input = instr->value(); - ASSERT(input->IsRegister() || input->IsStackSlot()); - LOperand* output = instr->result(); - ASSERT(output->IsDoubleRegister()); - __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + LOperand* input = instr->value(); + ASSERT(input->IsRegister() || input->IsStackSlot()); + LOperand* output = instr->result(); + ASSERT(output->IsDoubleRegister()); + __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input)); + } else { + UNREACHABLE(); + } } void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { + CpuFeatureScope scope(masm(), SSE2); LOperand* input = instr->value(); LOperand* output = instr->result(); LOperand* temp = instr->temp(); @@ -4198,9 +4718,27 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // the value in there. If that fails, call the runtime system. 
__ SmiUntag(reg); __ xor_(reg, 0x80000000); - __ cvtsi2sd(xmm0, Operand(reg)); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope feature_scope(masm(), SSE2); + __ cvtsi2sd(xmm0, Operand(reg)); + } else { + __ push(reg); + __ fild_s(Operand(esp, 0)); + __ pop(reg); + } } else { - __ LoadUint32(xmm0, reg, xmm1); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope feature_scope(masm(), SSE2); + __ LoadUint32(xmm0, reg, xmm1); + } else { + // There's no fild variant for unsigned values, so zero-extend to a 64-bit + // int manually. + __ push(Immediate(0)); + __ push(reg); + __ fild_d(Operand(esp, 0)); + __ pop(reg); + __ pop(reg); + } } if (FLAG_inline_new) { @@ -4229,7 +4767,12 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, // Done. Put the value in xmm0 into the value of the allocated heap // number. __ bind(&done); - __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope feature_scope(masm(), SSE2); + __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0); + } else { + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); + } __ StoreToSafepointRegisterSlot(reg, reg); } @@ -4245,18 +4788,83 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { LNumberTagD* instr_; }; - XMMRegister input_reg = ToDoubleRegister(instr->value()); Register reg = ToRegister(instr->result()); - Register tmp = ToRegister(instr->temp()); + bool convert_hole = false; + HValue* change_input = instr->hydrogen()->value(); + if (change_input->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(change_input); + convert_hole = load->UsesMustHandleHole(); + } + + Label no_special_nan_handling; + Label done; + if (convert_hole) { + bool use_sse2 = CpuFeatures::IsSupported(SSE2); + if (use_sse2) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister input_reg = ToDoubleRegister(instr->value()); + __ ucomisd(input_reg, input_reg); + } else { + if (!IsX87TopOfStack(instr->value())) { + __ fld_d(ToOperand(instr->value())); + } + __ fld(0); + __ fld(0); + __ FCmp(); + } + + __ j(parity_odd, &no_special_nan_handling); + __ sub(esp, Immediate(kDoubleSize)); + if (use_sse2) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister input_reg = ToDoubleRegister(instr->value()); + __ movdbl(MemOperand(esp, 0), input_reg); + } else { + __ fld(0); + __ fstp_d(MemOperand(esp, 0)); + } + __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), + Immediate(kHoleNanUpper32)); + Label canonicalize; + __ j(not_equal, &canonicalize); + __ add(esp, Immediate(kDoubleSize)); + __ mov(reg, factory()->the_hole_value()); + __ jmp(&done); + __ bind(&canonicalize); + __ add(esp, Immediate(kDoubleSize)); + ExternalReference nan = + ExternalReference::address_of_canonical_non_hole_nan(); + if (use_sse2) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister input_reg = ToDoubleRegister(instr->value()); + __ movdbl(input_reg, Operand::StaticVariable(nan)); + } else { + __ fstp(0); + __ fld_d(Operand::StaticVariable(nan)); + } + } + + __ bind(&no_special_nan_handling); DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { + Register tmp = ToRegister(instr->temp()); __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry()); } else { __ jmp(deferred->entry()); } __ bind(deferred->exit()); - __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + XMMRegister input_reg = ToDoubleRegister(instr->value()); + __ movdbl(FieldOperand(reg, 
HeapNumber::kValueOffset), input_reg); + } else { + if (!IsX87TopOfStack(instr->value())) { + __ fld_d(ToOperand(instr->value())); + } + __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset)); + } + __ bind(&done); } @@ -4307,44 +4915,59 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, XMMRegister result_reg, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, - LEnvironment* env) { + LEnvironment* env, + NumberUntagDMode mode) { Label load_smi, done; - // Smi check. - __ JumpIfSmi(input_reg, &load_smi, Label::kNear); + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + // Smi check. + __ JumpIfSmi(input_reg, &load_smi, Label::kNear); - // Heap number map check. - __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), - factory()->heap_number_map()); - if (deoptimize_on_undefined) { - DeoptimizeIf(not_equal, env); - } else { - Label heap_number; - __ j(equal, &heap_number, Label::kNear); + // Heap number map check. + __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), + factory()->heap_number_map()); + if (deoptimize_on_undefined) { + DeoptimizeIf(not_equal, env); + } else { + Label heap_number; + __ j(equal, &heap_number, Label::kNear); - __ cmp(input_reg, factory()->undefined_value()); - DeoptimizeIf(not_equal, env); + __ cmp(input_reg, factory()->undefined_value()); + DeoptimizeIf(not_equal, env); - // Convert undefined to NaN. - ExternalReference nan = - ExternalReference::address_of_canonical_non_hole_nan(); - __ movdbl(result_reg, Operand::StaticVariable(nan)); - __ jmp(&done, Label::kNear); + // Convert undefined to NaN. + ExternalReference nan = + ExternalReference::address_of_canonical_non_hole_nan(); + __ movdbl(result_reg, Operand::StaticVariable(nan)); + __ jmp(&done, Label::kNear); - __ bind(&heap_number); - } - // Heap number to XMM conversion. - __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - XMMRegister xmm_scratch = xmm0; - __ xorps(xmm_scratch, xmm_scratch); - __ ucomisd(result_reg, xmm_scratch); - __ j(not_zero, &done, Label::kNear); - __ movmskpd(temp_reg, result_reg); - __ test_b(temp_reg, 1); - DeoptimizeIf(not_zero, env); + __ bind(&heap_number); + } + // Heap number to XMM conversion. + __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { + XMMRegister xmm_scratch = xmm0; + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(result_reg, xmm_scratch); + __ j(not_zero, &done, Label::kNear); + __ movmskpd(temp_reg, result_reg); + __ test_b(temp_reg, 1); + DeoptimizeIf(not_zero, env); + } + __ jmp(&done, Label::kNear); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { + __ test(input_reg, Immediate(kSmiTagMask)); + DeoptimizeIf(not_equal, env); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { + __ test(input_reg, Immediate(kSmiTagMask)); + __ j(zero, &load_smi); + ExternalReference hole_nan_reference = + ExternalReference::address_of_the_hole_nan(); + __ movdbl(result_reg, Operand::StaticVariable(hole_nan_reference)); + __ jmp(&done, Label::kNear); + } else { + ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } - __ jmp(&done, Label::kNear); // Smi to XMM conversion __ bind(&load_smi); @@ -4368,13 +4991,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { // Check for undefined. Undefined is converted to zero for truncating // conversions. 
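
The non-SSE2 branch of DoDeferredNumberTagI above notes that x87 fild only loads signed integers, so the uint32 is pushed with a zero high word and loaded as a signed 64-bit value. A minimal sketch of the same equivalence in portable C++ (illustrative only; the function name is made up):

    // Illustrative sketch only; not V8 code. fild_d reads a signed 64-bit
    // integer; zero-extending a uint32 into the low half of an int64 yields a
    // non-negative value of the same magnitude, so the conversion is exact.
    #include <cstdint>
    #include <cstdio>

    static double Uint32ToDouble(uint32_t value) {
      int64_t zero_extended = static_cast<int64_t>(value);  // high 32 bits are 0
      return static_cast<double>(zero_extended);
    }

    int main() {
      std::printf("%.0f\n", Uint32ToDouble(0xFFFFFFFFu));  // prints 4294967295
      return 0;
    }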
__ cmp(input_reg, factory()->undefined_value()); + __ RecordComment("Deferred TaggedToI: cannot truncate"); DeoptimizeIf(not_equal, instr->environment()); __ mov(input_reg, 0); __ jmp(&done, Label::kNear); __ bind(&heap_number); if (CpuFeatures::IsSupported(SSE3)) { - CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm(), SSE3); Label convert; // Use more powerful conversion when sse3 is available. // Load x87 register with heap number. @@ -4388,6 +5012,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ j(less, &convert, Label::kNear); // Pop FPU stack before deoptimizing. __ fstp(0); + __ RecordComment("Deferred TaggedToI: exponent too big"); DeoptimizeIf(no_condition, instr->environment()); // Reserve space for 64 bit answer. @@ -4398,6 +5023,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result. __ add(Operand(esp), Immediate(kDoubleSize)); } else { + CpuFeatureScope scope(masm(), SSE2); XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); __ cvttsd2si(input_reg, Operand(xmm0)); @@ -4411,8 +5037,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { DeoptimizeIf(not_equal, instr->environment()); DeoptimizeIf(parity_even, instr->environment()); // NaN. } - } else { + } else if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); // Deoptimize if we don't have a heap number. + __ RecordComment("Deferred TaggedToI: not a heap number"); DeoptimizeIf(not_equal, instr->environment()); XMMRegister xmm_temp = ToDoubleRegister(instr->temp()); @@ -4420,15 +5048,20 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { __ cvttsd2si(input_reg, Operand(xmm0)); __ cvtsi2sd(xmm_temp, Operand(input_reg)); __ ucomisd(xmm0, xmm_temp); + __ RecordComment("Deferred TaggedToI: lost precision"); DeoptimizeIf(not_equal, instr->environment()); + __ RecordComment("Deferred TaggedToI: NaN"); DeoptimizeIf(parity_even, instr->environment()); // NaN. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ test(input_reg, Operand(input_reg)); __ j(not_zero, &done); __ movmskpd(input_reg, xmm0); __ and_(input_reg, 1); + __ RecordComment("Deferred TaggedToI: minus zero"); DeoptimizeIf(not_zero, instr->environment()); } + } else { + UNREACHABLE(); } __ bind(&done); } @@ -4471,19 +5104,42 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { LOperand* result = instr->result(); ASSERT(result->IsDoubleRegister()); - Register input_reg = ToRegister(input); - XMMRegister result_reg = ToDoubleRegister(result); - - bool deoptimize_on_minus_zero = - instr->hydrogen()->deoptimize_on_minus_zero(); - Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg; + if (CpuFeatures::IsSupported(SSE2)) { + CpuFeatureScope scope(masm(), SSE2); + Register input_reg = ToRegister(input); + XMMRegister result_reg = ToDoubleRegister(result); + + bool deoptimize_on_minus_zero = + instr->hydrogen()->deoptimize_on_minus_zero(); + Register temp_reg = deoptimize_on_minus_zero ? 
ToRegister(temp) : no_reg; + + NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; + HValue* value = instr->hydrogen()->value(); + if (value->type().IsSmi()) { + if (value->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(value); + if (load->UsesMustHandleHole()) { + if (load->hole_mode() == ALLOW_RETURN_HOLE) { + mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; + } else { + mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; + } + } else { + mode = NUMBER_CANDIDATE_IS_SMI; + } + } + } - EmitNumberUntagD(input_reg, - temp_reg, - result_reg, - instr->hydrogen()->deoptimize_on_undefined(), - deoptimize_on_minus_zero, - instr->environment()); + EmitNumberUntagD(input_reg, + temp_reg, + result_reg, + instr->hydrogen()->deoptimize_on_undefined(), + deoptimize_on_minus_zero, + instr->environment(), + mode); + } else { + UNIMPLEMENTED(); + } } @@ -4492,6 +5148,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { ASSERT(input->IsDoubleRegister()); LOperand* result = instr->result(); ASSERT(result->IsRegister()); + CpuFeatureScope scope(masm(), SSE2); XMMRegister input_reg = ToDoubleRegister(input); Register result_reg = ToRegister(result); @@ -4503,7 +5160,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { __ cmp(result_reg, 0x80000000u); if (CpuFeatures::IsSupported(SSE3)) { // This will deoptimize if the exponent of the input in out of range. - CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm(), SSE3); Label convert, done; __ j(not_equal, &done, Label::kNear); __ sub(Operand(esp), Immediate(kDoubleSize)); @@ -4665,7 +5322,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { void LCodeGen::DoCheckFunction(LCheckFunction* instr) { Handle<JSFunction> target = instr->hydrogen()->target(); - if (isolate()->heap()->InNewSpace(*target)) { + if (instr->hydrogen()->target_in_new_space()) { Register reg = ToRegister(instr->value()); Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell(target); @@ -4681,10 +5338,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) { void LCodeGen::DoCheckMapCommon(Register reg, Handle<Map> map, CompareMapMode mode, - LEnvironment* env) { + LInstruction* instr) { Label success; __ CompareMap(reg, map, &success, mode); - DeoptimizeIf(not_equal, env); + DeoptimizeIf(not_equal, instr->environment()); __ bind(&success); } @@ -4702,12 +5359,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { __ j(equal, &success); } Handle<Map> map = map_set->last(); - DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment()); + DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr); __ bind(&success); } void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { + CpuFeatureScope scope(masm(), SSE2); XMMRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampDoubleToUint8(value_reg, xmm0, result_reg); @@ -4722,6 +5380,8 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { + CpuFeatureScope scope(masm(), SSE2); + ASSERT(instr->unclamped()->Equals(instr->result())); Register input_reg = ToRegister(instr->unclamped()); Label is_smi, done, heap_number; @@ -4756,28 +5416,29 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { + ASSERT(instr->temp()->Equals(instr->result())); Register reg = ToRegister(instr->temp()); - Handle<JSObject> holder = instr->holder(); - Handle<JSObject> current_prototype = instr->prototype(); - - // Load prototype 
object. - __ LoadHeapObject(reg, current_prototype); + ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); + ZoneList<Handle<Map> >* maps = instr->maps(); - // Check prototype maps up to the holder. - while (!current_prototype.is_identical_to(holder)) { - DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); + ASSERT(prototypes->length() == maps->length()); - current_prototype = - Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); - // Load next prototype object. - __ LoadHeapObject(reg, current_prototype); + // TODO(ulan): Move this check to hydrogen and split HCheckPrototypeMaps + // into two instruction: one that checks the prototypes and another that + // loads the holder (HConstant). Find a way to do it without breaking + // parallel recompilation. + if (instr->hydrogen()->CanOmitPrototypeChecks()) { + for (int i = 0; i < maps->length(); i++) { + prototype_maps_.Add(maps->at(i), info()->zone()); + } + __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1)); + } else { + for (int i = 0; i < prototypes->length(); i++) { + __ LoadHeapObject(reg, prototypes->at(i)); + DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr); + } } - - // Check the holder map. - DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); } @@ -4808,12 +5469,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { // the constructor's prototype changes, but instance size and property // counts remain unchanged (if slack tracking finished). ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); - __ AllocateInNewSpace(instance_size, - result, - no_reg, - scratch, - deferred->entry(), - TAG_OBJECT); + __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(), + TAG_OBJECT); __ bind(deferred->exit()); if (FLAG_debug_code) { @@ -4879,11 +5536,69 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { } +void LCodeGen::DoAllocate(LAllocate* instr) { + class DeferredAllocate: public LDeferredCode { + public: + DeferredAllocate(LCodeGen* codegen, LAllocate* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LAllocate* instr_; + }; + + DeferredAllocate* deferred = + new(zone()) DeferredAllocate(this, instr); + + Register result = ToRegister(instr->result()); + Register temp = ToRegister(instr->temp()); + + // Allocate memory for the object. + AllocationFlags flags = TAG_OBJECT; + if (instr->hydrogen()->MustAllocateDoubleAligned()) { + flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); + } + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); + } + __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); + } else { + Register size = ToRegister(instr->size()); + __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags); + } + + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredAllocate(LAllocate* instr) { + Register size = ToRegister(instr->size()); + Register result = ToRegister(instr->result()); + + __ SmiTag(size); + PushSafepointRegistersScope scope(this); + // TODO(3095996): Get rid of this. 
For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. + if (!size.is(result)) { + __ StoreToSafepointRegisterSlot(result, size); + } + __ push(size); + CallRuntimeFromDeferred( + Runtime::kAllocateInNewSpace, 1, instr, instr->context()); + __ StoreToSafepointRegisterSlot(result, eax); +} + + void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { ASSERT(ToRegister(instr->context()).is(esi)); Handle<FixedArray> literals(instr->environment()->closure()->literals()); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); + AllocationSiteMode allocation_site_mode = + instr->hydrogen()->allocation_site_mode(); // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has @@ -4914,8 +5629,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { ASSERT(instr->hydrogen()->depth() == 1); FastCloneShallowArrayStub::Mode mode = FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else if (instr->hydrogen()->depth() > 1) { CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { @@ -4923,10 +5638,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { } else { FastCloneShallowArrayStub::Mode mode = boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS - ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS + : FastCloneShallowArrayStub::CLONE_ELEMENTS; + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -4934,10 +5649,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset) { + int* offset, + AllocationSiteMode mode) { ASSERT(!source.is(ecx)); ASSERT(!result.is(ecx)); + bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE && + object->map()->CanTrackAllocationSite(); + if (FLAG_debug_code) { __ LoadHeapObject(ecx, object); __ cmp(source, ecx); @@ -4960,8 +5679,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // this object and its backing store. int object_offset = *offset; int object_size = object->map()->instance_size(); - int elements_offset = *offset + object_size; int elements_size = has_elements ? elements->Size() : 0; + int elements_offset = *offset + object_size; + if (create_allocation_site_info) { + elements_offset += AllocationSiteInfo::kSize; + *offset += AllocationSiteInfo::kSize; + } + *offset += object_size + elements_size; // Copy object header. @@ -4980,13 +5704,15 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // Copy in-object properties. 
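
DoAllocate above emits an inline fast path through __ Allocate and only reaches the deferred code, which calls Runtime::kAllocateInNewSpace, when that fast path fails. A rough sketch of the fast-path/slow-path split, with a toy bump allocator standing in for new space and malloc standing in for the runtime call (illustrative only; no alignment, pretenuring, or GC concerns):

    // Illustrative sketch only; not V8 code. Models the shape of DoAllocate:
    // try a bump-pointer allocation first, fall back to a slower "runtime"
    // allocation when the current chunk does not have enough room.
    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    static char g_chunk[256];
    static size_t g_top = 0;

    static void* AllocateSlow(size_t size) {   // stand-in for the runtime call
      std::printf("deferred path for %zu bytes\n", size);
      return std::malloc(size);
    }

    static void* Allocate(size_t size) {
      if (g_top + size <= sizeof(g_chunk)) {   // inline fast path
        void* result = g_chunk + g_top;
        g_top += size;
        return result;
      }
      return AllocateSlow(size);               // deferred slow path
    }

    int main() {
      void* fast = Allocate(64);
      void* slow = Allocate(512);              // too big: takes the slow path
      std::printf("fast=%p slow=%p\n", fast, slow);
      std::free(slow);
      return 0;
    }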
for (int i = 0; i < inobject_properties; i++) { int total_offset = object_offset + object->GetInObjectPropertyOffset(i); - Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); + Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i), + isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ lea(ecx, Operand(result, *offset)); __ mov(FieldOperand(result, total_offset), ecx); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value)); __ mov(FieldOperand(result, total_offset), ecx); @@ -4995,6 +5721,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, } } + // Build Allocation Site Info if desired + if (create_allocation_site_info) { + __ mov(FieldOperand(result, object_size), + Immediate(Handle<Map>(isolate()->heap()-> + allocation_site_info_map()))); + __ mov(FieldOperand(result, object_size + kPointerSize), source); + } + if (has_elements) { // Copy elements backing store header. __ LoadHeapObject(source, elements); @@ -5021,13 +5755,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); for (int i = 0; i < elements_length; i++) { int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); - Handle<Object> value(fast_elements->get(i)); + Handle<Object> value(fast_elements->get(i), isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ lea(ecx, Operand(result, *offset)); __ mov(FieldOperand(result, total_offset), ecx); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value)); __ mov(FieldOperand(result, total_offset), ecx); @@ -5067,7 +5802,7 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { // Allocate all objects that are part of the literal in one big // allocation. This avoids multiple limit checks. Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -5077,7 +5812,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { __ bind(&allocated); int offset = 0; __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate()); - EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset); + EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset, + instr->hydrogen()->allocation_site_mode()); ASSERT_EQ(size, offset); } @@ -5088,28 +5824,36 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); - // Set up the parameters to the stub/runtime call. - __ PushHeapObject(literals); - __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); - __ push(Immediate(constant_properties)); int flags = instr->hydrogen()->fast_elements() ? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags; flags |= instr->hydrogen()->has_function() ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; - __ push(Immediate(Smi::FromInt(flags))); - // Pick the right runtime function or stub to call. 
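
EmitDeepCopy above lays the copied object, an optional AllocationSiteInfo record, and the elements backing store out back to back inside one flat allocation, tracking positions through the running *offset. A small sketch of that bookkeeping (illustrative only; the sizes are arbitrary and the struct is not V8's):

    // Illustrative sketch only; not V8 code. Mirrors the offset bookkeeping
    // in EmitDeepCopy: object fields first, then an optional allocation site
    // info record, then the elements store, all inside one allocation.
    #include <cstdio>

    struct Layout {
      int object_offset;
      int site_info_offset;   // -1 when no allocation site info is emitted
      int elements_offset;
      int end_offset;
    };

    static Layout ComputeLayout(int start, int object_size, int elements_size,
                                bool track_allocation_site, int site_info_size) {
      Layout layout;
      layout.object_offset = start;
      int cursor = start + object_size;
      layout.site_info_offset = track_allocation_site ? cursor : -1;
      if (track_allocation_site) cursor += site_info_size;
      layout.elements_offset = cursor;
      layout.end_offset = cursor + elements_size;
      return layout;
    }

    int main() {
      Layout l = ComputeLayout(0, 32, 24, true, 8);
      std::printf("object=%d site=%d elements=%d end=%d\n",
                  l.object_offset, l.site_info_offset, l.elements_offset,
                  l.end_offset);
      return 0;
    }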
+ // Set up the parameters to the stub/runtime call and pick the right + // runtime function or stub to call. int properties_count = constant_properties->length() / 2; if (instr->hydrogen()->depth() > 1) { + __ PushHeapObject(literals); + __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ push(Immediate(constant_properties)); + __ push(Immediate(Smi::FromInt(flags))); CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); } else if (flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ PushHeapObject(literals); + __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ push(Immediate(constant_properties)); + __ push(Immediate(Smi::FromInt(flags))); CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); } else { + __ LoadHeapObject(eax, literals); + __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ mov(ecx, Immediate(constant_properties)); + __ mov(edx, Immediate(Smi::FromInt(flags))); FastCloneShallowObjectStub stub(properties_count); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -5148,7 +5892,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -5182,7 +5926,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { if (!pretenure && shared_info->num_literals() == 0) { FastNewClosureStub stub(shared_info->language_mode()); __ push(Immediate(shared_info)); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { __ push(esi); __ push(Immediate(shared_info)); @@ -5221,13 +5965,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Register input, Handle<String> type_name) { Condition final_branch_condition = no_condition; - if (type_name->Equals(heap()->number_symbol())) { + if (type_name->Equals(heap()->number_string())) { __ JumpIfSmi(input, true_label); __ cmp(FieldOperand(input, HeapObject::kMapOffset), factory()->heap_number_map()); final_branch_condition = equal; - } else if (type_name->Equals(heap()->string_symbol())) { + } else if (type_name->Equals(heap()->string_string())) { __ JumpIfSmi(input, false_label); __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); __ j(above_equal, false_label); @@ -5235,17 +5979,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, 1 << Map::kIsUndetectable); final_branch_condition = zero; - } else if (type_name->Equals(heap()->boolean_symbol())) { + } else if (type_name->Equals(heap()->boolean_string())) { __ cmp(input, factory()->true_value()); __ j(equal, true_label); __ cmp(input, factory()->false_value()); final_branch_condition = equal; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) { + } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { __ cmp(input, factory()->null_value()); final_branch_condition = equal; - } else if (type_name->Equals(heap()->undefined_symbol())) { + } else if (type_name->Equals(heap()->undefined_string())) { __ cmp(input, factory()->undefined_value()); __ j(equal, true_label); __ JumpIfSmi(input, false_label); @@ 
-5255,7 +5999,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, 1 << Map::kIsUndetectable); final_branch_condition = not_zero; - } else if (type_name->Equals(heap()->function_symbol())) { + } else if (type_name->Equals(heap()->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); __ CmpObjectType(input, JS_FUNCTION_TYPE, input); @@ -5263,13 +6007,19 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); final_branch_condition = equal; - } else if (type_name->Equals(heap()->object_symbol())) { + } else if (type_name->Equals(heap()->object_string())) { __ JumpIfSmi(input, false_label); if (!FLAG_harmony_typeof) { __ cmp(input, factory()->null_value()); __ j(equal, true_label); } - __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); + if (FLAG_harmony_symbols) { + __ CmpObjectType(input, SYMBOL_TYPE, input); + __ j(equal, true_label); + __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + } else { + __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); + } __ j(below, false_label); __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); __ j(above, false_label); @@ -5314,13 +6064,15 @@ void LCodeGen::EmitIsConstructCall(Register temp) { void LCodeGen::EnsureSpaceForLazyDeopt() { - // Ensure that we have enough space after the previous lazy-bailout - // instruction for patching the code here. - int current_pc = masm()->pc_offset(); - int patch_size = Deoptimizer::patch_size(); - if (current_pc < last_lazy_deopt_pc_ + patch_size) { - int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; - __ Nop(padding_size); + if (!info()->IsStub()) { + // Ensure that we have enough space after the previous lazy-bailout + // instruction for patching the code here. + int current_pc = masm()->pc_offset(); + int patch_size = Deoptimizer::patch_size(); + if (current_pc < last_lazy_deopt_pc_ + patch_size) { + int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc; + __ Nop(padding_size); + } } last_lazy_deopt_pc_ = masm()->pc_offset(); } @@ -5340,6 +6092,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) { } +void LCodeGen::DoDummyUse(LDummyUse* instr) { + // Nothing to see here, move on! 
+} + + void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { LOperand* obj = instr->object(); LOperand* key = instr->key(); @@ -5396,7 +6153,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { ASSERT(instr->context()->IsRegister()); ASSERT(ToRegister(instr->context()).is(esi)); StackCheckStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); EnsureSpaceForLazyDeopt(); __ bind(&done); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h index 6670024dba..3a38e321de 100644 --- a/deps/v8/src/ia32/lithium-codegen-ia32.h +++ b/deps/v8/src/ia32/lithium-codegen-ia32.h @@ -55,15 +55,19 @@ class LCodeGen BASE_EMBEDDED { current_instruction_(-1), instructions_(chunk->instructions()), deoptimizations_(4, info->zone()), + jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), + prototype_maps_(0, info->zone()), inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), translations_(info->zone()), deferred_(8, info->zone()), dynamic_frame_alignment_(false), + support_aligned_spilled_doubles_(false), osr_pc_offset_(-1), last_lazy_deopt_pc_(0), + frame_is_built_(false), safepoints_(info->zone()), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { @@ -78,10 +82,20 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + bool NeedsEagerFrame() const { + return GetStackSlotCount() > 0 || + info()->is_non_deferred_calling() || + !info()->IsStub(); + } + bool NeedsDeferredFrame() const { + return !NeedsEagerFrame() && info()->is_deferred_calling(); + } + // Support for converting LOperands to assembler types. Operand ToOperand(LOperand* op) const; Register ToRegister(LOperand* op) const; XMMRegister ToDoubleRegister(LOperand* op) const; + bool IsX87TopOfStack(LOperand* op) const; bool IsInteger32(LConstantOperand* op) const; Immediate ToInteger32Immediate(LOperand* op) const { @@ -90,6 +104,9 @@ class LCodeGen BASE_EMBEDDED { Handle<Object> ToHandle(LConstantOperand* op) const; + // A utility for instructions that return floating point values on X87. + void HandleX87FPReturnValue(LInstruction* instr); + // The operand denoting the second word (the one with a higher address) of // a double stack slot. Operand HighOperand(LOperand* op); @@ -118,11 +135,12 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredAllocateObject(LAllocateObject* instr); + void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); void DoCheckMapCommon(Register reg, Handle<Map> map, - CompareMapMode mode, LEnvironment* env); + CompareMapMode mode, LInstruction* instr); // Parallel move support. void DoParallelMove(LParallelMove* move); @@ -172,7 +190,6 @@ class LCodeGen BASE_EMBEDDED { Register temporary2); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - int GetParameterCount() const { return scope()->num_parameters(); } void Abort(const char* reason); void Comment(const char* format, ...); @@ -184,9 +201,7 @@ class LCodeGen BASE_EMBEDDED { bool GeneratePrologue(); bool GenerateBody(); bool GenerateDeferredCode(); - // Pad the reloc info to ensure that we have enough space to patch during - // deoptimization. 
- bool GenerateRelocPadding(); + bool GenerateJumpTable(); bool GenerateSafepointTable(); enum SafepointMode { @@ -219,6 +234,8 @@ class LCodeGen BASE_EMBEDDED { LInstruction* instr, LOperand* context); + void LoadContextFromDeferred(LOperand* context); + enum EDIState { EDI_UNINITIALIZED, EDI_CONTAINS_TARGET @@ -243,8 +260,10 @@ class LCodeGen BASE_EMBEDDED { LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count); + void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code); void PopulateDeoptimizationData(Handle<Code> code); int DefineDeoptimizationLiteral(Handle<Object> literal); @@ -266,7 +285,6 @@ class LCodeGen BASE_EMBEDDED { void EmitIntegerMathAbs(LUnaryMathOperation* instr); void DoMathAbs(LUnaryMathOperation* instr); void DoMathFloor(LUnaryMathOperation* instr); - void DoMathRound(LUnaryMathOperation* instr); void DoMathSqrt(LUnaryMathOperation* instr); void DoMathLog(LUnaryMathOperation* instr); void DoMathTan(LUnaryMathOperation* instr); @@ -288,16 +306,14 @@ class LCodeGen BASE_EMBEDDED { static Condition TokenToCondition(Token::Value op, bool is_unsigned); void EmitGoto(int block); void EmitBranch(int left_block, int right_block, Condition cc); - void EmitNumberUntagD(Register input, - Register temp, - XMMRegister result, - bool deoptimize_on_undefined, - bool deoptimize_on_minus_zero, - LEnvironment* env); - - void DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand); + void EmitNumberUntagD( + Register input, + Register temp, + XMMRegister result, + bool deoptimize_on_undefined, + bool deoptimize_on_minus_zero, + LEnvironment* env, + NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED); // Emits optimized code for typeof x == "y". Modifies input register. // Returns the condition on which a final split to @@ -337,9 +353,18 @@ class LCodeGen BASE_EMBEDDED { void EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset); + int* offset, + AllocationSiteMode mode); void EnsureSpaceForLazyDeopt(); + void DoLoadKeyedExternalArray(LLoadKeyed* instr); + void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); + void DoLoadKeyedFixedArray(LLoadKeyed* instr); + void DoStoreKeyedExternalArray(LStoreKeyed* instr); + void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); + void DoStoreKeyedFixedArray(LStoreKeyed* instr); + + void EmitReturn(LReturn* instr, bool dynamic_frame_alignment); // Emits code for pushing either a tagged constant, a (non-double) // register, or a stack slot operand. @@ -350,19 +375,35 @@ class LCodeGen BASE_EMBEDDED { MacroAssembler* const masm_; CompilationInfo* const info_; + struct JumpTableEntry { + inline JumpTableEntry(Address entry, bool frame, bool is_lazy) + : label(), + address(entry), + needs_frame(frame), + is_lazy_deopt(is_lazy) { } + Label label; + Address address; + bool needs_frame; + bool is_lazy_deopt; + }; + int current_block_; int current_instruction_; const ZoneList<LInstruction*>* instructions_; ZoneList<LEnvironment*> deoptimizations_; + ZoneList<JumpTableEntry> jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; + ZoneList<Handle<Map> > prototype_maps_; int inlined_function_count_; Scope* const scope_; Status status_; TranslationBuffer translations_; ZoneList<LDeferredCode*> deferred_; bool dynamic_frame_alignment_; + bool support_aligned_spilled_doubles_; int osr_pc_offset_; int last_lazy_deopt_pc_; + bool frame_is_built_; // Builder that keeps track of safepoints in the code. 
The table // itself is emitted at the end of the generated code. @@ -380,6 +421,7 @@ class LCodeGen BASE_EMBEDDED { ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); codegen_->masm_->PushSafepointRegisters(); codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; + ASSERT(codegen_->info()->is_calling()); } ~PushSafepointRegistersScope() { diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc index 6428916fef..b062ba5d31 100644 --- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc +++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc @@ -191,7 +191,7 @@ int LGapResolver::CountSourceUses(LOperand* operand) { Register LGapResolver::GetFreeRegisterNot(Register reg) { int skip_index = reg.is(no_reg) ? -1 : Register::ToAllocationIndex(reg); - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { if (source_uses_[i] == 0 && destination_uses_[i] > 0 && i != skip_index) { return Register::FromAllocationIndex(i); } @@ -204,7 +204,7 @@ bool LGapResolver::HasBeenReset() { if (!moves_.is_empty()) return false; if (spilled_register_ >= 0) return false; - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { if (source_uses_[i] != 0) return false; if (destination_uses_[i] != 0) return false; } @@ -256,7 +256,7 @@ Register LGapResolver::EnsureTempRegister() { // 3. Prefer to spill a register that is not used in any remaining move // because it will not need to be restored until the end. - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { if (source_uses_[i] == 0 && destination_uses_[i] == 0) { Register scratch = Register::FromAllocationIndex(i); __ push(scratch); @@ -324,6 +324,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), SSE2); XMMRegister src = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { XMMRegister dst = cgen_->ToDoubleRegister(destination); @@ -334,6 +335,7 @@ void LGapResolver::EmitMove(int index) { __ movdbl(dst, src); } } else if (source->IsDoubleStackSlot()) { + CpuFeatureScope scope(cgen_->masm(), SSE2); ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot()); Operand src = cgen_->ToOperand(source); @@ -346,7 +348,6 @@ void LGapResolver::EmitMove(int index) { __ movdbl(xmm0, src); __ movdbl(dst, xmm0); } - } else { UNREACHABLE(); } @@ -410,6 +411,7 @@ void LGapResolver::EmitSwap(int index) { __ mov(src, tmp0); } } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), SSE2); // XMM register-register swap. We rely on having xmm0 // available as a fixed scratch register. XMMRegister src = cgen_->ToDoubleRegister(source); @@ -419,6 +421,7 @@ void LGapResolver::EmitSwap(int index) { __ movaps(dst, xmm0); } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), SSE2); // XMM register-memory swap. We rely on having xmm0 // available as a fixed scratch register. 
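
EmitNumberUntagD, declared above with a NumberUntagDMode parameter defaulting to NUMBER_CANDIDATE_IS_ANY_TAGGED, varies how much checking it emits depending on what the caller already knows about the tagged input. A toy model of the four modes (illustrative only; the Tagged struct is a stand-in, not V8's object representation, and "deopt" is modeled as an exception):

    // Illustrative sketch only; not V8 code. How a tagged value becomes a
    // double depends on what has already been proven about it.
    #include <cmath>
    #include <cstdio>
    #include <stdexcept>

    enum NumberUntagDMode {
      NUMBER_CANDIDATE_IS_ANY_TAGGED,       // full smi + heap-number checks
      NUMBER_CANDIDATE_IS_SMI,              // proven smi: just untag
      NUMBER_CANDIDATE_IS_SMI_OR_HOLE,      // smi expected: anything else deopts
      NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE  // smi expected: the hole becomes NaN
    };

    struct Tagged {
      enum Kind { kSmi, kHeapNumber, kHole } kind;
      int smi;
      double number;
    };

    static double Untag(const Tagged& value, NumberUntagDMode mode) {
      switch (mode) {
        case NUMBER_CANDIDATE_IS_ANY_TAGGED:
          if (value.kind == Tagged::kSmi) return value.smi;
          if (value.kind == Tagged::kHeapNumber) return value.number;
          throw std::runtime_error("deopt: unexpected value");
        case NUMBER_CANDIDATE_IS_SMI:
          return value.smi;                  // caller proved this is a smi
        case NUMBER_CANDIDATE_IS_SMI_OR_HOLE:
          if (value.kind == Tagged::kSmi) return value.smi;
          throw std::runtime_error("deopt: not a smi");
        case NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE:
          if (value.kind == Tagged::kSmi) return value.smi;
          return std::nan("");               // holes read back as a NaN
      }
      return 0.0;
    }

    int main() {
      Tagged hole = { Tagged::kHole, 0, 0.0 };
      std::printf("hole reads back as %f\n",
                  Untag(hole, NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE));
      return 0;
    }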
ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot()); @@ -432,6 +435,7 @@ void LGapResolver::EmitSwap(int index) { __ movdbl(reg, Operand(xmm0)); } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) { + CpuFeatureScope scope(cgen_->masm(), SSE2); // Double-width memory-to-memory. Spill on demand to use a general // purpose temporary register and also rely on having xmm0 available as // a fixed scratch register. diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h index 0c81d72ee3..3a58f585c3 100644 --- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.h +++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.h @@ -97,8 +97,8 @@ class LGapResolver BASE_EMBEDDED { ZoneList<LMoveOperands> moves_; // Source and destination use counts for the general purpose registers. - int source_uses_[Register::kNumAllocatableRegisters]; - int destination_uses_[Register::kNumAllocatableRegisters]; + int source_uses_[Register::kMaxNumAllocatableRegisters]; + int destination_uses_[Register::kMaxNumAllocatableRegisters]; // If we had to spill on demand, the currently spilled register's // allocation index. diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc index 1d12d23d24..f2aec99770 100644 --- a/deps/v8/src/ia32/lithium-ia32.cc +++ b/deps/v8/src/ia32/lithium-ia32.cc @@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) #undef DEFINE_COMPILE LOsrEntry::LOsrEntry() { - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { register_spills_[i] = NULL; } - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { double_register_spills_[i] = NULL; } } @@ -114,7 +114,11 @@ void LInstruction::PrintDataTo(StringStream* stream) { stream->Add("= "); for (int i = 0; i < InputCount(); i++) { if (i > 0) stream->Add(" "); - InputAt(i)->PrintTo(stream); + if (InputAt(i) == NULL) { + stream->Add("NULL"); + } else { + InputAt(i)->PrintTo(stream); + } } } @@ -179,6 +183,7 @@ const char* LArithmeticT::Mnemonic() const { case Token::BIT_AND: return "bit-and-t"; case Token::BIT_OR: return "bit-or-t"; case Token::BIT_XOR: return "bit-xor-t"; + case Token::ROR: return "ror-t"; case Token::SHL: return "sal-t"; case Token::SAR: return "sar-t"; case Token::SHR: return "shr-t"; @@ -287,6 +292,13 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { } +void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { + stream->Add(" = "); + base_object()->PrintTo(stream); + stream->Add(" + %d", offset()); +} + + void LCallConstantFunction::PrintDataTo(StringStream* stream) { stream->Add("#%d / ", arity()); } @@ -298,12 +310,23 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) { } +void LMathExp::PrintDataTo(StringStream* stream) { + value()->PrintTo(stream); +} + + void LMathPowHalf::PrintDataTo(StringStream* stream) { stream->Add("/pow_half "); value()->PrintTo(stream); } +void LMathRound::PrintDataTo(StringStream* stream) { + stream->Add("/round "); + value()->PrintTo(stream); +} + + void LLoadContextSlot::PrintDataTo(StringStream* stream) { context()->PrintTo(stream); stream->Add("[%d]", slot_index()); @@ -357,6 +380,19 @@ void LCallNew::PrintDataTo(StringStream* stream) { } +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + context()->PrintTo(stream); + stream->Add(" "); + 
constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ASSERT(hydrogen()->property_cell()->value()->IsSmi()); + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(hydrogen()->property_cell()->value())->value()); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); @@ -407,20 +443,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { } -void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); +void LLoadKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); - value()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d]", additional_index()); + } else { + stream->Add("]"); + } } -void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { +void LStoreKeyed::PrintDataTo(StringStream* stream) { elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d] <-", additional_index()); + } else { + stream->Add("] <- "); + } value()->PrintTo(stream); } @@ -447,9 +490,11 @@ LPlatformChunk* LChunkBuilder::Build() { status_ = BUILDING; // Reserve the first spill slot for the state of dynamic alignment. - int alignment_state_index = chunk_->GetNextSpillIndex(false); - ASSERT_EQ(alignment_state_index, 0); - USE(alignment_state_index); + if (info()->IsOptimizing()) { + int alignment_state_index = chunk_->GetNextSpillIndex(false); + ASSERT_EQ(alignment_state_index, 0); + USE(alignment_state_index); + } const ZoneList<HBasicBlock*>* blocks = graph()->blocks(); for (int i = 0; i < blocks->length(); i++) { @@ -481,6 +526,12 @@ LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) { } +LUnallocated* LChunkBuilder::ToUnallocated(X87TopOfStackRegister reg) { + return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, + X87TopOfStackRegister::ToAllocationIndex(reg)); +} + + LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) { return Use(value, ToUnallocated(fixed_register)); } @@ -613,6 +664,13 @@ LInstruction* LChunkBuilder::DefineFixedDouble( } +template<int I, int T> +LInstruction* LChunkBuilder::DefineX87TOS( + LTemplateInstruction<1, I, T>* instr) { + return Define(instr, ToUnallocated(x87tos)); +} + + LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { HEnvironment* hydrogen_env = current_block_->last_environment(); int argument_index_accumulator = 0; @@ -625,6 +683,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { + info()->MarkAsNonDeferredCalling(); + #ifdef DEBUG instr->VerifyCall(); #endif @@ -665,10 +725,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { LUnallocated* LChunkBuilder::TempRegister() { LUnallocated* operand = new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - operand->set_virtual_register(allocator_->GetVirtualRegister()); + int vreg = allocator_->GetVirtualRegister(); if (!allocator_->AllocationOk()) { - Abort("Not enough virtual registers (temps)."); + Abort("Out of virtual registers while trying to allocate temp register."); + return NULL; } + operand->set_virtual_register(vreg); return operand; } @@ -692,6 +754,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { } 
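
The new Token::ROR case ("ror-t") above corresponds to a 32-bit rotate-right, which ia32 has as a single instruction; in portable C++ it is two shifts and an OR. A minimal sketch (illustrative only):

    // Illustrative sketch only; not V8 code. Rotate-right of a 32-bit value:
    // only the low five bits of the amount matter, as with the hardware ror.
    #include <cstdint>
    #include <cstdio>

    static uint32_t RotateRight(uint32_t value, uint32_t amount) {
      amount &= 31;
      if (amount == 0) return value;  // avoid an undefined shift by 32
      return (value >> amount) | (value << (32 - amount));
    }

    int main() {
      std::printf("%08x\n", RotateRight(0x80000001u, 1));  // prints c0000000
      return 0;
    }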
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { + return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); +} + + LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) { return AssignEnvironment(new(zone()) LDeoptimize); } @@ -926,7 +993,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { if (value->EmitAtUses()) { ASSERT(value->IsConstant()); ASSERT(!value->representation().IsDouble()); - HBasicBlock* successor = HConstant::cast(value)->ToBoolean() + HBasicBlock* successor = HConstant::cast(value)->BooleanValue() ? instr->FirstSuccessor() : instr->SecondSuccessor(); return new(zone()) LGoto(successor->block_id()); @@ -987,6 +1054,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( } +LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) { + LOperand* object = UseRegisterAtStart(instr->object()); + return DefineAsRegister(new(zone()) LInstanceSize(object)); +} + + LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { LOperand* receiver = UseRegister(instr->receiver()); LOperand* function = UseRegisterAtStart(instr->function()); @@ -1017,6 +1090,15 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { } +LInstruction* LChunkBuilder::DoInnerAllocatedObject( + HInnerAllocatedObject* inner_object) { + LOperand* base_object = UseRegisterAtStart(inner_object->base_object()); + LInnerAllocatedObject* result = + new(zone()) LInnerAllocatedObject(base_object); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { return instr->HasNoUses() ? NULL @@ -1025,7 +1107,13 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { LInstruction* LChunkBuilder::DoContext(HContext* instr) { - return instr->HasNoUses() ? 
NULL : DefineAsRegister(new(zone()) LContext); + if (instr->HasNoUses()) return NULL; + + if (info()->IsStub()) { + return DefineFixed(new(zone()) LContext, esi); + } + + return DefineAsRegister(new(zone()) LContext); } @@ -1079,6 +1167,14 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context, input); return DefineSameAsFirst(result); + } else if (op == kMathExp) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->value()->representation().IsDouble()); + LOperand* value = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LMathExp* result = new(zone()) LMathExp(value, temp1, temp2); + return DefineAsRegister(result); } else if (op == kMathSin || op == kMathCos || op == kMathTan) { LOperand* context = UseFixed(instr->context(), esi); LOperand* input = UseFixedDouble(instr->value(), xmm1); @@ -1092,6 +1188,10 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LOperand* temp = TempRegister(); LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp); return DefineSameAsFirst(result); + } else if (op == kMathRound) { + LOperand* temp = FixedTemp(xmm4); + LMathRound* result = new(zone()) LMathRound(context, input, temp); + return AssignEnvironment(DefineAsRegister(result)); } LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context, input); @@ -1100,8 +1200,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); case kMathFloor: return AssignEnvironment(DefineAsRegister(result)); - case kMathRound: - return AssignEnvironment(DefineAsRegister(result)); case kMathSqrt: return DefineSameAsFirst(result); default: @@ -1153,6 +1251,16 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { } +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + ASSERT(FLAG_optimize_constructed_arrays); + LOperand* context = UseFixed(instr->context(), esi); + LOperand* constructor = UseFixed(instr->constructor(), edi); + argument_count_ -= instr->argument_count(); + LCallNewArray* result = new(zone()) LCallNewArray(context, constructor); + return MarkAsCall(DefineFixed(result, eax), instr); +} + + LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* context = UseFixed(instr->context(), esi); LOperand* function = UseFixed(instr->function(), edi); @@ -1169,6 +1277,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { } +LInstruction* LChunkBuilder::DoRor(HRor* instr) { + return DoShift(Token::ROR, instr); +} + + LInstruction* LChunkBuilder::DoShr(HShr* instr) { return DoShift(Token::SHR, instr); } @@ -1221,6 +1334,13 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { if (instr->representation().IsDouble()) { return DoArithmeticD(Token::DIV, instr); } else if (instr->representation().IsInteger32()) { + if (instr->HasPowerOf2Divisor()) { + ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); + LOperand* value = UseRegisterAtStart(instr->left()); + LDivI* div = + new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL); + return AssignEnvironment(DefineSameAsFirst(div)); + } // The temporary operand is necessary to ensure that right is not allocated // into edx. 
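
DoDiv above adds a HasPowerOf2Divisor() special case so the generic idiv sequence, which pins eax and edx, can be avoided. The code emitted for that case is not part of this hunk; as a rough illustration of why a power-of-two divisor is cheaper, truncating signed division by 2^k needs only a conditional bias and an arithmetic shift (the sketch assumes arithmetic right shift of negative values, which mainstream compilers provide):

    // Illustrative sketch only; not V8 code and not necessarily the exact
    // sequence the backend emits. Add (divisor - 1) to negative dividends so
    // the arithmetic shift rounds toward zero instead of toward -infinity.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static int32_t DivideByPowerOfTwo(int32_t dividend, int shift) {
      int32_t divisor_minus_one = (1 << shift) - 1;
      // dividend >> 31 is 0 for non-negative values and all ones for negative
      // ones, so the bias is applied only when it is needed.
      int32_t biased = dividend + ((dividend >> 31) & divisor_minus_one);
      return biased >> shift;
    }

    int main() {
      assert(DivideByPowerOfTwo(7, 2) == 7 / 4);    // 1
      assert(DivideByPowerOfTwo(-7, 2) == -7 / 4);  // -1, not -2
      std::printf("ok\n");
      return 0;
    }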
LOperand* temp = FixedTemp(edx); @@ -1255,12 +1375,31 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) { return constant_val->CopyToRepresentation(Representation::Integer32(), divisor->block()->zone()); } + // A value with an integer representation does not need to be transformed. + if (divisor->representation().IsInteger32()) { + return divisor; + // A change from an integer32 can be replaced by the integer32 value. + } else if (divisor->IsChange() && + HChange::cast(divisor)->from().IsInteger32()) { + return HChange::cast(divisor)->value(); + } return NULL; } LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { HValue* right = instr->right(); + if (!right->IsConstant()) { + ASSERT(right->representation().IsInteger32()); + // The temporary operand is necessary to ensure that right is not allocated + // into edx. + LOperand* temp = FixedTemp(edx); + LOperand* dividend = UseFixed(instr->left(), eax); + LOperand* divisor = UseRegister(instr->right()); + LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp); + return AssignEnvironment(DefineFixed(flooring_div, eax)); + } + ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value()); LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right)); int32_t divisor_si = HConstant::cast(right)->Integer32Value(); @@ -1452,7 +1591,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { LInstruction* LChunkBuilder::DoCompareIDAndBranch( HCompareIDAndBranch* instr) { - Representation r = instr->GetInputRepresentation(); + Representation r = instr->representation(); if (r.IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); @@ -1617,6 +1756,28 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { } +LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegister(instr->index()); + ASSERT(ecx.is_byte_register()); + LOperand* value = UseFixed(instr->value(), ecx); + LSeqStringSetChar* result = + new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); + return DefineSameAsFirst(result); +} + + +LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) { + return NULL; +} + + +LInstruction* LChunkBuilder::DoInductionVariableAnnotation( + HInductionVariableAnnotation* instr) { + return NULL; +} + + LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { return AssignEnvironment(new(zone()) LBoundsCheck( UseRegisterOrConstantAtStart(instr->index()), @@ -1624,6 +1785,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { } +LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( + HBoundsCheckBaseIndexInformation* instr) { + UNREACHABLE(); + return NULL; +} + + LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { // The control instruction marking the end of a block that completed // abruptly (e.g., threw an exception). There is nothing specific to do. @@ -1654,8 +1822,12 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); + // Only mark conversions that might need to allocate as calling rather than + // all changes. This makes simple, non-allocating conversion not have to force + // building a stack frame. 
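
The HMathFloorOfDiv lowering above now also accepts a non-constant divisor by reusing LDivI. What distinguishes it from plain division is flooring semantics rather than C-style truncation; a plain C++ sketch of the difference (illustrative only; it ignores the divide-by-zero and INT_MIN / -1 corner cases):

    // Illustrative sketch only; not V8 code. Math.floor(a / b) differs from
    // C++ integer division (which truncates toward zero) whenever the
    // operands have opposite signs and the division is inexact.
    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static int32_t FlooringDivide(int32_t dividend, int32_t divisor) {
      int32_t quotient = dividend / divisor;   // truncates toward zero
      int32_t remainder = dividend % divisor;
      if (remainder != 0 && ((dividend < 0) != (divisor < 0))) {
        quotient -= 1;                         // adjust toward -infinity
      }
      return quotient;
    }

    int main() {
      assert(FlooringDivide(7, 2) == 3);
      assert(FlooringDivide(-7, 2) == -4);  // truncation alone would give -3
      std::printf("ok\n");
      return 0;
    }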
if (from.IsTagged()) { if (to.IsDouble()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); // Temp register only necessary for minus zero check. LOperand* temp = instr->deoptimize_on_minus_zero() @@ -1680,8 +1852,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } } else if (from.IsDouble()) { if (to.IsTagged()) { - LOperand* value = UseRegister(instr->value()); - LOperand* temp = TempRegister(); + info()->MarkAsDeferredCalling(); + LOperand* value = CpuFeatures::IsSupported(SSE2) + ? UseRegisterAtStart(instr->value()) + : UseAtStart(instr->value()); + LOperand* temp = FLAG_inline_new ? TempRegister() : NULL; // Make sure that temp and result_temp are different registers. LUnallocated* result_temp = TempRegister(); @@ -1698,14 +1873,14 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { DefineAsRegister(new(zone()) LDoubleToI(value, temp))); } } else if (from.IsInteger32()) { + info()->MarkAsDeferredCalling(); if (to.IsTagged()) { HValue* val = instr->value(); LOperand* value = UseRegister(val); if (val->HasRange() && val->range()->IsInSmiRange()) { return DefineSameAsFirst(new(zone()) LSmiTag(value)); } else if (val->CheckFlag(HInstruction::kUint32)) { - LOperand* temp = FixedTemp(xmm1); - LNumberTagU* result = new(zone()) LNumberTagU(value, temp); + LNumberTagU* result = new(zone()) LNumberTagU(value); return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result))); } else { LNumberTagI* result = new(zone()) LNumberTagI(value); @@ -1743,9 +1918,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { - LOperand* temp = TempRegister(); + LUnallocated* temp = TempRegister(); LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp); - return AssignEnvironment(result); + return AssignEnvironment(Define(result, temp)); } @@ -1755,14 +1930,19 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { } +LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) { + LOperand* value = UseAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckSmi(value)); +} + + LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { // If the target is in new space, we'll emit a global cell compare and so // want the value in a register. If the target gets promoted before we // emit code, we will still get the register but will do an immediate // compare instead of the cell compare. This is safe. - LOperand* value = Isolate::Current()->heap()->InNewSpace(*instr->target()) - ? UseRegisterAtStart(instr->value()) - : UseAtStart(instr->value()); + LOperand* value = instr->target_in_new_space() + ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value()); return AssignEnvironment(new(zone()) LCheckFunction(value)); } @@ -1796,7 +1976,12 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - return new(zone()) LReturn(UseFixed(instr->value(), eax)); + LOperand* context = info()->IsStub() + ? 
UseFixed(instr->context(), esi) + : NULL; + LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); + return new(zone()) LReturn(UseFixed(instr->value(), eax), context, + parameter_count); } @@ -1932,59 +2117,38 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( } -LInstruction* LChunkBuilder::DoLoadKeyedFastElement( - HLoadKeyedFastElement* instr) { - ASSERT(instr->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* obj = UseRegisterAtStart(instr->object()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key); - if (instr->RequiresHoleCheck()) AssignEnvironment(result); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( - HLoadKeyedFastDoubleElement* instr) { - ASSERT(instr->representation().IsDouble()); +LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { ASSERT(instr->key()->representation().IsInteger32() || instr->key()->representation().IsTagged()); - LOperand* elements = UseRegisterAtStart(instr->elements()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastDoubleElement* result = - new(zone()) LLoadKeyedFastDoubleElement(elements, key); - return AssignEnvironment(DefineAsRegister(result)); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( - HLoadKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); - ASSERT( - (instr->representation().IsInteger32() && - (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (instr->representation().IsDouble() && - ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* external_pointer = UseRegister(instr->external_pointer()); bool clobbers_key = ExternalArrayOpRequiresTemp( instr->key()->representation(), elements_kind); LOperand* key = clobbers_key ? UseTempRegister(instr->key()) - : UseRegisterOrConstant(instr->key()); + : UseRegisterOrConstantAtStart(instr->key()); + LLoadKeyed* result = NULL; - LLoadKeyedSpecializedArrayElement* result = - new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key); - LInstruction* load_instr = DefineAsRegister(result); + if (!instr->is_external()) { + LOperand* obj = UseRegisterAtStart(instr->elements()); + result = new(zone()) LLoadKeyed(obj, key); + } else { + ASSERT( + (instr->representation().IsInteger32() && + (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && + (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || + (instr->representation().IsDouble() && + ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + LOperand* external_pointer = UseRegister(instr->elements()); + result = new(zone()) LLoadKeyed(external_pointer, key); + } + + DefineAsRegister(result); + bool can_deoptimize = instr->RequiresHoleCheck() || + (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS); // An unsigned int array load might overflow and cause a deopt, make sure it // has an environment. - return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) - ? AssignEnvironment(load_instr) - : load_instr; + return can_deoptimize ? 
AssignEnvironment(result) : result; } @@ -1999,72 +2163,61 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { } -LInstruction* LChunkBuilder::DoStoreKeyedFastElement( - HStoreKeyedFastElement* instr) { - bool needs_write_barrier = instr->NeedsWriteBarrier(); - ASSERT(instr->value()->representation().IsTagged()); - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* obj = UseRegister(instr->object()); - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - LOperand* key = needs_write_barrier - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyedFastElement(obj, key, val); -} - - -LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( - HStoreKeyedFastDoubleElement* instr) { - ASSERT(instr->value()->representation().IsDouble()); - ASSERT(instr->elements()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); +LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + if (!instr->is_external()) { + ASSERT(instr->elements()->representation().IsTagged()); + ASSERT(instr->key()->representation().IsInteger32() || + instr->key()->representation().IsTagged()); - LOperand* elements = UseRegisterAtStart(instr->elements()); - LOperand* val = UseTempRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - - return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val); -} + if (instr->value()->representation().IsDouble()) { + LOperand* object = UseRegisterAtStart(instr->elements()); + LOperand* val = UseTempRegister(instr->value()); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + return new(zone()) LStoreKeyed(object, key, val); + } else { + ASSERT(instr->value()->representation().IsTagged()); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + + LOperand* obj = UseRegister(instr->elements()); + LOperand* val = needs_write_barrier + ? UseTempRegister(instr->value()) + : UseRegisterAtStart(instr->value()); + LOperand* key = needs_write_barrier + ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + return new(zone()) LStoreKeyed(obj, key, val); + } + } -LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( - HStoreKeyedSpecializedArrayElement* instr) { ElementsKind elements_kind = instr->elements_kind(); - ASSERT( + ASSERT( (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->external_pointer()->representation().IsExternal()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ASSERT(instr->elements()->representation().IsExternal()); - LOperand* external_pointer = UseRegister(instr->external_pointer()); - LOperand* val = NULL; - if (elements_kind == EXTERNAL_BYTE_ELEMENTS || + LOperand* external_pointer = UseRegister(instr->elements()); + // Determine if we need a byte register in this case for the value. 
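For context on the external-array branch that follows: the byte-sized element kinds need the stored value in a byte-addressable register on ia32, which is why the value is fixed to eax below, and the store itself only keeps eight bits. A sketch of the resulting value mapping, with hypothetical helper names; clamped pixel values are produced by a separate clamp instruction (see DoClampToUint8 earlier), not by the store itself:

    #include <cstdint>

    // EXTERNAL_BYTE / EXTERNAL_UNSIGNED_BYTE elements keep the low eight bits
    // of the int32 value; EXTERNAL_PIXEL elements end up clamped to [0, 255].
    uint8_t StoreAsByte(int32_t value) {
      return static_cast<uint8_t>(value);      // wraps: 300 -> 44, -1 -> 255
    }

    uint8_t StoreAsPixel(int32_t value) {
      if (value < 0) return 0;                 // clamps: -1 -> 0
      if (value > 255) return 255;             // clamps: 300 -> 255
      return static_cast<uint8_t>(value);
    }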
+ bool val_is_fixed_register = + elements_kind == EXTERNAL_BYTE_ELEMENTS || elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS || - elements_kind == EXTERNAL_PIXEL_ELEMENTS) { - // We need a byte register in this case for the value. - val = UseFixed(instr->value(), eax); - } else { - val = UseRegister(instr->value()); - } + elements_kind == EXTERNAL_PIXEL_ELEMENTS; + + LOperand* val = val_is_fixed_register + ? UseFixed(instr->value(), eax) + : UseRegister(instr->value()); bool clobbers_key = ExternalArrayOpRequiresTemp( instr->key()->representation(), elements_kind); LOperand* key = clobbers_key ? UseTempRegister(instr->key()) - : UseRegisterOrConstant(instr->key()); - return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer, - key, - val); + : UseRegisterOrConstantAtStart(instr->key()); + return new(zone()) LStoreKeyed(external_pointer, + key, + val); } @@ -2086,28 +2239,44 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - ElementsKind from_kind = instr->original_map()->elements_kind(); - ElementsKind to_kind = instr->transitioned_map()->elements_kind(); - if (IsSimpleMapChangeTransition(from_kind, to_kind)) { + LOperand* object = UseRegister(instr->object()); + if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LOperand* temp_reg = TempRegister(); LTransitionElementsKind* result = - new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg); - return DefineSameAsFirst(result); + new(zone()) LTransitionElementsKind(object, NULL, + new_map_reg, temp_reg); + return result; + } else if (FLAG_compiled_transitions) { + LOperand* context = UseRegister(instr->context()); + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, context, NULL, NULL); + return AssignPointerMap(result); } else { LOperand* object = UseFixed(instr->object(), eax); LOperand* fixed_object_reg = FixedTemp(edx); LOperand* new_map_reg = FixedTemp(ebx); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, + NULL, new_map_reg, fixed_object_reg); - return MarkAsCall(DefineFixed(result, eax), instr); + return MarkAsCall(result, instr); } } +LInstruction* LChunkBuilder::DoTrapAllocationMemento( + HTrapAllocationMemento* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* temp = TempRegister(); + LTrapAllocationMemento* result = + new(zone()) LTrapAllocationMemento(object, temp); + return AssignEnvironment(result); +} + + LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); bool needs_write_barrier_for_map = !instr->transition().is_null() && @@ -2186,13 +2355,24 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { - LOperand* context = UseFixed(instr->context(), esi); + info()->MarkAsDeferredCalling(); + LOperand* context = UseAny(instr->context()); LOperand* temp = TempRegister(); LAllocateObject* result = new(zone()) LAllocateObject(context, temp); return AssignPointerMap(DefineAsRegister(result)); } +LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { + info()->MarkAsDeferredCalling(); + LOperand* context = UseAny(instr->context()); + LOperand* size = UseTempRegister(instr->size()); + LOperand* temp = TempRegister(); + LAllocate* result = 
new(zone()) LAllocate(context, size, temp); + return AssignPointerMap(DefineAsRegister(result)); +} + + LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { LOperand* context = UseFixed(instr->context(), esi); return MarkAsCall( @@ -2246,8 +2426,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(new(zone()) LParameter, spill_index); + LParameter* result = new(zone()) LParameter; + if (instr->kind() == HParameter::STACK_PARAMETER) { + int spill_index = chunk()->GetParameterStackSlot(instr->index()); + return DefineAsSpilled(result, spill_index); + } else { + ASSERT(info()->IsStub()); + CodeStubInterfaceDescriptor* descriptor = + info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + Register reg = descriptor->register_params_[instr->index()]; + return DefineFixed(result, reg); + } } @@ -2319,7 +2508,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { env->set_ast_id(instr->ast_id()); env->Drop(instr->pop_count()); - for (int i = 0; i < instr->values()->length(); ++i) { + for (int i = instr->values()->length() - 1; i >= 0; --i) { HValue* value = instr->values()->at(i); if (instr->HasAssignedIndexAt(i)) { env->Bind(instr->GetAssignedIndexAt(i), value); @@ -2348,6 +2537,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { + info()->MarkAsDeferredCalling(); if (instr->is_function_entry()) { LOperand* context = UseFixed(instr->context(), esi); return MarkAsCall(new(zone()) LStackCheck(context), instr); @@ -2367,8 +2557,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { instr->arguments_count(), instr->function(), undefined, - instr->call_kind(), - instr->inlining_kind()); + instr->inlining_kind(), + instr->undefined_receiver()); if (instr->arguments_var() != NULL) { inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h index 4643f95f48..0e36474073 100644 --- a/deps/v8/src/ia32/lithium-ia32.h +++ b/deps/v8/src/ia32/lithium-ia32.h @@ -43,6 +43,7 @@ class LCodeGen; #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ V(AccessArgumentsAt) \ V(AddI) \ + V(Allocate) \ V(AllocateObject) \ V(ApplyArguments) \ V(ArgumentsElements) \ @@ -61,6 +62,7 @@ class LCodeGen; V(CallKnownGlobal) \ V(CallNamed) \ V(CallNew) \ + V(CallNewArray) \ V(CallRuntime) \ V(CallStub) \ V(CheckFunction) \ @@ -87,6 +89,7 @@ class LCodeGen; V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ + V(DummyUse) \ V(ElementsKind) \ V(FastLiteral) \ V(FixedArrayBaseLength) \ @@ -100,6 +103,7 @@ class LCodeGen; V(In) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ + V(InstanceSize) \ V(InstructionGap) \ V(Integer32ToDouble) \ V(Uint32ToDouble) \ @@ -119,17 +123,17 @@ class LCodeGen; V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ - V(LoadKeyedFastElement) \ - V(LoadKeyedFastDoubleElement) \ + V(LoadKeyed) \ V(LoadKeyedGeneric) \ - V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ V(MapEnumLength) \ + V(MathExp) \ V(MathFloorOfDiv) \ V(MathMinMax) \ V(MathPowHalf) \ + V(MathRound) \ V(ModI) \ V(MulI) \ V(NumberTagD) \ @@ -145,6 +149,7 @@ class LCodeGen; V(PushArgument) \ V(RegExpLiteral) \ V(Return) \ + V(SeqStringSetChar) \ V(ShiftI) \ V(SmiTag) \ V(SmiUntag) \ @@ -152,10 +157,8 @@ 
class LCodeGen; V(StoreContextSlot) \ V(StoreGlobalCell) \ V(StoreGlobalGeneric) \ - V(StoreKeyedFastDoubleElement) \ - V(StoreKeyedFastElement) \ + V(StoreKeyed) \ V(StoreKeyedGeneric) \ - V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ V(StringAdd) \ @@ -169,6 +172,7 @@ class LCodeGen; V(Throw) \ V(ToFastProperties) \ V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ V(Typeof) \ V(TypeofIsAndBranch) \ V(UnaryMathOperation) \ @@ -180,7 +184,8 @@ class LCodeGen; V(LoadFieldByIndex) \ V(DateField) \ V(WrapReceiver) \ - V(Drop) + V(Drop) \ + V(InnerAllocatedObject) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -252,7 +257,11 @@ class LInstruction: public ZoneObject { void MarkAsCall() { is_call_ = true; } // Interface to the register allocator and iterators. - bool IsMarkedAsCall() const { return is_call_; } + bool ClobbersTemps() const { return is_call_; } + bool ClobbersRegisters() const { return is_call_; } + virtual bool ClobbersDoubleRegisters() const { + return is_call_ || !CpuFeatures::IsSupported(SSE2); + } virtual bool HasResult() const = 0; virtual LOperand* result() = 0; @@ -358,6 +367,7 @@ class LGap: public LTemplateInstruction<0, 0, 0> { class LInstructionGap: public LGap { public: explicit LInstructionGap(HBasicBlock* block) : LGap(block) { } + virtual bool ClobbersDoubleRegisters() const { return false; } DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap") }; @@ -384,6 +394,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> { }; +class LDummyUse: public LTemplateInstruction<1, 1, 0> { + public: + explicit LDummyUse(LOperand* value) { + inputs_[0] = value; + } + DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") +}; + + class LDeoptimize: public LTemplateInstruction<0, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") @@ -562,6 +581,8 @@ class LDivI: public LTemplateInstruction<1, 2, 1> { LOperand* left() { return inputs_[0]; } LOperand* right() { return inputs_[1]; } + bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); } + DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_HYDROGEN_ACCESSOR(Div) }; @@ -618,7 +639,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { Token::Value op() const { return hydrogen()->token(); } bool is_double() const { - return hydrogen()->GetInputRepresentation().IsDouble(); + return hydrogen()->representation().IsDouble(); } virtual void PrintDataTo(StringStream* stream); @@ -643,6 +664,27 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> { }; +class LMathExp: public LTemplateInstruction<1, 1, 2> { + public: + LMathExp(LOperand* value, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + ExternalReference::InitializeMathExpData(); + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") + + virtual void PrintDataTo(StringStream* stream); +}; + + class LMathPowHalf: public LTemplateInstruction<1, 2, 1> { public: LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) { @@ -661,6 +703,25 @@ class LMathPowHalf: public LTemplateInstruction<1, 2, 1> { }; +class LMathRound: public LTemplateInstruction<1, 2, 1> { + public: + LMathRound(LOperand* context, LOperand* value, LOperand* temp) { + inputs_[1] = context; + inputs_[0] = value; + temps_[0] = temp; + } + + LOperand* context() { return inputs_[1]; } + LOperand* value() { return 
inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round") + DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation) + + virtual void PrintDataTo(StringStream* stream); +}; + + class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { @@ -931,6 +992,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 2, 1> { }; +class LInstanceSize: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInstanceSize(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") + DECLARE_HYDROGEN_ACCESSOR(InstanceSize) +}; + + class LBoundsCheck: public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -1161,6 +1235,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> { }; +class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> { + public: + LSeqStringSetChar(String::Encoding encoding, + LOperand* string, + LOperand* index, + LOperand* value) : encoding_(encoding) { + inputs_[0] = string; + inputs_[1] = index; + inputs_[2] = value; + } + + String::Encoding encoding() { return encoding_; } + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) + + private: + String::Encoding encoding_; +}; + + class LThrow: public LTemplateInstruction<0, 2, 0> { public: LThrow(LOperand* context, LOperand* value) { @@ -1294,11 +1392,23 @@ class LArithmeticT: public LTemplateInstruction<1, 3, 0> { }; -class LReturn: public LTemplateInstruction<0, 1, 0> { +class LReturn: public LTemplateInstruction<0, 3, 0> { public: - explicit LReturn(LOperand* value) { + explicit LReturn(LOperand* value, LOperand* context, + LOperand* parameter_count) { inputs_[0] = value; + inputs_[1] = context; + inputs_[2] = parameter_count; + } + + bool has_constant_parameter_count() { + return parameter_count()->IsConstantOperand(); + } + LConstantOperand* constant_parameter_count() { + ASSERT(has_constant_parameter_count()); + return LConstantOperand::cast(parameter_count()); } + LOperand* parameter_count() { return inputs_[2]; } DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -1389,38 +1499,34 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> { }; -class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { +class LLoadKeyed: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedFastElement(LOperand* elements, LOperand* key) { + LLoadKeyed(LOperand* elements, LOperand* key) { inputs_[0] = elements; inputs_[1] = key; } - LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) { - inputs_[0] = elements; - inputs_[1] = key; + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } + bool is_external() const { + return hydrogen()->is_external(); } - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } 
+ virtual bool ClobbersDoubleRegisters() const { + return !CpuFeatures::IsSupported(SSE2) && + !IsDoubleOrFloatElementsKind(hydrogen()->elements_kind()); + } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement, - "load-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement) + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) + virtual void PrintDataTo(StringStream* stream); uint32_t additional_index() const { return hydrogen()->index_offset(); } + bool key_is_smi() { + return hydrogen()->key()->representation().IsTagged(); + } }; @@ -1437,27 +1543,6 @@ inline static bool ExternalArrayOpRequiresTemp( } -class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { - inputs_[0] = external_pointer; - inputs_[1] = key; - } - - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement, - "load-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement) - - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> { public: LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) { @@ -1597,6 +1682,22 @@ class LDrop: public LTemplateInstruction<0, 0, 0> { }; +class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInnerAllocatedObject(LOperand* base_object) { + inputs_[0] = base_object; + } + + LOperand* base_object() { return inputs_[0]; } + int offset() { return hydrogen()->offset(); } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object") + DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject) +}; + + class LThisFunction: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") @@ -1607,6 +1708,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> { class LContext: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Context, "context") + DECLARE_HYDROGEN_ACCESSOR(Context) }; @@ -1794,6 +1896,25 @@ class LCallNew: public LTemplateInstruction<1, 2, 0> { }; +class LCallNewArray: public LTemplateInstruction<1, 2, 0> { + public: + LCallNewArray(LOperand* context, LOperand* constructor) { + inputs_[0] = context; + inputs_[1] = constructor; + } + + LOperand* context() { return inputs_[0]; } + LOperand* constructor() { return inputs_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream); + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + class LCallRuntime: public LTemplateInstruction<1, 1, 0> { public: explicit LCallRuntime(LOperand* context) { @@ -1848,11 +1969,10 @@ class LNumberTagI: public LTemplateInstruction<1, 1, 0> { }; -class LNumberTagU: public LTemplateInstruction<1, 1, 1> { +class LNumberTagU: public LTemplateInstruction<1, 1, 0> { public: - explicit LNumberTagU(LOperand* value, LOperand* temp) { + explicit LNumberTagU(LOperand* value) { inputs_[0] = value; - temps_[0] = temp; } LOperand* value() { return inputs_[0]; } @@ -1872,6 +1992,7 @@ class LNumberTagD: public 
LTemplateInstruction<1, 1, 1> { LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") + DECLARE_HYDROGEN_ACCESSOR(Change) }; @@ -2006,78 +2127,31 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> { }; -class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) { + LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) { inputs_[0] = obj; inputs_[1] = key; inputs_[2] = val; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) - - virtual void PrintDataTo(StringStream* stream); - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedFastDoubleElement(LOperand* elements, - LOperand* key, - LOperand* val) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = val; - } - + bool is_external() const { return hydrogen()->is_external(); } LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement, - "store-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) virtual void PrintDataTo(StringStream* stream); - uint32_t additional_index() const { return hydrogen()->index_offset(); } - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } }; -class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key, - LOperand* val) { - inputs_[0] = external_pointer; - inputs_[1] = key; - inputs_[2] = val; - } - - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement, - "store-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement) - - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> { public: LStoreKeyedGeneric(LOperand* context, @@ -2104,16 +2178,19 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> { }; -class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { +class LTransitionElementsKind: public LTemplateInstruction<0, 2, 2> { public: LTransitionElementsKind(LOperand* object, + LOperand* context, LOperand* new_map_temp, LOperand* temp) { inputs_[0] = object; + inputs_[1] = context; temps_[0] = new_map_temp; temps_[1] = temp; } + LOperand* context() { return inputs_[1]; } LOperand* object() { return inputs_[0]; } LOperand* new_map_temp() { return temps_[0]; } LOperand* temp() { return temps_[1]; } @@ -2126,6 +2203,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { Handle<Map> original_map() { 
return hydrogen()->original_map(); } Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + ElementsKind from_kind() { return hydrogen()->from_kind(); } + ElementsKind to_kind() { return hydrogen()->to_kind(); } +}; + + +class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> { + public: + LTrapAllocationMemento(LOperand* object, + LOperand* temp) { + inputs_[0] = object; + temps_[0] = temp; + } + + LOperand* object() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, + "trap-allocation-memento") }; @@ -2232,7 +2327,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> { }; -class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> { +class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> { public: explicit LCheckPrototypeMaps(LOperand* temp) { temps_[0] = temp; @@ -2243,8 +2338,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> { DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps") DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps) - Handle<JSObject> prototype() const { return hydrogen()->prototype(); } - Handle<JSObject> holder() const { return hydrogen()->holder(); } + ZoneList<Handle<JSObject> >* prototypes() const { + return hydrogen()->prototypes(); + } + ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); } }; @@ -2324,6 +2421,23 @@ class LAllocateObject: public LTemplateInstruction<1, 1, 1> { }; +class LAllocate: public LTemplateInstruction<1, 2, 1> { + public: + LAllocate(LOperand* context, LOperand* size, LOperand* temp) { + inputs_[0] = context; + inputs_[1] = size; + temps_[0] = temp; + } + + LOperand* context() { return inputs_[0]; } + LOperand* size() { return inputs_[1]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") + DECLARE_HYDROGEN_ACCESSOR(Allocate) +}; + + class LFastLiteral: public LTemplateInstruction<1, 1, 0> { public: explicit LFastLiteral(LOperand* context) { @@ -2469,8 +2583,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { // slot, i.e., that must also be restored to the spill slot on OSR entry. // NULL if the register has no assigned spill slot. Indexed by allocation // index. - LOperand* register_spills_[Register::kNumAllocatableRegisters]; - LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters]; + LOperand* register_spills_[Register::kMaxNumAllocatableRegisters]; + LOperand* double_register_spills_[ + DoubleRegister::kMaxNumAllocatableRegisters]; }; @@ -2634,6 +2749,7 @@ class LChunkBuilder BASE_EMBEDDED { // Methods for getting operands for Use / Define / Temp. LUnallocated* ToUnallocated(Register reg); LUnallocated* ToUnallocated(XMMRegister reg); + LUnallocated* ToUnallocated(X87TopOfStackRegister reg); // Methods for setting up define-use relationships. MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand); @@ -2694,6 +2810,8 @@ class LChunkBuilder BASE_EMBEDDED { template<int I, int T> LInstruction* DefineFixedDouble(LTemplateInstruction<1, I, T>* instr, XMMRegister reg); + template<int I, int T> + LInstruction* DefineX87TOS(LTemplateInstruction<1, I, T>* instr); // Assigns an environment to an instruction. An instruction which can // deoptimize must have an environment. 
LInstruction* AssignEnvironment(LInstruction* instr); diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc index 0d0bf03775..debf64aa1f 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/macro-assembler-ia32.cc @@ -170,7 +170,7 @@ void MacroAssembler::LoadUint32(XMMRegister dst, Label done; cmp(src, Immediate(0)); movdbl(scratch, - Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE)); + Operand(reinterpret_cast<int32_t>(&kUint32Bias), RelocInfo::NONE32)); cvtsi2sd(dst, src); j(not_sign, &done, Label::kNear); addsd(dst, scratch); @@ -385,7 +385,7 @@ void MacroAssembler::DebugBreak() { Set(eax, Immediate(0)); mov(ebx, Immediate(ExternalReference(Runtime::kDebugBreak, isolate()))); CEntryStub ces(1); - call(ces.GetCode(), RelocInfo::DEBUG_BREAK); + call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); } #endif @@ -406,7 +406,7 @@ void MacroAssembler::Set(const Operand& dst, const Immediate& x) { bool MacroAssembler::IsUnsafeImmediate(const Immediate& x) { static const int kMaxImmediateBits = 17; - if (x.rmode_ != RelocInfo::NONE) return false; + if (!RelocInfo::IsNone(x.rmode_)) return false; return !is_intn(x.x_, kMaxImmediateBits); } @@ -507,7 +507,8 @@ void MacroAssembler::StoreNumberToDoubleElements( Register scratch1, XMMRegister scratch2, Label* fail, - bool specialize_for_processor) { + bool specialize_for_processor, + int elements_offset) { Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value; JumpIfSmi(maybe_number, &smi_value, Label::kNear); @@ -526,15 +527,17 @@ void MacroAssembler::StoreNumberToDoubleElements( ExternalReference canonical_nan_reference = ExternalReference::address_of_canonical_non_hole_nan(); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(this, SSE2); movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset)); bind(&have_double_value); - movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize), + movdbl(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - elements_offset), scratch2); } else { fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset)); bind(&have_double_value); - fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize)); + fstp_d(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - elements_offset)); } jmp(&done); @@ -546,7 +549,7 @@ void MacroAssembler::StoreNumberToDoubleElements( j(zero, ¬_nan); bind(&is_nan); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(this, SSE2); movdbl(scratch2, Operand::StaticVariable(canonical_nan_reference)); } else { fld_d(Operand::StaticVariable(canonical_nan_reference)); @@ -559,15 +562,17 @@ void MacroAssembler::StoreNumberToDoubleElements( mov(scratch1, maybe_number); SmiUntag(scratch1); if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) { - CpuFeatures::Scope fscope(SSE2); + CpuFeatureScope fscope(this, SSE2); cvtsi2sd(scratch2, scratch1); - movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize), + movdbl(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - elements_offset), scratch2); } else { push(scratch1); fild_s(Operand(esp, 0)); pop(scratch1); - fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize)); + fstp_d(FieldOperand(elements, key, times_4, + FixedDoubleArray::kHeaderSize - 
elements_offset)); } bind(&done); } @@ -613,6 +618,7 @@ void MacroAssembler::CheckMap(Register obj, void MacroAssembler::DispatchMap(Register obj, + Register unused, Handle<Map> map, Handle<Code> success, SmiCheckType smi_check_type) { @@ -638,6 +644,16 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object, } +Condition MacroAssembler::IsObjectNameType(Register heap_object, + Register map, + Register instance_type) { + mov(map, FieldOperand(heap_object, HeapObject::kMapOffset)); + movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); + cmpb(instance_type, static_cast<uint8_t>(LAST_NAME_TYPE)); + return below_equal; +} + + void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map, Register scratch, @@ -705,6 +721,19 @@ void MacroAssembler::AssertString(Register object) { } +void MacroAssembler::AssertName(Register object) { + if (emit_debug_code()) { + test(object, Immediate(kSmiTagMask)); + Check(not_equal, "Operand is a smi and not a name"); + push(object); + mov(object, FieldOperand(object, HeapObject::kMapOffset)); + CmpInstanceType(object, LAST_NAME_TYPE); + pop(object); + Check(below_equal, "Operand is not a name"); + } +} + + void MacroAssembler::AssertNotSmi(Register object) { if (emit_debug_code()) { test(object, Immediate(kSmiTagMask)); @@ -762,7 +791,7 @@ void MacroAssembler::EnterExitFramePrologue() { void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) { // Optionally save all XMM registers. if (save_doubles) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(this, SSE2); int space = XMMRegister::kNumRegisters * kDoubleSize + argc * kPointerSize; sub(esp, Immediate(space)); const int offset = -2 * kPointerSize; @@ -808,7 +837,7 @@ void MacroAssembler::EnterApiExitFrame(int argc) { void MacroAssembler::LeaveExitFrame(bool save_doubles) { // Optionally restore all XMM registers. if (save_doubles) { - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(this, SSE2); const int offset = -2 * kPointerSize; for (int i = 0; i < XMMRegister::kNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); @@ -1186,8 +1215,8 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, void MacroAssembler::LoadAllocationTopHelper(Register result, Register scratch, AllocationFlags flags) { - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); // Just return if allocation top is already known. if ((flags & RESULT_CONTAINS_TOP) != 0) { @@ -1195,7 +1224,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result, ASSERT(scratch.is(no_reg)); #ifdef DEBUG // Assert that result actually contains top on entry. - cmp(result, Operand::StaticVariable(new_space_allocation_top)); + cmp(result, Operand::StaticVariable(allocation_top)); Check(equal, "Unexpected allocation top"); #endif return; @@ -1203,39 +1232,41 @@ void MacroAssembler::LoadAllocationTopHelper(Register result, // Move address of new object to result. Use scratch register if available. 
if (scratch.is(no_reg)) { - mov(result, Operand::StaticVariable(new_space_allocation_top)); + mov(result, Operand::StaticVariable(allocation_top)); } else { - mov(scratch, Immediate(new_space_allocation_top)); + mov(scratch, Immediate(allocation_top)); mov(result, Operand(scratch, 0)); } } void MacroAssembler::UpdateAllocationTopHelper(Register result_end, - Register scratch) { + Register scratch, + AllocationFlags flags) { if (emit_debug_code()) { test(result_end, Immediate(kObjectAlignmentMask)); Check(zero, "Unaligned allocation in new space"); } - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); // Update new top. Use scratch if available. if (scratch.is(no_reg)) { - mov(Operand::StaticVariable(new_space_allocation_top), result_end); + mov(Operand::StaticVariable(allocation_top), result_end); } else { mov(Operand(scratch, 0), result_end); } } -void MacroAssembler::AllocateInNewSpace(int object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::Allocate(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { + ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1255,44 +1286,64 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); + // Align the next allocation. Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + if ((flags & DOUBLE_ALIGNMENT) != 0) { + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); + ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + Label aligned; + test(result, Immediate(kDoubleAlignmentMask)); + j(zero, &aligned, Label::kNear); + mov(Operand(result, 0), + Immediate(isolate()->factory()->one_pointer_filler_map())); + add(result, Immediate(kDoubleSize / 2)); + bind(&aligned); + } + Register top_reg = result_end.is_valid() ? result_end : result; - // Calculate new top and bail out if new space is exhausted. - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate()); + // Calculate new top and bail out if space is exhausted. + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); if (!top_reg.is(result)) { mov(top_reg, result); } add(top_reg, Immediate(object_size)); j(carry, gc_required); - cmp(top_reg, Operand::StaticVariable(new_space_allocation_limit)); + cmp(top_reg, Operand::StaticVariable(allocation_limit)); j(above, gc_required); // Update allocation top. - UpdateAllocationTopHelper(top_reg, scratch); + UpdateAllocationTopHelper(top_reg, scratch, flags); // Tag result if requested. 
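The Allocate and AllocateInNewSpace helpers in the surrounding hunks all emit the same bump-pointer sequence; the following is a rough C++ model of it, not V8 code, with the Space struct and the 4-byte filler step as assumptions mirroring the ia32 constants:

    #include <cstddef>
    #include <cstdint>

    struct Space {
      uintptr_t top;    // current allocation top
      uintptr_t limit;  // allocation limit for this space
    };

    // Load the top, optionally skip one word to reach 8-byte alignment (the
    // generated code stores the one-pointer filler map in the skipped word so
    // the heap stays iterable), add the object size, compare against the
    // limit, and either report gc_required or write back the new top.
    // Returns the untagged start of the new object, or 0 for "gc_required".
    uintptr_t BumpAllocate(Space* space, size_t object_size, bool double_align) {
      uintptr_t result = space->top;
      if (double_align && (result & 7) != 0) {
        result += 4;  // kPointerSize on ia32
      }
      uintptr_t new_top = result + object_size;
      if (new_top < result || new_top > space->limit) return 0;  // gc_required
      space->top = new_top;
      return result;  // caller adds kHeapObjectTag (1) when TAG_OBJECT is set
    }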
+ bool tag_result = (flags & TAG_OBJECT) != 0; if (top_reg.is(result)) { - if ((flags & TAG_OBJECT) != 0) { + if (tag_result) { sub(result, Immediate(object_size - kHeapObjectTag)); } else { sub(result, Immediate(object_size)); } - } else if ((flags & TAG_OBJECT) != 0) { - add(result, Immediate(kHeapObjectTag)); + } else if (tag_result) { + ASSERT(kHeapObjectTag == 1); + inc(result); } } -void MacroAssembler::AllocateInNewSpace(int header_size, - ScaleFactor element_size, - Register element_count, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::AllocateInNewSpace( + int header_size, + ScaleFactor element_size, + Register element_count, + RegisterValueType element_count_type, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { + ASSERT((flags & SIZE_IN_WORDS) == 0); + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1311,25 +1362,48 @@ void MacroAssembler::AllocateInNewSpace(int header_size, // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); + // Align the next allocation. Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + if ((flags & DOUBLE_ALIGNMENT) != 0) { + ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + Label aligned; + test(result, Immediate(kDoubleAlignmentMask)); + j(zero, &aligned, Label::kNear); + mov(Operand(result, 0), + Immediate(isolate()->factory()->one_pointer_filler_map())); + add(result, Immediate(kDoubleSize / 2)); + bind(&aligned); + } + // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = ExternalReference::new_space_allocation_limit_address(isolate()); // We assume that element_count*element_size + header_size does not // overflow. + if (element_count_type == REGISTER_VALUE_IS_SMI) { + STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1); + STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2); + STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4); + ASSERT(element_size >= times_2); + ASSERT(kSmiTagSize == 1); + element_size = static_cast<ScaleFactor>(element_size - 1); + } else { + ASSERT(element_count_type == REGISTER_VALUE_IS_INT32); + } lea(result_end, Operand(element_count, element_size, header_size)); add(result_end, result); j(carry, gc_required); cmp(result_end, Operand::StaticVariable(new_space_allocation_limit)); j(above, gc_required); - // Tag result if requested. if ((flags & TAG_OBJECT) != 0) { - lea(result, Operand(result, kHeapObjectTag)); + ASSERT(kHeapObjectTag == 1); + inc(result); } // Update allocation top. - UpdateAllocationTopHelper(result_end, scratch); + UpdateAllocationTopHelper(result_end, scratch, flags); } @@ -1339,6 +1413,8 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, Register scratch, Label* gc_required, AllocationFlags flags) { + ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -1357,6 +1433,19 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); + // Align the next allocation. 
Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + if ((flags & DOUBLE_ALIGNMENT) != 0) { + ASSERT(kPointerAlignment * 2 == kDoubleAlignment); + Label aligned; + test(result, Immediate(kDoubleAlignmentMask)); + j(zero, &aligned, Label::kNear); + mov(Operand(result, 0), + Immediate(isolate()->factory()->one_pointer_filler_map())); + add(result, Immediate(kDoubleSize / 2)); + bind(&aligned); + } + // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = ExternalReference::new_space_allocation_limit_address(isolate()); @@ -1370,11 +1459,12 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, // Tag result if requested. if ((flags & TAG_OBJECT) != 0) { - lea(result, Operand(result, kHeapObjectTag)); + ASSERT(kHeapObjectTag == 1); + inc(result); } // Update allocation top. - UpdateAllocationTopHelper(result_end, scratch); + UpdateAllocationTopHelper(result_end, scratch, flags); } @@ -1397,12 +1487,8 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(HeapNumber::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. mov(FieldOperand(result, HeapObject::kMapOffset), @@ -1428,6 +1514,7 @@ void MacroAssembler::AllocateTwoByteString(Register result, AllocateInNewSpace(SeqTwoByteString::kHeaderSize, times_1, scratch1, + REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, @@ -1453,16 +1540,17 @@ void MacroAssembler::AllocateAsciiString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); mov(scratch1, length); ASSERT(kCharSize == 1); add(scratch1, Immediate(kObjectAlignmentMask)); and_(scratch1, Immediate(~kObjectAlignmentMask)); // Allocate ASCII string in new space. - AllocateInNewSpace(SeqAsciiString::kHeaderSize, + AllocateInNewSpace(SeqOneByteString::kHeaderSize, times_1, scratch1, + REGISTER_VALUE_IS_INT32, result, scratch2, scratch3, @@ -1488,12 +1576,8 @@ void MacroAssembler::AllocateAsciiString(Register result, ASSERT(length > 0); // Allocate ASCII string in new space. - AllocateInNewSpace(SeqAsciiString::SizeFor(length), - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2, + gc_required, TAG_OBJECT); // Set the map, length and hash field. mov(FieldOperand(result, HeapObject::kMapOffset), @@ -1510,12 +1594,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. mov(FieldOperand(result, HeapObject::kMapOffset), @@ -1528,12 +1608,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. 
- AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. mov(FieldOperand(result, HeapObject::kMapOffset), @@ -1546,12 +1622,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. mov(FieldOperand(result, HeapObject::kMapOffset), @@ -1564,12 +1636,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. mov(FieldOperand(result, HeapObject::kMapOffset), @@ -1738,13 +1806,14 @@ void MacroAssembler::TryGetFunctionPrototype(Register function, void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs. - call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); + call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); } void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); - jmp(stub->GetCode(), RelocInfo::CODE_TARGET); + ASSERT(allow_stub_calls_ || + stub->CompilingCallsToThisStubIsGCSafe(isolate())); + jmp(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); } @@ -1756,7 +1825,7 @@ void MacroAssembler::StubReturn(int argc) { bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; - return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate()); } @@ -1796,7 +1865,8 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); Set(eax, Immediate(function->nargs)); mov(ebx, Immediate(ExternalReference(function, isolate()))); - CEntryStub ces(1, kSaveFPRegs); + CEntryStub ces(1, CpuFeatures::IsSupported(SSE2) ? kSaveFPRegs + : kDontSaveFPRegs); CallStub(&ces); } @@ -1904,20 +1974,36 @@ void MacroAssembler::PrepareCallApiFunction(int argc) { void MacroAssembler::CallApiFunctionAndReturn(Address function_address, int stack_space) { ExternalReference next_address = - ExternalReference::handle_scope_next_address(); + ExternalReference::handle_scope_next_address(isolate()); ExternalReference limit_address = - ExternalReference::handle_scope_limit_address(); + ExternalReference::handle_scope_limit_address(isolate()); ExternalReference level_address = - ExternalReference::handle_scope_level_address(); + ExternalReference::handle_scope_level_address(isolate()); // Allocate HandleScope in callee-save registers. 
mov(ebx, Operand::StaticVariable(next_address)); mov(edi, Operand::StaticVariable(limit_address)); add(Operand::StaticVariable(level_address), Immediate(1)); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, eax); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PopSafepointRegisters(); + } + // Call the api function. call(function_address, RelocInfo::RUNTIME_ENTRY); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, eax); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PopSafepointRegisters(); + } + if (!kReturnHandlesDirectly) { // PrepareCallApiFunction saved pointer to the output slot into // callee-save register esi. @@ -2016,7 +2102,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext) { // Set the entry point and jump to the C entry runtime stub. mov(ebx, Immediate(ext)); CEntryStub ces(1); - jmp(ces.GetCode(), RelocInfo::CODE_TARGET); + jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET); } @@ -2328,12 +2414,23 @@ void MacroAssembler::LoadInitialArrayMap( } +void MacroAssembler::LoadGlobalContext(Register global_context) { + // Load the global or builtins object from the current context. + mov(global_context, + Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + // Load the native context from the global or builtins object. + mov(global_context, + FieldOperand(global_context, GlobalObject::kNativeContextOffset)); +} + + void MacroAssembler::LoadGlobalFunction(int index, Register function) { // Load the global or builtins object from the current context. mov(function, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); // Load the native context from the global or builtins object. - mov(function, FieldOperand(function, GlobalObject::kNativeContextOffset)); + mov(function, + FieldOperand(function, GlobalObject::kNativeContextOffset)); // Load the function from the native context. mov(function, Operand(function, Context::SlotOffset(index))); } @@ -2615,7 +2712,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( } and_(scratch, kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask); - cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag); + cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag); j(not_equal, failure); } @@ -2906,15 +3003,15 @@ void MacroAssembler::EnsureNotWhite( bind(¬_external); // Sequential string, either ASCII or UC16. - ASSERT(kAsciiStringTag == 0x04); + ASSERT(kOneByteStringTag == 0x04); and_(length, Immediate(kStringEncodingMask)); xor_(length, Immediate(kStringEncodingMask)); add(length, Immediate(0x04)); // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted // by 2. If we multiply the string length as smi by this, it still // won't overflow a 32-bit value. 
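The char-size comment above compresses a neat trick; restated as plain C++ (helper name hypothetical, constants taken from the surrounding asserts and the ia32 smi layout, where a smi is the value shifted left by one):

    #include <cstdint>

    // The encoding bit (kOneByteStringTag == 0x04, asserted nearby) is masked,
    // inverted and biased into char_size << 2 (4 for one-byte, 8 for two-byte
    // strings). Multiplying by the smi-tagged length and shifting right by
    // three then gives char_size * length without ever untagging the length.
    uint32_t SeqStringBodyBytes(uint32_t instance_type, uint32_t length_as_smi) {
      uint32_t size = instance_type & 0x04;  // 4 for one-byte, 0 for two-byte
      size ^= 0x04;                          // 0 for one-byte, 4 for two-byte
      size += 0x04;                          // 4 or 8 == char size << 2
      return (size * length_as_smi) >> 3;    // == char_size * length in bytes
    }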
- ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize); - ASSERT(SeqAsciiString::kMaxSize <= + ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize); + ASSERT(SeqOneByteString::kMaxSize <= static_cast<int>(0xffffffffu >> (2 + kSmiTagSize))); imul(length, FieldOperand(value, String::kLengthOffset)); shr(length, 2 + kSmiTagSize + kSmiShiftSize); @@ -2981,6 +3078,29 @@ void MacroAssembler::CheckEnumCache(Label* call_runtime) { j(not_equal, &next); } + +void MacroAssembler::TestJSArrayForAllocationSiteInfo( + Register receiver_reg, + Register scratch_reg) { + Label no_info_available; + + ExternalReference new_space_start = + ExternalReference::new_space_start(isolate()); + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + + lea(scratch_reg, Operand(receiver_reg, + JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag)); + cmp(scratch_reg, Immediate(new_space_start)); + j(less, &no_info_available); + cmp(scratch_reg, Operand::StaticVariable(new_space_allocation_top)); + j(greater, &no_info_available); + cmp(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), + Immediate(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); + bind(&no_info_available); +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_IA32 diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h index e48d0e75c4..8dd4120711 100644 --- a/deps/v8/src/ia32/macro-assembler-ia32.h +++ b/deps/v8/src/ia32/macro-assembler-ia32.h @@ -35,18 +35,6 @@ namespace v8 { namespace internal { -// Flags used for the AllocateInNewSpace functions. -enum AllocationFlags { - // No special flags. - NO_ALLOCATION_FLAGS = 0, - // Return the pointer to the allocated already tagged as a heap object. - TAG_OBJECT = 1 << 0, - // The content of the result register already contains the allocation top in - // new space. - RESULT_CONTAINS_TOP = 1 << 1 -}; - - // Convenience for platform-independent signatures. We do not normally // distinguish memory operands from other operands on ia32. typedef Operand MemOperand; @@ -55,6 +43,12 @@ enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK }; +enum RegisterValueType { + REGISTER_VALUE_IS_SMI, + REGISTER_VALUE_IS_INT32 +}; + + bool AreAliased(Register r1, Register r2, Register r3, Register r4); @@ -255,6 +249,8 @@ class MacroAssembler: public Assembler { Register map_out, bool can_have_holes); + void LoadGlobalContext(Register global_context); + // Load the global function with the given index. void LoadGlobalFunction(int index, Register function); @@ -388,7 +384,8 @@ class MacroAssembler: public Assembler { Register scratch1, XMMRegister scratch2, Label* fail, - bool specialize_for_processor); + bool specialize_for_processor, + int offset = 0); // Compare an object's map with the specified map and its transitioned // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with @@ -413,6 +410,7 @@ class MacroAssembler: public Assembler { // specified target if equal. Skip the smi check if not required (object is // known to be a heap object) void DispatchMap(Register obj, + Register unused, Handle<Map> map, Handle<Code> success, SmiCheckType smi_check_type); @@ -426,6 +424,15 @@ class MacroAssembler: public Assembler { Register map, Register instance_type); + // Check if the object in register heap_object is a name. 
Afterwards the + // register map contains the object map and the register instance_type + // contains the instance_type. The registers map and instance_type can be the + // same in which case it contains the instance type afterwards. Either of the + // registers map and instance_type can be the same as heap_object. + Condition IsObjectNameType(Register heap_object, + Register map, + Register instance_type); + // Check if a heap object's type is in the JSObject range, not including // JSFunction. The object's map will be loaded in the map register. // Any or all of the three registers may be the same. @@ -516,6 +523,9 @@ class MacroAssembler: public Assembler { // Abort execution if argument is not a string, enabled via --debug-code. void AssertString(Register object); + // Abort execution if argument is not a name, enabled via --debug-code. + void AssertName(Register object); + // --------------------------------------------------------------------------- // Exception handling @@ -555,26 +565,27 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Allocation support - // Allocate an object in new space. If the new space is exhausted control - // continues at the gc_required label. The allocated object is returned in - // result and end of the new object is returned in result_end. The register - // scratch can be passed as no_reg in which case an additional object - // reference will be added to the reloc info. The returned pointers in result - // and result_end have not yet been tagged as heap objects. If - // result_contains_top_on_entry is true the content of result is known to be - // the allocation top on entry (could be result_end from a previous call to - // AllocateInNewSpace). If result_contains_top_on_entry is true scratch + // Allocate an object in new space or old pointer space. If the given space + // is exhausted control continues at the gc_required label. The allocated + // object is returned in result and end of the new object is returned in + // result_end. The register scratch can be passed as no_reg in which case + // an additional object reference will be added to the reloc info. The + // returned pointers in result and result_end have not yet been tagged as + // heap objects. If result_contains_top_on_entry is true the content of + // result is known to be the allocation top on entry (could be result_end + // from a previous call). If result_contains_top_on_entry is true scratch // should be no_reg as it is never used. - void AllocateInNewSpace(int object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - AllocationFlags flags); + void Allocate(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags); void AllocateInNewSpace(int header_size, ScaleFactor element_size, Register element_count, + RegisterValueType element_count_type, Register result, Register result_end, Register scratch, @@ -788,6 +799,7 @@ class MacroAssembler: public Assembler { // Push a handle value. void Push(Handle<Object> handle) { push(Immediate(handle)); } + void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } Handle<Object> CodeObject() { ASSERT(!code_object_.is_null()); @@ -862,6 +874,15 @@ class MacroAssembler: public Assembler { // in eax. Assumes that any other register can be used as a scratch. void CheckEnumCache(Label* call_runtime); + // AllocationSiteInfo support. 
Arrays may have an associated + // AllocationSiteInfo object that can be checked for in order to pretransition + // to another type. + // On entry, receiver_reg should point to the array object. + // scratch_reg gets clobbered. + // If allocation info is present, conditional code is set to equal + void TestJSArrayForAllocationSiteInfo(Register receiver_reg, + Register scratch_reg); + private: bool generating_stub_; bool allow_stub_calls_; @@ -890,7 +911,10 @@ class MacroAssembler: public Assembler { void LoadAllocationTopHelper(Register result, Register scratch, AllocationFlags flags); - void UpdateAllocationTopHelper(Register result_end, Register scratch); + + void UpdateAllocationTopHelper(Register result_end, + Register scratch, + AllocationFlags flags); // Helper for PopHandleScope. Allowed to perform a GC and returns // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and @@ -922,9 +946,9 @@ class MacroAssembler: public Assembler { Operand SafepointRegisterSlot(Register reg); static int SafepointRegisterStackIndex(int reg_code); - // Needs access to SafepointRegisterStackIndex for optimized frame + // Needs access to SafepointRegisterStackIndex for compiled frame // traversal. - friend class OptimizedFrame; + friend class StandardFrame; }; diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc index 622dc4254d..d8f2e8f0e7 100644 --- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc +++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc @@ -217,7 +217,7 @@ void RegExpMacroAssemblerIA32::CheckCharacters(Vector<const uc16> str, // If input is ASCII, don't even bother calling here if the string to // match contains a non-ASCII character. if (mode_ == ASCII) { - ASSERT(String::IsAscii(str.start(), str.length())); + ASSERT(String::IsOneByte(str.start(), str.length())); } #endif int byte_length = str.length() * char_size(); @@ -344,7 +344,15 @@ void RegExpMacroAssemblerIA32::CheckNotBackReferenceIgnoreCase( __ or_(eax, 0x20); // Convert match character to lower-case. __ lea(ecx, Operand(eax, -'a')); __ cmp(ecx, static_cast<int32_t>('z' - 'a')); // Is eax a lowercase letter? - __ j(above, &fail); + Label convert_capture; + __ j(below_equal, &convert_capture); // In range 'a'-'z'. + // Latin-1: Check for values in range [224,254] but not 247. + __ sub(ecx, Immediate(224 - 'a')); + __ cmp(ecx, Immediate(254 - 224)); + __ j(above, &fail); // Weren't Latin-1 letters. + __ cmp(ecx, Immediate(247 - 224)); // Check for 247. + __ j(equal, &fail); + __ bind(&convert_capture); // Also convert capture character. __ movzx_b(ecx, Operand(edx, 0)); __ or_(ecx, 0x20); @@ -569,7 +577,7 @@ void RegExpMacroAssemblerIA32::CheckBitInTable( Label* on_bit_set) { __ mov(eax, Immediate(table)); Register index = current_character(); - if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) { + if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { __ mov(ebx, kTableSize - 1); __ and_(ebx, current_character()); index = ebx; @@ -587,29 +595,23 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type, case 's': // Match space-characters if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. + // One byte space characters are '\t'..'\r', ' ' and \u00a0. 
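// --- Illustrative sketch (editorial note, not part of the patch): the two
// one-byte (Latin-1) predicates introduced in this file, written as plain
// C++. Helper names are hypothetical; the constants are taken from the
// emitted code above (case-insensitive back-reference) and below ('\s').
#include <stdint.h>

// After OR-ing the character with 0x20 (ASCII/Latin-1 lower-casing), it is
// accepted as a letter if it is 'a'..'z', or a Latin-1 letter in [224, 254]
// excluding 247 (the division sign, 0xF7).
static inline bool IsLatin1LetterAfterLowerCasing(uint8_t c) {
  if (c >= 'a' && c <= 'z') return true;
  return c >= 224 && c <= 254 && c != 247;
}

// '\s' in one-byte mode: ' ', '\t'..'\r', and U+00A0 (no-break space).
static inline bool IsOneByteRegExpSpace(uint8_t c) {
  return c == ' ' || (c >= '\t' && c <= '\r') || c == 0xA0;
}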
Label success; __ cmp(current_character(), ' '); - __ j(equal, &success); + __ j(equal, &success, Label::kNear); // Check range 0x09..0x0d __ lea(eax, Operand(current_character(), -'\t')); __ cmp(eax, '\r' - '\t'); - BranchOrBacktrack(above, on_no_match); + __ j(below_equal, &success, Label::kNear); + // \u00a0 (NBSP). + __ cmp(eax, 0x00a0 - '\t'); + BranchOrBacktrack(not_equal, on_no_match); __ bind(&success); return true; } return false; case 'S': - // Match non-space characters. - if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. - __ cmp(current_character(), ' '); - BranchOrBacktrack(equal, on_no_match); - __ lea(eax, Operand(current_character(), -'\t')); - __ cmp(eax, '\r' - '\t'); - BranchOrBacktrack(below_equal, on_no_match); - return true; - } + // The emitted code for generic character classes is good enough. return false; case 'd': // Match ASCII digits ('0'..'9') @@ -1197,7 +1199,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address, Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); // Current string. - bool is_ascii = subject->IsAsciiRepresentationUnderneath(); + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); ASSERT(re_code->instruction_start() <= *return_address); ASSERT(*return_address <= @@ -1228,7 +1230,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address, } // String might have changed. - if (subject_tmp->IsAsciiRepresentation() != is_ascii) { + if (subject_tmp->IsOneByteRepresentation() != is_ascii) { // If we changed between an ASCII and an UC16 string, the specialized // code cannot be used, and we need to restart regexp matching from // scratch (including, potentially, compiling a new version of the code). diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc index 11efb72bb6..f7e795e788 100644 --- a/deps/v8/src/ia32/stub-cache-ia32.cc +++ b/deps/v8/src/ia32/stub-cache-ia32.cc @@ -141,14 +141,14 @@ static void ProbeTable(Isolate* isolate, // the property. This function may return false negatives, so miss_label // must always call a backup property check that is complete. // This function is safe to call if the receiver has fast properties. -// Name must be a symbol and receiver must be a heap object. +// Name must be unique and receiver must be a heap object. static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, Label* miss_label, Register receiver, - Handle<String> name, + Handle<Name> name, Register r0, Register r1) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsUniqueName()); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1); __ IncrementCounter(counters->negative_lookups_miss(), 1); @@ -177,12 +177,12 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, __ j(not_equal, miss_label); Label done; - StringDictionaryLookupStub::GenerateNegativeLookup(masm, - miss_label, - &done, - properties, - name, - r1); + NameDictionaryLookupStub::GenerateNegativeLookup(masm, + miss_label, + &done, + properties, + name, + r1); __ bind(&done); __ DecrementCounter(counters->negative_lookups_miss(), 1); } @@ -227,7 +227,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ JumpIfSmi(receiver, &miss); // Get the map of the receiver and compute the hash. 
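// --- Illustrative sketch (editorial note, not part of the patch): the stub
// cache probe offset computed just below, as plain integer arithmetic. Names
// are hypothetical; the mask mirrors the emitted
// and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize), with
// kHeapObjectTagSize assumed here to be 2.
#include <stdint.h>
static inline uint32_t StubCacheProbeOffset(uint32_t name_hash_field,
                                            uint32_t receiver_map_word,
                                            uint32_t code_flags,
                                            uint32_t table_size) {
  uint32_t offset = (name_hash_field + receiver_map_word) ^ code_flags;
  // The low two bits are not part of the hash, so they stay masked out while
  // the offset is wrapped to the table size.
  return offset & ((table_size - 1) << 2);
}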
- __ mov(offset, FieldOperand(name, String::kHashFieldOffset)); + __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(offset, flags); // We mask out the last two bits because they are not part of the hash and @@ -241,7 +241,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra); // Primary miss: Compute hash for secondary probe. - __ mov(offset, FieldOperand(name, String::kHashFieldOffset)); + __ mov(offset, FieldOperand(name, Name::kHashFieldOffset)); __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(offset, flags); __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize); @@ -369,26 +369,19 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, } -// Load a fast property out of a holder object (src). In-object properties -// are loaded directly otherwise the property is loaded from the properties -// fixed array. -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - Handle<JSObject> holder, - int index) { - // Adjust for the number of properties stored in the holder. - index -= holder->map()->inobject_properties(); - if (index < 0) { - // Get the property straight out of the holder. - int offset = holder->map()->instance_size() + (index * kPointerSize); - __ mov(dst, FieldOperand(src, offset)); - } else { +void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm, + Register dst, + Register src, + bool inobject, + int index) { + int offset = index * kPointerSize; + if (!inobject) { // Calculate the offset into the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; + offset = offset + FixedArray::kHeaderSize; __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset)); - __ mov(dst, FieldOperand(dst, offset)); + src = dst; } + __ mov(dst, FieldOperand(src, offset)); } @@ -487,7 +480,7 @@ static void GenerateFastApiCall(MacroAssembler* masm, // Pass the additional arguments. __ mov(Operand(esp, 2 * kPointerSize), edi); Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); - Handle<Object> call_data(api_call_info->data()); + Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { __ mov(ecx, api_call_info); __ mov(ebx, FieldOperand(ecx, CallHandlerInfo::kDataOffset)); @@ -541,7 +534,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { void Compile(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, LookupResult* lookup, Register receiver, Register scratch1, @@ -573,7 +566,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch3, Handle<JSObject> interceptor_holder, LookupResult* lookup, - Handle<String> name, + Handle<Name> name, const CallOptimization& optimization, Label* miss_label) { ASSERT(optimization.is_constant_call()); @@ -666,7 +659,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch1, Register scratch2, Register scratch3, - Handle<String> name, + Handle<Name> name, Handle<JSObject> interceptor_holder, Label* miss_label) { Register holder = @@ -723,19 +716,13 @@ class CallInterceptorCompiler BASE_EMBEDDED { }; -void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { - ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC); - Handle<Code> code = (kind == Code::LOAD_IC) - ? 
masm->isolate()->builtins()->LoadIC_Miss() - : masm->isolate()->builtins()->KeyedLoadIC_Miss(); - __ jmp(code, RelocInfo::CODE_TARGET); -} - - -void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) { - Handle<Code> code = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(code, RelocInfo::CODE_TARGET); +void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ mov(this->name(), Immediate(name)); + } } @@ -745,12 +732,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Handle<JSObject> object, int index, Handle<Map> transition, - Handle<String> name, + Handle<Name> name, Register receiver_reg, Register name_reg, + Register value_reg, Register scratch1, Register scratch2, - Label* miss_label) { + Label* miss_label, + Label* miss_restore_name) { LookupResult lookup(masm->isolate()); object->Lookup(*name, &lookup); if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) { @@ -785,16 +774,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } while (holder->GetPrototype()->IsJSObject()); } // We need an extra register, push - __ push(name_reg); - Label miss_pop, done_check; CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg, - scratch1, scratch2, name, &miss_pop); - __ jmp(&done_check); - __ bind(&miss_pop); - __ pop(name_reg); - __ jmp(miss_label); - __ bind(&done_check); - __ pop(name_reg); + scratch1, scratch2, name, miss_restore_name); } // Stub never generated for non-global objects that require access @@ -842,11 +823,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, if (index < 0) { // Set the property straight into the object. int offset = object->map()->instance_size() + (index * kPointerSize); - __ mov(FieldOperand(receiver_reg, offset), eax); + __ mov(FieldOperand(receiver_reg, offset), value_reg); // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, eax); + __ mov(name_reg, value_reg); __ RecordWriteField(receiver_reg, offset, name_reg, @@ -861,7 +842,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ mov(name_reg, eax); + __ mov(name_reg, value_reg); __ RecordWriteField(scratch1, offset, name_reg, @@ -870,6 +851,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } // Return the value (register eax). + ASSERT(value_reg.is(eax)); __ ret(0); } @@ -879,7 +861,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // property. 
static void GenerateCheckPropertyCell(MacroAssembler* masm, Handle<GlobalObject> global, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSGlobalPropertyCell> cell = @@ -902,7 +884,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, static void GenerateCheckPropertyCells(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSObject> current = object; @@ -918,6 +900,12 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm, } } + +void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { + __ jmp(code, RelocInfo::CODE_TARGET); +} + + #undef __ #define __ ACCESS_MASM(masm()) @@ -928,9 +916,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, Register holder_reg, Register scratch1, Register scratch2, - Handle<String> name, + Handle<Name> name, int save_at_depth, - Label* miss) { + Label* miss, + PrototypeCheckType check) { + Handle<JSObject> first = object; // Make sure there's no overlap between holder and object registers. ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) @@ -958,11 +948,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, if (!current->HasFastProperties() && !current->IsJSGlobalObject() && !current->IsJSGlobalProxy()) { - if (!name->IsSymbol()) { - name = factory()->LookupSymbol(name); + if (!name->IsUniqueName()) { + ASSERT(name->IsString()); + name = factory()->InternalizeString(Handle<String>::cast(name)); } ASSERT(current->property_dictionary()->FindEntry(*name) == - StringDictionary::kNotFound); + NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, scratch2); @@ -977,8 +968,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, // Save the map in scratch1 for later. __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); } - __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK, - ALLOW_ELEMENT_TRANSITION_MAPS); + if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) { + __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK, + ALLOW_ELEMENT_TRANSITION_MAPS); + } // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global @@ -1010,9 +1003,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, // Log the check depth. LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); - // Check the holder map. - __ CheckMap(reg, Handle<Map>(holder->map()), - miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { + // Check the holder map. + __ CheckMap(reg, Handle<Map>(holder->map()), + miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + } // Perform security check for access to the global object. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); @@ -1030,128 +1025,140 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void StubCompiler::GenerateLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - int index, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. 
- __ JumpIfSmi(receiver, miss); +void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success, + Label* miss) { + if (!miss->is_unused()) { + __ jmp(success); + __ bind(miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + } +} - // Check the prototype chain. - Register reg = CheckPrototypes( - object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - // Get the value from the properties. - GenerateFastPropertyLoad(masm(), eax, reg, holder, index); - __ ret(0); -} +Register BaseLoadStubCompiler::CallbackHandlerFrontend( + Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success, + Handle<ExecutableAccessorInfo> callback) { + Label miss; + Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss); -void StubCompiler::GenerateDictionaryLoadCallback(Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - ASSERT(!receiver.is(scratch2)); - ASSERT(!receiver.is(scratch3)); - Register dictionary = scratch1; - bool must_preserve_dictionary_reg = receiver.is(dictionary); - - // Load the properties dictionary. - if (must_preserve_dictionary_reg) { - __ push(dictionary); - } - __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); + if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { + ASSERT(!reg.is(scratch2())); + ASSERT(!reg.is(scratch3())); + Register dictionary = scratch1(); + bool must_preserve_dictionary_reg = reg.is(dictionary); + + // Load the properties dictionary. + if (must_preserve_dictionary_reg) { + __ push(dictionary); + } + __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset)); - // Probe the dictionary. - Label probe_done, pop_and_miss; - StringDictionaryLookupStub::GeneratePositiveLookup(masm(), + // Probe the dictionary. + Label probe_done, pop_and_miss; + NameDictionaryLookupStub::GeneratePositiveLookup(masm(), &pop_and_miss, &probe_done, dictionary, - name_reg, - scratch2, - scratch3); - __ bind(&pop_and_miss); - if (must_preserve_dictionary_reg) { - __ pop(dictionary); - } - __ jmp(miss); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch2 contains the - // index into the dictionary. Check that the value is the callback. - Register index = scratch2; - const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ mov(scratch3, - Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag)); - if (must_preserve_dictionary_reg) { - __ pop(dictionary); + this->name(), + scratch2(), + scratch3()); + __ bind(&pop_and_miss); + if (must_preserve_dictionary_reg) { + __ pop(dictionary); + } + __ jmp(&miss); + __ bind(&probe_done); + + // If probing finds an entry in the dictionary, scratch2 contains the + // index into the dictionary. Check that the value is the callback. 
+ Register index = scratch2(); + const int kElementsStartOffset = + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kValueOffset = kElementsStartOffset + kPointerSize; + __ mov(scratch3(), + Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag)); + if (must_preserve_dictionary_reg) { + __ pop(dictionary); + } + __ cmp(scratch3(), callback); + __ j(not_equal, &miss); } - __ cmp(scratch3, callback); - __ j(not_equal, miss); + + HandlerFrontendFooter(success, &miss); + return reg; } -void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); +void BaseLoadStubCompiler::NonexistentHandlerFrontend( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Label* success, + Handle<GlobalObject> global) { + Label miss; - // Check that the maps haven't changed. - Register reg = CheckPrototypes(object, receiver, holder, scratch1, - scratch2, scratch3, name, miss); + Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss); - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - GenerateDictionaryLoadCallback( - reg, name_reg, scratch1, scratch2, scratch3, callback, name, miss); + // If the last object in the prototype chain is a global object, + // check that the global property cell is empty. + if (!global.is_null()) { + GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss); } + if (!last->HasFastProperties()) { + __ mov(scratch2(), FieldOperand(reg, HeapObject::kMapOffset)); + __ mov(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset)); + __ cmp(scratch2(), isolate()->factory()->null_value()); + __ j(not_equal, &miss); + } + + HandlerFrontendFooter(success, &miss); +} + + +void BaseLoadStubCompiler::GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex index) { + // Get the value from the properties. + GenerateFastPropertyLoad(masm(), eax, reg, holder, index); + __ ret(0); +} + + +void BaseLoadStubCompiler::GenerateLoadCallback( + Register reg, + Handle<ExecutableAccessorInfo> callback) { // Insert additional parameters into the stack frame above return address. - ASSERT(!scratch3.is(reg)); - __ pop(scratch3); // Get return address to place it below. + ASSERT(!scratch3().is(reg)); + __ pop(scratch3()); // Get return address to place it below. - __ push(receiver); // receiver - __ mov(scratch2, esp); - ASSERT(!scratch2.is(reg)); + __ push(receiver()); // receiver + __ mov(scratch2(), esp); + ASSERT(!scratch2().is(reg)); __ push(reg); // holder - // Push data from AccessorInfo. + // Push data from ExecutableAccessorInfo. if (isolate()->heap()->InNewSpace(callback->data())) { - __ mov(scratch1, Immediate(callback)); - __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); + __ mov(scratch1(), Immediate(callback)); + __ push(FieldOperand(scratch1(), ExecutableAccessorInfo::kDataOffset)); } else { - __ push(Immediate(Handle<Object>(callback->data()))); + __ push(Immediate(Handle<Object>(callback->data(), isolate()))); } __ push(Immediate(reinterpret_cast<int>(isolate()))); - // Save a pointer to where we pushed the arguments pointer. - // This will be passed as the const AccessorInfo& to the C++ callback. 
- __ push(scratch2); + // Save a pointer to where we pushed the arguments pointer. This will be + // passed as the const ExecutableAccessorInfo& to the C++ callback. + __ push(scratch2()); - __ push(name_reg); // name + __ push(name()); // name __ mov(ebx, esp); // esp points to reference to name (handler). - __ push(scratch3); // Restore return address. + __ push(scratch3()); // Restore return address. // 4 elements array for v8::Arguments::values_, handler for name and pointer // to the values (it considered as smi in GC). @@ -1172,44 +1179,22 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, } -void StubCompiler::GenerateLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<JSFunction> value, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the maps haven't changed. - CheckPrototypes( - object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - +void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) { // Return the constant value. __ LoadHeapObject(eax, value); __ ret(0); } -void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<String> name, - Label* miss) { +void BaseLoadStubCompiler::GenerateLoadInterceptor( + Register holder_reg, + Handle<JSObject> object, + Handle<JSObject> interceptor_holder, + LookupResult* lookup, + Handle<Name> name) { ASSERT(interceptor_holder->HasNamedInterceptor()); ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added // later. @@ -1218,8 +1203,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, if (lookup->IsField()) { compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && - lookup->GetCallbackObject()->IsAccessorInfo()) { - AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); + lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { + ExecutableAccessorInfo* callback = + ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); compile_followup_inline = callback->getter() != NULL && callback->IsCompatibleReceiver(*object); } @@ -1229,17 +1215,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. - Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, - name, miss); - ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); + ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. 
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); - bool must_preserve_receiver_reg = !receiver.is(holder_reg) && + bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); // Save necessary data before invoking an interceptor. @@ -1248,18 +1231,18 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, FrameScope frame_scope(masm(), StackFrame::INTERNAL); if (must_preserve_receiver_reg) { - __ push(receiver); + __ push(receiver()); } __ push(holder_reg); - __ push(name_reg); + __ push(this->name()); // Invoke an interceptor. Note: map checks from receiver to // interceptor's holder has been compiled before (see a caller // of this method.) CompileCallLoadPropertyWithInterceptor(masm(), - receiver, + receiver(), holder_reg, - name_reg, + this->name(), interceptor_holder); // Check if interceptor provided a value for property. If it's @@ -1273,76 +1256,28 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, // Clobber registers when generating debug-code to provoke errors. __ bind(&interceptor_failed); if (FLAG_debug_code) { - __ mov(receiver, Immediate(BitCast<int32_t>(kZapValue))); + __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue))); __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue))); - __ mov(name_reg, Immediate(BitCast<int32_t>(kZapValue))); + __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue))); } - __ pop(name_reg); + __ pop(this->name()); __ pop(holder_reg); if (must_preserve_receiver_reg) { - __ pop(receiver); + __ pop(receiver()); } // Leave the internal frame. } - // Check that the maps from interceptor's holder to lookup's holder - // haven't changed. And load lookup's holder into holder_reg. - if (must_perfrom_prototype_check) { - holder_reg = CheckPrototypes(interceptor_holder, - holder_reg, - Handle<JSObject>(lookup->holder()), - scratch1, - scratch2, - scratch3, - name, - miss); - } - - if (lookup->IsField()) { - // We found FIELD property in prototype chain of interceptor's holder. - // Retrieve a field from field's holder. - GenerateFastPropertyLoad(masm(), eax, holder_reg, - Handle<JSObject>(lookup->holder()), - lookup->GetFieldIndex()); - __ ret(0); - } else { - // We found CALLBACKS property in prototype chain of interceptor's - // holder. - ASSERT(lookup->type() == CALLBACKS); - Handle<AccessorInfo> callback( - AccessorInfo::cast(lookup->GetCallbackObject())); - ASSERT(callback->getter() != NULL); - - // Tail call to runtime. - // Important invariant in CALLBACKS case: the code above must be - // structured to never clobber |receiver| register. - __ pop(scratch2); // return address - __ push(receiver); - __ push(holder_reg); - __ mov(holder_reg, Immediate(callback)); - __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset)); - __ push(Immediate(reinterpret_cast<int>(isolate()))); - __ push(holder_reg); - __ push(name_reg); - __ push(scratch2); // restore return address - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty), - masm()->isolate()); - __ TailCallExternalReference(ref, 6, 1); - } + GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. 
- Register holder_reg = - CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, name, miss); - __ pop(scratch2); // save old return address - PushInterceptorArguments(masm(), receiver, holder_reg, - name_reg, interceptor_holder); - __ push(scratch2); // restore old return address + __ pop(scratch2()); // save old return address + PushInterceptorArguments(masm(), receiver(), holder_reg, + this->name(), interceptor_holder); + __ push(scratch2()); // restore old return address ExternalReference ref = ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), @@ -1352,7 +1287,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, } -void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { +void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { __ cmp(ecx, Immediate(name)); __ j(not_equal, miss); @@ -1362,7 +1297,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Label* miss) { ASSERT(holder->IsGlobalObject()); @@ -1423,8 +1358,8 @@ void CallStubCompiler::GenerateMissBranch() { Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object, Handle<JSObject> holder, - int index, - Handle<String> name) { + PropertyIndex index, + Handle<Name> name) { // ----------- S t a t e ------------- // -- ecx : name // -- esp[0] : return address @@ -1518,7 +1453,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( Label call_builtin; if (argc == 1) { // Otherwise fall through to call builtin. - Label attempt_to_grow_elements, with_write_barrier; + Label attempt_to_grow_elements, with_write_barrier, check_double; // Get the elements array of the object. __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset)); @@ -1526,7 +1461,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // Check that the elements are in fast mode and writable. __ cmp(FieldOperand(edi, HeapObject::kMapOffset), Immediate(factory()->fixed_array_map())); - __ j(not_equal, &call_builtin); + __ j(not_equal, &check_double); // Get the array's length into eax and calculate new length. __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); @@ -1557,17 +1492,49 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ ret((argc + 1) * kPointerSize); + __ bind(&check_double); + + + // Check that the elements are in double mode. + __ cmp(FieldOperand(edi, HeapObject::kMapOffset), + Immediate(factory()->fixed_double_array_map())); + __ j(not_equal, &call_builtin); + + // Get the array's length into eax and calculate new length. + __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset)); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + __ add(eax, Immediate(Smi::FromInt(argc))); + + // Get the elements' length into ecx. + __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset)); + + // Check if we could survive without allocation. + __ cmp(eax, ecx); + __ j(greater, &call_builtin); + + __ mov(ecx, Operand(esp, argc * kPointerSize)); + __ StoreNumberToDoubleElements( + ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize); + + // Save new length. 
+ __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax); + __ ret((argc + 1) * kPointerSize); + __ bind(&with_write_barrier); __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset)); - if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) { + if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) { Label fast_object, not_fast_object; __ CheckFastObjectElements(ebx, ¬_fast_object, Label::kNear); __ jmp(&fast_object); // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(¬_fast_object); __ CheckFastSmiElements(ebx, &call_builtin); + __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), + Immediate(factory()->heap_number_map())); + __ j(equal, &call_builtin); // edi: elements array // edx: receiver // ebx: map @@ -1579,7 +1546,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( &try_holey_map); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); // Restore edi. __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset)); __ jmp(&fast_object); @@ -1591,7 +1560,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( edi, &call_builtin); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); // Restore edi. __ mov(edi, FieldOperand(edx, JSArray::kElementsOffset)); __ bind(&fast_object); @@ -1821,8 +1792,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall( eax, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - eax, holder, ebx, edx, edi, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + eax, holder, ebx, edx, edi, name, &miss); Register receiver = ebx; Register index = edi; @@ -1904,8 +1876,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall( eax, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - eax, holder, ebx, edx, edi, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + eax, holder, ebx, edx, edi, name, &miss); Register receiver = eax; Register index = edi; @@ -2042,7 +2015,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( return Handle<Code>::null(); } - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm(), SSE2); const int argc = arguments().immediate(); @@ -2318,11 +2291,11 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( } -Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, - Handle<JSObject> holder, - Handle<JSFunction> function, - Handle<String> name, - CheckType check) { +void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Label* success) { // ----------- S t a t e ------------- // -- ecx : name // -- esp[0] : return address @@ -2330,15 +2303,6 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, // -- ... // -- esp[(argc + 1) * 4] : receiver // ----------------------------------- - - if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, - Handle<JSGlobalPropertyCell>::null(), - function, name); - // A null handle means bail out to the regular compiler code below. 
- if (!code.is_null()) return code; - } - Label miss; GenerateNameCheck(name, &miss); @@ -2371,76 +2335,93 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, break; case STRING_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - // Check that the object is a string or a symbol. - __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax); - __ j(above_equal, &miss); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::STRING_FUNCTION_INDEX, eax, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - eax, holder, ebx, edx, edi, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + // Check that the object is a string. + __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, eax); + __ j(above_equal, &miss); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::STRING_FUNCTION_INDEX, eax, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + eax, holder, ebx, edx, edi, name, &miss); break; - case NUMBER_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a smi or a heap number. - __ JumpIfSmi(edx, &fast); - __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax); - __ j(not_equal, &miss); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - eax, holder, ebx, edx, edi, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + case SYMBOL_CHECK: + // Check that the object is a symbol. + __ CmpObjectType(edx, SYMBOL_TYPE, eax); + __ j(not_equal, &miss); break; - case BOOLEAN_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a boolean. - __ cmp(edx, factory()->true_value()); - __ j(equal, &fast); - __ cmp(edx, factory()->false_value()); - __ j(not_equal, &miss); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - eax, holder, ebx, edx, edi, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + case NUMBER_CHECK: { + Label fast; + // Check that the object is a smi or a heap number. + __ JumpIfSmi(edx, &fast); + __ CmpObjectType(edx, HEAP_NUMBER_TYPE, eax); + __ j(not_equal, &miss); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::NUMBER_FUNCTION_INDEX, eax, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + eax, holder, ebx, edx, edi, name, &miss); + break; + } + case BOOLEAN_CHECK: { + Label fast; + // Check that the object is a boolean. 
+ __ cmp(edx, factory()->true_value()); + __ j(equal, &fast); + __ cmp(edx, factory()->false_value()); + __ j(not_equal, &miss); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::BOOLEAN_FUNCTION_INDEX, eax, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + eax, holder, ebx, edx, edi, name, &miss); break; + } } + __ jmp(success); + + // Handle call cache miss. + __ bind(&miss); + GenerateMissBranch(); +} + + +void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; __ InvokeFunction(function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); +} - // Handle call cache miss. - __ bind(&miss); - GenerateMissBranch(); + +Handle<Code> CallStubCompiler::CompileCallConstant( + Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Handle<JSFunction> function) { + + if (HasCustomCallGenerator(function)) { + Handle<Code> code = CompileCustomCall(object, holder, + Handle<JSGlobalPropertyCell>::null(), + function, Handle<String>::cast(name)); + // A null handle means bail out to the regular compiler code below. + if (!code.is_null()) return code; + } + + Label success; + + CompileHandlerFrontend(object, holder, name, check, &success); + __ bind(&success); + CompileHandlerBackend(function); // Return the generated code. return GetCode(function); @@ -2449,7 +2430,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // -- ecx : name // -- esp[0] : return address @@ -2511,7 +2492,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, Handle<JSFunction> function, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // -- ecx : name // -- esp[0] : return address @@ -2521,7 +2502,8 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( // ----------------------------------- if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, cell, function, name); + Handle<Code> code = CompileCustomCall( + object, holder, cell, function, Handle<String>::cast(name)); // A null handle means bail out to the regular compiler code below. if (!code.is_null()) return code; } @@ -2567,66 +2549,27 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( } -Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - // Generate store field code. Trashes the name register. - GenerateStoreField(masm(), - object, - index, - transition, - name, - edx, ecx, ebx, edi, - &miss); - // Handle store cache miss. - __ bind(&miss); - __ mov(ecx, Immediate(name)); // restore name - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? 
Code::FIELD - : Code::MAP_TRANSITION, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<String> name, - Handle<JSObject> receiver, + Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; + Handle<ExecutableAccessorInfo> callback) { + Label miss, miss_restore_name; // Check that the maps haven't changed, preserving the value register. - __ push(eax); - __ JumpIfSmi(edx, &miss); - CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss); - __ pop(eax); // restore value + __ JumpIfSmi(receiver(), &miss); + CheckPrototypes(object, receiver(), holder, + scratch1(), this->name(), scratch2(), + name, &miss_restore_name); // Stub never generated for non-global objects that require access checks. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); - __ pop(ebx); // remove the return address - __ push(edx); // receiver - __ push(Immediate(callback)); // callback info - __ push(ecx); // name - __ push(eax); // value - __ push(ebx); // restore return address + __ pop(scratch1()); // remove the return address + __ push(receiver()); + __ Push(callback); + __ Push(name); + __ push(value()); + __ push(scratch1()); // restore return address // Do tail-call to the runtime system. ExternalReference store_callback_property = @@ -2634,13 +2577,12 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( __ TailCallExternalReference(store_callback_property, 4, 1); // Handle store cache miss. + GenerateRestoreName(masm(), &miss_restore_name, name); __ bind(&miss); - __ pop(eax); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::CALLBACKS, name); + return GetICCode(kind(), Code::CALLBACKS, name); } @@ -2690,67 +2632,30 @@ void StoreStubCompiler::GenerateStoreViaSetter( #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreViaSetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> setter) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed, preserving the name register. - __ push(ecx); - __ JumpIfSmi(edx, &miss); - CheckPrototypes(receiver, edx, holder, ebx, ecx, edi, name, &miss); - __ pop(ecx); - - GenerateStoreViaSetter(masm(), setter); - - __ bind(&miss); - __ pop(ecx); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> receiver, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + Handle<JSObject> object, + Handle<Name> name) { Label miss; // Check that the map of the object hasn't changed. 
- __ CheckMap(edx, Handle<Map>(receiver->map()), + __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform global security token check if needed. - if (receiver->IsJSGlobalProxy()) { + if (object->IsJSGlobalProxy()) { __ CheckAccessGlobalProxy(edx, ebx, &miss); } // Stub never generated for non-global objects that require access // checks. - ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded()); + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - __ pop(ebx); // remove the return address - __ push(edx); // receiver - __ push(ecx); // name - __ push(eax); // value - __ push(Immediate(Smi::FromInt(strict_mode_))); - __ push(ebx); // restore return address + __ pop(scratch1()); // remove the return address + __ push(receiver()); + __ push(this->name()); + __ push(value()); + __ push(Immediate(Smi::FromInt(strict_mode()))); + __ push(scratch1()); // restore return address // Do tail-call to the runtime system. ExternalReference store_ic_property = @@ -2759,34 +2664,28 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); + return GetICCode(kind(), Code::INTERCEPTOR, name); } Handle<Code> StoreStubCompiler::CompileStoreGlobal( Handle<GlobalObject> object, Handle<JSGlobalPropertyCell> cell, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + Handle<Name> name) { Label miss; // Check that the map of the global has not changed. - __ cmp(FieldOperand(edx, HeapObject::kMapOffset), + __ cmp(FieldOperand(receiver(), HeapObject::kMapOffset), Immediate(Handle<Map>(object->map()))); __ j(not_equal, &miss); // Compute the cell operand to use. - __ mov(ebx, Immediate(cell)); - Operand cell_operand = FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset); + __ mov(scratch1(), Immediate(cell)); + Operand cell_operand = + FieldOperand(scratch1(), JSGlobalPropertyCell::kValueOffset); // Check that the value in the cell is not the hole. If it is, this // cell could have been deleted and reintroducing the global needs @@ -2796,7 +2695,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal( __ j(equal, &miss); // Store the value in the cell. - __ mov(cell_operand, eax); + __ mov(cell_operand, value()); // No write barrier here, because cells are always rescanned. // Return the value (register eax). @@ -2807,75 +2706,10 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal( // Handle store cache miss. __ bind(&miss); __ IncrementCounter(counters->named_store_global_inline_miss(), 1); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. 
- return GetCode(Code::NORMAL, name); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_store_field(), 1); - - // Check that the name has not changed. - __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - // Generate store field code. Trashes the name register. - GenerateStoreField(masm(), - object, - index, - transition, - name, - edx, ecx, ebx, edi, - &miss); - - // Handle store cache miss. - __ bind(&miss); - __ DecrementCounter(counters->keyed_store_field(), 1); - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? Code::FIELD - : Code::MAP_TRANSITION, name); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - ElementsKind elements_kind = receiver_map->elements_kind(); - bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE; - Handle<Code> stub = - KeyedStoreElementStub(is_jsarray, elements_kind, grow_mode_).GetCode(); - - __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string()); + return GetICCode(kind(), Code::NORMAL, name); } @@ -2883,116 +2717,91 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { - // ----------- S t a t e ------------- - // -- eax : value - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- Label miss; - __ JumpIfSmi(edx, &miss, Label::kNear); - __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset)); - // ebx: receiver->map(). + __ JumpIfSmi(receiver(), &miss, Label::kNear); + __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset)); for (int i = 0; i < receiver_maps->length(); ++i) { - __ cmp(edi, receiver_maps->at(i)); + __ cmp(scratch1(), receiver_maps->at(i)); if (transitioned_maps->at(i).is_null()) { __ j(equal, handler_stubs->at(i)); } else { Label next_map; __ j(not_equal, &next_map, Label::kNear); - __ mov(ebx, Immediate(transitioned_maps->at(i))); + __ mov(transition_map(), Immediate(transitioned_maps->at(i))); __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); __ bind(&next_map); } } __ bind(&miss); - Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. 
- return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); + return GetICCode( + kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> last) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - // Check that the receiver isn't a smi. - __ JumpIfSmi(edx, &miss); - - ASSERT(last->IsGlobalObject() || last->HasFastProperties()); +Handle<Code> LoadStubCompiler::CompileLoadNonexistent( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Handle<GlobalObject> global) { + Label success; - // Check the maps of the full prototype chain. Also check that - // global property cells up to (but not including) the last object - // in the prototype chain are empty. - CheckPrototypes(object, edx, last, ebx, eax, edi, name, &miss); - - // If the last object in the prototype chain is a global object, - // check that the global property cell is empty. - if (last->IsGlobalObject()) { - GenerateCheckPropertyCell( - masm(), Handle<GlobalObject>::cast(last), name, eax, &miss); - } + NonexistentHandlerFrontend(object, last, name, &success, global); + __ bind(&success); // Return undefined if maps of the full prototype chain are still the // same and no global property with this name contains a value. __ mov(eax, isolate()->factory()->undefined_value()); __ ret(0); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::NONEXISTENT, factory()->empty_string()); + return GetCode(kind(), Code::NONEXISTENT, name); } -Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - int index, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; +Register* LoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg }; + return registers; +} - GenerateLoadField(object, holder, edx, ebx, eax, edi, index, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::FIELD, name); +Register* KeyedLoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg }; + return registers; } -Handle<Code> LoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; +Register* StoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. + static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg }; + return registers; +} - GenerateLoadCallback(object, holder, edx, ecx, ebx, eax, edi, no_reg, - callback, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::CALLBACKS, name); +Register* KeyedStoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. 
+ static Register registers[] = { edx, ecx, eax, ebx, edi, no_reg }; + return registers; +} + + +void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ cmp(name_reg, Immediate(name)); + __ j(not_equal, miss); +} + + +void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ cmp(name_reg, Immediate(name)); + __ j(not_equal, miss); } @@ -3033,369 +2842,75 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadViaGetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> getter) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(edx, &miss); - CheckPrototypes(receiver, edx, holder, ebx, eax, edi, name, &miss); - - GenerateLoadViaGetter(masm(), getter); - - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<JSFunction> value, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - GenerateLoadConstant(object, holder, edx, ebx, eax, edi, value, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - - // TODO(368): Compile in the whole chain: all the interceptors in - // prototypes and ultimate answer. - GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi, - name, &miss); - - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); -} - - Handle<Code> LoadStubCompiler::CompileLoadGlobal( Handle<JSObject> object, - Handle<GlobalObject> holder, + Handle<GlobalObject> global, Handle<JSGlobalPropertyCell> cell, - Handle<String> name, + Handle<Name> name, bool is_dont_delete) { - // ----------- S t a t e ------------- - // -- ecx : name - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(edx, &miss); - CheckPrototypes(object, edx, holder, ebx, eax, edi, name, &miss); + Label success, miss; + __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK); + HandlerFrontendHeader( + object, receiver(), Handle<JSObject>::cast(global), name, &miss); // Get the value from the cell. 
if (Serializer::enabled()) { - __ mov(ebx, Immediate(cell)); - __ mov(ebx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset)); + __ mov(eax, Immediate(cell)); + __ mov(eax, FieldOperand(eax, JSGlobalPropertyCell::kValueOffset)); } else { - __ mov(ebx, Operand::Cell(cell)); + __ mov(eax, Operand::Cell(cell)); } // Check for deleted property if property can actually be deleted. if (!is_dont_delete) { - __ cmp(ebx, factory()->the_hole_value()); + __ cmp(eax, factory()->the_hole_value()); __ j(equal, &miss); } else if (FLAG_debug_code) { - __ cmp(ebx, factory()->the_hole_value()); + __ cmp(eax, factory()->the_hole_value()); __ Check(not_equal, "DontDelete cells can't contain the hole"); } + HandlerFrontendFooter(&success, &miss); + __ bind(&success); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1); - __ mov(eax, ebx); + // The code above already loads the result into the return register. __ ret(0); - __ bind(&miss); - __ IncrementCounter(counters->named_load_global_stub_miss(), 1); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::NORMAL, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - int index) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_field(), 1); - - // Check that the name has not changed. - __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - GenerateLoadField(receiver, holder, edx, ebx, eax, edi, index, name, &miss); - - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_field(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::FIELD, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_callback(), 1); - - // Check that the name has not changed. - __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - GenerateLoadCallback(receiver, holder, edx, ecx, ebx, eax, edi, no_reg, - callback, name, &miss); - - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_callback(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> value) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_constant_function(), 1); - - // Check that the name has not changed. 
- __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - GenerateLoadConstant( - receiver, holder, edx, ebx, eax, edi, value, name, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_constant_function(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor( - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_interceptor(), 1); - - // Check that the name has not changed. - __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - GenerateLoadInterceptor(receiver, holder, &lookup, edx, ecx, eax, ebx, edi, - name, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_interceptor(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_array_length(), 1); - - // Check that the name has not changed. - __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - GenerateLoadArrayLength(masm(), edx, eax, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_array_length(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_string_length(), 1); - - // Check that the name has not changed. - __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - GenerateLoadStringLength(masm(), edx, eax, ebx, &miss, true); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_string_length(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_function_prototype(), 1); - - // Check that the name has not changed. - __ cmp(ecx, Immediate(name)); - __ j(not_equal, &miss); - - GenerateLoadFunctionPrototype(masm(), edx, eax, ebx, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_function_prototype(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - // Return the generated code. 
- return GetCode(Code::CALLBACKS, name); + return GetICCode(kind(), Code::NORMAL, name); } -Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - - ElementsKind elements_kind = receiver_map->elements_kind(); - Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode(); - - __ DispatchMap(edx, receiver_map, stub, DO_SMI_CHECK); - - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string()); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( +Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC( MapHandleList* receiver_maps, - CodeHandleList* handler_ics) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; - __ JumpIfSmi(edx, &miss); - Register map_reg = ebx; - __ mov(map_reg, FieldOperand(edx, HeapObject::kMapOffset)); + if (check == PROPERTY) { + GenerateNameCheck(name, this->name(), &miss); + } + + __ JumpIfSmi(receiver(), &miss); + Register map_reg = scratch1(); + __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); int receiver_count = receiver_maps->length(); for (int current = 0; current < receiver_count; ++current) { __ cmp(map_reg, receiver_maps->at(current)); - __ j(equal, handler_ics->at(current)); + __ j(equal, handlers->at(current)); } __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); + InlineCacheState state = + receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC; + return GetICCode(kind(), type, name, state); } @@ -3446,8 +2961,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ cmp(ecx, Immediate(instance_size)); __ Check(equal, "Instance size of initial map changed."); #endif - __ AllocateInNewSpace(instance_size, edx, ecx, no_reg, - &generic_stub_call, NO_ALLOCATION_FLAGS); + __ Allocate(instance_size, edx, ecx, no_reg, &generic_stub_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields and add the heap tag. // ebx: initial map @@ -3488,7 +3003,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ mov(ebx, edi); __ cmp(eax, arg_number); if (CpuFeatures::IsSupported(CMOV)) { - CpuFeatures::Scope use_cmov(CMOV); + CpuFeatureScope use_cmov(masm(), CMOV); __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize)); } else { Label not_passed; @@ -3500,7 +3015,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ mov(Operand(edx, i * kPointerSize), ebx); } else { // Set the property to the constant value. 
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i)); + Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i), + isolate()); __ mov(Operand(edx, i * kPointerSize), Immediate(constant)); } } @@ -3573,10 +3089,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ jmp(slow_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); __ bind(&miss_force_generic); // ----------- S t a t e ------------- @@ -3584,10 +3097,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - - Handle<Code> miss_force_generic_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(miss_force_generic_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric); } @@ -3600,7 +3110,7 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, // Check that key is a smi and if SSE2 is available a heap number // containing a smi and branch if the check fails. if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); + CpuFeatureScope use_sse2(masm, SSE2); Label key_ok; __ JumpIfSmi(key, &key_ok); __ cmp(FieldOperand(key, HeapObject::kMapOffset), @@ -3624,157 +3134,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, } -void KeyedLoadStubCompiler::GenerateLoadExternalArray( - MacroAssembler* masm, - ElementsKind elements_kind) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss_force_generic, failed_allocation, slow; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic); - - // Check that the index is in range. - __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset)); - __ cmp(ecx, FieldOperand(ebx, ExternalArray::kLengthOffset)); - // Unsigned comparison catches both negative and too-large values. - __ j(above_equal, &miss_force_generic); - __ mov(ebx, FieldOperand(ebx, ExternalArray::kExternalPointerOffset)); - // ebx: base pointer of external storage - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ SmiUntag(ecx); // Untag the index. - __ movsx_b(eax, Operand(ebx, ecx, times_1, 0)); - break; - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_PIXEL_ELEMENTS: - __ SmiUntag(ecx); // Untag the index. 
- __ movzx_b(eax, Operand(ebx, ecx, times_1, 0)); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ movsx_w(eax, Operand(ebx, ecx, times_1, 0)); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ movzx_w(eax, Operand(ebx, ecx, times_1, 0)); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - __ mov(eax, Operand(ebx, ecx, times_2, 0)); - break; - case EXTERNAL_FLOAT_ELEMENTS: - __ fld_s(Operand(ebx, ecx, times_2, 0)); - break; - case EXTERNAL_DOUBLE_ELEMENTS: - __ fld_d(Operand(ebx, ecx, times_4, 0)); - break; - default: - UNREACHABLE(); - break; - } - - // For integer array types: - // eax: value - // For floating-point array type: - // FP(0): value - - if (elements_kind == EXTERNAL_INT_ELEMENTS || - elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { - // For the Int and UnsignedInt array types, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. - Label box_int; - if (elements_kind == EXTERNAL_INT_ELEMENTS) { - __ cmp(eax, 0xc0000000); - __ j(sign, &box_int); - } else { - ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind); - // The test is different for unsigned int values. Since we need - // the value to be in the range of a positive smi, we can't - // handle either of the top two bits being set in the value. - __ test(eax, Immediate(0xc0000000)); - __ j(not_zero, &box_int); - } - - __ SmiTag(eax); - __ ret(0); - - __ bind(&box_int); - - // Allocate a HeapNumber for the int and perform int-to-double - // conversion. - if (elements_kind == EXTERNAL_INT_ELEMENTS) { - __ push(eax); - __ fild_s(Operand(esp, 0)); - __ pop(eax); - } else { - ASSERT_EQ(EXTERNAL_UNSIGNED_INT_ELEMENTS, elements_kind); - // Need to zero-extend the value. - // There's no fild variant for unsigned values, so zero-extend - // to a 64-bit int manually. - __ push(Immediate(0)); - __ push(eax); - __ fild_d(Operand(esp, 0)); - __ pop(eax); - __ pop(eax); - } - // FP(0): value - __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation); - // Set the value. - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(0); - } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || - elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. - __ AllocateHeapNumber(eax, ebx, edi, &failed_allocation); - // Set the value. - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - __ ret(0); - } else { - __ SmiTag(eax); - __ ret(0); - } - - // If we fail allocation of the HeapNumber, we still have a value on - // top of the FPU stack. Remove it. - __ bind(&failed_allocation); - __ fstp(0); - // Fall through to slow case. - - // Slow case: Jump to runtime. - __ bind(&slow); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_external_array_slow(), 1); - - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - - Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ jmp(ic, RelocInfo::CODE_TARGET); - - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - - // Miss case: Jump to runtime. 
- __ bind(&miss_force_generic); - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); -} - - void KeyedStoreStubCompiler::GenerateStoreExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -3888,7 +3247,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( if ((elements_kind == EXTERNAL_INT_ELEMENTS || elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) && CpuFeatures::IsSupported(SSE3)) { - CpuFeatures::Scope scope(SSE3); + CpuFeatureScope scope(masm, SSE3); // fisttp stores values as signed integers. To represent the // entire range of int and unsigned int arrays, store as a // 64-bit int and discard the high 32 bits. @@ -3913,7 +3272,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( __ mov(Operand(edi, ecx, times_2, 0), ebx); } else { ASSERT(CpuFeatures::IsSupported(SSE2)); - CpuFeatures::Scope scope(SSE2); + CpuFeatureScope scope(masm, SSE2); __ cvttsd2si(ebx, FieldOperand(eax, HeapNumber::kValueOffset)); __ cmp(ebx, 0x80000000u); __ j(equal, &slow); @@ -3956,9 +3315,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // -- edx : receiver // -- esp[0] : return address // ----------------------------------- - - Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ jmp(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); // ----------- S t a t e ------------- // -- eax : value @@ -3968,109 +3325,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // ----------------------------------- __ bind(&miss_force_generic); - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss_force_generic; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic); - - // Get the elements array. - __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset)); - __ AssertFastElements(eax); - - // Check that the key is within bounds. - __ cmp(ecx, FieldOperand(eax, FixedArray::kLengthOffset)); - __ j(above_equal, &miss_force_generic); - - // Load the result and make sure it's not the hole. - __ mov(ebx, Operand(eax, ecx, times_2, - FixedArray::kHeaderSize - kHeapObjectTag)); - __ cmp(ebx, masm->isolate()->factory()->the_hole_value()); - __ j(equal, &miss_force_generic); - __ mov(eax, ebx); - __ ret(0); - - __ bind(&miss_force_generic); - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( - MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ecx : key - // -- edx : receiver - // -- esp[0] : return address - // ----------------------------------- - Label miss_force_generic, slow_allocate_heapnumber; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. 
- GenerateSmiKeyCheck(masm, ecx, eax, xmm0, xmm1, &miss_force_generic); - - // Get the elements array. - __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset)); - __ AssertFastElements(eax); - - // Check that the key is within bounds. - __ cmp(ecx, FieldOperand(eax, FixedDoubleArray::kLengthOffset)); - __ j(above_equal, &miss_force_generic); - - // Check for the hole - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ cmp(FieldOperand(eax, ecx, times_4, offset), Immediate(kHoleNanUpper32)); - __ j(equal, &miss_force_generic); - - // Always allocate a heap number for the result. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ movdbl(xmm0, FieldOperand(eax, ecx, times_4, - FixedDoubleArray::kHeaderSize)); - } else { - __ fld_d(FieldOperand(eax, ecx, times_4, FixedDoubleArray::kHeaderSize)); - } - __ AllocateHeapNumber(eax, ebx, edi, &slow_allocate_heapnumber); - // Set the value. - if (CpuFeatures::IsSupported(SSE2)) { - CpuFeatures::Scope use_sse2(SSE2); - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); - } else { - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); - } - __ ret(0); - - __ bind(&slow_allocate_heapnumber); - // A value was pushed on the floating point stack before the allocation, if - // the allocation fails it needs to be removed. - if (!CpuFeatures::IsSupported(SSE2)) { - __ fstp(0); - } - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ jmp(slow_ic, RelocInfo::CODE_TARGET); - - __ bind(&miss_force_generic); - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); } @@ -4078,7 +3333,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key @@ -4103,7 +3358,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( if (is_js_array) { // Check that the key is within bounds. __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis. - if (grow_mode == ALLOW_JSARRAY_GROWTH) { + if (IsGrowStoreMode(store_mode)) { __ j(above_equal, &grow); } else { __ j(above_equal, &miss_force_generic); @@ -4146,16 +3401,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Handle store cache miss, replacing the ic with the generic stub. __ bind(&miss_force_generic); - Handle<Code> ic_force_generic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); // Handle transition to other elements kinds without using the generic stub. __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic_miss, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Handle transition requiring the array to grow. 
__ bind(&grow); @@ -4171,7 +3423,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ j(not_equal, &check_capacity); int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT); + __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT); // Restore the key, which is known to be the array length. // eax: value @@ -4224,8 +3476,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ mov(ecx, Immediate(0)); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ jmp(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } @@ -4233,7 +3484,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( MacroAssembler* masm, bool is_js_array, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- eax : value // -- ecx : key @@ -4256,7 +3507,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( if (is_js_array) { // Check that the key is within bounds. __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset)); // smis. - if (grow_mode == ALLOW_JSARRAY_GROWTH) { + if (IsGrowStoreMode(store_mode)) { __ j(above_equal, &grow); } else { __ j(above_equal, &miss_force_generic); @@ -4274,16 +3525,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Handle store cache miss, replacing the ic with the generic stub. __ bind(&miss_force_generic); - Handle<Code> ic_force_generic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); // Handle transition to other elements kinds without using the generic stub. __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic_miss, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Handle transition requiring the array to grow. __ bind(&grow); @@ -4307,7 +3555,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ j(not_equal, &check_capacity); int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT); + __ Allocate(size, edi, ebx, ecx, &prepare_slow, TAG_OBJECT); // Restore the key, which is known to be the array length. __ mov(ecx, Immediate(0)); @@ -4316,13 +3564,22 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // ecx: key // edx: receiver // edi: elements - // Initialize the new FixedDoubleArray. Leave elements unitialized for - // efficiency, they are guaranteed to be initialized before use. + // Initialize the new FixedDoubleArray. 
__ mov(FieldOperand(edi, JSObject::kMapOffset), Immediate(masm->isolate()->factory()->fixed_double_array_map())); __ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset), Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements))); + __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0, + &transition_elements_kind, true); + + for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { + int offset = FixedDoubleArray::OffsetOfElementAt(i); + __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32)); + __ mov(FieldOperand(edi, offset + kPointerSize), + Immediate(kHoleNanUpper32)); + } + // Install the new backing store in the JSArray. __ mov(FieldOperand(edx, JSObject::kElementsOffset), edi); __ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx, @@ -4332,7 +3589,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ add(FieldOperand(edx, JSArray::kLengthOffset), Immediate(Smi::FromInt(1))); __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset)); - __ jmp(&finish_store); + __ ret(0); __ bind(&check_capacity); // eax: value @@ -4353,8 +3610,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ mov(ecx, Immediate(0)); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ jmp(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h index 49b6ef9d0f..9439792bed 100644 --- a/deps/v8/src/ic-inl.h +++ b/deps/v8/src/ic-inl.h @@ -43,7 +43,8 @@ Address IC::address() const { Address result = Assembler::target_address_from_return_address(pc()); #ifdef ENABLE_DEBUGGER_SUPPORT - Debug* debug = Isolate::Current()->debug(); + ASSERT(Isolate::Current() == isolate()); + Debug* debug = isolate()->debug(); // First check if any break points are active if not just return the address // of the call. if (!debug->has_break_points()) return result; @@ -106,8 +107,9 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object, return GetCodeCacheForObject(JSObject::cast(object), holder); } // If the object is a value, we use the prototype map for the cache. - ASSERT(object->IsString() || object->IsNumber() || object->IsBoolean()); - return PROTOTYPE_MAP; + ASSERT(object->IsString() || object->IsSymbol() || + object->IsNumber() || object->IsBoolean()); + return DELEGATE_MAP; } @@ -122,14 +124,16 @@ InlineCacheHolderFlag IC::GetCodeCacheForObject(JSObject* object, !object->HasFastProperties() && !object->IsJSGlobalProxy() && !object->IsJSGlobalObject()) { - return PROTOTYPE_MAP; + return DELEGATE_MAP; } return OWN_MAP; } -JSObject* IC::GetCodeCacheHolder(Object* object, InlineCacheHolderFlag holder) { - Object* map_owner = (holder == OWN_MAP ? object : object->GetPrototype()); +JSObject* IC::GetCodeCacheHolder(Isolate* isolate, + Object* object, + InlineCacheHolderFlag holder) { + Object* map_owner = holder == OWN_MAP ? object : object->GetDelegate(isolate); ASSERT(map_owner->IsJSObject()); return JSObject::cast(map_owner); } diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc index dd0bb10e10..da2211b7b6 100644 --- a/deps/v8/src/ic.cc +++ b/deps/v8/src/ic.cc @@ -43,35 +43,46 @@ namespace internal { char IC::TransitionMarkFromState(IC::State state) { switch (state) { case UNINITIALIZED: return '0'; - case PREMONOMORPHIC: return 'P'; + case PREMONOMORPHIC: return '.'; case MONOMORPHIC: return '1'; case MONOMORPHIC_PROTOTYPE_FAILURE: return '^'; - case MEGAMORPHIC: return IsGeneric() ? 
'G' : 'N'; + case POLYMORPHIC: return 'P'; + case MEGAMORPHIC: return 'N'; + case GENERIC: return 'G'; // We never see the debugger states here, because the state is // computed from the original code - not the patched code. Let // these cases fall through to the unreachable code below. - case DEBUG_BREAK: break; - case DEBUG_PREPARE_STEP_IN: break; + case DEBUG_STUB: break; } UNREACHABLE(); return 0; } + +const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) { + if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW"; + if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) { + return ".IGNORE_OOB"; + } + if (IsGrowStoreMode(mode)) return ".GROW"; + return ""; +} + + void IC::TraceIC(const char* type, Handle<Object> name, State old_state, Code* new_target) { if (FLAG_trace_ic) { - State new_state = StateFrom(new_target, - HEAP->undefined_value(), - HEAP->undefined_value()); + Object* undef = new_target->GetHeap()->undefined_value(); + State new_state = StateFrom(new_target, undef, undef); PrintF("[%s in ", type); - StackFrameIterator it; + Isolate* isolate = new_target->GetIsolate(); + StackFrameIterator it(isolate); while (it.frame()->fp() != this->fp()) it.Advance(); StackFrame* raw_frame = it.frame(); if (raw_frame->is_internal()) { - Isolate* isolate = new_target->GetIsolate(); Code* apply_builtin = isolate->builtins()->builtin( Builtins::kFunctionApply); if (raw_frame->unchecked_code() == apply_builtin) { @@ -80,55 +91,54 @@ void IC::TraceIC(const char* type, raw_frame = it.frame(); } } - JavaScriptFrame::PrintTop(stdout, false, true); - bool new_can_grow = - Code::GetKeyedAccessGrowMode(new_target->extra_ic_state()) == - ALLOW_JSARRAY_GROWTH; + JavaScriptFrame::PrintTop(isolate, stdout, false, true); + Code::ExtraICState state = new_target->extra_ic_state(); + const char* modifier = + GetTransitionMarkModifier(Code::GetKeyedAccessStoreMode(state)); PrintF(" (%c->%c%s)", TransitionMarkFromState(old_state), TransitionMarkFromState(new_state), - new_can_grow ? ".GROW" : ""); + modifier); name->Print(); PrintF("]\n"); } } -#define TRACE_GENERIC_IC(type, reason) \ +#define TRACE_GENERIC_IC(isolate, type, reason) \ do { \ if (FLAG_trace_ic) { \ PrintF("[%s patching generic stub in ", type); \ - JavaScriptFrame::PrintTop(stdout, false, true); \ + JavaScriptFrame::PrintTop(isolate, stdout, false, true); \ PrintF(" (%s)]\n", reason); \ } \ } while (false) #else -#define TRACE_GENERIC_IC(type, reason) +#define TRACE_GENERIC_IC(isolate, type, reason) #endif // DEBUG #define TRACE_IC(type, name, old_state, new_target) \ ASSERT((TraceIC(type, name, old_state, new_target), true)) IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) { - ASSERT(isolate == Isolate::Current()); - // To improve the performance of the (much used) IC code, we unfold - // a few levels of the stack frame iteration code. This yields a - // ~35% speedup when running DeltaBlue with the '--nouse-ic' flag. + // To improve the performance of the (much used) IC code, we unfold a few + // levels of the stack frame iteration code. This yields a ~35% speedup when + // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag. 
const Address entry = Isolate::c_entry_fp(isolate->thread_local_top()); Address* pc_address = reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset); Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset); - // If there's another JavaScript frame on the stack, we need to look - // one frame further down the stack to find the frame pointer and - // the return address stack slot. + // If there's another JavaScript frame on the stack or a + // StubFailureTrampoline, we need to look one frame further down the stack to + // find the frame pointer and the return address stack slot. if (depth == EXTRA_CALL_FRAME) { const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset; pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset); fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset); } #ifdef DEBUG - StackFrameIterator it; + StackFrameIterator it(isolate); for (int i = 0; i < depth + 1; i++) it.Advance(); StackFrame* frame = it.frame(); ASSERT(fp == frame->fp() && pc_address == frame->pc_address()); @@ -140,11 +150,11 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) { #ifdef ENABLE_DEBUGGER_SUPPORT Address IC::OriginalCodeAddress() const { - HandleScope scope; + HandleScope scope(isolate()); // Compute the JavaScript frame for the frame pointer of this IC // structure. We need this to be able to find the function // corresponding to the frame. - StackFrameIterator it; + StackFrameIterator it(isolate()); while (it.frame()->fp() != this->fp()) it.Advance(); JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame()); // Find the function on the stack and both the active code for the @@ -169,42 +179,23 @@ Address IC::OriginalCodeAddress() const { #endif -static bool HasNormalObjectsInPrototypeChain(Isolate* isolate, - LookupResult* lookup, - Object* receiver) { - Object* end = lookup->IsProperty() - ? lookup->holder() : Object::cast(isolate->heap()->null_value()); - for (Object* current = receiver; - current != end; - current = current->GetPrototype()) { - if (current->IsJSObject() && - !JSObject::cast(current)->HasFastProperties() && - !current->IsJSGlobalProxy() && - !current->IsJSGlobalObject()) { - return true; - } - } - - return false; -} - - static bool TryRemoveInvalidPrototypeDependentStub(Code* target, Object* receiver, Object* name) { InlineCacheHolderFlag cache_holder = Code::ExtractCacheHolderFromFlags(target->flags()); + Isolate* isolate = target->GetIsolate(); if (cache_holder == OWN_MAP && !receiver->IsJSObject()) { // The stub was generated for JSObject but called for non-JSObject. // IC::GetCodeCacheHolder is not applicable. return false; - } else if (cache_holder == PROTOTYPE_MAP && - receiver->GetPrototype()->IsNull()) { + } else if (cache_holder == DELEGATE_MAP && + receiver->GetPrototype(isolate)->IsNull()) { // IC::GetCodeCacheHolder is not applicable. return false; } - Map* map = IC::GetCodeCacheHolder(receiver, cache_holder)->map(); + Map* map = IC::GetCodeCacheHolder(isolate, receiver, cache_holder)->map(); // Decide whether the inline cache failed because of changes to the // receiver itself or changes to one of its prototypes. 
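// ---------------------------------------------------------------------------
// Editor's note (not part of the patch): the ic.cc hunks above rework the
// state markers printed by --trace-ic — PREMONOMORPHIC now prints '.',
// POLYMORPHIC prints 'P', GENERIC prints 'G', and keyed stores gain the
// ".GROW"/".COW"/".IGNORE_OOB" suffixes from GetTransitionMarkModifier.
// The stand-alone sketch below only approximates that mapping for readers of
// the trace output; the enum values and helper names here are simplified
// stand-ins, not V8's real KeyedAccessStoreMode or IC::State declarations.
// ---------------------------------------------------------------------------
#include <cstdio>

namespace trace_ic_sketch {

// Mirrors the post-patch mapping in IC::TransitionMarkFromState (simplified).
enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC,
             POLYMORPHIC, MEGAMORPHIC, GENERIC };

char MarkFromState(State state) {
  switch (state) {
    case UNINITIALIZED:  return '0';
    case PREMONOMORPHIC: return '.';
    case MONOMORPHIC:    return '1';
    case POLYMORPHIC:    return 'P';
    case MEGAMORPHIC:    return 'N';
    case GENERIC:        return 'G';
  }
  return '?';
}

// Mirrors GetTransitionMarkModifier; the mode names are illustrative only.
enum StoreMode { STANDARD_STORE, STORE_AND_GROW,
                 STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS,
                 STORE_NO_TRANSITION_HANDLE_COW };

const char* Modifier(StoreMode mode) {
  if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
  if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) return ".IGNORE_OOB";
  if (mode == STORE_AND_GROW) return ".GROW";
  return "";
}

}  // namespace trace_ic_sketch

int main() {
  using namespace trace_ic_sketch;
  // With this patch, a trace line such as "(.->P.GROW)" reads as
  // premonomorphic -> polymorphic with a growing keyed store; previously
  // 'P' meant premonomorphic and a megamorphic generic stub could print 'G'.
  std::printf("(%c->%c%s)\n",
              MarkFromState(PREMONOMORPHIC),
              MarkFromState(POLYMORPHIC),
              Modifier(STORE_AND_GROW));
  return 0;
}
// --------------------------- end editor's note -----------------------------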
@@ -273,7 +264,7 @@ RelocInfo::Mode IC::ComputeMode() { if (info->pc() == addr) return info->rmode(); } UNREACHABLE(); - return RelocInfo::NONE; + return RelocInfo::NONE32; } @@ -310,7 +301,8 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) { if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) { return; } - Code* host = target->GetHeap()->isolate()-> + Isolate* isolate = target->GetHeap()->isolate(); + Code* host = isolate-> inner_pointer_to_code_cache()->GetCacheEntry(address)->code; if (host->kind() != Code::FUNCTION) return; @@ -333,7 +325,7 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) { } if (FLAG_watch_ic_patching) { host->set_profiler_ticks(0); - Isolate::Current()->runtime_profiler()->NotifyICChanged(); + isolate->runtime_profiler()->NotifyICChanged(); } // TODO(2029): When an optimized function is patched, it would // be nice to propagate the corresponding type information to its @@ -345,15 +337,13 @@ void IC::Clear(Address address) { Code* target = GetTargetAtAddress(address); // Don't clear debug break inline cache as it will remove the break point. - if (target->ic_state() == DEBUG_BREAK) return; + if (target->is_debug_break()) return; switch (target->kind()) { case Code::LOAD_IC: return LoadIC::Clear(address, target); - case Code::KEYED_LOAD_IC: - return KeyedLoadIC::Clear(address, target); + case Code::KEYED_LOAD_IC: return KeyedLoadIC::Clear(address, target); case Code::STORE_IC: return StoreIC::Clear(address, target); - case Code::KEYED_STORE_IC: - return KeyedStoreIC::Clear(address, target); + case Code::KEYED_STORE_IC: return KeyedStoreIC::Clear(address, target); case Code::CALL_IC: return CallIC::Clear(address, target); case Code::KEYED_CALL_IC: return KeyedCallIC::Clear(address, target); case Code::COMPARE_IC: return CompareIC::Clear(address, target); @@ -385,13 +375,13 @@ void KeyedLoadIC::Clear(Address address, Code* target) { // Make sure to also clear the map used in inline fast cases. If we // do not clear these maps, cached code can keep objects alive // through the embedded maps. - SetTargetAtAddress(address, initialize_stub()); + SetTargetAtAddress(address, *initialize_stub()); } void LoadIC::Clear(Address address, Code* target) { if (target->ic_state() == UNINITIALIZED) return; - SetTargetAtAddress(address, initialize_stub()); + SetTargetAtAddress(address, *initialize_stub()); } @@ -399,8 +389,8 @@ void StoreIC::Clear(Address address, Code* target) { if (target->ic_state() == UNINITIALIZED) return; SetTargetAtAddress(address, (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode) - ? initialize_stub_strict() - : initialize_stub()); + ? *initialize_stub_strict() + : *initialize_stub()); } @@ -408,17 +398,19 @@ void KeyedStoreIC::Clear(Address address, Code* target) { if (target->ic_state() == UNINITIALIZED) return; SetTargetAtAddress(address, (Code::GetStrictMode(target->extra_ic_state()) == kStrictMode) - ? initialize_stub_strict() - : initialize_stub()); + ? *initialize_stub_strict() + : *initialize_stub()); } void CompareIC::Clear(Address address, Code* target) { - // Only clear ICCompareStubs, we currently cannot clear generic CompareStubs. - if (target->major_key() != CodeStub::CompareIC) return; + ASSERT(target->major_key() == CodeStub::CompareIC); + CompareIC::State handler_state; + Token::Value op; + ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL, + &handler_state, &op); // Only clear CompareICs that can retain objects. 
- if (target->compare_state() != KNOWN_OBJECTS) return; - Token::Value op = CompareIC::ComputeOperation(target); + if (handler_state != KNOWN_OBJECT) return; SetTargetAtAddress(address, GetRawUninitialized(op)); PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK); } @@ -454,7 +446,7 @@ static void LookupForRead(Handle<Object> object, return; } - Handle<Object> proto(holder->GetPrototype()); + Handle<Object> proto(holder->GetPrototype(), name->GetIsolate()); if (proto->IsNull()) { ASSERT(!lookup->IsFound()); return; @@ -472,7 +464,7 @@ Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) { // Patch the receiver and use the delegate as the function to // invoke. This is used for invoking objects as if they were functions. const int argc = target()->arguments_count(); - StackFrameLocator locator; + StackFrameLocator locator(isolate()); JavaScriptFrame* frame = locator.FindJavaScriptFrame(0); int index = frame->ComputeExpressionsCount() - (argc + 1); frame->SetExpression(index, *object); @@ -485,7 +477,8 @@ Handle<Object> CallICBase::TryCallAsFunction(Handle<Object> object) { void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee, Handle<Object> object) { while (callee->IsJSFunctionProxy()) { - callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap()); + callee = Handle<Object>(JSFunctionProxy::cast(*callee)->call_trap(), + isolate()); } if (callee->IsJSFunction()) { @@ -500,7 +493,7 @@ void CallICBase::ReceiverToObjectIfRequired(Handle<Object> callee, if (object->IsString() || object->IsNumber() || object->IsBoolean()) { // Change the receiver to the result of calling ToObject on it. const int argc = this->target()->arguments_count(); - StackFrameLocator locator; + StackFrameLocator locator(isolate()); JavaScriptFrame* frame = locator.FindJavaScriptFrame(0); int index = frame->ComputeExpressionsCount() - (argc + 1); frame->SetExpression(index, *isolate()->factory()->ToObject(object)); @@ -540,7 +533,7 @@ MaybeObject* CallICBase::LoadFunction(State state, if (!lookup.IsFound()) { // If the object does not have the requested property, check which // exception we need to throw. - return IsContextual(object) + return IsUndeclaredGlobal(object) ? ReferenceError("not_defined", name) : TypeError("undefined_method", object, name); } @@ -559,7 +552,7 @@ MaybeObject* CallICBase::LoadFunction(State state, if (lookup.IsInterceptor() && attr == ABSENT) { // If the object does not have the requested property, check which // exception we need to throw. - return IsContextual(object) + return IsUndeclaredGlobal(object) ? ReferenceError("not_defined", name) : TypeError("undefined_method", object, name); } @@ -646,7 +639,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup, Handle<JSObject> holder(lookup->holder()); switch (lookup->type()) { case FIELD: { - int index = lookup->GetFieldIndex(); + PropertyIndex index = lookup->GetFieldIndex(); return isolate()->stub_cache()->ComputeCallField( argc, kind_, extra_state, name, object, holder, index); } @@ -699,17 +692,8 @@ void CallICBase::UpdateCaches(LookupResult* lookup, // Bail out if we didn't find a result. if (!lookup->IsProperty() || !lookup->IsCacheable()) return; - if (lookup->holder() != *object && - HasNormalObjectsInPrototypeChain( - isolate(), lookup, object->GetPrototype())) { - // Suppress optimization for prototype chains with slow properties objects - // in the middle. - return; - } - // Compute the number of arguments. 
int argc = target()->arguments_count(); - bool had_proto_failure = false; Handle<Code> code; if (state == UNINITIALIZED) { // This is the first time we execute this inline cache. @@ -726,7 +710,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup, TryRemoveInvalidPrototypeDependentStub(target(), *object, *name)) { - had_proto_failure = true; + state = MONOMORPHIC_PROTOTYPE_FAILURE; code = ComputeMonomorphicStub(lookup, state, extra_ic_state, object, name); } else { @@ -742,22 +726,39 @@ void CallICBase::UpdateCaches(LookupResult* lookup, if (code.is_null()) return; // Patch the call site depending on the state of the cache. - if (state == UNINITIALIZED || - state == PREMONOMORPHIC || - state == MONOMORPHIC || - state == MONOMORPHIC_PROTOTYPE_FAILURE) { - set_target(*code); - } else if (state == MEGAMORPHIC) { - // Cache code holding map should be consistent with - // GenerateMonomorphicCacheProbe. It is not the map which holds the stub. - Handle<JSObject> cache_object = object->IsJSObject() - ? Handle<JSObject>::cast(object) - : Handle<JSObject>(JSObject::cast(object->GetPrototype())); - // Update the stub cache. - isolate()->stub_cache()->Set(*name, cache_object->map(), *code); - } - - if (had_proto_failure) state = MONOMORPHIC_PROTOTYPE_FAILURE; + switch (state) { + case UNINITIALIZED: + case MONOMORPHIC_PROTOTYPE_FAILURE: + case PREMONOMORPHIC: + set_target(*code); + break; + case MONOMORPHIC: + if (code->ic_state() != MONOMORPHIC) { + Map* map = target()->FindFirstMap(); + if (map != NULL) { + UpdateMegamorphicCache(map, *name, target()); + } + } + set_target(*code); + break; + case MEGAMORPHIC: { + // Cache code holding map should be consistent with + // GenerateMonomorphicCacheProbe. It is not the map which holds the stub. + Handle<JSObject> cache_object = object->IsJSObject() + ? Handle<JSObject>::cast(object) + : Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))); + // Update the stub cache. + UpdateMegamorphicCache(cache_object->map(), *name, *code); + break; + } + case DEBUG_STUB: + break; + case POLYMORPHIC: + case GENERIC: + UNREACHABLE(); + break; + } + TRACE_IC(kind_ == Code::CALL_IC ? 
"CallIC" : "KeyedCallIC", name, state, target()); } @@ -766,7 +767,7 @@ void CallICBase::UpdateCaches(LookupResult* lookup, MaybeObject* KeyedCallIC::LoadFunction(State state, Handle<Object> object, Handle<Object> key) { - if (key->IsSymbol()) { + if (key->IsInternalizedString()) { return CallICBase::LoadFunction(state, Code::kNoExtraICState, object, @@ -777,25 +778,26 @@ MaybeObject* KeyedCallIC::LoadFunction(State state, return TypeError("non_object_property_call", object, key); } - if (FLAG_use_ic && state != MEGAMORPHIC && object->IsHeapObject()) { + bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded(); + ASSERT(!(use_ic && object->IsJSGlobalProxy())); + + if (use_ic && state != MEGAMORPHIC) { int argc = target()->arguments_count(); - Handle<Map> map = - isolate()->factory()->non_strict_arguments_elements_map(); - if (object->IsJSObject() && - Handle<JSObject>::cast(object)->elements()->map() == *map) { - Handle<Code> code = isolate()->stub_cache()->ComputeCallArguments( - argc, Code::KEYED_CALL_IC); - set_target(*code); - TRACE_IC("KeyedCallIC", key, state, target()); - } else if (!object->IsAccessCheckNeeded()) { - Handle<Code> code = isolate()->stub_cache()->ComputeCallMegamorphic( - argc, Code::KEYED_CALL_IC, Code::kNoExtraICState); - set_target(*code); - TRACE_IC("KeyedCallIC", key, state, target()); + Handle<Code> stub = isolate()->stub_cache()->ComputeCallMegamorphic( + argc, Code::KEYED_CALL_IC, Code::kNoExtraICState); + if (object->IsJSObject()) { + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + if (receiver->elements()->map() == + isolate()->heap()->non_strict_arguments_elements_map()) { + stub = isolate()->stub_cache()->ComputeCallArguments(argc); + } } + ASSERT(!stub.is_null()); + set_target(*stub); + TRACE_IC("KeyedCallIC", key, state, target()); } - Handle<Object> result = GetProperty(object, key); + Handle<Object> result = GetProperty(isolate(), object, key); RETURN_IF_EMPTY_HANDLE(isolate(), result); // Make receiver an object if the callee requires it. Strict mode or builtin @@ -826,17 +828,18 @@ MaybeObject* LoadIC::Load(State state, // objects is read-only and therefore always returns the length of // the underlying string value. See ECMA-262 15.5.5.1. if ((object->IsString() || object->IsStringWrapper()) && - name->Equals(isolate()->heap()->length_symbol())) { + name->Equals(isolate()->heap()->length_string())) { Handle<Code> stub; if (state == UNINITIALIZED) { stub = pre_monomorphic_stub(); } else if (state == PREMONOMORPHIC) { - stub = object->IsString() - ? isolate()->builtins()->LoadIC_StringLength() - : isolate()->builtins()->LoadIC_StringWrapperLength(); + StringLengthStub string_length_stub(kind(), !object->IsString()); + stub = string_length_stub.GetCode(isolate()); } else if (state == MONOMORPHIC && object->IsStringWrapper()) { - stub = isolate()->builtins()->LoadIC_StringWrapperLength(); + StringLengthStub string_length_stub(kind(), true); + stub = string_length_stub.GetCode(isolate()); } else if (state != MEGAMORPHIC) { + ASSERT(state != GENERIC); stub = megamorphic_stub(); } if (!stub.is_null()) { @@ -847,20 +850,22 @@ MaybeObject* LoadIC::Load(State state, } // Get the string if we have a string wrapper object. Handle<Object> string = object->IsJSValue() - ? Handle<Object>(Handle<JSValue>::cast(object)->value()) + ? Handle<Object>(Handle<JSValue>::cast(object)->value(), isolate()) : object; return Smi::FromInt(String::cast(*string)->length()); } // Use specialized code for getting the length of arrays. 
if (object->IsJSArray() && - name->Equals(isolate()->heap()->length_symbol())) { + name->Equals(isolate()->heap()->length_string())) { Handle<Code> stub; if (state == UNINITIALIZED) { stub = pre_monomorphic_stub(); } else if (state == PREMONOMORPHIC) { - stub = isolate()->builtins()->LoadIC_ArrayLength(); + ArrayLengthStub array_length_stub(kind()); + stub = array_length_stub.GetCode(isolate()); } else if (state != MEGAMORPHIC) { + ASSERT(state != GENERIC); stub = megamorphic_stub(); } if (!stub.is_null()) { @@ -874,14 +879,16 @@ MaybeObject* LoadIC::Load(State state, // Use specialized code for getting prototype of functions. if (object->IsJSFunction() && - name->Equals(isolate()->heap()->prototype_symbol()) && + name->Equals(isolate()->heap()->prototype_string()) && Handle<JSFunction>::cast(object)->should_have_prototype()) { Handle<Code> stub; if (state == UNINITIALIZED) { stub = pre_monomorphic_stub(); } else if (state == PREMONOMORPHIC) { - stub = isolate()->builtins()->LoadIC_FunctionPrototype(); + FunctionPrototypeStub function_prototype_stub(kind()); + stub = function_prototype_stub.GetCode(isolate()); } else if (state != MEGAMORPHIC) { + ASSERT(state != GENERIC); stub = megamorphic_stub(); } if (!stub.is_null()) { @@ -895,9 +902,13 @@ MaybeObject* LoadIC::Load(State state, } // Check if the name is trivially convertible to an index and get - // the element if so. + // the element or char if so. uint32_t index; - if (name->AsArrayIndex(&index)) return object->GetElement(index); + if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) { + // Rewrite to the generic keyed load stub. + if (FLAG_use_ic) set_target(*generic_stub()); + return Runtime::GetElementOrCharAt(isolate(), object, index); + } // Named lookup in the object. LookupResult lookup(isolate()); @@ -905,7 +916,7 @@ MaybeObject* LoadIC::Load(State state, // If we did not find a property, check if we need to throw an exception. if (!lookup.IsFound()) { - if (IsContextual(object)) { + if (IsUndeclaredGlobal(object)) { return ReferenceError("not_defined", name); } LOG(isolate(), SuspectReadEvent(*name, *object)); @@ -924,7 +935,7 @@ MaybeObject* LoadIC::Load(State state, RETURN_IF_EMPTY_HANDLE(isolate(), result); // If the property is not present, check if we need to throw an // exception. - if (attr == ABSENT && IsContextual(object)) { + if (attr == ABSENT && IsUndeclaredGlobal(object)) { return ReferenceError("not_defined", name); } return *result; @@ -935,6 +946,199 @@ MaybeObject* LoadIC::Load(State state, } +static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps, + Handle<Map> new_receiver_map) { + ASSERT(!new_receiver_map.is_null()); + for (int current = 0; current < receiver_maps->length(); ++current) { + if (!receiver_maps->at(current).is_null() && + receiver_maps->at(current).is_identical_to(new_receiver_map)) { + return false; + } + } + receiver_maps->Add(new_receiver_map); + return true; +} + + +bool IC::UpdatePolymorphicIC(State state, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Code> code) { + if (code->type() == Code::NORMAL) return false; + if (target()->ic_state() == MONOMORPHIC && + target()->type() == Code::NORMAL) { + return false; + } + + MapHandleList receiver_maps; + CodeHandleList handlers; + + { + AssertNoAllocation no_gc; + target()->FindAllMaps(&receiver_maps); + int number_of_maps = receiver_maps.length(); + if (number_of_maps >= 4) return false; + + // Only allow 0 maps in case target() was reset to UNINITIALIZED by the GC. 
+ // In that case, allow the IC to go back monomorphic. + if (number_of_maps == 0 && target()->ic_state() != UNINITIALIZED) { + return false; + } + target()->FindAllCode(&handlers, receiver_maps.length()); + } + + if (!AddOneReceiverMapIfMissing(&receiver_maps, + Handle<Map>(receiver->map()))) { + return false; + } + + handlers.Add(code); + Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC( + &receiver_maps, &handlers, name); + set_target(*ic); + return true; +} + + +void LoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<String> name) { + if (handler->type() == Code::NORMAL) return set_target(*handler); + Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC( + receiver, handler, name); + set_target(*ic); +} + + +void KeyedLoadIC::UpdateMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<String> name) { + if (handler->type() == Code::NORMAL) return set_target(*handler); + Handle<Code> ic = isolate()->stub_cache()->ComputeKeyedMonomorphicIC( + receiver, handler, name); + set_target(*ic); +} + + +void IC::CopyICToMegamorphicCache(Handle<String> name) { + MapHandleList receiver_maps; + CodeHandleList handlers; + { + AssertNoAllocation no_gc; + target()->FindAllMaps(&receiver_maps); + target()->FindAllCode(&handlers, receiver_maps.length()); + } + for (int i = 0; i < receiver_maps.length(); i++) { + UpdateMegamorphicCache(*receiver_maps.at(i), *name, *handlers.at(i)); + } +} + + +// Since GC may have been invoked, by the time PatchCache is called, |state| is +// not necessarily equal to target()->state(). +void IC::PatchCache(State state, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Code> code) { + switch (state) { + case UNINITIALIZED: + case PREMONOMORPHIC: + case MONOMORPHIC_PROTOTYPE_FAILURE: + UpdateMonomorphicIC(receiver, code, name); + break; + case MONOMORPHIC: + // Only move to megamorphic if the target changes. + if (target() != *code) { + if (target()->is_load_stub()) { + if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) { + break; + } + } + if (target()->type() != Code::NORMAL) { + if (target()->is_load_stub()) { + CopyICToMegamorphicCache(name); + } else { + Code* handler = target(); + Map* map = handler->FindFirstMap(); + if (map != NULL) { + UpdateMegamorphicCache(map, *name, handler); + } + } + } + + UpdateMegamorphicCache(receiver->map(), *name, *code); + set_target((strict_mode == kStrictMode) + ? *megamorphic_stub_strict() + : *megamorphic_stub()); + } + break; + case MEGAMORPHIC: + // Update the stub cache. + UpdateMegamorphicCache(receiver->map(), *name, *code); + break; + case POLYMORPHIC: + if (target()->is_load_stub()) { + if (UpdatePolymorphicIC(state, strict_mode, receiver, name, code)) { + break; + } + CopyICToMegamorphicCache(name); + UpdateMegamorphicCache(receiver->map(), *name, *code); + set_target(*megamorphic_stub()); + } else { + // When trying to patch a polymorphic keyed load/store element stub + // with anything other than another polymorphic stub, go generic. + set_target((strict_mode == kStrictMode) + ? 
*generic_stub_strict() + : *generic_stub()); + } + break; + case DEBUG_STUB: + break; + case GENERIC: + UNREACHABLE(); + break; + } +} + + +static void GetReceiverMapsForStub(Handle<Code> stub, + MapHandleList* result) { + ASSERT(stub->is_inline_cache_stub()); + switch (stub->ic_state()) { + case MONOMORPHIC: { + Map* map = stub->FindFirstMap(); + if (map != NULL) { + result->Add(Handle<Map>(map)); + } + break; + } + case POLYMORPHIC: { + AssertNoAllocation no_allocation; + int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(*stub, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + Handle<Object> object(info->target_object(), stub->GetIsolate()); + if (object->IsString()) break; + ASSERT(object->IsMap()); + AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object)); + } + break; + } + case MEGAMORPHIC: + break; + case UNINITIALIZED: + case PREMONOMORPHIC: + case MONOMORPHIC_PROTOTYPE_FAILURE: + case GENERIC: + case DEBUG_STUB: + UNREACHABLE(); + break; + } +} + + void LoadIC::UpdateCaches(LookupResult* lookup, State state, Handle<Object> object, @@ -945,134 +1149,95 @@ void LoadIC::UpdateCaches(LookupResult* lookup, // Loading properties from values is not common, so don't try to // deal with non-JS objects here. if (!object->IsJSObject()) return; - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - - if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return; - // Compute the code stub for this load. + Handle<JSObject> receiver = Handle<JSObject>::cast(object); Handle<Code> code; if (state == UNINITIALIZED) { // This is the first time we execute this inline cache. // Set the target to the pre monomorphic stub to delay // setting the monomorphic state. code = pre_monomorphic_stub(); - } else if (!lookup->IsProperty()) { - // Nonexistent property. The result is undefined. - code = isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver); } else { - // Compute monomorphic stub. - Handle<JSObject> holder(lookup->holder()); - switch (lookup->type()) { - case FIELD: - code = isolate()->stub_cache()->ComputeLoadField( - name, receiver, holder, lookup->GetFieldIndex()); - break; - case CONSTANT_FUNCTION: { - Handle<JSFunction> constant(lookup->GetConstantFunction()); - code = isolate()->stub_cache()->ComputeLoadConstant( - name, receiver, holder, constant); - break; - } - case NORMAL: - if (holder->IsGlobalObject()) { - Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder); - Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup)); - code = isolate()->stub_cache()->ComputeLoadGlobal( - name, receiver, global, cell, lookup->IsDontDelete()); - } else { - // There is only one shared stub for loading normalized - // properties. It does not traverse the prototype chain, so the - // property must be found in the receiver for the stub to be - // applicable. 
- if (!holder.is_identical_to(receiver)) return; - code = isolate()->stub_cache()->ComputeLoadNormal(); - } - break; - case CALLBACKS: { - Handle<Object> callback(lookup->GetCallbackObject()); - if (callback->IsAccessorInfo()) { - Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback); - if (v8::ToCData<Address>(info->getter()) == 0) return; - if (!info->IsCompatibleReceiver(*receiver)) return; - code = isolate()->stub_cache()->ComputeLoadCallback( - name, receiver, holder, info); - } else if (callback->IsAccessorPair()) { - Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter()); - if (!getter->IsJSFunction()) return; - if (holder->IsGlobalObject()) return; - if (!holder->HasFastProperties()) return; - code = isolate()->stub_cache()->ComputeLoadViaGetter( - name, receiver, holder, Handle<JSFunction>::cast(getter)); - } else { - ASSERT(callback->IsForeign()); - // No IC support for old-style native accessors. - return; - } - break; - } - case INTERCEPTOR: - ASSERT(HasInterceptorGetter(*holder)); - code = isolate()->stub_cache()->ComputeLoadInterceptor( - name, receiver, holder); - break; - default: - return; - } - } - - // Patch the call site depending on the state of the cache. - if (state == UNINITIALIZED || - state == PREMONOMORPHIC || - state == MONOMORPHIC_PROTOTYPE_FAILURE) { - set_target(*code); - } else if (state == MONOMORPHIC) { - // We are transitioning from monomorphic to megamorphic case. - // Place the current monomorphic stub and stub compiled for - // the receiver into stub cache. - Map* map = target()->FindFirstMap(); - if (map != NULL) { - isolate()->stub_cache()->Set(*name, map, target()); - } - isolate()->stub_cache()->Set(*name, receiver->map(), *code); - - set_target(*megamorphic_stub()); - } else if (state == MEGAMORPHIC) { - // Cache code holding map should be consistent with - // GenerateMonomorphicCacheProbe. - isolate()->stub_cache()->Set(*name, receiver->map(), *code); + code = ComputeLoadHandler(lookup, receiver, name); + if (code.is_null()) return; } + PatchCache(state, kNonStrictMode, receiver, name, code); TRACE_IC("LoadIC", name, state, target()); } -Handle<Code> KeyedLoadIC::GetElementStubWithoutMapCheck( - bool is_js_array, - ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) { - ASSERT(grow_mode == DO_NOT_ALLOW_JSARRAY_GROWTH); - return KeyedLoadElementStub(elements_kind).GetCode(); +void IC::UpdateMegamorphicCache(Map* map, String* name, Code* code) { + // Cache code holding map should be consistent with + // GenerateMonomorphicCacheProbe. + isolate()->stub_cache()->Set(name, map, code); } -Handle<Code> KeyedLoadIC::ComputePolymorphicStub( - MapHandleList* receiver_maps, - StrictModeFlag strict_mode, - KeyedAccessGrowMode growth_mode) { - CodeHandleList handler_ics(receiver_maps->length()); - for (int i = 0; i < receiver_maps->length(); ++i) { - Handle<Map> receiver_map = receiver_maps->at(i); - Handle<Code> cached_stub = ComputeMonomorphicStubWithoutMapCheck( - receiver_map, strict_mode, growth_mode); - handler_ics.Add(cached_stub); +Handle<Code> LoadIC::ComputeLoadHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name) { + if (!lookup->IsProperty()) { + // Nonexistent property. The result is undefined. 
+ return isolate()->stub_cache()->ComputeLoadNonexistent(name, receiver); } - KeyedLoadStubCompiler compiler(isolate()); - Handle<Code> code = compiler.CompileLoadPolymorphic( - receiver_maps, &handler_ics); - isolate()->counters()->keyed_load_polymorphic_stubs()->Increment(); - PROFILE(isolate(), - CodeCreateEvent(Logger::KEYED_LOAD_MEGAMORPHIC_IC_TAG, *code, 0)); - return code; + + // Compute monomorphic stub. + Handle<JSObject> holder(lookup->holder()); + switch (lookup->type()) { + case FIELD: + return isolate()->stub_cache()->ComputeLoadField( + name, receiver, holder, lookup->GetFieldIndex()); + case CONSTANT_FUNCTION: { + Handle<JSFunction> constant(lookup->GetConstantFunction()); + return isolate()->stub_cache()->ComputeLoadConstant( + name, receiver, holder, constant); + } + case NORMAL: + if (holder->IsGlobalObject()) { + Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder); + Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup)); + return isolate()->stub_cache()->ComputeLoadGlobal( + name, receiver, global, cell, lookup->IsDontDelete()); + } + // There is only one shared stub for loading normalized + // properties. It does not traverse the prototype chain, so the + // property must be found in the receiver for the stub to be + // applicable. + if (!holder.is_identical_to(receiver)) break; + return isolate()->stub_cache()->ComputeLoadNormal(name, receiver); + case CALLBACKS: { + Handle<Object> callback(lookup->GetCallbackObject(), isolate()); + if (callback->IsExecutableAccessorInfo()) { + Handle<ExecutableAccessorInfo> info = + Handle<ExecutableAccessorInfo>::cast(callback); + if (v8::ToCData<Address>(info->getter()) == 0) break; + if (!info->IsCompatibleReceiver(*receiver)) break; + return isolate()->stub_cache()->ComputeLoadCallback( + name, receiver, holder, info); + } else if (callback->IsAccessorPair()) { + Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(), + isolate()); + if (!getter->IsJSFunction()) break; + if (holder->IsGlobalObject()) break; + if (!holder->HasFastProperties()) break; + return isolate()->stub_cache()->ComputeLoadViaGetter( + name, receiver, holder, Handle<JSFunction>::cast(getter)); + } + // TODO(dcarney): Handle correctly. + if (callback->IsDeclaredAccessorInfo()) break; + ASSERT(callback->IsForeign()); + // No IC support for old-style native accessors. + break; + } + case INTERCEPTOR: + ASSERT(HasInterceptorGetter(*holder)); + return isolate()->stub_cache()->ComputeLoadInterceptor( + name, receiver, holder); + default: + break; + } + return Handle<Code>::null(); } @@ -1082,125 +1247,103 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) { if (key->IsHeapNumber()) { double value = Handle<HeapNumber>::cast(key)->value(); if (isnan(value)) { - key = isolate->factory()->nan_symbol(); + key = isolate->factory()->nan_string(); } else { int int_value = FastD2I(value); if (value == int_value && Smi::IsValid(int_value)) { - key = Handle<Smi>(Smi::FromInt(int_value)); + key = Handle<Smi>(Smi::FromInt(int_value), isolate); } } } else if (key->IsUndefined()) { - key = isolate->factory()->undefined_symbol(); + key = isolate->factory()->undefined_string(); } return key; } -MaybeObject* KeyedLoadIC::Load(State state, - Handle<Object> object, - Handle<Object> key, - bool force_generic_stub) { - // Check for values that can be converted into a symbol directly or - // is representable as a smi. 
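
For context on the key-normalization helper shown above: TryConvertKey turns a heap-number key that is NaN into the "nan" string, a heap number holding an integral, Smi-sized value into a Smi, and undefined into the "undefined" string, so that the string and Smi fast paths in the keyed ICs can be taken. A minimal standalone model of that decision logic, using plain C++ types rather than V8 handles (illustrative only):

    // Simplified model of TryConvertKey (not the V8 API).
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <string>

    struct Key {
      enum Kind { kNumber, kUndefined, kSmi, kString, kOther } kind;
      double number;
      std::string str;
    };

    // 31-bit Smi range assumed here; 32-bit-Smi platforms widen this check.
    static bool SmiIsValid(int64_t v) { return v >= -(1 << 30) && v < (1 << 30); }

    static Key TryConvertKey(Key key) {
      if (key.kind == Key::kNumber) {
        if (std::isnan(key.number)) return {Key::kString, 0, "nan"};
        int64_t int_value = static_cast<int64_t>(key.number);
        if (key.number == static_cast<double>(int_value) && SmiIsValid(int_value)) {
          return {Key::kSmi, static_cast<double>(int_value), ""};
        }
      } else if (key.kind == Key::kUndefined) {
        return {Key::kString, 0, "undefined"};
      }
      return key;  // Everything else is passed through unchanged.
    }

    int main() {
      Key k = TryConvertKey({Key::kNumber, 3.0, ""});
      std::printf("kind=%d value=%g\n", static_cast<int>(k.kind), k.number);  // Smi 3
      return 0;
    }
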
- key = TryConvertKey(key, isolate()); +Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) { + State ic_state = target()->ic_state(); - if (key->IsSymbol()) { - Handle<String> name = Handle<String>::cast(key); + // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS + // via megamorphic stubs, since they don't have a map in their relocation info + // and so the stubs can't be harvested for the object needed for a map check. + if (target()->type() != Code::NORMAL) { + TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type"); + return generic_stub(); + } - // If the object is undefined or null it's illegal to try to get any - // of its properties; throw a TypeError in that case. - if (object->IsUndefined() || object->IsNull()) { - return TypeError("non_object_property_load", object, name); - } + Handle<Map> receiver_map(receiver->map()); + MapHandleList target_receiver_maps; + if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { + // Optimistically assume that ICs that haven't reached the MONOMORPHIC state + // yet will do so and stay there. + return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); + } - if (FLAG_use_ic) { - // TODO(1073): don't ignore the current stub state. - - // Use specialized code for getting the length of strings. - if (object->IsString() && - name->Equals(isolate()->heap()->length_symbol())) { - Handle<String> string = Handle<String>::cast(object); - Handle<Code> code = - isolate()->stub_cache()->ComputeKeyedLoadStringLength(name, string); - ASSERT(!code.is_null()); - set_target(*code); - TRACE_IC("KeyedLoadIC", name, state, target()); - return Smi::FromInt(string->length()); - } + if (target() == *string_stub()) { + target_receiver_maps.Add(isolate()->factory()->string_map()); + } else { + GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps); + if (target_receiver_maps.length() == 0) { + return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); + } + } - // Use specialized code for getting the length of arrays. - if (object->IsJSArray() && - name->Equals(isolate()->heap()->length_symbol())) { - Handle<JSArray> array = Handle<JSArray>::cast(object); - Handle<Code> code = - isolate()->stub_cache()->ComputeKeyedLoadArrayLength(name, array); - ASSERT(!code.is_null()); - set_target(*code); - TRACE_IC("KeyedLoadIC", name, state, target()); - return array->length(); - } + // The first time a receiver is seen that is a transitioned version of the + // previous monomorphic receiver type, assume the new ElementsKind is the + // monomorphic type. This benefits global arrays that only transition + // once, and all call sites accessing them are faster if they remain + // monomorphic. If this optimistic assumption is not true, the IC will + // miss again and it will become polymorphic and support both the + // untransitioned and transitioned maps. + if (ic_state == MONOMORPHIC && + IsMoreGeneralElementsKindTransition( + target_receiver_maps.at(0)->elements_kind(), + receiver->GetElementsKind())) { + return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map); + } - // Use specialized code for getting prototype of functions. 
- if (object->IsJSFunction() && - name->Equals(isolate()->heap()->prototype_symbol()) && - Handle<JSFunction>::cast(object)->should_have_prototype()) { - Handle<JSFunction> function = Handle<JSFunction>::cast(object); - Handle<Code> code = - isolate()->stub_cache()->ComputeKeyedLoadFunctionPrototype( - name, function); - ASSERT(!code.is_null()); - set_target(*code); - TRACE_IC("KeyedLoadIC", name, state, target()); - return Accessors::FunctionGetPrototype(*object, 0); - } - } + ASSERT(ic_state != GENERIC); - // Check if the name is trivially convertible to an index and get - // the element or char if so. - uint32_t index = 0; - if (name->AsArrayIndex(&index)) { - // Rewrite to the generic keyed load stub. - if (FLAG_use_ic) set_target(*generic_stub()); - return Runtime::GetElementOrCharAt(isolate(), object, index); - } + // Determine the list of receiver maps that this call site has seen, + // adding the map that was just encountered. + if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) { + // If the miss wasn't due to an unseen map, a polymorphic stub + // won't help, use the generic stub. + TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice"); + return generic_stub(); + } - // Named lookup. - LookupResult lookup(isolate()); - LookupForRead(object, name, &lookup); + // If the maximum number of receiver maps has been exceeded, use the generic + // version of the IC. + if (target_receiver_maps.length() > kMaxKeyedPolymorphism) { + TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded"); + return generic_stub(); + } - // If we did not find a property, check if we need to throw an exception. - if (!lookup.IsFound() && IsContextual(object)) { - return ReferenceError("not_defined", name); - } + return isolate()->stub_cache()->ComputeLoadElementPolymorphic( + &target_receiver_maps); +} - if (FLAG_use_ic) { - UpdateCaches(&lookup, state, object, name); - } - PropertyAttributes attr; - if (lookup.IsInterceptor()) { - // Get the property. - Handle<Object> result = - Object::GetProperty(object, object, &lookup, name, &attr); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - // If the property is not present, check if we need to throw an - // exception. - if (attr == ABSENT && IsContextual(object)) { - return ReferenceError("not_defined", name); - } - return *result; - } +MaybeObject* KeyedLoadIC::Load(State state, + Handle<Object> object, + Handle<Object> key, + ICMissMode miss_mode) { + // Check for values that can be converted into an internalized string directly + // or is representable as a smi. + key = TryConvertKey(key, isolate()); - return object->GetProperty(*object, &lookup, *name, &attr); + if (key->IsInternalizedString()) { + return LoadIC::Load(state, object, Handle<String>::cast(key)); } - // Do not use ICs for objects that require access checks (including - // the global object). 
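
As a reading aid for the keyed-load entry point that follows: once internalized-string keys have been routed to the named LoadIC above, the IC is only patched when --use-ic is on and the receiver needs no access checks, and the stub is then picked from what is visible at the call site: a string receiver with a numeric key gets the string stub on the first miss, a receiver with an indexed interceptor gets the interceptor stub, a Smi key goes through LoadElementStub, and a forced miss falls back to the generic stub. A rough standalone model of that selection order (names and types here are illustrative, not V8's):

    // Illustrative model of the keyed-load stub selection (not V8 code).
    #include <cstdio>

    enum Stub { GENERIC, STRING_STUB, INDEXED_INTERCEPTOR, ELEMENT_STUB };

    struct Receiver {
      bool is_string;
      bool has_indexed_interceptor;
    };

    Stub SelectKeyedLoadStub(bool use_ic, bool force_generic, bool key_is_smi,
                             bool state_uninitialized, const Receiver& r) {
      if (!use_ic || force_generic) return GENERIC;
      if (r.is_string) return state_uninitialized ? STRING_STUB : GENERIC;
      if (r.has_indexed_interceptor) return INDEXED_INTERCEPTOR;
      if (key_is_smi) return ELEMENT_STUB;  // LoadElementStub(receiver)
      return GENERIC;
    }

    int main() {
      Receiver array{false, false};
      std::printf("%d\n", SelectKeyedLoadStub(true, false, true, false, array));  // ELEMENT_STUB
      return 0;
    }
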
bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded(); + ASSERT(!(use_ic && object->IsJSGlobalProxy())); if (use_ic) { Handle<Code> stub = generic_stub(); - if (!force_generic_stub) { + if (miss_mode != MISS_FORCE_GENERIC) { if (object->IsString() && key->IsNumber()) { if (state == UNINITIALIZED) { stub = string_stub(); @@ -1213,90 +1356,60 @@ MaybeObject* KeyedLoadIC::Load(State state, } else if (receiver->HasIndexedInterceptor()) { stub = indexed_interceptor_stub(); } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) { - stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub); + stub = LoadElementStub(receiver); } } } else { - TRACE_GENERIC_IC("KeyedLoadIC", "force generic"); + TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "force generic"); } - if (!stub.is_null()) set_target(*stub); + ASSERT(!stub.is_null()); + set_target(*stub); + TRACE_IC("KeyedLoadIC", key, state, target()); } - TRACE_IC("KeyedLoadIC", key, state, target()); - // Get the property. return Runtime::GetObjectProperty(isolate(), object, key); } -void KeyedLoadIC::UpdateCaches(LookupResult* lookup, - State state, - Handle<Object> object, - Handle<String> name) { +Handle<Code> KeyedLoadIC::ComputeLoadHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name) { // Bail out if we didn't find a result. - if (!lookup->IsProperty() || !lookup->IsCacheable()) return; - - if (!object->IsJSObject()) return; - Handle<JSObject> receiver = Handle<JSObject>::cast(object); + if (!lookup->IsProperty()) return Handle<Code>::null(); - if (HasNormalObjectsInPrototypeChain(isolate(), lookup, *object)) return; - - // Compute the code stub for this load. - Handle<Code> code; - - if (state == UNINITIALIZED) { - // This is the first time we execute this inline cache. - // Set the target to the pre monomorphic stub to delay - // setting the monomorphic state. - code = pre_monomorphic_stub(); - } else { - // Compute a monomorphic stub. - Handle<JSObject> holder(lookup->holder()); - switch (lookup->type()) { - case FIELD: - code = isolate()->stub_cache()->ComputeKeyedLoadField( - name, receiver, holder, lookup->GetFieldIndex()); - break; - case CONSTANT_FUNCTION: { - Handle<JSFunction> constant(lookup->GetConstantFunction()); - code = isolate()->stub_cache()->ComputeKeyedLoadConstant( - name, receiver, holder, constant); - break; - } - case CALLBACKS: { - Handle<Object> callback_object(lookup->GetCallbackObject()); - if (!callback_object->IsAccessorInfo()) return; - Handle<AccessorInfo> callback = - Handle<AccessorInfo>::cast(callback_object); - if (v8::ToCData<Address>(callback->getter()) == 0) return; - if (!callback->IsCompatibleReceiver(*receiver)) return; - code = isolate()->stub_cache()->ComputeKeyedLoadCallback( - name, receiver, holder, callback); - break; - } - case INTERCEPTOR: - ASSERT(HasInterceptorGetter(lookup->holder())); - code = isolate()->stub_cache()->ComputeKeyedLoadInterceptor( - name, receiver, holder); - break; - default: - // Always rewrite to the generic case so that we do not - // repeatedly try to rewrite. - code = generic_stub(); - break; + // Compute a monomorphic stub. 
+ Handle<JSObject> holder(lookup->holder()); + switch (lookup->type()) { + case FIELD: + return isolate()->stub_cache()->ComputeKeyedLoadField( + name, receiver, holder, lookup->GetFieldIndex()); + case CONSTANT_FUNCTION: { + Handle<JSFunction> constant(lookup->GetConstantFunction()); + return isolate()->stub_cache()->ComputeKeyedLoadConstant( + name, receiver, holder, constant); } + case CALLBACKS: { + Handle<Object> callback_object(lookup->GetCallbackObject(), isolate()); + // TODO(dcarney): Handle DeclaredAccessorInfo correctly. + if (!callback_object->IsExecutableAccessorInfo()) break; + Handle<ExecutableAccessorInfo> callback = + Handle<ExecutableAccessorInfo>::cast(callback_object); + if (v8::ToCData<Address>(callback->getter()) == 0) break; + if (!callback->IsCompatibleReceiver(*receiver)) break; + return isolate()->stub_cache()->ComputeKeyedLoadCallback( + name, receiver, holder, callback); + } + case INTERCEPTOR: + ASSERT(HasInterceptorGetter(lookup->holder())); + return isolate()->stub_cache()->ComputeKeyedLoadInterceptor( + name, receiver, holder); + default: + // Always rewrite to the generic case so that we do not + // repeatedly try to rewrite. + return generic_stub(); } - - // Patch the call site depending on the state of the cache. Make - // sure to always rewrite from monomorphic to megamorphic. - ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE); - if (state == UNINITIALIZED || state == PREMONOMORPHIC) { - set_target(*code); - } else if (state == MONOMORPHIC) { - set_target(*megamorphic_stub()); - } - - TRACE_IC("KeyedLoadIC", name, state, target()); + return Handle<Code>::null(); } @@ -1342,30 +1455,30 @@ MaybeObject* StoreIC::Store(State state, StrictModeFlag strict_mode, Handle<Object> object, Handle<String> name, - Handle<Object> value) { - if (!object->IsJSObject()) { - // Handle proxies. - if (object->IsJSProxy()) { - return JSProxy::cast(*object)-> - SetProperty(*name, *value, NONE, strict_mode); - } + Handle<Object> value, + JSReceiver::StoreFromKeyed store_mode) { + // Handle proxies. + if (object->IsJSProxy()) { + return JSProxy::cast(*object)-> + SetProperty(*name, *value, NONE, strict_mode); + } - // If the object is undefined or null it's illegal to try to set any - // properties on it; throw a TypeError in that case. - if (object->IsUndefined() || object->IsNull()) { - return TypeError("non_object_property_store", object, name); - } + // If the object is undefined or null it's illegal to try to set any + // properties on it; throw a TypeError in that case. + if (object->IsUndefined() || object->IsNull()) { + return TypeError("non_object_property_store", object, name); + } - // The length property of string values is read-only. Throw in strict mode. - if (strict_mode == kStrictMode && object->IsString() && - name->Equals(isolate()->heap()->length_symbol())) { - return TypeError("strict_read_only_property", object, name); - } - // Ignore other stores where the receiver is not a JSObject. - // TODO(1475): Must check prototype chains of object wrappers. - return *value; + // The length property of string values is read-only. Throw in strict mode. + if (strict_mode == kStrictMode && object->IsString() && + name->Equals(isolate()->heap()->length_string())) { + return TypeError("strict_read_only_property", object, name); } + // Ignore other stores where the receiver is not a JSObject. + // TODO(1475): Must check prototype chains of object wrappers. 
+ if (!object->IsJSObject()) return *value; + Handle<JSObject> receiver = Handle<JSObject>::cast(object); // Check if the given name is an array index. @@ -1377,63 +1490,53 @@ MaybeObject* StoreIC::Store(State state, return *value; } + // Observed objects are always modified through the runtime. + if (FLAG_harmony_observation && receiver->map()->is_observed()) { + return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); + } + // Use specialized code for setting the length of arrays with fast - // properties. Slow properties might indicate redefinition of the - // length property. - if (receiver->IsJSArray() && - name->Equals(isolate()->heap()->length_symbol()) && + // properties. Slow properties might indicate redefinition of the length + // property. + if (FLAG_use_ic && + receiver->IsJSArray() && + name->Equals(isolate()->heap()->length_string()) && Handle<JSArray>::cast(receiver)->AllowsSetElementsLength() && receiver->HasFastProperties()) { -#ifdef DEBUG - if (FLAG_trace_ic) PrintF("[StoreIC : +#length /array]\n"); -#endif - Handle<Code> stub = (strict_mode == kStrictMode) - ? isolate()->builtins()->StoreIC_ArrayLength_Strict() - : isolate()->builtins()->StoreIC_ArrayLength(); + Handle<Code> stub = + StoreArrayLengthStub(kind(), strict_mode).GetCode(isolate()); set_target(*stub); - return receiver->SetProperty(*name, *value, NONE, strict_mode); + TRACE_IC("StoreIC", name, state, *stub); + return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); } - // Lookup the property locally in the receiver. - if (!receiver->IsJSGlobalProxy()) { - LookupResult lookup(isolate()); - - if (LookupForWrite(receiver, name, &lookup)) { - if (FLAG_use_ic) { // Generate a stub for this store. - UpdateCaches(&lookup, state, strict_mode, receiver, name, value); - } - } else { - // Strict mode doesn't allow setting non-existent global property - // or an assignment to a read only property. - if (strict_mode == kStrictMode) { - if (lookup.IsProperty() && lookup.IsReadOnly()) { - return TypeError("strict_read_only_property", object, name); - } else if (IsContextual(object)) { - return ReferenceError("not_defined", name); - } - } + if (receiver->IsJSGlobalProxy()) { + if (FLAG_use_ic && kind() != Code::KEYED_STORE_IC) { + // Generate a generic stub that goes to the runtime when we see a global + // proxy as receiver. + Handle<Code> stub = (strict_mode == kStrictMode) + ? global_proxy_stub_strict() + : global_proxy_stub(); + set_target(*stub); + TRACE_IC("StoreIC", name, state, *stub); } + return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); } - if (receiver->IsJSGlobalProxy()) { - // TODO(ulan): find out why we patch this site even with --no-use-ic - // Generate a generic stub that goes to the runtime when we see a global - // proxy as receiver. - Handle<Code> stub = (strict_mode == kStrictMode) - ? global_proxy_stub_strict() - : global_proxy_stub(); - if (target() != *stub) { - set_target(*stub); - TRACE_IC("StoreIC", name, state, target()); + LookupResult lookup(isolate()); + if (LookupForWrite(receiver, name, &lookup)) { + if (FLAG_use_ic) { + UpdateCaches(&lookup, state, strict_mode, receiver, name, value); } + } else if (strict_mode == kStrictMode && + !(lookup.IsProperty() && lookup.IsReadOnly()) && + IsUndeclaredGlobal(object)) { + // Strict mode doesn't allow setting non-existent global property. + return ReferenceError("not_defined", name); } // Set the property. 
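
To make the error handling in StoreIC::Store above easier to follow: stores to undefined or null always throw a TypeError, a strict-mode write to the read-only length of a string value throws a TypeError, and a strict-mode write that finds no property to write and targets an undeclared global raises a ReferenceError; everything else falls through to SetProperty, which reports read-only failures on existing properties itself. A condensed model of that ordering (simplified; not the V8 API):

    // Condensed model of the StoreIC::Store error cases (illustrative only).
    #include <cstdio>

    enum StoreOutcome { THROW_TYPE_ERROR, THROW_REFERENCE_ERROR, DO_STORE };

    StoreOutcome ClassifyStore(bool receiver_is_undefined_or_null,
                               bool strict_mode,
                               bool is_string_length_write,
                               bool lookup_found_writable_property,
                               bool is_undeclared_global) {
      if (receiver_is_undefined_or_null) return THROW_TYPE_ERROR;          // non_object_property_store
      if (strict_mode && is_string_length_write) return THROW_TYPE_ERROR;  // strict_read_only_property
      if (strict_mode && !lookup_found_writable_property && is_undeclared_global) {
        return THROW_REFERENCE_ERROR;                                      // not_defined
      }
      return DO_STORE;  // receiver->SetProperty(...) handles the rest
    }

    int main() {
      std::printf("%d\n", ClassifyStore(false, true, false, false, true));  // THROW_REFERENCE_ERROR
      return 0;
    }
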
- return receiver->SetProperty(*name, - *value, - NONE, - strict_mode, - JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED); + return receiver->SetProperty(*name, *value, NONE, strict_mode, store_mode); } @@ -1450,24 +1553,25 @@ void StoreIC::UpdateCaches(LookupResult* lookup, // These are not cacheable, so we never see such LookupResults here. ASSERT(!lookup->IsHandler()); - // If the property has a non-field type allowing map transitions - // where there is extra room in the object, we leave the IC in its - // current state. - PropertyType type = lookup->type(); + Handle<Code> code = + ComputeStoreMonomorphic(lookup, strict_mode, receiver, name); + if (code.is_null()) return; + + PatchCache(state, strict_mode, receiver, name, code); + TRACE_IC("StoreIC", name, state, target()); +} - // Compute the code stub for this store; used for rewriting to - // monomorphic state and making sure that the code stub is in the - // stub cache. + +Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name) { Handle<JSObject> holder(lookup->holder()); - Handle<Code> code; - switch (type) { + switch (lookup->type()) { case FIELD: - code = isolate()->stub_cache()->ComputeStoreField(name, - receiver, - lookup->GetFieldIndex(), - Handle<Map>::null(), - strict_mode); - break; + return isolate()->stub_cache()->ComputeStoreField( + name, receiver, lookup->GetFieldIndex().field_index(), + Handle<Map>::null(), strict_mode); case NORMAL: if (receiver->IsGlobalObject()) { // The stub generated for the global object picks the value directly @@ -1475,44 +1579,43 @@ void StoreIC::UpdateCaches(LookupResult* lookup, // global object. Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver); Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(lookup)); - code = isolate()->stub_cache()->ComputeStoreGlobal( + return isolate()->stub_cache()->ComputeStoreGlobal( name, global, cell, strict_mode); - } else { - if (!holder.is_identical_to(receiver)) return; - code = isolate()->stub_cache()->ComputeStoreNormal(strict_mode); } - break; + if (!holder.is_identical_to(receiver)) break; + return isolate()->stub_cache()->ComputeStoreNormal(strict_mode); case CALLBACKS: { - Handle<Object> callback(lookup->GetCallbackObject()); - if (callback->IsAccessorInfo()) { - Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(callback); - if (v8::ToCData<Address>(info->setter()) == 0) return; - if (!holder->HasFastProperties()) return; - if (!info->IsCompatibleReceiver(*receiver)) return; - code = isolate()->stub_cache()->ComputeStoreCallback( + Handle<Object> callback(lookup->GetCallbackObject(), isolate()); + if (callback->IsExecutableAccessorInfo()) { + Handle<ExecutableAccessorInfo> info = + Handle<ExecutableAccessorInfo>::cast(callback); + if (v8::ToCData<Address>(info->setter()) == 0) break; + if (!holder->HasFastProperties()) break; + if (!info->IsCompatibleReceiver(*receiver)) break; + return isolate()->stub_cache()->ComputeStoreCallback( name, receiver, holder, info, strict_mode); } else if (callback->IsAccessorPair()) { - Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter()); - if (!setter->IsJSFunction()) return; - if (holder->IsGlobalObject()) return; - if (!holder->HasFastProperties()) return; - code = isolate()->stub_cache()->ComputeStoreViaSetter( + Handle<Object> setter(Handle<AccessorPair>::cast(callback)->setter(), + isolate()); + if (!setter->IsJSFunction()) break; + if (holder->IsGlobalObject()) break; + if 
(!holder->HasFastProperties()) break; + return isolate()->stub_cache()->ComputeStoreViaSetter( name, receiver, holder, Handle<JSFunction>::cast(setter), strict_mode); - } else { - ASSERT(callback->IsForeign()); - // No IC support for old-style native accessors. - return; } + // TODO(dcarney): Handle correctly. + if (callback->IsDeclaredAccessorInfo()) break; + ASSERT(callback->IsForeign()); + // No IC support for old-style native accessors. break; } case INTERCEPTOR: ASSERT(!receiver->GetNamedInterceptor()->setter()->IsUndefined()); - code = isolate()->stub_cache()->ComputeStoreInterceptor( + return isolate()->stub_cache()->ComputeStoreInterceptor( name, receiver, strict_mode); - break; case CONSTANT_FUNCTION: - return; + break; case TRANSITION: { Handle<Map> transition(lookup->GetTransitionTarget()); int descriptor = transition->LastAdded(); @@ -1520,292 +1623,200 @@ void StoreIC::UpdateCaches(LookupResult* lookup, DescriptorArray* target_descriptors = transition->instance_descriptors(); PropertyDetails details = target_descriptors->GetDetails(descriptor); - if (details.type() != FIELD || details.attributes() != NONE) return; + if (details.type() != FIELD || details.attributes() != NONE) break; int field_index = target_descriptors->GetFieldIndex(descriptor); - code = isolate()->stub_cache()->ComputeStoreField( + return isolate()->stub_cache()->ComputeStoreField( name, receiver, field_index, transition, strict_mode); - - break; } case NONEXISTENT: case HANDLER: UNREACHABLE(); - return; - } - - // Patch the call site depending on the state of the cache. - if (state == UNINITIALIZED || state == MONOMORPHIC_PROTOTYPE_FAILURE) { - set_target(*code); - } else if (state == MONOMORPHIC) { - // Only move to megamorphic if the target changes. - if (target() != *code) { - set_target((strict_mode == kStrictMode) - ? megamorphic_stub_strict() - : megamorphic_stub()); - } - } else if (state == MEGAMORPHIC) { - // Update the stub cache. 
- isolate()->stub_cache()->Set(*name, receiver->map(), *code); - } - - TRACE_IC("StoreIC", name, state, target()); -} - - -static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps, - Handle<Map> new_receiver_map) { - ASSERT(!new_receiver_map.is_null()); - for (int current = 0; current < receiver_maps->length(); ++current) { - if (!receiver_maps->at(current).is_null() && - receiver_maps->at(current).is_identical_to(new_receiver_map)) { - return false; - } - } - receiver_maps->Add(new_receiver_map); - return true; -} - - -void KeyedIC::GetReceiverMapsForStub(Handle<Code> stub, - MapHandleList* result) { - ASSERT(stub->is_inline_cache_stub()); - if (!string_stub().is_null() && stub.is_identical_to(string_stub())) { - return result->Add(isolate()->factory()->string_map()); - } else if (stub->is_keyed_load_stub() || stub->is_keyed_store_stub()) { - if (stub->ic_state() == MONOMORPHIC) { - result->Add(Handle<Map>(stub->FindFirstMap())); - } else { - ASSERT(stub->ic_state() == MEGAMORPHIC); - AssertNoAllocation no_allocation; - int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); - for (RelocIterator it(*stub, mask); !it.done(); it.next()) { - RelocInfo* info = it.rinfo(); - Handle<Object> object(info->target_object()); - ASSERT(object->IsMap()); - AddOneReceiverMapIfMissing(result, Handle<Map>::cast(object)); - } - } + break; } + return Handle<Code>::null(); } -Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver, - StubKind stub_kind, - StrictModeFlag strict_mode, - Handle<Code> generic_stub) { - State ic_state = target()->ic_state(); - KeyedAccessGrowMode grow_mode = IsGrowStubKind(stub_kind) - ? ALLOW_JSARRAY_GROWTH - : DO_NOT_ALLOW_JSARRAY_GROWTH; - +Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver, + KeyedAccessStoreMode store_mode, + StrictModeFlag strict_mode) { // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS // via megamorphic stubs, since they don't have a map in their relocation info // and so the stubs can't be harvested for the object needed for a map check. if (target()->type() != Code::NORMAL) { - TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type"); - return generic_stub; + TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type"); + return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub(); + } + + if ((store_mode == STORE_NO_TRANSITION_HANDLE_COW || + store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS)) { + // TODO(danno): We'll soon handle MONOMORPHIC ICs that also support + // copying COW arrays and silently ignoring some OOB stores into external + // arrays, but for now use the generic. + TRACE_GENERIC_IC(isolate(), "KeyedIC", "COW/OOB external array"); + return strict_mode == kStrictMode + ? generic_stub_strict() + : generic_stub(); } - bool monomorphic = false; - bool is_transition_stub = IsTransitionStubKind(stub_kind); + State ic_state = target()->ic_state(); Handle<Map> receiver_map(receiver->map()); - Handle<Map> monomorphic_map = receiver_map; - MapHandleList target_receiver_maps; if (ic_state == UNINITIALIZED || ic_state == PREMONOMORPHIC) { // Optimistically assume that ICs that haven't reached the MONOMORPHIC state // yet will do so and stay there. 
- monomorphic = true; - } else { - GetReceiverMapsForStub(Handle<Code>(target()), &target_receiver_maps); - if (ic_state == MONOMORPHIC && (is_transition_stub || stub_kind == LOAD)) { - // The first time a receiver is seen that is a transitioned version of the - // previous monomorphic receiver type, assume the new ElementsKind is the - // monomorphic type. This benefits global arrays that only transition - // once, and all call sites accessing them are faster if they remain - // monomorphic. If this optimistic assumption is not true, the IC will - // miss again and it will become polymorphic and support both the - // untransitioned and transitioned maps. - monomorphic = IsMoreGeneralElementsKindTransition( - target_receiver_maps.at(0)->elements_kind(), - receiver->GetElementsKind()); - } + Handle<Map> monomorphic_map = ComputeTransitionedMap(receiver, store_mode); + store_mode = GetNonTransitioningStoreMode(store_mode); + return isolate()->stub_cache()->ComputeKeyedStoreElement( + monomorphic_map, strict_mode, store_mode); + } + + MapHandleList target_receiver_maps; + target()->FindAllMaps(&target_receiver_maps); + if (target_receiver_maps.length() == 0) { + // In the case that there is a non-map-specific IC is installed (e.g. keyed + // stores into properties in dictionary mode), then there will be not + // receiver maps in the target. + return strict_mode == kStrictMode + ? generic_stub_strict() + : generic_stub(); } - if (monomorphic) { - if (is_transition_stub) { - monomorphic_map = ComputeTransitionedMap(receiver, stub_kind); - ASSERT(*monomorphic_map != *receiver_map); - stub_kind = GetNoTransitionStubKind(stub_kind); + // There are several special cases where an IC that is MONOMORPHIC can still + // transition to a different GetNonTransitioningStoreMode IC that handles a + // superset of the original IC. Handle those here if the receiver map hasn't + // changed or it has transitioned to a more general kind. + KeyedAccessStoreMode old_store_mode = + Code::GetKeyedAccessStoreMode(target()->extra_ic_state()); + Handle<Map> previous_receiver_map = target_receiver_maps.at(0); + if (ic_state == MONOMORPHIC && old_store_mode == STANDARD_STORE) { + // If the "old" and "new" maps are in the same elements map family, stay + // MONOMORPHIC and use the map for the most generic ElementsKind. + Handle<Map> transitioned_receiver_map = receiver_map; + if (IsTransitionStoreMode(store_mode)) { + transitioned_receiver_map = + ComputeTransitionedMap(receiver, store_mode); + } + ElementsKind transitioned_kind = + transitioned_receiver_map->elements_kind(); + bool more_general_transition = + IsMoreGeneralElementsKindTransition( + previous_receiver_map->elements_kind(), + transitioned_kind); + Map* transitioned_previous_map = more_general_transition + ? previous_receiver_map->LookupElementsTransitionMap(transitioned_kind) + : NULL; + if (transitioned_previous_map == *transitioned_receiver_map) { + // Element family is the same, use the "worst" case map. + store_mode = GetNonTransitioningStoreMode(store_mode); + return isolate()->stub_cache()->ComputeKeyedStoreElement( + transitioned_receiver_map, strict_mode, store_mode); + } else if (*previous_receiver_map == receiver->map()) { + if (IsGrowStoreMode(store_mode)) { + // A "normal" IC that handles stores can switch to a version that can + // grow at the end of the array and still stay MONOMORPHIC. 
+ return isolate()->stub_cache()->ComputeKeyedStoreElement( + receiver_map, strict_mode, store_mode); + } } - return ComputeMonomorphicStub( - monomorphic_map, stub_kind, strict_mode, generic_stub); } - ASSERT(target() != *generic_stub); - // Determine the list of receiver maps that this call site has seen, - // adding the map that was just encountered. + ASSERT(ic_state != GENERIC); + bool map_added = AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map); - if (IsTransitionStubKind(stub_kind)) { - Handle<Map> new_map = ComputeTransitionedMap(receiver, stub_kind); - map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, new_map); + + if (IsTransitionStoreMode(store_mode)) { + Handle<Map> transitioned_receiver_map = + ComputeTransitionedMap(receiver, store_mode); + map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps, + transitioned_receiver_map); } + if (!map_added) { // If the miss wasn't due to an unseen map, a polymorphic stub // won't help, use the generic stub. - TRACE_GENERIC_IC("KeyedIC", "same map added twice"); - return generic_stub; + TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice"); + return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub(); } // If the maximum number of receiver maps has been exceeded, use the generic // version of the IC. if (target_receiver_maps.length() > kMaxKeyedPolymorphism) { - TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded"); - return generic_stub; - } - - if ((Code::GetKeyedAccessGrowMode(target()->extra_ic_state()) == - ALLOW_JSARRAY_GROWTH)) { - grow_mode = ALLOW_JSARRAY_GROWTH; + TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded"); + return strict_mode == kStrictMode ? generic_stub_strict() : generic_stub(); + } + + // Make sure all polymorphic handlers have the same store mode, otherwise the + // generic stub must be used. + store_mode = GetNonTransitioningStoreMode(store_mode); + if (old_store_mode != STANDARD_STORE) { + if (store_mode == STANDARD_STORE) { + store_mode = old_store_mode; + } else if (store_mode != old_store_mode) { + TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch"); + return strict_mode == kStrictMode + ? 
generic_stub_strict() + : generic_stub(); + } } - Handle<PolymorphicCodeCache> cache = - isolate()->factory()->polymorphic_code_cache(); - Code::ExtraICState extra_state = Code::ComputeExtraICState(grow_mode, - strict_mode); - Code::Flags flags = Code::ComputeFlags(kind(), MEGAMORPHIC, extra_state); - Handle<Object> probe = cache->Lookup(&target_receiver_maps, flags); - if (probe->IsCode()) return Handle<Code>::cast(probe); - - Handle<Code> stub = - ComputePolymorphicStub(&target_receiver_maps, strict_mode, grow_mode); - PolymorphicCodeCache::Update(cache, &target_receiver_maps, flags, stub); - return stub; + return isolate()->stub_cache()->ComputeStoreElementPolymorphic( + &target_receiver_maps, store_mode, strict_mode); } -Handle<Code> KeyedIC::ComputeMonomorphicStubWithoutMapCheck( - Handle<Map> receiver_map, - StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode) { - if ((receiver_map->instance_type() & kNotStringTag) == 0) { - ASSERT(!string_stub().is_null()); - return string_stub(); - } else { - ASSERT(receiver_map->has_dictionary_elements() || - receiver_map->has_fast_smi_or_object_elements() || - receiver_map->has_fast_double_elements() || - receiver_map->has_external_array_elements()); - bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; - return GetElementStubWithoutMapCheck(is_js_array, - receiver_map->elements_kind(), - grow_mode); - } -} - - -Handle<Code> KeyedIC::ComputeMonomorphicStub(Handle<Map> receiver_map, - StubKind stub_kind, - StrictModeFlag strict_mode, - Handle<Code> generic_stub) { - ElementsKind elements_kind = receiver_map->elements_kind(); - if (IsFastElementsKind(elements_kind) || - IsExternalArrayElementsKind(elements_kind) || - IsDictionaryElementsKind(elements_kind)) { - return isolate()->stub_cache()->ComputeKeyedLoadOrStoreElement( - receiver_map, stub_kind, strict_mode); - } else { - return generic_stub; - } -} - - -Handle<Map> KeyedIC::ComputeTransitionedMap(Handle<JSObject> receiver, - StubKind stub_kind) { - switch (stub_kind) { - case KeyedIC::STORE_TRANSITION_SMI_TO_OBJECT: - case KeyedIC::STORE_TRANSITION_DOUBLE_TO_OBJECT: - case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT: - case KeyedIC::STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT: +Handle<Map> KeyedStoreIC::ComputeTransitionedMap( + Handle<JSObject> receiver, + KeyedAccessStoreMode store_mode) { + switch (store_mode) { + case STORE_TRANSITION_SMI_TO_OBJECT: + case STORE_TRANSITION_DOUBLE_TO_OBJECT: + case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT: + case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT: return JSObject::GetElementsTransitionMap(receiver, FAST_ELEMENTS); - case KeyedIC::STORE_TRANSITION_SMI_TO_DOUBLE: - case KeyedIC::STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE: + case STORE_TRANSITION_SMI_TO_DOUBLE: + case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE: return JSObject::GetElementsTransitionMap(receiver, FAST_DOUBLE_ELEMENTS); - case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_OBJECT: - case KeyedIC::STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: - case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT: - case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: + case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT: + case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: + case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT: + case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT: return JSObject::GetElementsTransitionMap(receiver, FAST_HOLEY_ELEMENTS); - case KeyedIC::STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE: - case KeyedIC::STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE: + case 
STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE: + case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE: return JSObject::GetElementsTransitionMap(receiver, FAST_HOLEY_DOUBLE_ELEMENTS); - case KeyedIC::LOAD: - case KeyedIC::STORE_NO_TRANSITION: - case KeyedIC::STORE_AND_GROW_NO_TRANSITION: - UNREACHABLE(); - break; + case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS: + ASSERT(receiver->map()->has_external_array_elements()); + // Fall through + case STORE_NO_TRANSITION_HANDLE_COW: + case STANDARD_STORE: + case STORE_AND_GROW_NO_TRANSITION: + return Handle<Map>(receiver->map()); } return Handle<Map>::null(); } -Handle<Code> KeyedStoreIC::GetElementStubWithoutMapCheck( - bool is_js_array, - ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) { - return KeyedStoreElementStub(is_js_array, elements_kind, grow_mode).GetCode(); -} - - -Handle<Code> KeyedStoreIC::ComputePolymorphicStub( - MapHandleList* receiver_maps, - StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode) { - // Collect MONOMORPHIC stubs for all target_receiver_maps. - CodeHandleList handler_ics(receiver_maps->length()); - MapHandleList transitioned_maps(receiver_maps->length()); - for (int i = 0; i < receiver_maps->length(); ++i) { - Handle<Map> receiver_map(receiver_maps->at(i)); - Handle<Code> cached_stub; - Handle<Map> transitioned_map = - receiver_map->FindTransitionedMap(receiver_maps); - if (!transitioned_map.is_null()) { - cached_stub = ElementsTransitionAndStoreStub( - receiver_map->elements_kind(), // original elements_kind - transitioned_map->elements_kind(), - receiver_map->instance_type() == JS_ARRAY_TYPE, // is_js_array - strict_mode, grow_mode).GetCode(); - } else { - cached_stub = ComputeMonomorphicStubWithoutMapCheck(receiver_map, - strict_mode, - grow_mode); - } - ASSERT(!cached_stub.is_null()); - handler_ics.Add(cached_stub); - transitioned_maps.Add(transitioned_map); - } - KeyedStoreStubCompiler compiler(isolate(), strict_mode, grow_mode); - Handle<Code> code = compiler.CompileStorePolymorphic( - receiver_maps, &handler_ics, &transitioned_maps); - isolate()->counters()->keyed_store_polymorphic_stubs()->Increment(); - PROFILE(isolate(), - CodeCreateEvent(Logger::KEYED_STORE_MEGAMORPHIC_IC_TAG, *code, 0)); - return code; +bool IsOutOfBoundsAccess(Handle<JSObject> receiver, + int index) { + if (receiver->IsJSArray()) { + return JSArray::cast(*receiver)->length()->IsSmi() && + index >= Smi::cast(JSArray::cast(*receiver)->length())->value(); + } + return index >= receiver->elements()->length(); } -KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver, - Handle<Object> key, - Handle<Object> value) { +KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver, + Handle<Object> key, + Handle<Object> value) { ASSERT(key->IsSmi()); int index = Smi::cast(*key)->value(); - bool allow_growth = receiver->IsJSArray() && - JSArray::cast(*receiver)->length()->IsSmi() && - index >= Smi::cast(JSArray::cast(*receiver)->length())->value(); - + bool oob_access = IsOutOfBoundsAccess(receiver, index); + bool allow_growth = receiver->IsJSArray() && oob_access; if (allow_growth) { // Handle growing array in stub if necessary. 
if (receiver->HasFastSmiElements()) { @@ -1858,7 +1869,12 @@ KeyedIC::StubKind KeyedStoreIC::GetStubKind(Handle<JSObject> receiver, } } } - return STORE_NO_TRANSITION; + if (!FLAG_trace_external_array_abuse && + receiver->map()->has_external_array_elements() && oob_access) { + return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS; + } else { + return STANDARD_STORE; + } } } @@ -1868,114 +1884,65 @@ MaybeObject* KeyedStoreIC::Store(State state, Handle<Object> object, Handle<Object> key, Handle<Object> value, - bool force_generic) { - // Check for values that can be converted into a symbol directly or - // is representable as a smi. + ICMissMode miss_mode) { + // Check for values that can be converted into an internalized string directly + // or is representable as a smi. key = TryConvertKey(key, isolate()); - if (key->IsSymbol()) { - Handle<String> name = Handle<String>::cast(key); - - // Handle proxies. - if (object->IsJSProxy()) { - return JSProxy::cast(*object)->SetProperty( - *name, *value, NONE, strict_mode); - } - - // If the object is undefined or null it's illegal to try to set any - // properties on it; throw a TypeError in that case. - if (object->IsUndefined() || object->IsNull()) { - return TypeError("non_object_property_store", object, name); - } - - // Ignore stores where the receiver is not a JSObject. - if (!object->IsJSObject()) return *value; - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - - // Check if the given name is an array index. - uint32_t index; - if (name->AsArrayIndex(&index)) { - Handle<Object> result = - JSObject::SetElement(receiver, index, value, NONE, strict_mode); - RETURN_IF_EMPTY_HANDLE(isolate(), result); - return *value; - } - - // Update inline cache and stub cache. - if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) { - LookupResult lookup(isolate()); - if (LookupForWrite(receiver, name, &lookup)) { - UpdateCaches(&lookup, state, strict_mode, receiver, name, value); - } - } - - // Set the property. - return receiver->SetProperty(*name, *value, NONE, strict_mode); + if (key->IsInternalizedString()) { + return StoreIC::Store(state, + strict_mode, + object, + Handle<String>::cast(key), + value, + JSReceiver::MAY_BE_STORE_FROM_KEYED); } - // Do not use ICs for objects that require access checks (including - // the global object). - bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded(); + bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() && + !(FLAG_harmony_observation && object->IsJSObject() && + JSObject::cast(*object)->map()->is_observed()); ASSERT(!(use_ic && object->IsJSGlobalProxy())); if (use_ic) { Handle<Code> stub = (strict_mode == kStrictMode) ? 
generic_stub_strict() : generic_stub(); - if (object->IsJSObject()) { - Handle<JSObject> receiver = Handle<JSObject>::cast(object); - if (receiver->elements()->map() == - isolate()->heap()->non_strict_arguments_elements_map()) { - stub = non_strict_arguments_stub(); - } else if (!force_generic) { - if (key->IsSmi() && (target() != *non_strict_arguments_stub())) { - StubKind stub_kind = GetStubKind(receiver, key, value); - stub = ComputeStub(receiver, stub_kind, strict_mode, stub); + if (miss_mode != MISS_FORCE_GENERIC) { + if (object->IsJSObject()) { + Handle<JSObject> receiver = Handle<JSObject>::cast(object); + if (receiver->elements()->map() == + isolate()->heap()->non_strict_arguments_elements_map()) { + stub = non_strict_arguments_stub(); + } else if (key->IsSmi() && (target() != *non_strict_arguments_stub())) { + KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value); + stub = StoreElementStub(receiver, store_mode, strict_mode); } - } else { - TRACE_GENERIC_IC("KeyedStoreIC", "force generic"); } + } else { + TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "force generic"); } - if (!stub.is_null()) set_target(*stub); + ASSERT(!stub.is_null()); + set_target(*stub); + TRACE_IC("KeyedStoreIC", key, state, target()); } - TRACE_IC("KeyedStoreIC", key, state, target()); - - // Set the property. return Runtime::SetObjectProperty( isolate(), object , key, value, NONE, strict_mode); } -void KeyedStoreIC::UpdateCaches(LookupResult* lookup, - State state, - StrictModeFlag strict_mode, - Handle<JSObject> receiver, - Handle<String> name, - Handle<Object> value) { - ASSERT(!receiver->IsJSGlobalProxy()); - ASSERT(StoreICableLookup(lookup)); - ASSERT(lookup->IsFound()); - - // These are not cacheable, so we never see such LookupResults here. - ASSERT(!lookup->IsHandler()); - +Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name) { // If the property has a non-field type allowing map transitions // where there is extra room in the object, we leave the IC in its // current state. - PropertyType type = lookup->type(); - - // Compute the code stub for this store; used for rewriting to - // monomorphic state and making sure that the code stub is in the - // stub cache. - Handle<Code> code; - - switch (type) { + switch (lookup->type()) { case FIELD: - code = isolate()->stub_cache()->ComputeKeyedStoreField( - name, receiver, lookup->GetFieldIndex(), + return isolate()->stub_cache()->ComputeKeyedStoreField( + name, receiver, lookup->GetFieldIndex().field_index(), Handle<Map>::null(), strict_mode); - break; case TRANSITION: { Handle<Map> transition(lookup->GetTransitionTarget()); int descriptor = transition->LastAdded(); @@ -1985,9 +1952,8 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, if (details.type() == FIELD && details.attributes() == NONE) { int field_index = target_descriptors->GetFieldIndex(descriptor); - code = isolate()->stub_cache()->ComputeKeyedStoreField( + return isolate()->stub_cache()->ComputeKeyedStoreField( name, receiver, field_index, transition, strict_mode); - break; } // fall through. } @@ -1997,30 +1963,15 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup, case INTERCEPTOR: // Always rewrite to the generic case so that we do not // repeatedly try to rewrite. - code = (strict_mode == kStrictMode) + return (strict_mode == kStrictMode) ? 
generic_stub_strict() : generic_stub(); - break; case HANDLER: case NONEXISTENT: UNREACHABLE(); - return; - } - - ASSERT(!code.is_null()); - - // Patch the call site depending on the state of the cache. Make - // sure to always rewrite from monomorphic to megamorphic. - ASSERT(state != MONOMORPHIC_PROTOTYPE_FAILURE); - if (state == UNINITIALIZED || state == PREMONOMORPHIC) { - set_target(*code); - } else if (state == MONOMORPHIC) { - set_target((strict_mode == kStrictMode) - ? *megamorphic_stub_strict() - : *megamorphic_stub()); + break; } - - TRACE_IC("KeyedStoreIC", name, state, target()); + return Handle<Code>::null(); } @@ -2042,13 +1993,12 @@ RUNTIME_FUNCTION(MaybeObject*, CallIC_Miss) { extra_ic_state, args.at<Object>(0), args.at<String>(1)); - // Result could be a function or a failure. - JSFunction* raw_function = NULL; + JSFunction* raw_function; if (!maybe_result->To(&raw_function)) return maybe_result; // The first time the inline cache is updated may be the first time the - // function it references gets called. If the function is lazily compiled - // then the first call will trigger a compilation. We check for this case + // function it references gets called. If the function is lazily compiled + // then the first call will trigger a compilation. We check for this case // and we do the compilation immediately, instead of waiting for the stub // currently attached to the JSFunction object to trigger compilation. if (raw_function->is_compiled()) return raw_function; @@ -2083,7 +2033,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedCallIC_Miss) { RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) { HandleScope scope(isolate); ASSERT(args.length() == 2); - LoadIC ic(isolate); + LoadIC ic(IC::NO_EXTRA_FRAME, isolate); IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); return ic.Load(state, args.at<Object>(0), args.at<String>(1)); } @@ -2093,24 +2043,36 @@ RUNTIME_FUNCTION(MaybeObject*, LoadIC_Miss) { RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_Miss) { HandleScope scope(isolate); ASSERT(args.length() == 2); - KeyedLoadIC ic(isolate); + KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate); IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - return ic.Load(state, args.at<Object>(0), args.at<Object>(1), false); + return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS); +} + + +RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure) { + HandleScope scope(isolate); + ASSERT(args.length() == 2); + KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate); + IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); + return ic.Load(state, args.at<Object>(0), args.at<Object>(1), MISS); } RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissForceGeneric) { HandleScope scope(isolate); ASSERT(args.length() == 2); - KeyedLoadIC ic(isolate); + KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate); IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); - return ic.Load(state, args.at<Object>(0), args.at<Object>(1), true); + return ic.Load(state, + args.at<Object>(0), + args.at<Object>(1), + MISS_FORCE_GENERIC); } // Used from ic-<arch>.cc. 
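
The comment above introduces the runtime miss handlers that the generated ic-<arch>.cc code calls into. They all share one shape: open a HandleScope, reconstruct the IC object for the calling frame (NO_EXTRA_FRAME for ordinary call sites, EXTRA_CALL_FRAME for the new KeyedLoadIC_MissFromStubFailure entry), derive the current State from the installed target code, and re-run the load or store with the original arguments, with the ForceGeneric variants passing MISS_FORCE_GENERIC instead of MISS. A skeleton of that shape with placeholder types (not V8's):

    // Skeleton of the shared miss-handler shape (placeholder types, illustrative only).
    #include <cstdio>

    enum FrameDepth { NO_EXTRA_FRAME, EXTRA_CALL_FRAME };
    enum MissMode { MISS, MISS_FORCE_GENERIC };
    struct Value {};

    struct KeyedLoadICModel {
      explicit KeyedLoadICModel(FrameDepth depth) : depth_(depth) {}
      Value* Load(MissMode mode, Value* receiver, Value* key) {
        // 1. Read the state of the currently installed stub.
        // 2. Pick and install a better stub (pre-monomorphic, element, or generic).
        // 3. Perform the access through the runtime and return the result.
        (void)mode; (void)key;
        return receiver;
      }
      FrameDepth depth_;
    };

    Value* KeyedLoadIC_Miss(Value* receiver, Value* key) {
      KeyedLoadICModel ic(NO_EXTRA_FRAME);    // MissFromStubFailure would pass EXTRA_CALL_FRAME
      return ic.Load(MISS, receiver, key);    // the ForceGeneric entry passes MISS_FORCE_GENERIC
    }

    int main() {
      Value v;
      std::printf("%p\n", static_cast<void*>(KeyedLoadIC_Miss(&v, &v)));
      return 0;
    }
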
RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) { - HandleScope scope; + HandleScope scope(isolate); ASSERT(args.length() == 3); StoreIC ic(isolate); IC::State state = IC::StateFrom(ic.target(), args[0], args[1]); @@ -2124,7 +2086,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_Miss) { RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) { - NoHandleAllocation nha; + NoHandleAllocation nha(isolate); ASSERT(args.length() == 2); JSArray* receiver = JSArray::cast(args[0]); @@ -2136,14 +2098,14 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) { #ifdef DEBUG // The length property has to be a writable callback property. LookupResult debug_lookup(isolate); - receiver->LocalLookup(isolate->heap()->length_symbol(), &debug_lookup); + receiver->LocalLookup(isolate->heap()->length_string(), &debug_lookup); ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly()); #endif Object* result; - { MaybeObject* maybe_result = receiver->SetElementsLength(len); - if (!maybe_result->ToObject(&result)) return maybe_result; - } + MaybeObject* maybe_result = receiver->SetElementsLength(len); + if (!maybe_result->To(&result)) return maybe_result; + return len; } @@ -2152,7 +2114,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreIC_ArrayLength) { // it is necessary to extend the properties array of a // JSObject. RUNTIME_FUNCTION(MaybeObject*, SharedStoreIC_ExtendStorage) { - NoHandleAllocation na; + NoHandleAllocation na(isolate); ASSERT(args.length() == 3); // Convert the parameters @@ -2196,12 +2158,12 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Miss) { args.at<Object>(0), args.at<Object>(1), args.at<Object>(2), - false); + MISS); } RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_Slow) { - NoHandleAllocation na; + NoHandleAllocation na(isolate); ASSERT(args.length() == 3); KeyedStoreIC ic(isolate); Code::ExtraICState extra_ic_state = ic.target()->extra_ic_state(); @@ -2229,7 +2191,7 @@ RUNTIME_FUNCTION(MaybeObject*, KeyedStoreIC_MissForceGeneric) { args.at<Object>(0), args.at<Object>(1), args.at<Object>(2), - true); + MISS_FORCE_GENERIC); } @@ -2242,7 +2204,7 @@ const char* UnaryOpIC::GetName(TypeInfo type_info) { switch (type_info) { case UNINITIALIZED: return "Uninitialized"; case SMI: return "Smi"; - case HEAP_NUMBER: return "HeapNumbers"; + case NUMBER: return "Number"; case GENERIC: return "Generic"; default: return "Invalid"; } @@ -2254,10 +2216,10 @@ UnaryOpIC::State UnaryOpIC::ToState(TypeInfo type_info) { case UNINITIALIZED: return ::v8::internal::UNINITIALIZED; case SMI: - case HEAP_NUMBER: + case NUMBER: return MONOMORPHIC; case GENERIC: - return MEGAMORPHIC; + return ::v8::internal::GENERIC; } UNREACHABLE(); return ::v8::internal::UNINITIALIZED; @@ -2269,7 +2231,7 @@ UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) { if (operand_type.IsSmi()) { return SMI; } else if (operand_type.IsNumber()) { - return HEAP_NUMBER; + return NUMBER; } else { return GENERIC; } @@ -2277,24 +2239,22 @@ UnaryOpIC::TypeInfo UnaryOpIC::GetTypeInfo(Handle<Object> operand) { UnaryOpIC::TypeInfo UnaryOpIC::ComputeNewType( - UnaryOpIC::TypeInfo current_type, - UnaryOpIC::TypeInfo previous_type) { + TypeInfo current_type, + TypeInfo previous_type) { switch (previous_type) { - case UnaryOpIC::UNINITIALIZED: + case UNINITIALIZED: return current_type; - case UnaryOpIC::SMI: - return (current_type == UnaryOpIC::GENERIC) - ? UnaryOpIC::GENERIC - : UnaryOpIC::HEAP_NUMBER; - case UnaryOpIC::HEAP_NUMBER: - return UnaryOpIC::GENERIC; - case UnaryOpIC::GENERIC: + case SMI: + return (current_type == GENERIC) ? 
GENERIC : NUMBER; + case NUMBER: + return GENERIC; + case GENERIC: // We should never do patching if we are in GENERIC state. UNREACHABLE(); - return UnaryOpIC::GENERIC; + return GENERIC; } UNREACHABLE(); - return UnaryOpIC::GENERIC; + return GENERIC; } @@ -2306,12 +2266,11 @@ void BinaryOpIC::patch(Code* code) { const char* BinaryOpIC::GetName(TypeInfo type_info) { switch (type_info) { case UNINITIALIZED: return "Uninitialized"; - case SMI: return "SMI"; - case INT32: return "Int32s"; - case HEAP_NUMBER: return "HeapNumbers"; + case SMI: return "Smi"; + case INT32: return "Int32"; + case NUMBER: return "Number"; case ODDBALL: return "Oddball"; - case BOTH_STRING: return "BothStrings"; - case STRING: return "Strings"; + case STRING: return "String"; case GENERIC: return "Generic"; default: return "Invalid"; } @@ -2324,71 +2283,18 @@ BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) { return ::v8::internal::UNINITIALIZED; case SMI: case INT32: - case HEAP_NUMBER: + case NUMBER: case ODDBALL: - case BOTH_STRING: case STRING: return MONOMORPHIC; case GENERIC: - return MEGAMORPHIC; + return ::v8::internal::GENERIC; } UNREACHABLE(); return ::v8::internal::UNINITIALIZED; } -BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x, - BinaryOpIC::TypeInfo y) { - if (x == UNINITIALIZED) return y; - if (y == UNINITIALIZED) return x; - if (x == y) return x; - if (x == BOTH_STRING && y == STRING) return STRING; - if (x == STRING && y == BOTH_STRING) return STRING; - if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) { - return GENERIC; - } - if (x > y) return x; - return y; -} - - -BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left, - Handle<Object> right) { - ::v8::internal::TypeInfo left_type = - ::v8::internal::TypeInfo::TypeFromValue(left); - ::v8::internal::TypeInfo right_type = - ::v8::internal::TypeInfo::TypeFromValue(right); - - if (left_type.IsSmi() && right_type.IsSmi()) { - return SMI; - } - - if (left_type.IsInteger32() && right_type.IsInteger32()) { - // Platforms with 32-bit Smis have no distinct INT32 type. - if (kSmiValueSize == 32) return SMI; - return INT32; - } - - if (left_type.IsNumber() && right_type.IsNumber()) { - return HEAP_NUMBER; - } - - // Patching for fast string ADD makes sense even if only one of the - // arguments is a string. - if (left_type.IsString()) { - return right_type.IsString() ? BOTH_STRING : STRING; - } else if (right_type.IsString()) { - return STRING; - } - - // Check for oddball objects. 
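UnaryOpIC::ComputeNewType above only ever widens the recorded type now that HEAP_NUMBER has been renamed NUMBER: SMI widens to NUMBER, NUMBER widens to GENERIC, and GENERIC never repatches. A self-contained model of that transition table with a few sanity checks (not V8 code):

    #include <cassert>

    enum TypeInfo { UNINITIALIZED, SMI, NUMBER, GENERIC };  // as in UnaryOpIC

    // Same transition rule as ComputeNewType: a miss from the SMI stub widens to
    // NUMBER unless the new operand is already GENERIC; a miss from NUMBER always
    // widens to GENERIC; GENERIC never repatches.
    TypeInfo ComputeNewType(TypeInfo current, TypeInfo previous) {
      switch (previous) {
        case UNINITIALIZED: return current;
        case SMI:           return current == GENERIC ? GENERIC : NUMBER;
        case NUMBER:        return GENERIC;
        case GENERIC:       return GENERIC;  // unreachable in the real IC
      }
      return GENERIC;
    }

    int main() {
      assert(ComputeNewType(SMI, UNINITIALIZED) == SMI);
      assert(ComputeNewType(NUMBER, SMI) == NUMBER);
      assert(ComputeNewType(GENERIC, SMI) == GENERIC);
      assert(ComputeNewType(SMI, NUMBER) == GENERIC);
      return 0;
    }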
- if (left->IsUndefined() && right->IsNumber()) return ODDBALL; - if (left->IsNumber() && right->IsUndefined()) return ODDBALL; - - return GENERIC; -} - - RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) { ASSERT(args.length() == 4); @@ -2403,13 +2309,16 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) { type = UnaryOpIC::ComputeNewType(type, previous_type); UnaryOpStub stub(op, mode, type); - Handle<Code> code = stub.GetCode(); + Handle<Code> code = stub.GetCode(isolate); if (!code.is_null()) { if (FLAG_trace_ic) { - PrintF("[UnaryOpIC (%s->%s)#%s]\n", + PrintF("[UnaryOpIC in "); + JavaScriptFrame::PrintTop(isolate, stdout, false, true); + PrintF(" (%s->%s)#%s @ %p]\n", UnaryOpIC::GetName(previous_type), UnaryOpIC::GetName(type), - Token::Name(op)); + Token::Name(op), + static_cast<void*>(*code)); } UnaryOpIC ic(isolate); ic.patch(*code); @@ -2440,25 +2349,72 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) { return *result; } + +static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value, + Token::Value op) { + ::v8::internal::TypeInfo type = + ::v8::internal::TypeInfo::TypeFromValue(value); + if (type.IsSmi()) return BinaryOpIC::SMI; + if (type.IsInteger32()) { + if (kSmiValueSize == 32) return BinaryOpIC::SMI; + return BinaryOpIC::INT32; + } + if (type.IsNumber()) return BinaryOpIC::NUMBER; + if (type.IsString()) return BinaryOpIC::STRING; + if (value->IsUndefined()) { + if (op == Token::BIT_AND || + op == Token::BIT_OR || + op == Token::BIT_XOR || + op == Token::SAR || + op == Token::SHL || + op == Token::SHR) { + if (kSmiValueSize == 32) return BinaryOpIC::SMI; + return BinaryOpIC::INT32; + } + return BinaryOpIC::ODDBALL; + } + return BinaryOpIC::GENERIC; +} + + +static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type, + Handle<Object> value, + Token::Value op) { + BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op); + if (old_type == BinaryOpIC::STRING) { + if (new_type == BinaryOpIC::STRING) return new_type; + return BinaryOpIC::GENERIC; + } + return Max(old_type, new_type); +} + + RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) { - ASSERT(args.length() == 5); + ASSERT(args.length() == 3); HandleScope scope(isolate); Handle<Object> left = args.at<Object>(0); Handle<Object> right = args.at<Object>(1); int key = args.smi_at(2); - Token::Value op = static_cast<Token::Value>(args.smi_at(3)); - BinaryOpIC::TypeInfo previous_type = - static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4)); + Token::Value op = BinaryOpStub::decode_op_from_minor_key(key); + BinaryOpIC::TypeInfo previous_left, previous_right, unused_previous_result; + BinaryOpStub::decode_types_from_minor_key( + key, &previous_left, &previous_right, &unused_previous_result); - BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right); - type = BinaryOpIC::JoinTypes(type, previous_type); + BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op); + BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op); BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED; - if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) && + + // STRING is only used for ADD operations. 
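TypeInfoFromValue and InputState above exploit the new enum ordering: because the states run from most specific to most generic, Max() acts as the per-operand join, with the single exception that a STRING state mixed with a non-string operand drops straight to GENERIC. A standalone sketch of that rule (here the new operand's type is passed in directly rather than derived from a value):

    #include <algorithm>
    #include <cassert>

    // Order copied from the patched BinaryOpIC enum; everything else is a sketch.
    enum TypeInfo { UNINITIALIZED, SMI, INT32, NUMBER, ODDBALL, STRING, GENERIC };

    // Per-operand state update: Max() works as the lattice join because the enum
    // is ordered from most specific to most generic, except that a STRING state
    // mixed with a non-string operand degrades straight to GENERIC.
    TypeInfo InputState(TypeInfo old_type, TypeInfo new_type) {
      if (old_type == STRING) {
        return new_type == STRING ? STRING : GENERIC;
      }
      return std::max(old_type, new_type);
    }

    int main() {
      assert(InputState(SMI, INT32) == INT32);       // widen within the number tower
      assert(InputState(NUMBER, SMI) == NUMBER);     // never narrow on a later miss
      assert(InputState(STRING, STRING) == STRING);  // string ADD stays specialized
      assert(InputState(STRING, NUMBER) == GENERIC); // mixed string/number gives up
      return 0;
    }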
+ if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) && op != Token::ADD) { - type = BinaryOpIC::GENERIC; + new_left = new_right = BinaryOpIC::GENERIC; } - if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) { + + BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right); + BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right); + + if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) { if (op == Token::DIV || op == Token::MUL || op == Token::SHR || @@ -2467,32 +2423,41 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) { // That is the only way to get here from the Smi stub. // With 32-bit Smis, all overflows give heap numbers, but with // 31-bit Smis, most operations overflow to int32 results. - result_type = BinaryOpIC::HEAP_NUMBER; + result_type = BinaryOpIC::NUMBER; } else { // Other operations on SMIs that overflow yield int32s. result_type = BinaryOpIC::INT32; } } - if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) { - // We must be here because an operation on two INT32 types overflowed. - result_type = BinaryOpIC::HEAP_NUMBER; + if (new_overall == BinaryOpIC::INT32 && + previous_overall == BinaryOpIC::INT32) { + if (new_left == previous_left && new_right == previous_right) { + result_type = BinaryOpIC::NUMBER; + } } - BinaryOpStub stub(key, type, result_type); - Handle<Code> code = stub.GetCode(); + BinaryOpStub stub(key, new_left, new_right, result_type); + Handle<Code> code = stub.GetCode(isolate); if (!code.is_null()) { +#ifdef DEBUG if (FLAG_trace_ic) { - PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n", - BinaryOpIC::GetName(previous_type), - BinaryOpIC::GetName(type), + PrintF("[BinaryOpIC in "); + JavaScriptFrame::PrintTop(isolate, stdout, false, true); + PrintF(" ((%s+%s)->((%s+%s)->%s))#%s @ %p]\n", + BinaryOpIC::GetName(previous_left), + BinaryOpIC::GetName(previous_right), + BinaryOpIC::GetName(new_left), + BinaryOpIC::GetName(new_right), BinaryOpIC::GetName(result_type), - Token::Name(op)); + Token::Name(op), + static_cast<void*>(*code)); } +#endif BinaryOpIC ic(isolate); ic.patch(*code); // Activate inlined smi code. 
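The result-type selection above encodes an overflow rule: a miss while both inputs are still SMI means the result overflowed, and only division, multiplication, and unsigned shift (or any operation once smis are 32 bits wide) need a heap-number result; other overflows still fit an int32. A worked approximation of that rule, with the 32-bit-smi clause taken from the comment since the hunk boundary cuts the condition:

    #include <cassert>

    enum ResultType { INT32_RESULT, NUMBER_RESULT };
    enum Op { ADD, SUB, BIT_OR, MUL, DIV, SHR };

    // Approximates the SMI+SMI miss rule in BinaryOp_Patch: the only way to get
    // here from the SMI stub is an overflow, and only some ops overflow past the
    // int32 range (or any op when smis are already 32 bits wide).
    ResultType SmiOverflowResult(Op op, int smi_value_size) {
      if (op == DIV || op == MUL || op == SHR || smi_value_size == 32) {
        return NUMBER_RESULT;
      }
      return INT32_RESULT;
    }

    int main() {
      assert(SmiOverflowResult(ADD, 31) == INT32_RESULT);   // 31-bit smi ADD overflow fits int32
      assert(SmiOverflowResult(MUL, 31) == NUMBER_RESULT);  // products can exceed int32
      assert(SmiOverflowResult(ADD, 32) == NUMBER_RESULT);  // 32-bit smis: any overflow is a double
      return 0;
    }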
- if (previous_type == BinaryOpIC::UNINITIALIZED) { + if (previous_overall == BinaryOpIC::UNINITIALIZED) { PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK); } } @@ -2555,43 +2520,29 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) { Code* CompareIC::GetRawUninitialized(Token::Value op) { - ICCompareStub stub(op, UNINITIALIZED); + ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED); Code* code = NULL; - CHECK(stub.FindCodeInCache(&code)); + CHECK(stub.FindCodeInCache(&code, Isolate::Current())); return code; } -Handle<Code> CompareIC::GetUninitialized(Token::Value op) { - ICCompareStub stub(op, UNINITIALIZED); - return stub.GetCode(); -} - - -CompareIC::State CompareIC::ComputeState(Code* target) { - int key = target->major_key(); - if (key == CodeStub::Compare) return GENERIC; - ASSERT(key == CodeStub::CompareIC); - return static_cast<State>(target->compare_state()); -} - - -Token::Value CompareIC::ComputeOperation(Code* target) { - ASSERT(target->major_key() == CodeStub::CompareIC); - return static_cast<Token::Value>( - target->compare_operation() + Token::EQ); +Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) { + ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED); + return stub.GetCode(isolate); } const char* CompareIC::GetStateName(State state) { switch (state) { case UNINITIALIZED: return "UNINITIALIZED"; - case SMIS: return "SMIS"; - case HEAP_NUMBERS: return "HEAP_NUMBERS"; - case OBJECTS: return "OBJECTS"; - case KNOWN_OBJECTS: return "KNOWN_OBJECTS"; - case SYMBOLS: return "SYMBOLS"; - case STRINGS: return "STRINGS"; + case SMI: return "SMI"; + case NUMBER: return "NUMBER"; + case INTERNALIZED_STRING: return "INTERNALIZED_STRING"; + case STRING: return "STRING"; + case UNIQUE_NAME: return "UNIQUE_NAME"; + case OBJECT: return "OBJECT"; + case KNOWN_OBJECT: return "KNOWN_OBJECT"; case GENERIC: return "GENERIC"; default: UNREACHABLE(); @@ -2600,61 +2551,153 @@ const char* CompareIC::GetStateName(State state) { } -CompareIC::State CompareIC::TargetState(State state, +static CompareIC::State InputState(CompareIC::State old_state, + Handle<Object> value) { + switch (old_state) { + case CompareIC::UNINITIALIZED: + if (value->IsSmi()) return CompareIC::SMI; + if (value->IsHeapNumber()) return CompareIC::NUMBER; + if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING; + if (value->IsString()) return CompareIC::STRING; + if (value->IsSymbol()) return CompareIC::UNIQUE_NAME; + if (value->IsJSObject()) return CompareIC::OBJECT; + break; + case CompareIC::SMI: + if (value->IsSmi()) return CompareIC::SMI; + if (value->IsHeapNumber()) return CompareIC::NUMBER; + break; + case CompareIC::NUMBER: + if (value->IsNumber()) return CompareIC::NUMBER; + break; + case CompareIC::INTERNALIZED_STRING: + if (value->IsInternalizedString()) return CompareIC::INTERNALIZED_STRING; + if (value->IsString()) return CompareIC::STRING; + if (value->IsSymbol()) return CompareIC::UNIQUE_NAME; + break; + case CompareIC::STRING: + if (value->IsString()) return CompareIC::STRING; + break; + case CompareIC::UNIQUE_NAME: + if (value->IsUniqueName()) return CompareIC::UNIQUE_NAME; + break; + case CompareIC::OBJECT: + if (value->IsJSObject()) return CompareIC::OBJECT; + break; + case CompareIC::GENERIC: + break; + case CompareIC::KNOWN_OBJECT: + UNREACHABLE(); + break; + } + return CompareIC::GENERIC; +} + + +CompareIC::State CompareIC::TargetState(State old_state, + State old_left, + State old_right, bool has_inlined_smi_code, 
Handle<Object> x, Handle<Object> y) { - switch (state) { + switch (old_state) { case UNINITIALIZED: - if (x->IsSmi() && y->IsSmi()) return SMIS; - if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS; + if (x->IsSmi() && y->IsSmi()) return SMI; + if (x->IsNumber() && y->IsNumber()) return NUMBER; if (Token::IsOrderedRelationalCompareOp(op_)) { // Ordered comparisons treat undefined as NaN, so the - // HEAP_NUMBER stub will do the right thing. + // NUMBER stub will do the right thing. if ((x->IsNumber() && y->IsUndefined()) || (y->IsNumber() && x->IsUndefined())) { - return HEAP_NUMBERS; + return NUMBER; } } - if (x->IsSymbol() && y->IsSymbol()) { - // We compare symbols as strings if we need to determine + if (x->IsInternalizedString() && y->IsInternalizedString()) { + // We compare internalized strings as plain ones if we need to determine // the order in a non-equality compare. - return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS; + return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING; } - if (x->IsString() && y->IsString()) return STRINGS; + if (x->IsString() && y->IsString()) return STRING; if (!Token::IsEqualityOp(op_)) return GENERIC; + if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME; if (x->IsJSObject() && y->IsJSObject()) { if (Handle<JSObject>::cast(x)->map() == - Handle<JSObject>::cast(y)->map() && - Token::IsEqualityOp(op_)) { - return KNOWN_OBJECTS; + Handle<JSObject>::cast(y)->map()) { + return KNOWN_OBJECT; } else { - return OBJECTS; + return OBJECT; } } return GENERIC; - case SMIS: - return has_inlined_smi_code && x->IsNumber() && y->IsNumber() - ? HEAP_NUMBERS - : GENERIC; - case SYMBOLS: + case SMI: + return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC; + case INTERNALIZED_STRING: + ASSERT(Token::IsEqualityOp(op_)); + if (x->IsString() && y->IsString()) return STRING; + if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME; + return GENERIC; + case NUMBER: + // If the failure was due to one side changing from smi to heap number, + // then keep the state (if other changed at the same time, we will get + // a second miss and then go to generic). + if (old_left == SMI && x->IsHeapNumber()) return NUMBER; + if (old_right == SMI && y->IsHeapNumber()) return NUMBER; + return GENERIC; + case KNOWN_OBJECT: ASSERT(Token::IsEqualityOp(op_)); - return x->IsString() && y->IsString() ? STRINGS : GENERIC; - case HEAP_NUMBERS: - case STRINGS: - case OBJECTS: - case KNOWN_OBJECTS: + if (x->IsJSObject() && y->IsJSObject()) return OBJECT; + return GENERIC; + case STRING: + case UNIQUE_NAME: + case OBJECT: case GENERIC: return GENERIC; } UNREACHABLE(); - return GENERIC; + return GENERIC; // Make the compiler happy. 
+} + + +void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { + HandleScope scope(isolate()); + State previous_left, previous_right, previous_state; + ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left, + &previous_right, &previous_state, NULL); + State new_left = InputState(previous_left, x); + State new_right = InputState(previous_right, y); + State state = TargetState(previous_state, previous_left, previous_right, + HasInlinedSmiCode(address()), x, y); + ICCompareStub stub(op_, new_left, new_right, state); + if (state == KNOWN_OBJECT) { + stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map())); + } + set_target(*stub.GetCode(isolate())); + +#ifdef DEBUG + if (FLAG_trace_ic) { + PrintF("[CompareIC in "); + JavaScriptFrame::PrintTop(isolate(), stdout, false, true); + PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n", + GetStateName(previous_left), + GetStateName(previous_right), + GetStateName(previous_state), + GetStateName(new_left), + GetStateName(new_right), + GetStateName(state), + Token::Name(op_), + static_cast<void*>(*stub.GetCode(isolate()))); + } +#endif + + // Activate inlined smi code. + if (previous_state == UNINITIALIZED) { + PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); + } } -// Used from ic_<arch>.cc. +// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc. RUNTIME_FUNCTION(Code*, CompareIC_Miss) { - NoHandleAllocation na; + NoHandleAllocation na(isolate); ASSERT(args.length() == 3); CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2))); ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1)); @@ -2675,7 +2718,7 @@ RUNTIME_FUNCTION(MaybeObject*, ToBoolean_Patch) { old_types.TraceTransition(new_types); ToBooleanStub stub(tos, new_types); - Handle<Code> code = stub.GetCode(); + Handle<Code> code = stub.GetCode(isolate); ToBooleanIC ic(isolate); ic.patch(*code); return Smi::FromInt(to_boolean_value ? 1 : 0); diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h index 8767f988a2..b225955525 100644 --- a/deps/v8/src/ic.h +++ b/deps/v8/src/ic.h @@ -50,7 +50,6 @@ namespace internal { ICU(KeyedStoreIC_MissForceGeneric) \ ICU(KeyedStoreIC_Slow) \ /* Utilities for IC stubs. */ \ - ICU(LoadCallbackProperty) \ ICU(StoreCallbackProperty) \ ICU(LoadPropertyWithInterceptorOnly) \ ICU(LoadPropertyWithInterceptorForLoad) \ @@ -97,8 +96,6 @@ class IC { Code* target() const { return GetTargetAtAddress(address()); } inline Address address() const; - virtual bool IsGeneric() const { return false; } - // Compute the current IC state based on the target stub, receiver and name. static State StateFrom(Code* target, Object* receiver, Object* name); @@ -112,16 +109,16 @@ class IC { // Returns if this IC is for contextual (no explicit receiver) // access to properties. 
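The per-side bookkeeping introduced above lets a NUMBER compare stub survive the common smi-to-heap-number transition on one operand instead of going straight to GENERIC. A small standalone model of just that case (state names from the patch, everything else hypothetical):

    #include <cassert>

    // States copied from the patched CompareIC; the transition below models only
    // the NUMBER case that the new per-side bookkeeping makes possible.
    enum State { UNINITIALIZED, SMI, NUMBER, GENERIC };

    // If a NUMBER stub misses because one side was previously SMI and has now
    // become a heap number, stay at NUMBER; any other change goes GENERIC.
    State NumberStateOnMiss(State old_left, State old_right,
                            bool x_is_heap_number, bool y_is_heap_number) {
      if (old_left == SMI && x_is_heap_number) return NUMBER;
      if (old_right == SMI && y_is_heap_number) return NUMBER;
      return GENERIC;
    }

    int main() {
      // Left operand widened from smi to heap number: keep the NUMBER stub.
      assert(NumberStateOnMiss(SMI, NUMBER, true, false) == NUMBER);
      // Neither side explains the miss (e.g. a string showed up): go generic.
      assert(NumberStateOnMiss(NUMBER, NUMBER, false, false) == GENERIC);
      return 0;
    }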
- bool IsContextual(Handle<Object> receiver) { + bool IsUndeclaredGlobal(Handle<Object> receiver) { if (receiver->IsGlobalObject()) { - return SlowIsContextual(); + return SlowIsUndeclaredGlobal(); } else { - ASSERT(!SlowIsContextual()); + ASSERT(!SlowIsUndeclaredGlobal()); return false; } } - bool SlowIsContextual() { + bool SlowIsUndeclaredGlobal() { return ComputeMode() == RelocInfo::CODE_TARGET_CONTEXT; } @@ -131,7 +128,8 @@ class IC { JSObject* holder); static inline InlineCacheHolderFlag GetCodeCacheForObject(JSObject* object, JSObject* holder); - static inline JSObject* GetCodeCacheHolder(Object* object, + static inline JSObject* GetCodeCacheHolder(Isolate* isolate, + Object* object, InlineCacheHolderFlag holder); protected: @@ -167,6 +165,40 @@ class IC { static inline void SetTargetAtAddress(Address address, Code* target); static void PostPatching(Address address, Code* target, Code* old_target); + virtual void UpdateMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<String> name) { + set_target(*handler); + } + bool UpdatePolymorphicIC(State state, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Code> code); + void CopyICToMegamorphicCache(Handle<String> name); + void PatchCache(State state, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name, + Handle<Code> code); + virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code); + virtual Handle<Code> megamorphic_stub() { + UNREACHABLE(); + return Handle<Code>::null(); + } + virtual Handle<Code> megamorphic_stub_strict() { + UNREACHABLE(); + return Handle<Code>::null(); + } + virtual Handle<Code> generic_stub() const { + UNREACHABLE(); + return Handle<Code>::null(); + } + virtual Handle<Code> generic_stub_strict() const { + UNREACHABLE(); + return Handle<Code>::null(); + } + private: // Frame pointer for the frame that uses (calls) the IC. Address fp_; @@ -321,14 +353,10 @@ class KeyedCallIC: public CallICBase { class LoadIC: public IC { public: - explicit LoadIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { - ASSERT(target()->is_load_stub()); + explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) { + ASSERT(target()->is_load_stub() || target()->is_keyed_load_stub()); } - MUST_USE_RESULT MaybeObject* Load(State state, - Handle<Object> object, - Handle<String> name); - // Code generator routines. static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); } static void GeneratePreMonomorphic(MacroAssembler* masm) { @@ -338,29 +366,41 @@ class LoadIC: public IC { static void GenerateMegamorphic(MacroAssembler* masm); static void GenerateNormal(MacroAssembler* masm); - // Specialized code generator routines. - static void GenerateArrayLength(MacroAssembler* masm); - static void GenerateStringLength(MacroAssembler* masm, - bool support_wrappers); - static void GenerateFunctionPrototype(MacroAssembler* masm); + MUST_USE_RESULT MaybeObject* Load(State state, + Handle<Object> object, + Handle<String> name); + + protected: + virtual Code::Kind kind() const { return Code::LOAD_IC; } + + virtual Handle<Code> generic_stub() const { + UNREACHABLE(); + return Handle<Code>::null(); + } + + virtual Handle<Code> megamorphic_stub() { + return isolate()->builtins()->LoadIC_Megamorphic(); + } - private: // Update the inline cache and the global stub cache based on the // lookup result. 
void UpdateCaches(LookupResult* lookup, State state, Handle<Object> object, Handle<String> name); + virtual void UpdateMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<String> name); + virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name); + private: // Stub accessors. - Handle<Code> megamorphic_stub() { - return isolate()->builtins()->LoadIC_Megamorphic(); + static Handle<Code> initialize_stub() { + return Isolate::Current()->builtins()->LoadIC_Initialize(); } - static Code* initialize_stub() { - return Isolate::Current()->builtins()->builtin( - Builtins::kLoadIC_Initialize); - } - Handle<Code> pre_monomorphic_stub() { + virtual Handle<Code> pre_monomorphic_stub() { return isolate()->builtins()->LoadIC_PreMonomorphic(); } @@ -370,131 +410,32 @@ class LoadIC: public IC { }; -class KeyedIC: public IC { - public: - enum StubKind { - LOAD, - STORE_NO_TRANSITION, - STORE_TRANSITION_SMI_TO_OBJECT, - STORE_TRANSITION_SMI_TO_DOUBLE, - STORE_TRANSITION_DOUBLE_TO_OBJECT, - STORE_TRANSITION_HOLEY_SMI_TO_OBJECT, - STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE, - STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT, - STORE_AND_GROW_NO_TRANSITION, - STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT, - STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE, - STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT, - STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT, - STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE, - STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT - }; - - static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - - STORE_NO_TRANSITION; - STATIC_ASSERT(kGrowICDelta == - STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT - - STORE_TRANSITION_SMI_TO_OBJECT); - STATIC_ASSERT(kGrowICDelta == - STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE - - STORE_TRANSITION_SMI_TO_DOUBLE); - STATIC_ASSERT(kGrowICDelta == - STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT - - STORE_TRANSITION_DOUBLE_TO_OBJECT); - - explicit KeyedIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) {} - virtual ~KeyedIC() {} - - static inline KeyedAccessGrowMode GetGrowModeFromStubKind( - StubKind stub_kind) { - return (stub_kind >= STORE_AND_GROW_NO_TRANSITION) - ? 
ALLOW_JSARRAY_GROWTH - : DO_NOT_ALLOW_JSARRAY_GROWTH; - } - - static inline StubKind GetGrowStubKind(StubKind stub_kind) { - ASSERT(stub_kind != LOAD); - if (stub_kind < STORE_AND_GROW_NO_TRANSITION) { - stub_kind = static_cast<StubKind>(static_cast<int>(stub_kind) + - kGrowICDelta); - } - return stub_kind; - } - - virtual Handle<Code> GetElementStubWithoutMapCheck( - bool is_js_array, - ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) = 0; - - protected: - virtual Handle<Code> string_stub() { - return Handle<Code>::null(); - } - - virtual Code::Kind kind() const = 0; - - Handle<Code> ComputeStub(Handle<JSObject> receiver, - StubKind stub_kind, - StrictModeFlag strict_mode, - Handle<Code> default_stub); - - virtual Handle<Code> ComputePolymorphicStub( - MapHandleList* receiver_maps, - StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode) = 0; - - Handle<Code> ComputeMonomorphicStubWithoutMapCheck( - Handle<Map> receiver_map, - StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode); - - private: - void GetReceiverMapsForStub(Handle<Code> stub, MapHandleList* result); - - Handle<Code> ComputeMonomorphicStub(Handle<Map> receiver_map, - StubKind stub_kind, - StrictModeFlag strict_mode, - Handle<Code> default_stub); - - Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver, - StubKind stub_kind); - - static bool IsTransitionStubKind(StubKind stub_kind) { - return stub_kind > STORE_NO_TRANSITION && - stub_kind != STORE_AND_GROW_NO_TRANSITION; - } - - static bool IsGrowStubKind(StubKind stub_kind) { - return stub_kind >= STORE_AND_GROW_NO_TRANSITION; - } - - static StubKind GetNoTransitionStubKind(StubKind stub_kind) { - if (!IsTransitionStubKind(stub_kind)) return stub_kind; - if (IsGrowStubKind(stub_kind)) return STORE_AND_GROW_NO_TRANSITION; - return STORE_NO_TRANSITION; - } +enum ICMissMode { + MISS_FORCE_GENERIC, + MISS }; -class KeyedLoadIC: public KeyedIC { +class KeyedLoadIC: public LoadIC { public: - explicit KeyedLoadIC(Isolate* isolate) : KeyedIC(isolate) { + explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate) + : LoadIC(depth, isolate) { ASSERT(target()->is_keyed_load_stub()); } MUST_USE_RESULT MaybeObject* Load(State state, Handle<Object> object, Handle<Object> key, - bool force_generic_stub); + ICMissMode force_generic); // Code generator routines. 
- static void GenerateMiss(MacroAssembler* masm, bool force_generic); + static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic); static void GenerateRuntimeGetProperty(MacroAssembler* masm); static void GenerateInitialize(MacroAssembler* masm) { - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } static void GeneratePreMonomorphic(MacroAssembler* masm) { - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } static void GenerateGeneric(MacroAssembler* masm); static void GenerateString(MacroAssembler* masm); @@ -508,45 +449,33 @@ class KeyedLoadIC: public KeyedIC { static const int kSlowCaseBitFieldMask = (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor); - virtual Handle<Code> GetElementStubWithoutMapCheck( - bool is_js_array, - ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode); - - virtual bool IsGeneric() const { - return target() == *generic_stub(); - } - protected: virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; } - virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps, - StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode); + Handle<Code> LoadElementStub(Handle<JSObject> receiver); - virtual Handle<Code> string_stub() { - return isolate()->builtins()->KeyedLoadIC_String(); + virtual Handle<Code> megamorphic_stub() { + return isolate()->builtins()->KeyedLoadIC_Generic(); + } + virtual Handle<Code> generic_stub() const { + return isolate()->builtins()->KeyedLoadIC_Generic(); } - private: // Update the inline cache. - void UpdateCaches(LookupResult* lookup, - State state, - Handle<Object> object, - Handle<String> name); + virtual void UpdateMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<String> name); + virtual Handle<Code> ComputeLoadHandler(LookupResult* lookup, + Handle<JSObject> receiver, + Handle<String> name); + virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { } + private: // Stub accessors. - static Code* initialize_stub() { - return Isolate::Current()->builtins()->builtin( - Builtins::kKeyedLoadIC_Initialize); - } - Handle<Code> megamorphic_stub() { - return isolate()->builtins()->KeyedLoadIC_Generic(); - } - Handle<Code> generic_stub() const { - return isolate()->builtins()->KeyedLoadIC_Generic(); + static Handle<Code> initialize_stub() { + return Isolate::Current()->builtins()->KeyedLoadIC_Initialize(); } - Handle<Code> pre_monomorphic_stub() { + virtual Handle<Code> pre_monomorphic_stub() { return isolate()->builtins()->KeyedLoadIC_PreMonomorphic(); } Handle<Code> indexed_interceptor_stub() { @@ -555,6 +484,9 @@ class KeyedLoadIC: public KeyedIC { Handle<Code> non_strict_arguments_stub() { return isolate()->builtins()->KeyedLoadIC_NonStrictArguments(); } + Handle<Code> string_stub() { + return isolate()->builtins()->KeyedLoadIC_String(); + } static void Clear(Address address, Code* target); @@ -565,26 +497,44 @@ class KeyedLoadIC: public KeyedIC { class StoreIC: public IC { public: explicit StoreIC(Isolate* isolate) : IC(NO_EXTRA_FRAME, isolate) { - ASSERT(target()->is_store_stub()); + ASSERT(target()->is_store_stub() || target()->is_keyed_store_stub()); } - MUST_USE_RESULT MaybeObject* Store(State state, - StrictModeFlag strict_mode, - Handle<Object> object, - Handle<String> name, - Handle<Object> value); - // Code generators for stub routines. Only called once at startup. 
static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); } static void GenerateMiss(MacroAssembler* masm); static void GenerateMegamorphic(MacroAssembler* masm, StrictModeFlag strict_mode); - static void GenerateArrayLength(MacroAssembler* masm); static void GenerateNormal(MacroAssembler* masm); static void GenerateGlobalProxy(MacroAssembler* masm, StrictModeFlag strict_mode); - private: + MUST_USE_RESULT MaybeObject* Store( + State state, + StrictModeFlag strict_mode, + Handle<Object> object, + Handle<String> name, + Handle<Object> value, + JSReceiver::StoreFromKeyed store_mode = + JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED); + + protected: + virtual Code::Kind kind() const { return Code::STORE_IC; } + virtual Handle<Code> megamorphic_stub() { + return isolate()->builtins()->StoreIC_Megamorphic(); + } + // Stub accessors. + virtual Handle<Code> megamorphic_stub_strict() { + return isolate()->builtins()->StoreIC_Megamorphic_Strict(); + } + virtual Handle<Code> global_proxy_stub() { + return isolate()->builtins()->StoreIC_GlobalProxy(); + } + virtual Handle<Code> global_proxy_stub_strict() { + return isolate()->builtins()->StoreIC_GlobalProxy_Strict(); + } + + // Update the inline cache and the global stub cache based on the // lookup result. void UpdateCaches(LookupResult* lookup, @@ -593,7 +543,15 @@ class StoreIC: public IC { Handle<JSObject> receiver, Handle<String> name, Handle<Object> value); + // Compute the code stub for this store; used for rewriting to + // monomorphic state and making sure that the code stub is in the + // stub cache. + virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name); + private: void set_target(Code* code) { // Strict mode must be preserved across IC patching. ASSERT(Code::GetStrictMode(code->extra_ic_state()) == @@ -601,30 +559,12 @@ class StoreIC: public IC { IC::set_target(code); } - // Stub accessors. - Code* megamorphic_stub() { - return isolate()->builtins()->builtin( - Builtins::kStoreIC_Megamorphic); - } - Code* megamorphic_stub_strict() { - return isolate()->builtins()->builtin( - Builtins::kStoreIC_Megamorphic_Strict); - } - static Code* initialize_stub() { - return Isolate::Current()->builtins()->builtin( - Builtins::kStoreIC_Initialize); - } - static Code* initialize_stub_strict() { - return Isolate::Current()->builtins()->builtin( - Builtins::kStoreIC_Initialize_Strict); - } - Handle<Code> global_proxy_stub() { - return isolate()->builtins()->StoreIC_GlobalProxy(); + static Handle<Code> initialize_stub() { + return Isolate::Current()->builtins()->StoreIC_Initialize(); } - Handle<Code> global_proxy_stub_strict() { - return isolate()->builtins()->StoreIC_GlobalProxy_Strict(); + static Handle<Code> initialize_stub_strict() { + return Isolate::Current()->builtins()->StoreIC_Initialize_Strict(); } - static void Clear(Address address, Code* target); friend class IC; @@ -643,9 +583,9 @@ enum KeyedStoreIncrementLength { }; -class KeyedStoreIC: public KeyedIC { +class KeyedStoreIC: public StoreIC { public: - explicit KeyedStoreIC(Isolate* isolate) : KeyedIC(isolate) { + explicit KeyedStoreIC(Isolate* isolate) : StoreIC(isolate) { ASSERT(target()->is_keyed_store_stub()); } @@ -654,13 +594,13 @@ class KeyedStoreIC: public KeyedIC { Handle<Object> object, Handle<Object> name, Handle<Object> value, - bool force_generic); + ICMissMode force_generic); // Code generators for stub routines. Only called once at startup. 
static void GenerateInitialize(MacroAssembler* masm) { - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } - static void GenerateMiss(MacroAssembler* masm, bool force_generic); + static void GenerateMiss(MacroAssembler* masm, ICMissMode force_generic); static void GenerateSlow(MacroAssembler* masm); static void GenerateRuntimeSetProperty(MacroAssembler* masm, StrictModeFlag strict_mode); @@ -669,32 +609,27 @@ class KeyedStoreIC: public KeyedIC { static void GenerateTransitionElementsSmiToDouble(MacroAssembler* masm); static void GenerateTransitionElementsDoubleToObject(MacroAssembler* masm); - virtual Handle<Code> GetElementStubWithoutMapCheck( - bool is_js_array, - ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode); - - virtual bool IsGeneric() const { - return target() == *generic_stub() || - target() == *generic_stub_strict(); - } - protected: virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; } - virtual Handle<Code> ComputePolymorphicStub(MapHandleList* receiver_maps, - StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode); + virtual Handle<Code> ComputeStoreMonomorphic(LookupResult* lookup, + StrictModeFlag strict_mode, + Handle<JSObject> receiver, + Handle<String> name); + virtual void UpdateMegamorphicCache(Map* map, String* name, Code* code) { } - private: - // Update the inline cache. - void UpdateCaches(LookupResult* lookup, - State state, - StrictModeFlag strict_mode, - Handle<JSObject> receiver, - Handle<String> name, - Handle<Object> value); + virtual Handle<Code> megamorphic_stub() { + return isolate()->builtins()->KeyedStoreIC_Generic(); + } + virtual Handle<Code> megamorphic_stub_strict() { + return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + } + Handle<Code> StoreElementStub(Handle<JSObject> receiver, + KeyedAccessStoreMode store_mode, + StrictModeFlag strict_mode); + + private: void set_target(Code* code) { // Strict mode must be preserved across IC patching. ASSERT(Code::GetStrictMode(code->extra_ic_state()) == @@ -703,19 +638,11 @@ class KeyedStoreIC: public KeyedIC { } // Stub accessors. 
- static Code* initialize_stub() { - return Isolate::Current()->builtins()->builtin( - Builtins::kKeyedStoreIC_Initialize); + static Handle<Code> initialize_stub() { + return Isolate::Current()->builtins()->KeyedStoreIC_Initialize(); } - static Code* initialize_stub_strict() { - return Isolate::Current()->builtins()->builtin( - Builtins::kKeyedStoreIC_Initialize_Strict); - } - Handle<Code> megamorphic_stub() { - return isolate()->builtins()->KeyedStoreIC_Generic(); - } - Handle<Code> megamorphic_stub_strict() { - return isolate()->builtins()->KeyedStoreIC_Generic_Strict(); + static Handle<Code> initialize_stub_strict() { + return Isolate::Current()->builtins()->KeyedStoreIC_Initialize_Strict(); } Handle<Code> generic_stub() const { return isolate()->builtins()->KeyedStoreIC_Generic(); @@ -729,9 +656,12 @@ class KeyedStoreIC: public KeyedIC { static void Clear(Address address, Code* target); - StubKind GetStubKind(Handle<JSObject> receiver, - Handle<Object> key, - Handle<Object> value); + KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver, + Handle<Object> key, + Handle<Object> value); + + Handle<Map> ComputeTransitionedMap(Handle<JSObject> receiver, + KeyedAccessStoreMode store_mode); friend class IC; }; @@ -744,7 +674,7 @@ class UnaryOpIC: public IC { enum TypeInfo { UNINITIALIZED, SMI, - HEAP_NUMBER, + NUMBER, GENERIC }; @@ -769,10 +699,9 @@ class BinaryOpIC: public IC { UNINITIALIZED, SMI, INT32, - HEAP_NUMBER, + NUMBER, ODDBALL, - BOTH_STRING, // Only used for addition operation. - STRING, // Only used for addition operation. At least one string operand. + STRING, // Only used for addition operation. GENERIC }; @@ -783,23 +712,26 @@ class BinaryOpIC: public IC { static const char* GetName(TypeInfo type_info); static State ToState(TypeInfo type_info); - - static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right); - - static TypeInfo JoinTypes(TypeInfo x, TypeInfo y); }; class CompareIC: public IC { public: + // The type/state lattice is defined by the following inequations: + // UNINITIALIZED < ... + // ... < GENERIC + // SMI < NUMBER + // INTERNALIZED_STRING < STRING + // KNOWN_OBJECT < OBJECT enum State { UNINITIALIZED, - SMIS, - HEAP_NUMBERS, - SYMBOLS, - STRINGS, - OBJECTS, - KNOWN_OBJECTS, + SMI, + NUMBER, + STRING, + INTERNALIZED_STRING, + UNIQUE_NAME, // Symbol or InternalizedString + OBJECT, // JSObject + KNOWN_OBJECT, // JSObject with specific map (faster check) GENERIC }; @@ -809,27 +741,27 @@ class CompareIC: public IC { // Update the inline cache for the given operands. void UpdateCaches(Handle<Object> x, Handle<Object> y); + // Factory method for getting an uninitialized compare stub. - static Handle<Code> GetUninitialized(Token::Value op); + static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op); // Helper function for computing the condition for a compare operation. static Condition ComputeCondition(Token::Value op); - // Helper function for determining the state of a compare IC. - static State ComputeState(Code* target); - - // Helper function for determining the operation a compare IC is for. 
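The CompareIC::State comment above defines a small lattice, and a transition is only legal if it moves up that order. A standalone encoding of the stated inequations, handy for checking a proposed transition (the ordering relation itself is illustrative, not part of V8):

    #include <cassert>

    // Sketch of the compare-IC state lattice documented in ic.h:
    //   UNINITIALIZED < everything < GENERIC,
    //   SMI < NUMBER, INTERNALIZED_STRING < STRING, KNOWN_OBJECT < OBJECT.
    enum State { UNINITIALIZED, SMI, NUMBER, INTERNALIZED_STRING, STRING,
                 UNIQUE_NAME, OBJECT, KNOWN_OBJECT, GENERIC };

    bool LessOrEqual(State a, State b) {
      if (a == b || a == UNINITIALIZED || b == GENERIC) return true;
      if (a == SMI && b == NUMBER) return true;
      if (a == INTERNALIZED_STRING && b == STRING) return true;
      if (a == KNOWN_OBJECT && b == OBJECT) return true;
      return false;
    }

    int main() {
      assert(LessOrEqual(SMI, NUMBER));                   // smi compares widen to number
      assert(LessOrEqual(KNOWN_OBJECT, OBJECT));          // map-specialized widens to object
      assert(!LessOrEqual(STRING, INTERNALIZED_STRING));  // states never narrow
      assert(LessOrEqual(STRING, GENERIC));
      return 0;
    }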
- static Token::Value ComputeOperation(Code* target); - static const char* GetStateName(State state); private: - State TargetState(State state, bool has_inlined_smi_code, - Handle<Object> x, Handle<Object> y); + static bool HasInlinedSmiCode(Address address); + + State TargetState(State old_state, + State old_left, + State old_right, + bool has_inlined_smi_code, + Handle<Object> x, + Handle<Object> y); bool strict() const { return op_ == Token::EQ_STRICT; } Condition GetCondition() const { return ComputeCondition(op_); } - State GetState() { return ComputeState(target()); } static Code* GetRawUninitialized(Token::Value op); @@ -853,6 +785,8 @@ class ToBooleanIC: public IC { enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK }; void PatchInlinedSmiCode(Address address, InlinedSmiCheck check); +DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadIC_MissFromStubFailure); + } } // namespace v8::internal #endif // V8_IC_H_ diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h index bbe9a9d209..1c30383d52 100644 --- a/deps/v8/src/incremental-marking-inl.h +++ b/deps/v8/src/incremental-marking-inl.h @@ -37,16 +37,27 @@ namespace internal { bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot, Object* value) { - MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value)); + HeapObject* value_heap_obj = HeapObject::cast(value); + MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj); if (Marking::IsWhite(value_bit)) { MarkBit obj_bit = Marking::MarkBitFrom(obj); if (Marking::IsBlack(obj_bit)) { - BlackToGreyAndUnshift(obj, obj_bit); - RestartIfNotMarking(); + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + if (chunk->IsLeftOfProgressBar(slot)) { + WhiteToGreyAndPush(value_heap_obj, value_bit); + RestartIfNotMarking(); + } else { + return false; + } + } else { + BlackToGreyAndUnshift(obj, obj_bit); + RestartIfNotMarking(); + return false; + } + } else { + return false; } - - // Object is either grey or white. It will be scanned if survives. 
- return false; } if (!is_compacting_) return false; MarkBit obj_bit = Marking::MarkBitFrom(obj); @@ -83,6 +94,10 @@ void IncrementalMarking::RecordWrites(HeapObject* obj) { if (IsMarking()) { MarkBit obj_bit = Marking::MarkBitFrom(obj); if (Marking::IsBlack(obj_bit)) { + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + chunk->set_progress_bar(0); + } BlackToGreyAndUnshift(obj, obj_bit); RestartIfNotMarking(); } diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc index e51d6c1364..658a34c9cd 100644 --- a/deps/v8/src/incremental-marking.cc +++ b/deps/v8/src/incremental-marking.cc @@ -78,7 +78,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj, void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, - Object* value, + Object** slot, Isolate* isolate) { ASSERT(obj->IsHeapObject()); IncrementalMarking* marking = isolate->heap()->incremental_marking(); @@ -94,7 +94,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, MemoryChunk::kWriteBarrierCounterGranularity); } - marking->RecordWrite(obj, NULL, value); + marking->RecordWrite(obj, slot, *slot); } @@ -175,15 +175,100 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj, } +static void MarkObjectGreyDoNotEnqueue(Object* obj) { + if (obj->IsHeapObject()) { + HeapObject* heap_obj = HeapObject::cast(obj); + MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj)); + if (Marking::IsBlack(mark_bit)) { + MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), + -heap_obj->Size()); + } + Marking::AnyToGrey(mark_bit); + } +} + + +static inline void MarkBlackOrKeepGrey(HeapObject* heap_object, + MarkBit mark_bit, + int size) { + ASSERT(!Marking::IsImpossible(mark_bit)); + if (mark_bit.Get()) return; + mark_bit.Set(); + MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); + ASSERT(Marking::IsBlack(mark_bit)); +} + + +static inline void MarkBlackOrKeepBlack(HeapObject* heap_object, + MarkBit mark_bit, + int size) { + ASSERT(!Marking::IsImpossible(mark_bit)); + if (Marking::IsBlack(mark_bit)) return; + Marking::MarkBlack(mark_bit); + MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); + ASSERT(Marking::IsBlack(mark_bit)); +} + + class IncrementalMarkingMarkingVisitor : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> { public: static void Initialize() { StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize(); - + table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental); + table_.Register(kVisitNativeContext, &VisitNativeContextIncremental); table_.Register(kVisitJSRegExp, &VisitJSRegExp); } + static const int kProgressBarScanningChunk = 32 * 1024; + + static void VisitFixedArrayIncremental(Map* map, HeapObject* object) { + MemoryChunk* chunk = MemoryChunk::FromAddress(object->address()); + // TODO(mstarzinger): Move setting of the flag to the allocation site of + // the array. The visitor should just check the flag. + if (FLAG_use_marking_progress_bar && + chunk->owner()->identity() == LO_SPACE) { + chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR); + } + if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + Heap* heap = map->GetHeap(); + // When using a progress bar for large fixed arrays, scan only a chunk of + // the array and try to push it onto the marking deque again until it is + // fully scanned. Fall back to scanning it through to the end in case this + // fails because of a full deque. 
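The chunked scanning described in the comment above bounds each visit of a huge FixedArray to kProgressBarScanningChunk bytes, stores the resume offset in the page's progress bar, and requeues the object until the scan reaches the end. A worked standalone example of that arithmetic (the header size and array size are made up; one chunk per visit, ignoring the full-deque fallback):

    #include <algorithm>
    #include <cstdio>

    const int kStartOffset = 8;                       // assumed header size
    const int kProgressBarScanningChunk = 32 * 1024;  // 32 KB, as in the patch

    int main() {
      int object_size = 100 * 1024;  // a 100 KB fixed array (hypothetical)
      int progress_bar = 0;          // persisted on the MemoryChunk between visits
      int visits = 0;
      while (true) {
        int start_offset = std::max(kStartOffset, progress_bar);
        int end_offset =
            std::min(object_size, start_offset + kProgressBarScanningChunk);
        ++visits;
        std::printf("visit %d: scan [%d, %d)\n", visits, start_offset, end_offset);
        progress_bar = end_offset;             // chunk->set_progress_bar(...)
        if (end_offset >= object_size) break;  // fully scanned; do not requeue
      }
      std::printf("scanned in %d visits\n", visits);  // 100 KB / 32 KB -> 4 visits
      return 0;
    }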
+ int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); + int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset, + chunk->progress_bar()); + int end_offset = Min(object_size, + start_offset + kProgressBarScanningChunk); + bool scan_until_end = false; + do { + VisitPointersWithAnchor(heap, + HeapObject::RawField(object, 0), + HeapObject::RawField(object, start_offset), + HeapObject::RawField(object, end_offset)); + start_offset = end_offset; + end_offset = Min(object_size, end_offset + kProgressBarScanningChunk); + scan_until_end = heap->incremental_marking()->marking_deque()->IsFull(); + } while (scan_until_end && start_offset < object_size); + chunk->set_progress_bar(start_offset); + if (start_offset < object_size) { + heap->incremental_marking()->marking_deque()->UnshiftGrey(object); + } + } else { + FixedArrayVisitor::Visit(map, object); + } + } + + static void VisitNativeContextIncremental(Map* map, HeapObject* object) { + Context* context = Context::cast(object); + + // We will mark cache black with a separate pass + // when we finish marking. + MarkObjectGreyDoNotEnqueue(context->normalized_map_cache()); + VisitNativeContext(map, context); + } + static void VisitJSWeakMap(Map* map, HeapObject* object) { Heap* heap = map->GetHeap(); VisitPointers(heap, @@ -211,15 +296,25 @@ class IncrementalMarkingMarkingVisitor } } + INLINE(static void VisitPointersWithAnchor(Heap* heap, + Object** anchor, + Object** start, + Object** end)) { + for (Object** p = start; p < end; p++) { + Object* obj = *p; + if (obj->NonFailureIsHeapObject()) { + heap->mark_compact_collector()->RecordSlot(anchor, p, obj); + MarkObject(heap, obj); + } + } + } + // Marks the object grey and pushes it on the marking stack. INLINE(static void MarkObject(Heap* heap, Object* obj)) { HeapObject* heap_object = HeapObject::cast(obj); MarkBit mark_bit = Marking::MarkBitFrom(heap_object); if (mark_bit.data_only()) { - if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) { - MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), - heap_object->Size()); - } + MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size()); } else if (Marking::IsWhite(mark_bit)) { heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit); } @@ -243,10 +338,9 @@ class IncrementalMarkingMarkingVisitor class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { public: - IncrementalMarkingRootMarkingVisitor(Heap* heap, - IncrementalMarking* incremental_marking) - : heap_(heap), - incremental_marking_(incremental_marking) { + explicit IncrementalMarkingRootMarkingVisitor( + IncrementalMarking* incremental_marking) + : incremental_marking_(incremental_marking) { } void VisitPointer(Object** p) { @@ -265,10 +359,7 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { HeapObject* heap_object = HeapObject::cast(obj); MarkBit mark_bit = Marking::MarkBitFrom(heap_object); if (mark_bit.data_only()) { - if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) { - MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), - heap_object->Size()); - } + MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size()); } else { if (Marking::IsWhite(mark_bit)) { incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit); @@ -276,7 +367,6 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { } } - Heap* heap_; IncrementalMarking* incremental_marking_; }; @@ -480,8 +570,7 @@ void IncrementalMarking::Start() { ResetStepCounters(); - if 
(heap_->old_pointer_space()->IsSweepingComplete() && - heap_->old_data_space()->IsSweepingComplete()) { + if (heap_->IsSweepingComplete()) { StartMarking(ALLOW_COMPACTION); } else { if (FLAG_trace_incremental_marking) { @@ -494,19 +583,6 @@ void IncrementalMarking::Start() { } -static void MarkObjectGreyDoNotEnqueue(Object* obj) { - if (obj->IsHeapObject()) { - HeapObject* heap_obj = HeapObject::cast(obj); - MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj)); - if (Marking::IsBlack(mark_bit)) { - MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), - -heap_obj->Size()); - } - Marking::AnyToGrey(mark_bit); - } -} - - void IncrementalMarking::StartMarking(CompactionFlag flag) { if (FLAG_trace_incremental_marking) { PrintF("[IncrementalMarking] Start marking\n"); @@ -550,7 +626,7 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) { } // Mark strong roots grey. - IncrementalMarkingRootMarkingVisitor visitor(heap_, this); + IncrementalMarkingRootMarkingVisitor visitor(this); heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG); // Ready to start incremental marking. @@ -606,8 +682,11 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() { ASSERT(new_top != marking_deque_.bottom()); #ifdef DEBUG MarkBit mark_bit = Marking::MarkBitFrom(obj); + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); ASSERT(Marking::IsGrey(mark_bit) || - (obj->IsFiller() && Marking::IsWhite(mark_bit))); + (obj->IsFiller() && Marking::IsWhite(mark_bit)) || + (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && + Marking::IsBlack(mark_bit))); #endif } } @@ -619,46 +698,79 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() { } +void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) { + MarkBit map_mark_bit = Marking::MarkBitFrom(map); + if (Marking::IsWhite(map_mark_bit)) { + WhiteToGreyAndPush(map, map_mark_bit); + } + + IncrementalMarkingMarkingVisitor::IterateBody(map, obj); + + MarkBit mark_bit = Marking::MarkBitFrom(obj); +#ifdef DEBUG + MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); + SLOW_ASSERT(Marking::IsGrey(mark_bit) || + (obj->IsFiller() && Marking::IsWhite(mark_bit)) || + (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && + Marking::IsBlack(mark_bit))); +#endif + MarkBlackOrKeepBlack(obj, mark_bit, size); +} + + +void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) { + Map* filler_map = heap_->one_pointer_filler_map(); + while (!marking_deque_.IsEmpty() && bytes_to_process > 0) { + HeapObject* obj = marking_deque_.Pop(); + + // Explicitly skip one word fillers. Incremental markbit patterns are + // correct only for objects that occupy at least two words. + Map* map = obj->map(); + if (map == filler_map) continue; + + int size = obj->SizeFromMap(map); + bytes_to_process -= size; + VisitObject(map, obj, size); + } +} + + +void IncrementalMarking::ProcessMarkingDeque() { + Map* filler_map = heap_->one_pointer_filler_map(); + while (!marking_deque_.IsEmpty()) { + HeapObject* obj = marking_deque_.Pop(); + + // Explicitly skip one word fillers. Incremental markbit patterns are + // correct only for objects that occupy at least two words. 
+ Map* map = obj->map(); + if (map == filler_map) continue; + + VisitObject(map, obj, obj->SizeFromMap(map)); + } +} + + void IncrementalMarking::Hurry() { if (state() == MARKING) { double start = 0.0; - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Hurry\n"); + if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) { start = OS::TimeCurrentMillis(); + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Hurry\n"); + } } // TODO(gc) hurry can mark objects it encounters black as mutator // was stopped. - Map* filler_map = heap_->one_pointer_filler_map(); - Map* native_context_map = heap_->native_context_map(); - while (!marking_deque_.IsEmpty()) { - HeapObject* obj = marking_deque_.Pop(); - - // Explicitly skip one word fillers. Incremental markbit patterns are - // correct only for objects that occupy at least two words. - Map* map = obj->map(); - if (map == filler_map) { - continue; - } else if (map == native_context_map) { - // Native contexts have weak fields. - IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj); - } else { - MarkBit map_mark_bit = Marking::MarkBitFrom(map); - if (Marking::IsWhite(map_mark_bit)) { - WhiteToGreyAndPush(map, map_mark_bit); - } - IncrementalMarkingMarkingVisitor::IterateBody(map, obj); - } - - MarkBit mark_bit = Marking::MarkBitFrom(obj); - ASSERT(!Marking::IsBlack(mark_bit)); - Marking::MarkBlack(mark_bit); - MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size()); - } + ProcessMarkingDeque(); state_ = COMPLETE; - if (FLAG_trace_incremental_marking) { + if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) { double end = OS::TimeCurrentMillis(); - PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n", - static_cast<int>(end - start)); + double delta = end - start; + heap_->AddMarkingTime(delta); + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n", + static_cast<int>(delta)); + } } } @@ -774,7 +886,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes, // allocation), so to reduce the lumpiness we don't use the write barriers // invoked since last step directly to determine the amount of work to do. intptr_t bytes_to_process = - marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold); + marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_); allocated_ = 0; write_barriers_invoked_since_last_step_ = 0; @@ -782,53 +894,18 @@ void IncrementalMarking::Step(intptr_t allocated_bytes, double start = 0; - if (FLAG_trace_incremental_marking || FLAG_trace_gc) { + if (FLAG_trace_incremental_marking || FLAG_trace_gc || + FLAG_print_cumulative_gc_stat) { start = OS::TimeCurrentMillis(); } if (state_ == SWEEPING) { - if (heap_->AdvanceSweepers(static_cast<int>(bytes_to_process))) { + if (heap_->EnsureSweepersProgressed(static_cast<int>(bytes_to_process))) { bytes_scanned_ = 0; StartMarking(PREVENT_COMPACTION); } } else if (state_ == MARKING) { - Map* filler_map = heap_->one_pointer_filler_map(); - Map* native_context_map = heap_->native_context_map(); - while (!marking_deque_.IsEmpty() && bytes_to_process > 0) { - HeapObject* obj = marking_deque_.Pop(); - - // Explicitly skip one word fillers. Incremental markbit patterns are - // correct only for objects that occupy at least two words. 
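The budget change above sizes a marking step by the write barriers actually invoked since the last step rather than a fixed threshold. A worked example of the formula, assuming both quantities are tracked on the same byte scale as the Max() suggests (all numbers hypothetical):

    #include <algorithm>
    #include <cstdio>

    int main() {
      // bytes_to_process = marking_speed_ * Max(allocated_, write_barriers_invoked_)
      long marking_speed = 8;                   // hypothetical current speed factor
      long allocated = 48 * 1024;               // bytes allocated since the last step
      long write_barriers_invoked = 96 * 1024;  // heavier mutation than allocation
      long bytes_to_process =
          marking_speed * std::max(allocated, write_barriers_invoked);
      std::printf("marking budget: %ld bytes\n", bytes_to_process);  // 8 * 96 KB = 768 KB
      return 0;
    }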
- Map* map = obj->map(); - if (map == filler_map) continue; - - int size = obj->SizeFromMap(map); - bytes_to_process -= size; - MarkBit map_mark_bit = Marking::MarkBitFrom(map); - if (Marking::IsWhite(map_mark_bit)) { - WhiteToGreyAndPush(map, map_mark_bit); - } - - // TODO(gc) switch to static visitor instead of normal visitor. - if (map == native_context_map) { - // Native contexts have weak fields. - Context* ctx = Context::cast(obj); - - // We will mark cache black with a separate pass - // when we finish marking. - MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache()); - - IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx); - } else { - IncrementalMarkingMarkingVisitor::IterateBody(map, obj); - } - - MarkBit obj_mark_bit = Marking::MarkBitFrom(obj); - SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) || - (obj->IsFiller() && Marking::IsWhite(obj_mark_bit))); - Marking::MarkBlack(obj_mark_bit); - MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size); - } + ProcessMarkingDeque(bytes_to_process); if (marking_deque_.IsEmpty()) MarkingComplete(action); } @@ -898,12 +975,14 @@ void IncrementalMarking::Step(intptr_t allocated_bytes, } } - if (FLAG_trace_incremental_marking || FLAG_trace_gc) { + if (FLAG_trace_incremental_marking || FLAG_trace_gc || + FLAG_print_cumulative_gc_stat) { double end = OS::TimeCurrentMillis(); double delta = (end - start); longest_step_ = Max(longest_step_, delta); steps_took_ += delta; steps_took_since_last_gc_ += delta; + heap_->AddMarkingTime(delta); } } diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h index 1a86fcd447..fc5a978cc4 100644 --- a/deps/v8/src/incremental-marking.h +++ b/deps/v8/src/incremental-marking.h @@ -127,7 +127,7 @@ class IncrementalMarking { } static void RecordWriteFromCode(HeapObject* obj, - Object* value, + Object** slot, Isolate* isolate); static void RecordWriteForEvacuationFromCode(HeapObject* obj, @@ -164,19 +164,6 @@ class IncrementalMarking { inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit); - // Does white->black or keeps gray or black color. Returns true if converting - // white to black. - inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) { - ASSERT(!Marking::IsImpossible(mark_bit)); - if (mark_bit.Get()) { - // Grey or black: Keep the color. 
- return false; - } - mark_bit.Set(); - ASSERT(Marking::IsBlack(mark_bit)); - return true; - } - inline int steps_count() { return steps_count_; } @@ -259,6 +246,12 @@ class IncrementalMarking { void EnsureMarkingDequeIsCommitted(); + INLINE(void ProcessMarkingDeque()); + + INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process)); + + INLINE(void VisitObject(Map* map, HeapObject* obj, int size)); + Heap* heap_; State state_; diff --git a/deps/v8/src/interface.cc b/deps/v8/src/interface.cc index 336be82c60..603dfe9b86 100644 --- a/deps/v8/src/interface.cc +++ b/deps/v8/src/interface.cc @@ -35,8 +35,8 @@ namespace internal { static bool Match(void* key1, void* key2) { String* name1 = *static_cast<String**>(key1); String* name2 = *static_cast<String**>(key2); - ASSERT(name1->IsSymbol()); - ASSERT(name2->IsSymbol()); + ASSERT(name1->IsInternalizedString()); + ASSERT(name2->IsInternalizedString()); return name1 == name2; } @@ -170,6 +170,8 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) { ASSERT(that->forward_ == NULL); ASSERT(!this->IsValue()); ASSERT(!that->IsValue()); + ASSERT(this->index_ == -1); + ASSERT(that->index_ == -1); ASSERT(*ok); #ifdef DEBUG @@ -194,15 +196,6 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) { return; } - // Merge instance. - if (!that->instance_.is_null()) { - if (!this->instance_.is_null() && *this->instance_ != *that->instance_) { - *ok = false; - return; - } - this->instance_ = that->instance_; - } - // Merge interfaces. this->flags_ |= that->flags_; that->forward_ = this; @@ -227,7 +220,7 @@ void Interface::Print(int n) { } else if (IsValue()) { PrintF("value\n"); } else if (IsModule()) { - PrintF("module %s{", IsFrozen() ? "" : "(unresolved) "); + PrintF("module %d %s{", Index(), IsFrozen() ? "" : "(unresolved) "); ZoneHashMap* map = Chase()->exports_; if (map == NULL || map->occupancy() == 0) { PrintF("}\n"); diff --git a/deps/v8/src/interface.h b/deps/v8/src/interface.h index 94ef11ba5c..f824a9a874 100644 --- a/deps/v8/src/interface.h +++ b/deps/v8/src/interface.h @@ -108,18 +108,18 @@ class Interface : public ZoneObject { if (*ok) Chase()->flags_ |= MODULE; } - // Set associated instance object. - void MakeSingleton(Handle<JSModule> instance, bool* ok) { - *ok = IsModule() && Chase()->instance_.is_null(); - if (*ok) Chase()->instance_ = instance; - } - // Do not allow any further refinements, directly or through unification. void Freeze(bool* ok) { *ok = IsValue() || IsModule(); if (*ok) Chase()->flags_ |= FROZEN; } + // Assign an index. + void Allocate(int index) { + ASSERT(IsModule() && IsFrozen() && Chase()->index_ == -1); + Chase()->index_ = index; + } + // --------------------------------------------------------------------------- // Accessors. @@ -138,7 +138,23 @@ class Interface : public ZoneObject { // Check whether this is closed (i.e. fully determined). bool IsFrozen() { return Chase()->flags_ & FROZEN; } - Handle<JSModule> Instance() { return Chase()->instance_; } + bool IsUnified(Interface* that) { + return Chase() == that->Chase() + || (this->IsValue() == that->IsValue() && + this->IsConst() == that->IsConst()); + } + + int Length() { + ASSERT(IsModule() && IsFrozen()); + ZoneHashMap* exports = Chase()->exports_; + return exports ? exports->occupancy() : 0; + } + + // The context slot in the hosting global context pointing to this module. + int Index() { + ASSERT(IsModule() && IsFrozen()); + return Chase()->index_; + } // Look up an exported name. Returns NULL if not (yet) defined. 
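Interfaces are unified through the forward_ link shown above: DoUnify redirects one interface at the other and Chase() follows the chain to the surviving representative, so later refinements such as the new index_ land on a single node. A small standalone sketch of that forwarding scheme (simplified types, not V8 code):

    #include <cassert>

    // Minimal forwarding scheme in the style of Interface::Chase()/DoUnify():
    // each node either is its own representative or forwards to another node.
    struct Node {
      Node* forward = nullptr;
      int flags = 0;
      int index = -1;
      Node* Chase() {
        Node* n = this;
        while (n->forward != nullptr) n = n->forward;
        return n;
      }
    };

    void Unify(Node* a, Node* b) {
      Node* ra = a->Chase();
      Node* rb = b->Chase();
      if (ra == rb) return;
      ra->flags |= rb->flags;  // merge refinements into the surviving node
      rb->forward = ra;        // redirect the other chain
    }

    int main() {
      Node x, y;
      y.flags = 2;
      Unify(&x, &y);
      x.Chase()->index = 7;           // a later refinement via either handle...
      assert(y.Chase()->index == 7);  // ...is visible through the other
      assert(x.Chase()->flags == 2);
      return 0;
    }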
Interface* Lookup(Handle<String> name, Zone* zone); @@ -194,12 +210,13 @@ class Interface : public ZoneObject { int flags_; Interface* forward_; // Unification link ZoneHashMap* exports_; // Module exports and their types (allocated lazily) - Handle<JSModule> instance_; + int index_; explicit Interface(int flags) : flags_(flags), forward_(NULL), - exports_(NULL) { + exports_(NULL), + index_(-1) { #ifdef DEBUG if (FLAG_print_interface_details) PrintF("# Creating %p\n", static_cast<void*>(this)); diff --git a/deps/v8/src/interpreter-irregexp.cc b/deps/v8/src/interpreter-irregexp.cc index 3a92b84554..5abeb5a106 100644 --- a/deps/v8/src/interpreter-irregexp.cc +++ b/deps/v8/src/interpreter-irregexp.cc @@ -68,7 +68,7 @@ static bool BackRefMatchesNoCase(Canonicalize* interp_canonicalize, int from, int current, int len, - Vector<const char> subject) { + Vector<const uint8_t> subject) { for (int i = 0; i < len; i++) { unsigned int old_char = subject[from++]; unsigned int new_char = subject[current++]; @@ -617,7 +617,7 @@ RegExpImpl::IrregexpResult IrregexpInterpreter::Match( uc16 previous_char = '\n'; String::FlatContent subject_content = subject->GetFlatContent(); if (subject_content.IsAscii()) { - Vector<const char> subject_vector = subject_content.ToAsciiVector(); + Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector(); if (start_position != 0) previous_char = subject_vector[start_position - 1]; return RawMatch(isolate, code_base, diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc index 75e15a4541..5d4bc89b99 100644 --- a/deps/v8/src/isolate.cc +++ b/deps/v8/src/isolate.cc @@ -40,6 +40,7 @@ #include "isolate.h" #include "lithium-allocator.h" #include "log.h" +#include "marking-thread.h" #include "messages.h" #include "platform.h" #include "regexp-stack.h" @@ -49,6 +50,7 @@ #include "simulator.h" #include "spaces.h" #include "stub-cache.h" +#include "sweeper-thread.h" #include "version.h" #include "vm-state-inl.h" @@ -129,6 +131,24 @@ v8::TryCatch* ThreadLocalTop::TryCatchHandler() { } +int SystemThreadManager::NumberOfParallelSystemThreads( + ParallelSystemComponent type) { + int number_of_threads = Min(OS::NumberOfCores(), kMaxThreads); + ASSERT(number_of_threads > 0); + if (number_of_threads == 1) { + return 0; + } + if (type == PARALLEL_SWEEPING) { + return number_of_threads; + } else if (type == CONCURRENT_SWEEPING) { + return number_of_threads - 1; + } else if (type == PARALLEL_MARKING) { + return number_of_threads; + } + return 1; +} + + // Create a dummy thread that will wait forever on a semaphore. The only // purpose for this thread is to have some stack area to save essential data // into for use by a stacks only core dump (aka minidump). 
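
The new SystemThreadManager::NumberOfParallelSystemThreads helper above sizes V8's helper-thread pools from the core count, capped at kMaxThreads (4), and returns 0 on single-core machines so the parallel paths stay on the main thread. Below is a minimal standalone sketch of that heuristic (not part of the diff), with the core count passed in explicitly instead of being queried through OS::NumberOfCores(), so the resulting counts can be checked directly.

// Sketch only, not from the diff: mirrors the heuristic in
// SystemThreadManager::NumberOfParallelSystemThreads, with the core count
// supplied as a parameter instead of OS::NumberOfCores().
#include <algorithm>
#include <cstdio>

enum ParallelSystemComponent {
  PARALLEL_SWEEPING,
  CONCURRENT_SWEEPING,
  PARALLEL_MARKING,
  PARALLEL_RECOMPILATION
};

static const int kMaxThreads = 4;

static int NumberOfParallelSystemThreads(ParallelSystemComponent type,
                                         int core_count) {
  int number_of_threads = std::min(core_count, kMaxThreads);
  if (number_of_threads == 1) return 0;  // Single core: no helper threads.
  if (type == PARALLEL_SWEEPING) return number_of_threads;
  if (type == CONCURRENT_SWEEPING) return number_of_threads - 1;
  if (type == PARALLEL_MARKING) return number_of_threads;
  return 1;  // PARALLEL_RECOMPILATION and anything else.
}

int main() {
  // On an 8-core machine this prints "4 3 4 1": four parallel sweepers,
  // three concurrent sweepers, four markers, one recompilation thread.
  std::printf("%d %d %d %d\n",
              NumberOfParallelSystemThreads(PARALLEL_SWEEPING, 8),
              NumberOfParallelSystemThreads(CONCURRENT_SWEEPING, 8),
              NumberOfParallelSystemThreads(PARALLEL_MARKING, 8),
              NumberOfParallelSystemThreads(PARALLEL_RECOMPILATION, 8));
  return 0;
}
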
@@ -318,7 +338,7 @@ Thread::LocalStorageKey Isolate::thread_id_key_; Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_; Mutex* Isolate::process_wide_mutex_ = OS::CreateMutex(); Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL; - +Atomic32 Isolate::isolate_counter_ = 0; Isolate::PerIsolateThreadData* Isolate::AllocatePerIsolateThreadData( ThreadId thread_id) { @@ -408,9 +428,9 @@ void Isolate::EnterDefaultIsolate() { } -Isolate* Isolate::GetDefaultIsolateForLocking() { +v8::Isolate* Isolate::GetDefaultIsolateForLocking() { EnsureDefaultIsolate(); - return default_isolate_; + return reinterpret_cast<v8::Isolate*>(default_isolate_); } @@ -426,11 +446,6 @@ char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) { } -void Isolate::IterateThread(ThreadVisitor* v) { - v->VisitThread(this, thread_local_top()); -} - - void Isolate::IterateThread(ThreadVisitor* v, char* t) { ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t); v->VisitThread(this, thread); @@ -526,11 +541,11 @@ Handle<String> Isolate::StackTraceString() { OS::PrintError( "If you are lucky you may find a partial stack dump on stdout.\n\n"); incomplete_message_->OutputToStdOut(); - return factory()->empty_symbol(); + return factory()->empty_string(); } else { OS::Abort(); // Unreachable - return factory()->empty_symbol(); + return factory()->empty_string(); } } @@ -541,22 +556,113 @@ void Isolate::PushStackTraceAndDie(unsigned int magic, unsigned int magic2) { const int kMaxStackTraceSize = 8192; Handle<String> trace = StackTraceString(); - char buffer[kMaxStackTraceSize]; + uint8_t buffer[kMaxStackTraceSize]; int length = Min(kMaxStackTraceSize - 1, trace->length()); String::WriteToFlat(*trace, buffer, 0, length); buffer[length] = '\0'; + // TODO(dcarney): convert buffer to utf8? OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2, static_cast<void*>(object), static_cast<void*>(map), - buffer); + reinterpret_cast<char*>(buffer)); OS::Abort(); } -void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) { +// Determines whether the given stack frame should be displayed in +// a stack trace. The caller is the error constructor that asked +// for the stack trace to be collected. The first time a construct +// call to this function is encountered it is skipped. The seen_caller +// in/out parameter is used to remember if the caller has been seen +// yet. +static bool IsVisibleInStackTrace(StackFrame* raw_frame, + Object* caller, + bool* seen_caller) { + // Only display JS frames. + if (!raw_frame->is_java_script()) return false; + JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); + Object* raw_fun = frame->function(); + // Not sure when this can happen but skip it just in case. + if (!raw_fun->IsJSFunction()) return false; + if ((raw_fun == caller) && !(*seen_caller)) { + *seen_caller = true; + return false; + } + // Skip all frames until we've seen the caller. + if (!(*seen_caller)) return false; + // Also, skip non-visible built-in functions and any call with the builtins + // object as receiver, so as to not reveal either the builtins object or + // an internal function. + // The --builtins-in-stack-traces command line flag allows including + // internal call sites in the stack trace for debugging purposes. 
+ if (!FLAG_builtins_in_stack_traces) { + JSFunction* fun = JSFunction::cast(raw_fun); + if (frame->receiver()->IsJSBuiltinsObject() || + (fun->IsBuiltin() && !fun->shared()->native())) { + return false; + } + } + return true; +} + + +Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object, + Handle<Object> caller, + int limit) { + limit = Max(limit, 0); // Ensure that limit is not negative. + int initial_size = Min(limit, 10); + Handle<FixedArray> elements = + factory()->NewFixedArrayWithHoles(initial_size * 4); + + // If the caller parameter is a function we skip frames until we're + // under it before starting to collect. + bool seen_caller = !caller->IsJSFunction(); + int cursor = 0; + int frames_seen = 0; + for (StackFrameIterator iter(this); + !iter.done() && frames_seen < limit; + iter.Advance()) { + StackFrame* raw_frame = iter.frame(); + if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) { + frames_seen++; + JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); + // Set initial size to the maximum inlining level + 1 for the outermost + // function. + List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1); + frame->Summarize(&frames); + for (int i = frames.length() - 1; i >= 0; i--) { + if (cursor + 4 > elements->length()) { + int new_capacity = JSObject::NewElementsCapacity(elements->length()); + Handle<FixedArray> new_elements = + factory()->NewFixedArrayWithHoles(new_capacity); + for (int i = 0; i < cursor; i++) { + new_elements->set(i, elements->get(i)); + } + elements = new_elements; + } + ASSERT(cursor + 4 <= elements->length()); + + Handle<Object> recv = frames[i].receiver(); + Handle<JSFunction> fun = frames[i].function(); + Handle<Code> code = frames[i].code(); + Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this); + elements->set(cursor++, *recv); + elements->set(cursor++, *fun); + elements->set(cursor++, *code); + elements->set(cursor++, *offset); + } + } + } + Handle<JSArray> result = factory()->NewJSArrayWithElements(elements); + result->set_length(Smi::FromInt(cursor)); + return result; +} + + +void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) { if (capture_stack_trace_for_uncaught_exceptions_) { // Capture stack trace for a detailed exception message. 
- Handle<String> key = factory()->hidden_stack_trace_symbol(); + Handle<String> key = factory()->hidden_stack_trace_string(); Handle<JSArray> stack_trace = CaptureCurrentStackTrace( stack_trace_for_uncaught_exceptions_frame_limit_, stack_trace_for_uncaught_exceptions_options_); @@ -571,17 +677,21 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace( int limit = Max(frame_limit, 0); Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit); - Handle<String> column_key = factory()->LookupAsciiSymbol("column"); - Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber"); - Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName"); - Handle<String> name_or_source_url_key = - factory()->LookupAsciiSymbol("nameOrSourceURL"); + Handle<String> column_key = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column")); + Handle<String> line_key = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber")); + Handle<String> script_key = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName")); Handle<String> script_name_or_source_url_key = - factory()->LookupAsciiSymbol("scriptNameOrSourceURL"); - Handle<String> function_key = factory()->LookupAsciiSymbol("functionName"); - Handle<String> eval_key = factory()->LookupAsciiSymbol("isEval"); + factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("scriptNameOrSourceURL")); + Handle<String> function_key = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName")); + Handle<String> eval_key = + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval")); Handle<String> constructor_key = - factory()->LookupAsciiSymbol("isConstructor"); + factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor")); StackTraceFrameIterator it(this); int frames_seen = 0; @@ -618,13 +728,13 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace( this, JSObject::SetLocalPropertyIgnoreAttributes( stack_frame, column_key, - Handle<Smi>(Smi::FromInt(column_offset + 1)), NONE)); + Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE)); } CHECK_NOT_EMPTY_HANDLE( this, JSObject::SetLocalPropertyIgnoreAttributes( stack_frame, line_key, - Handle<Smi>(Smi::FromInt(line_number + 1)), NONE)); + Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE)); } if (options & StackTrace::kScriptName) { @@ -635,18 +745,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace( } if (options & StackTrace::kScriptNameOrSourceURL) { - Handle<Object> script_name(script->name(), this); - Handle<JSValue> script_wrapper = GetScriptWrapper(script); - Handle<Object> property = GetProperty(script_wrapper, - name_or_source_url_key); - ASSERT(property->IsJSFunction()); - Handle<JSFunction> method = Handle<JSFunction>::cast(property); - bool caught_exception; - Handle<Object> result = Execution::TryCall(method, script_wrapper, 0, - NULL, &caught_exception); - if (caught_exception) { - result = factory()->undefined_value(); - } + Handle<Object> result = GetScriptNameOrSourceURL(script); CHECK_NOT_EMPTY_HANDLE(this, JSObject::SetLocalPropertyIgnoreAttributes( stack_frame, script_name_or_source_url_key, @@ -655,7 +754,7 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace( if (options & StackTrace::kFunctionName) { Handle<Object> fun_name(fun->shared()->name(), this); - if (fun_name->ToBoolean()->IsFalse()) { + if (!fun_name->BooleanValue()) { fun_name = Handle<Object>(fun->shared()->inferred_name(), this); } CHECK_NOT_EMPTY_HANDLE(this, @@ -727,9 +826,10 @@ void Isolate::PrintStack() { } -static void 
PrintFrames(StringStream* accumulator, +static void PrintFrames(Isolate* isolate, + StringStream* accumulator, StackFrame::PrintMode mode) { - StackFrameIterator it; + StackFrameIterator it(isolate); for (int i = 0; !it.done(); it.Advance()) { it.frame()->Print(accumulator, mode, i++); } @@ -739,7 +839,7 @@ static void PrintFrames(StringStream* accumulator, void Isolate::PrintStack(StringStream* accumulator) { if (!IsInitialized()) { accumulator->Add( - "\n==== Stack trace is not available ==========================\n\n"); + "\n==== JS stack trace is not available =======================\n\n"); accumulator->Add( "\n==== Isolate for the thread is not initialized =============\n\n"); return; @@ -752,12 +852,12 @@ void Isolate::PrintStack(StringStream* accumulator) { if (c_entry_fp(thread_local_top()) == 0) return; accumulator->Add( - "\n==== Stack trace ============================================\n\n"); - PrintFrames(accumulator, StackFrame::OVERVIEW); + "\n==== JS stack trace =========================================\n\n"); + PrintFrames(this, accumulator, StackFrame::OVERVIEW); accumulator->Add( "\n==== Details ================================================\n\n"); - PrintFrames(accumulator, StackFrame::DETAILS); + PrintFrames(this, accumulator, StackFrame::DETAILS); accumulator->PrintMentionedObjectCache(); accumulator->Add("=====================\n\n"); @@ -783,9 +883,9 @@ void Isolate::ReportFailedAccessCheck(JSObject* receiver, v8::AccessType type) { constructor->shared()->get_api_func_data()->access_check_info(); if (data_obj == heap_.undefined_value()) return; - HandleScope scope; + HandleScope scope(this); Handle<JSObject> receiver_handle(receiver); - Handle<Object> data(AccessCheckInfo::cast(data_obj)->data()); + Handle<Object> data(AccessCheckInfo::cast(data_obj)->data(), this); { VMState state(this, EXTERNAL); thread_local_top()->failed_access_check_callback_( v8::Utils::ToLocal(receiver_handle), @@ -834,7 +934,7 @@ bool Isolate::MayNamedAccess(JSObject* receiver, Object* key, // Skip checks for hidden properties access. Note, we do not // require existence of a context in this case. - if (key == heap_.hidden_symbol()) return true; + if (key == heap_.hidden_string()) return true; // Check for compatibility between the security tokens in the // current lexical context and the accessed object. @@ -922,16 +1022,30 @@ const char* const Isolate::kStackOverflowMessage = Failure* Isolate::StackOverflow() { - HandleScope scope; - Handle<String> key = factory()->stack_overflow_symbol(); + HandleScope scope(this); + // At this point we cannot create an Error object using its javascript + // constructor. Instead, we copy the pre-constructed boilerplate and + // attach the stack trace as a hidden property. + Handle<String> key = factory()->stack_overflow_string(); Handle<JSObject> boilerplate = - Handle<JSObject>::cast(GetProperty(js_builtins_object(), key)); - Handle<Object> exception = Copy(boilerplate); - // TODO(1240995): To avoid having to call JavaScript code to compute - // the message for stack overflow exceptions which is very likely to - // double fault with another stack overflow exception, we use a - // precomputed message. + Handle<JSObject>::cast(GetProperty(this, js_builtins_object(), key)); + Handle<JSObject> exception = Copy(boilerplate); DoThrow(*exception, NULL); + + // Get stack trace limit. 
+ Handle<Object> error = GetProperty(js_builtins_object(), "$Error"); + if (!error->IsJSObject()) return Failure::Exception(); + Handle<Object> stack_trace_limit = + GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit"); + if (!stack_trace_limit->IsNumber()) return Failure::Exception(); + double dlimit = stack_trace_limit->Number(); + int limit = isnan(dlimit) ? 0 : static_cast<int>(dlimit); + + Handle<JSArray> stack_trace = CaptureSimpleStackTrace( + exception, factory()->undefined_value(), limit); + JSObject::SetHiddenProperty(exception, + factory()->hidden_stack_trace_string(), + stack_trace); return Failure::Exception(); } @@ -964,7 +1078,7 @@ Failure* Isolate::ReThrow(MaybeObject* exception) { Failure* Isolate::ThrowIllegalOperation() { - return Throw(heap_.illegal_access_symbol()); + return Throw(heap_.illegal_access_string()); } @@ -972,9 +1086,12 @@ void Isolate::ScheduleThrow(Object* exception) { // When scheduling a throw we first throw the exception to get the // error reporting if it is uncaught before rescheduling it. Throw(exception); - thread_local_top()->scheduled_exception_ = pending_exception(); - thread_local_top()->external_caught_exception_ = false; - clear_pending_exception(); + PropagatePendingExceptionToExternalTryCatch(); + if (has_pending_exception()) { + thread_local_top()->scheduled_exception_ = pending_exception(); + thread_local_top()->external_caught_exception_ = false; + clear_pending_exception(); + } } @@ -989,14 +1106,14 @@ Failure* Isolate::PromoteScheduledException() { void Isolate::PrintCurrentStackTrace(FILE* out) { StackTraceFrameIterator it(this); while (!it.done()) { - HandleScope scope; + HandleScope scope(this); // Find code position if recorded in relocation info. JavaScriptFrame* frame = it.frame(); int pos = frame->LookupCode()->SourcePosition(frame->pc()); - Handle<Object> pos_obj(Smi::FromInt(pos)); + Handle<Object> pos_obj(Smi::FromInt(pos), this); // Fetch function and receiver. Handle<JSFunction> fun(JSFunction::cast(frame->function())); - Handle<Object> recv(frame->receiver()); + Handle<Object> recv(frame->receiver(), this); // Advance to the next JavaScript frame and determine if the // current frame is the top-level frame. it.Advance(); @@ -1066,12 +1183,13 @@ bool Isolate::ShouldReportException(bool* can_be_caught_externally, bool Isolate::IsErrorObject(Handle<Object> obj) { if (!obj->IsJSObject()) return false; - String* error_key = *(factory()->LookupAsciiSymbol("$Error")); + String* error_key = + *(factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error"))); Object* error_constructor = js_builtins_object()->GetPropertyNoExceptionThrown(error_key); for (Object* prototype = *obj; !prototype->IsNull(); - prototype = prototype->GetPrototype()) { + prototype = prototype->GetPrototype(this)) { if (!prototype->IsJSObject()) return false; if (JSObject::cast(prototype)->map()->constructor() == error_constructor) { return true; @@ -1084,8 +1202,8 @@ bool Isolate::IsErrorObject(Handle<Object> obj) { void Isolate::DoThrow(Object* exception, MessageLocation* location) { ASSERT(!has_pending_exception()); - HandleScope scope; - Handle<Object> exception_handle(exception); + HandleScope scope(this); + Handle<Object> exception_handle(exception, this); // Determine reporting and whether the exception is caught externally. 
bool catchable_by_javascript = is_catchable_by_javascript(exception); @@ -1122,7 +1240,7 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) { if (capture_stack_trace_for_uncaught_exceptions_) { if (IsErrorObject(exception_handle)) { // We fetch the stack trace that corresponds to this error object. - String* key = heap()->hidden_stack_trace_symbol(); + String* key = heap()->hidden_stack_trace_string(); Object* stack_property = JSObject::cast(*exception_handle)->GetHiddenProperty(key); // Property lookup may have failed. In this case it's probably not @@ -1138,10 +1256,23 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) { stack_trace_for_uncaught_exceptions_options_); } } + + Handle<Object> exception_arg = exception_handle; + // If the exception argument is a custom object, turn it into a string + // before throwing as uncaught exception. Note that the pending + // exception object to be set later must not be turned into a string. + if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) { + bool failed = false; + exception_arg = Execution::ToDetailString(exception_arg, &failed); + if (failed) { + exception_arg = factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("exception")); + } + } Handle<Object> message_obj = MessageHandler::MakeMessageObject( "uncaught_exception", location, - HandleVector<Object>(&exception_handle, 1), + HandleVector<Object>(&exception_arg, 1), stack_trace, stack_trace_object); thread_local_top()->pending_message_obj_ = *message_obj; @@ -1235,8 +1366,8 @@ void Isolate::ReportPendingMessages() { // the native context. Note: We have to mark the native context here // since the GenerateThrowOutOfMemory stub cannot make a RuntimeCall to // set it. - HandleScope scope; - if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) { + HandleScope scope(this); + if (thread_local_top_.pending_exception_->IsOutOfMemory()) { context()->mark_out_of_memory(); } else if (thread_local_top_.pending_exception_ == heap()->termination_exception()) { @@ -1246,8 +1377,9 @@ void Isolate::ReportPendingMessages() { if (thread_local_top_.has_pending_message_) { thread_local_top_.has_pending_message_ = false; if (!thread_local_top_.pending_message_obj_->IsTheHole()) { - HandleScope scope; - Handle<Object> message_obj(thread_local_top_.pending_message_obj_); + HandleScope scope(this); + Handle<Object> message_obj(thread_local_top_.pending_message_obj_, + this); if (thread_local_top_.pending_message_script_ != NULL) { Handle<Script> script(thread_local_top_.pending_message_script_); int start_pos = thread_local_top_.pending_message_start_pos_; @@ -1264,6 +1396,24 @@ void Isolate::ReportPendingMessages() { } +MessageLocation Isolate::GetMessageLocation() { + ASSERT(has_pending_exception()); + + if (!thread_local_top_.pending_exception_->IsOutOfMemory() && + thread_local_top_.pending_exception_ != heap()->termination_exception() && + thread_local_top_.has_pending_message_ && + !thread_local_top_.pending_message_obj_->IsTheHole() && + thread_local_top_.pending_message_script_ != NULL) { + Handle<Script> script(thread_local_top_.pending_message_script_); + int start_pos = thread_local_top_.pending_message_start_pos_; + int end_pos = thread_local_top_.pending_message_end_pos_; + return MessageLocation(script, start_pos, end_pos); + } + + return MessageLocation(); +} + + void Isolate::TraceException(bool flag) { FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use. 
} @@ -1294,7 +1444,7 @@ bool Isolate::OptionalRescheduleException(bool is_bottom_call) { ASSERT(thread_local_top()->try_catch_handler_address() != NULL); Address external_handler_address = thread_local_top()->try_catch_handler_address(); - JavaScriptFrameIterator it; + JavaScriptFrameIterator it(this); if (it.done() || (it.frame()->sp() > external_handler_address)) { clear_exception = true; } @@ -1355,7 +1505,7 @@ Handle<Context> Isolate::global_context() { Handle<Context> Isolate::GetCallingNativeContext() { - JavaScriptFrameIterator it; + JavaScriptFrameIterator it(this); #ifdef ENABLE_DEBUGGER_SUPPORT if (debug_->InDebugger()) { while (!it.done()) { @@ -1415,6 +1565,14 @@ Isolate::ThreadDataTable::ThreadDataTable() } +Isolate::ThreadDataTable::~ThreadDataTable() { + // TODO(svenpanne) The assertion below would fire if an embedder does not + // cleanly dispose all Isolates before disposing v8, so we are conservative + // and leave it out for now. + // ASSERT_EQ(NULL, list_); +} + + Isolate::PerIsolateThreadData* Isolate::ThreadDataTable::Lookup(Isolate* isolate, ThreadId thread_id) { @@ -1463,7 +1621,8 @@ void Isolate::ThreadDataTable::RemoveAllThreads(Isolate* isolate) { #define TRACE_ISOLATE(tag) \ do { \ if (FLAG_trace_isolates) { \ - PrintF("Isolate %p " #tag "\n", reinterpret_cast<void*>(this)); \ + PrintF("Isolate %p (id %d)" #tag "\n", \ + reinterpret_cast<void*>(this), id()); \ } \ } while (false) #else @@ -1508,7 +1667,7 @@ Isolate::Isolate() free_list_(0), preallocated_storage_preallocated_(false), inner_pointer_to_code_cache_(NULL), - write_input_buffer_(NULL), + write_iterator_(NULL), global_handles_(NULL), context_switcher_(NULL), thread_manager_(NULL), @@ -1517,9 +1676,14 @@ Isolate::Isolate() string_tracker_(NULL), regexp_stack_(NULL), date_cache_(NULL), + code_stub_interface_descriptors_(NULL), context_exit_happened_(false), + cpu_profiler_(NULL), deferred_handles_head_(NULL), - optimizing_compiler_thread_(this) { + optimizing_compiler_thread_(this), + marking_thread_(NULL), + sweeper_thread_(NULL) { + id_ = NoBarrier_AtomicIncrement(&isolate_counter_, 1); TRACE_ISOLATE(constructor); memset(isolate_addresses_, 0, @@ -1545,6 +1709,9 @@ Isolate::Isolate() memset(&js_spill_information_, 0, sizeof(js_spill_information_)); memset(code_kind_statistics_, 0, sizeof(code_kind_statistics_[0]) * Code::NUMBER_OF_KINDS); + + allow_compiler_thread_handle_deref_ = true; + allow_execution_thread_handle_deref_ = true; #endif #ifdef ENABLE_DEBUGGER_SUPPORT @@ -1565,6 +1732,7 @@ Isolate::Isolate() #undef ISOLATE_INIT_ARRAY_EXECUTE } + void Isolate::TearDown() { TRACE_ISOLATE(tear_down); @@ -1596,13 +1764,34 @@ void Isolate::TearDown() { } +void Isolate::GlobalTearDown() { + delete thread_data_table_; +} + + void Isolate::Deinit() { if (state_ == INITIALIZED) { TRACE_ISOLATE(deinit); if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop(); - if (FLAG_hydrogen_stats) HStatistics::Instance()->Print(); + if (FLAG_sweeper_threads > 0) { + for (int i = 0; i < FLAG_sweeper_threads; i++) { + sweeper_thread_[i]->Stop(); + delete sweeper_thread_[i]; + } + delete[] sweeper_thread_; + } + + if (FLAG_marking_threads > 0) { + for (int i = 0; i < FLAG_marking_threads; i++) { + marking_thread_[i]->Stop(); + delete marking_thread_[i]; + } + delete[] marking_thread_; + } + + if (FLAG_hydrogen_stats) GetHStatistics()->Print(); // We must stop the logger before we tear down other components. 
logger_->EnsureTickerStopped(); @@ -1610,7 +1799,7 @@ void Isolate::Deinit() { delete deoptimizer_data_; deoptimizer_data_ = NULL; if (FLAG_preemption) { - v8::Locker locker; + v8::Locker locker(reinterpret_cast<v8::Isolate*>(this)); v8::Locker::StopPreemption(); } builtins_.TearDown(); @@ -1622,7 +1811,9 @@ void Isolate::Deinit() { PreallocatedMemoryThreadStop(); HeapProfiler::TearDown(); - CpuProfiler::TearDown(); + delete cpu_profiler_; + cpu_profiler_ = NULL; + if (runtime_profiler_ != NULL) { runtime_profiler_->TearDown(); delete runtime_profiler_; @@ -1679,6 +1870,9 @@ Isolate::~Isolate() { delete date_cache_; date_cache_ = NULL; + delete[] code_stub_interface_descriptors_; + code_stub_interface_descriptors_ = NULL; + delete regexp_stack_; regexp_stack_ = NULL; @@ -1715,8 +1909,8 @@ Isolate::~Isolate() { bootstrapper_ = NULL; delete inner_pointer_to_code_cache_; inner_pointer_to_code_cache_ = NULL; - delete write_input_buffer_; - write_input_buffer_ = NULL; + delete write_iterator_; + write_iterator_ = NULL; delete context_switcher_; context_switcher_ = NULL; @@ -1759,7 +1953,7 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() { if (!external_caught) return; - if (thread_local_top_.pending_exception_ == Failure::OutOfMemoryException()) { + if (thread_local_top_.pending_exception_->IsOutOfMemory()) { // Do not propagate OOM exception: we should kill VM asap. } else if (thread_local_top_.pending_exception_ == heap()->termination_exception()) { @@ -1780,7 +1974,7 @@ void Isolate::PropagatePendingExceptionToExternalTryCatch() { void Isolate::InitializeLoggingAndCounters() { if (logger_ == NULL) { - logger_ = new Logger; + logger_ = new Logger(this); } if (counters_ == NULL) { counters_ = new Counters; @@ -1834,19 +2028,21 @@ bool Isolate::Init(Deserializer* des) { descriptor_lookup_cache_ = new DescriptorLookupCache(); unicode_cache_ = new UnicodeCache(); inner_pointer_to_code_cache_ = new InnerPointerToCodeCache(this); - write_input_buffer_ = new StringInputBuffer(); + write_iterator_ = new ConsStringIteratorOp(); global_handles_ = new GlobalHandles(this); - bootstrapper_ = new Bootstrapper(); + bootstrapper_ = new Bootstrapper(this); handle_scope_implementer_ = new HandleScopeImplementer(this); stub_cache_ = new StubCache(this, runtime_zone()); regexp_stack_ = new RegExpStack(); regexp_stack_->isolate_ = this; date_cache_ = new DateCache(); + code_stub_interface_descriptors_ = + new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS]; // Enable logging before setting up the heap logger_->SetUp(); - CpuProfiler::SetUp(); + cpu_profiler_ = new CpuProfiler(this); HeapProfiler::SetUp(); // Initialize other runtime facilities @@ -1865,10 +2061,17 @@ bool Isolate::Init(Deserializer* des) { } // SetUp the object heap. 
- const bool create_heap_objects = (des == NULL); ASSERT(!heap_.HasBeenSetUp()); - if (!heap_.SetUp(create_heap_objects)) { - V8::SetFatalError(); + if (!heap_.SetUp()) { + V8::FatalProcessOutOfMemory("heap setup"); + return false; + } + + deoptimizer_data_ = new DeoptimizerData(memory_allocator_); + + const bool create_heap_objects = (des == NULL); + if (create_heap_objects && !heap_.CreateHeapObjects()) { + V8::FatalProcessOutOfMemory("heap object creation"); return false; } @@ -1894,7 +2097,7 @@ bool Isolate::Init(Deserializer* des) { } if (FLAG_preemption) { - v8::Locker locker; + v8::Locker locker(reinterpret_cast<v8::Isolate*>(this)); v8::Locker::StartPreemption(100); } @@ -1920,15 +2123,14 @@ bool Isolate::Init(Deserializer* des) { // Quiet the heap NaN if needed on target platform. if (!create_heap_objects) Assembler::QuietNaN(heap_.nan_value()); - deoptimizer_data_ = new DeoptimizerData; runtime_profiler_ = new RuntimeProfiler(this); runtime_profiler_->SetUp(); // If we are deserializing, log non-function code objects and compiled // functions found in the snapshot. - if (create_heap_objects && + if (!create_heap_objects && (FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) { - HandleScope scope; + HandleScope scope(this); LOG(this, LogCodeObjects()); LOG(this, LogCompiledFunctions()); } @@ -1942,7 +2144,70 @@ bool Isolate::Init(Deserializer* des) { state_ = INITIALIZED; time_millis_at_init_ = OS::TimeCurrentMillis(); + + if (!create_heap_objects) { + // Now that the heap is consistent, it's OK to generate the code for the + // deopt entry table that might have been referred to by optimized code in + // the snapshot. + HandleScope scope(this); + Deoptimizer::EnsureCodeForDeoptimizationEntry( + this, + Deoptimizer::LAZY, + kDeoptTableSerializeEntryCount - 1); + } + + if (!Serializer::enabled()) { + // Ensure that all stubs which need to be generated ahead of time, but + // cannot be serialized into the snapshot have been generated. 
+ HandleScope scope(this); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this); + CodeStub::GenerateFPStubs(this); + StubFailureTrampolineStub::GenerateAheadOfTime(this); + } + if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start(); + + if (FLAG_parallel_marking && FLAG_marking_threads == 0) { + FLAG_marking_threads = SystemThreadManager:: + NumberOfParallelSystemThreads( + SystemThreadManager::PARALLEL_MARKING); + } + if (FLAG_marking_threads > 0) { + marking_thread_ = new MarkingThread*[FLAG_marking_threads]; + for (int i = 0; i < FLAG_marking_threads; i++) { + marking_thread_[i] = new MarkingThread(this); + marking_thread_[i]->Start(); + } + } else { + FLAG_parallel_marking = false; + } + + if (FLAG_sweeper_threads == 0) { + if (FLAG_concurrent_sweeping) { + FLAG_sweeper_threads = SystemThreadManager:: + NumberOfParallelSystemThreads( + SystemThreadManager::CONCURRENT_SWEEPING); + } else if (FLAG_parallel_sweeping) { + FLAG_sweeper_threads = SystemThreadManager:: + NumberOfParallelSystemThreads( + SystemThreadManager::PARALLEL_SWEEPING); + } + } + if (FLAG_sweeper_threads > 0) { + sweeper_thread_ = new SweeperThread*[FLAG_sweeper_threads]; + for (int i = 0; i < FLAG_sweeper_threads; i++) { + sweeper_thread_[i] = new SweeperThread(this); + sweeper_thread_[i]->Start(); + } + } else { + FLAG_concurrent_sweeping = false; + FLAG_parallel_sweeping = false; + } + if (FLAG_parallel_recompilation && + SystemThreadManager::NumberOfParallelSystemThreads( + SystemThreadManager::PARALLEL_RECOMPILATION) == 0) { + FLAG_parallel_recompilation = false; + } return true; } @@ -2057,6 +2322,51 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) { #ifdef DEBUG +bool Isolate::AllowHandleDereference() { + if (allow_execution_thread_handle_deref_ && + allow_compiler_thread_handle_deref_) { + // Short-cut to avoid polling thread id. 
+ return true; + } + if (FLAG_parallel_recompilation && + optimizing_compiler_thread()->IsOptimizerThread()) { + return allow_compiler_thread_handle_deref_; + } else { + return allow_execution_thread_handle_deref_; + } +} + + +void Isolate::SetAllowHandleDereference(bool allow) { + if (FLAG_parallel_recompilation && + optimizing_compiler_thread()->IsOptimizerThread()) { + allow_compiler_thread_handle_deref_ = allow; + } else { + allow_execution_thread_handle_deref_ = allow; + } +} +#endif + + +HStatistics* Isolate::GetHStatistics() { + if (hstatistics() == NULL) set_hstatistics(new HStatistics()); + return hstatistics(); +} + + +HTracer* Isolate::GetHTracer() { + if (htracer() == NULL) set_htracer(new HTracer(id())); + return htracer(); +} + + +CodeStubInterfaceDescriptor* + Isolate::code_stub_interface_descriptor(int index) { + return code_stub_interface_descriptors_ + index; +} + + +#ifdef DEBUG #define ISOLATE_FIELD_OFFSET(type, name, ignored) \ const intptr_t Isolate::name##_debug_offset_ = OFFSET_OF(Isolate, name##_); ISOLATE_INIT_LIST(ISOLATE_FIELD_OFFSET) diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h index b90191d0eb..ad0b260bca 100644 --- a/deps/v8/src/isolate.h +++ b/deps/v8/src/isolate.h @@ -53,6 +53,7 @@ namespace internal { class Bootstrapper; class CodeGenerator; class CodeRange; +struct CodeStubInterfaceDescriptor; class CompilationCache; class ContextSlotCache; class ContextSwitcher; @@ -67,16 +68,20 @@ class Factory; class FunctionInfoListener; class HandleScopeImplementer; class HeapProfiler; +class HStatistics; +class HTracer; class InlineRuntimeFunctionsTable; class NoAllocationStringAllocator; class InnerPointerToCodeCache; +class MarkingThread; class PreallocatedMemoryThread; class RegExpStack; class SaveContext; class UnicodeCache; -class StringInputBuffer; +class ConsStringIteratorOp; class StringTracker; class StubCache; +class SweeperThread; class ThreadManager; class ThreadState; class ThreadVisitor; // Defined in v8threads.h @@ -284,6 +289,21 @@ class ThreadLocalTop BASE_EMBEDDED { }; +class SystemThreadManager { + public: + enum ParallelSystemComponent { + PARALLEL_SWEEPING, + CONCURRENT_SWEEPING, + PARALLEL_MARKING, + PARALLEL_RECOMPILATION + }; + + static int NumberOfParallelSystemThreads(ParallelSystemComponent type); + + static const int kMaxThreads = 4; +}; + + #ifdef ENABLE_DEBUGGER_SUPPORT #define ISOLATE_DEBUGGER_INIT_LIST(V) \ @@ -337,9 +357,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache; V(FunctionInfoListener*, active_function_info_listener, NULL) \ /* State for Relocatable. */ \ V(Relocatable*, relocatable_top, NULL) \ - /* State for CodeEntry in profile-generator. */ \ - V(CodeGenerator*, current_code_generator, NULL) \ - V(bool, jump_target_compiling_deferred_code, false) \ V(DebugObjectCache*, string_stream_debug_object_cache, NULL) \ V(Object*, string_stream_current_security_token, NULL) \ /* TODO(isolates): Release this on destruction? */ \ @@ -351,9 +368,10 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache; V(unsigned, ast_node_count, 0) \ /* SafeStackFrameIterator activations count. 
*/ \ V(int, safe_stack_iterator_counter, 0) \ - V(uint64_t, enabled_cpu_features, 0) \ - V(CpuProfiler*, cpu_profiler, NULL) \ V(HeapProfiler*, heap_profiler, NULL) \ + V(bool, observer_delivery_pending, false) \ + V(HStatistics*, hstatistics, NULL) \ + V(HTracer*, htracer, NULL) \ ISOLATE_DEBUGGER_INIT_LIST(V) class Isolate { @@ -466,6 +484,8 @@ class Isolate { // for legacy API reasons. void TearDown(); + static void GlobalTearDown(); + bool IsDefaultIsolate() const { return this == default_isolate_; } // Ensures that process-wide resources and the default isolate have been @@ -529,11 +549,6 @@ class Isolate { thread_local_top_.save_context_ = save; } - // Access to the map of "new Object()". - Map* empty_object_map() { - return context()->native_context()->object_function()->map(); - } - // Access to current thread id. ThreadId thread_id() { return thread_local_top_.thread_id_; } void set_thread_id(ThreadId id) { thread_local_top_.thread_id_ = id; } @@ -613,7 +628,7 @@ class Isolate { bool IsExternallyCaught(); bool is_catchable_by_javascript(MaybeObject* exception) { - return (exception != Failure::OutOfMemoryException()) && + return (!exception->IsOutOfMemory()) && (exception != heap()->termination_exception()); } @@ -672,7 +687,8 @@ class Isolate { // Scope currently can only be used for regular exceptions, not // failures like OOM or termination exception. isolate_(isolate), - pending_exception_(isolate_->pending_exception()->ToObjectUnchecked()), + pending_exception_(isolate_->pending_exception()->ToObjectUnchecked(), + isolate_), catcher_(isolate_->catcher()) { } @@ -715,7 +731,10 @@ class Isolate { int frame_limit, StackTrace::StackTraceOptions options); - void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object); + Handle<JSArray> CaptureSimpleStackTrace(Handle<JSObject> error_object, + Handle<Object> caller, + int limit); + void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object); // Returns if the top context may access the given global object. If // the result is false, the pending exception is guaranteed to be @@ -739,6 +758,8 @@ class Isolate { Failure* ReThrow(MaybeObject* exception); void ScheduleThrow(Object* exception); void ReportPendingMessages(); + // Return pending location if any or unfilled structure. + MessageLocation GetMessageLocation(); Failure* ThrowIllegalOperation(); // Promote a scheduled exception to pending. Asserts has_scheduled_exception. 
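
CaptureSimpleStackTrace, declared here and implemented earlier in this diff, records each visible JavaScript frame as four consecutive FixedArray slots (receiver, function, code object, PC offset) and stores the number of used slots as the resulting JSArray's length. The following is a small self-contained sketch (not from the diff) of that flat four-slots-per-frame layout, using a hypothetical Frame struct, std::vector, and strings as stand-ins for the V8 handles.

// Sketch only, not from the diff: the "four slots per frame" layout used by
// CaptureSimpleStackTrace, modelled with plain values instead of handles.
#include <cstdio>
#include <string>
#include <vector>

struct Frame {                // Stand-ins for receiver, function, code, offset.
  std::string receiver;
  std::string function;
  std::string code;
  int offset;
};

int main() {
  std::vector<Frame> frames = {{"obj", "f", "code#1", 12},
                               {"global", "main", "code#2", 48}};

  // Flatten: one entry per slot, four slots per frame, like the FixedArray.
  std::vector<std::string> elements;
  for (const Frame& f : frames) {
    elements.push_back(f.receiver);
    elements.push_back(f.function);
    elements.push_back(f.code);
    elements.push_back(std::to_string(f.offset));
  }

  // Walk it back the way a stack-trace formatter would: stride of four.
  for (size_t cursor = 0; cursor + 4 <= elements.size(); cursor += 4) {
    std::printf("at %s (recv=%s, %s, offset=%s)\n",
                elements[cursor + 1].c_str(), elements[cursor].c_str(),
                elements[cursor + 2].c_str(), elements[cursor + 3].c_str());
  }
  return 0;
}
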
@@ -764,7 +785,6 @@ class Isolate { void Iterate(ObjectVisitor* v); void Iterate(ObjectVisitor* v, ThreadLocalTop* t); char* Iterate(ObjectVisitor* v, char* t); - void IterateThread(ThreadVisitor* v); void IterateThread(ThreadVisitor* v, char* t); @@ -808,9 +828,9 @@ class Isolate { ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR) #undef GLOBAL_ARRAY_ACCESSOR -#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \ - Handle<type> name() { \ - return Handle<type>(context()->native_context()->name()); \ +#define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name) \ + Handle<type> name() { \ + return Handle<type>(context()->native_context()->name(), this); \ } NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR) #undef NATIVE_CONTEXT_FIELD_ACCESSOR @@ -875,7 +895,7 @@ class Isolate { return inner_pointer_to_code_cache_; } - StringInputBuffer* write_input_buffer() { return write_input_buffer_; } + ConsStringIteratorOp* write_iterator() { return write_iterator_; } GlobalHandles* global_handles() { return global_handles_; } @@ -897,16 +917,16 @@ class Isolate { return &jsregexp_canonrange_; } - StringInputBuffer* objects_string_compare_buffer_a() { - return &objects_string_compare_buffer_a_; + ConsStringIteratorOp* objects_string_compare_iterator_a() { + return &objects_string_compare_iterator_a_; } - StringInputBuffer* objects_string_compare_buffer_b() { - return &objects_string_compare_buffer_b_; + ConsStringIteratorOp* objects_string_compare_iterator_b() { + return &objects_string_compare_iterator_b_; } - StaticResource<StringInputBuffer>* objects_string_input_buffer() { - return &objects_string_input_buffer_; + StaticResource<ConsStringIteratorOp>* objects_string_iterator() { + return &objects_string_iterator_; } RuntimeState* runtime_state() { return &runtime_state_; } @@ -917,10 +937,6 @@ class Isolate { bool fp_stubs_generated() { return fp_stubs_generated_; } - StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() { - return &compiler_safe_string_input_buffer_; - } - Builtins* builtins() { return &builtins_; } void NotifyExtensionInstalled() { @@ -959,6 +975,8 @@ class Isolate { inline bool IsDebuggerActive(); inline bool DebuggerHasBreakPoints(); + CpuProfiler* cpu_profiler() const { return cpu_profiler_; } + #ifdef DEBUG HistogramInfo* heap_histograms() { return heap_histograms_; } @@ -967,6 +985,10 @@ class Isolate { } int* code_kind_statistics() { return code_kind_statistics_; } + + bool AllowHandleDereference(); + + void SetAllowHandleDereference(bool allow); #endif #if defined(V8_TARGET_ARCH_ARM) && !defined(__arm__) || \ @@ -1014,7 +1036,6 @@ class Isolate { RuntimeProfiler::IsolateEnteredJS(this); } else if (current_state == JS && state != JS) { // JS -> non-JS transition. - ASSERT(RuntimeProfiler::IsSomeIsolateInJS()); RuntimeProfiler::IsolateExitedJS(this); } else { // Other types of state transitions are not interesting to the @@ -1058,6 +1079,9 @@ class Isolate { date_cache_ = date_cache; } + CodeStubInterfaceDescriptor* + code_stub_interface_descriptor(int index); + void IterateDeferredHandles(ObjectVisitor* visitor); void LinkDeferredHandles(DeferredHandles* deferred_handles); void UnlinkDeferredHandles(DeferredHandles* deferred_handles); @@ -1066,9 +1090,27 @@ class Isolate { return &optimizing_compiler_thread_; } + // PreInits and returns a default isolate. Needed when a new thread tries + // to create a Locker for the first time (the lock itself is in the isolate). + // TODO(svenpanne) This method is on death row... 
+ static v8::Isolate* GetDefaultIsolateForLocking(); + + MarkingThread** marking_threads() { + return marking_thread_; + } + + SweeperThread** sweeper_threads() { + return sweeper_thread_; + } + + HStatistics* GetHStatistics(); + HTracer* GetHTracer(); + private: Isolate(); + int id() const { return static_cast<int>(id_); } + friend struct GlobalState; friend struct InitializeGlobalState; @@ -1136,6 +1178,9 @@ class Isolate { static Isolate* default_isolate_; static ThreadDataTable* thread_data_table_; + // A global counter for all generated Isolates, might overflow. + static Atomic32 isolate_counter_; + void Deinit(); static void SetIsolateThreadLocals(Isolate* isolate, @@ -1149,10 +1194,6 @@ class Isolate { // If one does not yet exist, allocate a new one. PerIsolateThreadData* FindOrAllocatePerThreadDataForThisThread(); - // PreInits and returns a default isolate. Needed when a new thread tries - // to create a Locker for the first time (the lock itself is in the isolate). - static Isolate* GetDefaultIsolateForLocking(); - // Initializes the current thread to run this Isolate. // Not thread-safe. Multiple threads should not Enter/Exit the same isolate // at the same time, this should be prevented using external locking. @@ -1184,6 +1225,7 @@ class Isolate { // the Error object. bool IsErrorObject(Handle<Object> obj); + Atomic32 id_; EntryStackItem* entry_stack_; int stack_trace_nesting_level_; StringStream* incomplete_message_; @@ -1221,26 +1263,26 @@ class Isolate { PreallocatedStorage free_list_; bool preallocated_storage_preallocated_; InnerPointerToCodeCache* inner_pointer_to_code_cache_; - StringInputBuffer* write_input_buffer_; + ConsStringIteratorOp* write_iterator_; GlobalHandles* global_handles_; ContextSwitcher* context_switcher_; ThreadManager* thread_manager_; RuntimeState runtime_state_; bool fp_stubs_generated_; - StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_; Builtins builtins_; bool has_installed_extensions_; StringTracker* string_tracker_; unibrow::Mapping<unibrow::Ecma262UnCanonicalize> jsregexp_uncanonicalize_; unibrow::Mapping<unibrow::CanonicalizationRange> jsregexp_canonrange_; - StringInputBuffer objects_string_compare_buffer_a_; - StringInputBuffer objects_string_compare_buffer_b_; - StaticResource<StringInputBuffer> objects_string_input_buffer_; + ConsStringIteratorOp objects_string_compare_iterator_a_; + ConsStringIteratorOp objects_string_compare_iterator_b_; + StaticResource<ConsStringIteratorOp> objects_string_iterator_; unibrow::Mapping<unibrow::Ecma262Canonicalize> regexp_macro_assembler_canonicalize_; RegExpStack* regexp_stack_; DateCache* date_cache_; unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_; + CodeStubInterfaceDescriptor* code_stub_interface_descriptors_; // The garbage collector should be a little more aggressive when it knows // that a context was recently exited. 
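
This hunk also gives the isolate a code_stub_interface_descriptors_ array, one CodeStubInterfaceDescriptor per CodeStub::NUMBER_OF_IDS, allocated in Init, released in the destructor, and handed out by index through code_stub_interface_descriptor(int). Below is a minimal sketch (not from the diff) of that ownership pattern; the types and fields are hypothetical placeholders, not the real descriptor.

// Sketch only, not from the diff: per-stub descriptor storage as a single
// heap array indexed by stub id. All names here are illustrative.
#include <cstdio>

struct Descriptor {
  int register_param_count = -1;  // -1 means "not yet initialized".
};

enum StubId { kToNumber, kFastNewClosure, kNumberOfIds };

class Owner {
 public:
  Owner() : descriptors_(new Descriptor[kNumberOfIds]) {}
  ~Owner() { delete[] descriptors_; }

  // Same shape as Isolate::code_stub_interface_descriptor(int index).
  Descriptor* descriptor(int index) { return descriptors_ + index; }

 private:
  Descriptor* descriptors_;
};

int main() {
  Owner owner;
  owner.descriptor(kToNumber)->register_param_count = 1;
  std::printf("%d\n", owner.descriptor(kToNumber)->register_param_count);
  return 0;
}
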
@@ -1261,12 +1303,16 @@ class Isolate { HistogramInfo heap_histograms_[LAST_TYPE + 1]; JSObject::SpillInformation js_spill_information_; int code_kind_statistics_[Code::NUMBER_OF_KINDS]; + + bool allow_compiler_thread_handle_deref_; + bool allow_execution_thread_handle_deref_; #endif #ifdef ENABLE_DEBUGGER_SUPPORT Debugger* debugger_; Debug* debug_; #endif + CpuProfiler* cpu_profiler_; #define GLOBAL_BACKING_STORE(type, name, initialvalue) \ type name##_; @@ -1291,16 +1337,21 @@ class Isolate { DeferredHandles* deferred_handles_head_; OptimizingCompilerThread optimizing_compiler_thread_; + MarkingThread** marking_thread_; + SweeperThread** sweeper_thread_; friend class ExecutionAccess; friend class HandleScopeImplementer; friend class IsolateInitializer; + friend class MarkingThread; friend class OptimizingCompilerThread; + friend class SweeperThread; friend class ThreadManager; friend class Simulator; friend class StackGuard; friend class ThreadId; friend class TestMemoryAllocatorScope; + friend class TestCodeRangeScope; friend class v8::Isolate; friend class v8::Locker; friend class v8::Unlocker; @@ -1394,12 +1445,7 @@ class StackLimitCheck BASE_EMBEDDED { bool HasOverflowed() const { StackGuard* stack_guard = isolate_->stack_guard(); - // Stack has overflowed in C++ code only if stack pointer exceeds the C++ - // stack guard and the limits are not set to interrupt values. - // TODO(214): Stack overflows are ignored if a interrupt is pending. This - // code should probably always use the initial C++ limit. - return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) && - stack_guard->IsStackOverflow(); + return (reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit()); } private: Isolate* isolate_; diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h index 03ed22d70e..28ef8b33c8 100644 --- a/deps/v8/src/json-parser.h +++ b/deps/v8/src/json-parser.h @@ -58,7 +58,7 @@ class JsonParser BASE_EMBEDDED { if (position_ >= source_length_) { c0_ = kEndOfString; } else if (seq_ascii) { - c0_ = seq_source_->SeqAsciiStringGet(position_); + c0_ = seq_source_->SeqOneByteStringGet(position_); } else { c0_ = source_->Get(position_); } @@ -102,10 +102,10 @@ class JsonParser BASE_EMBEDDED { Handle<String> ParseJsonString() { return ScanJsonString<false>(); } - Handle<String> ParseJsonSymbol() { + Handle<String> ParseJsonInternalizedString() { return ScanJsonString<true>(); } - template <bool is_symbol> + template <bool is_internalized> Handle<String> ScanJsonString(); // Creates a new string and copies prefix[start..end] into the beginning // of it. 
Then scans the rest of the string, adding characters after the @@ -154,13 +154,15 @@ class JsonParser BASE_EMBEDDED { inline Zone* zone() const { return zone_; } static const int kInitialSpecialStringLength = 1024; + static const int kPretenureTreshold = 100 * 1024; private: Handle<String> source_; int source_length_; - Handle<SeqAsciiString> seq_source_; + Handle<SeqOneByteString> seq_source_; + PretenureFlag pretenure_; Isolate* isolate_; Factory* factory_; Handle<JSFunction> object_constructor_; @@ -174,16 +176,17 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source, Zone* zone) { isolate_ = source->map()->GetHeap()->isolate(); factory_ = isolate_->factory(); - object_constructor_ = - Handle<JSFunction>(isolate()->native_context()->object_function()); + object_constructor_ = Handle<JSFunction>( + isolate()->native_context()->object_function(), isolate()); zone_ = zone; FlattenString(source); source_ = source; source_length_ = source_->length(); + pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED; // Optimized fast case where we only have ASCII characters. if (seq_ascii) { - seq_source_ = Handle<SeqAsciiString>::cast(source_); + seq_source_ = Handle<SeqOneByteString>::cast(source_); } // Set initial position right before the string. @@ -225,7 +228,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source, break; default: message = "unexpected_token"; - Handle<Object> name = LookupSingleCharacterStringFromCode(c0_); + Handle<Object> name = + LookupSingleCharacterStringFromCode(isolate_, c0_); Handle<FixedArray> element = factory->NewFixedArray(1); element->set(0, *name); array = factory->NewJSArrayWithElements(element); @@ -287,9 +291,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() { // Parse a JSON object. Position must be right at '{'. template <bool seq_ascii> Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() { - Handle<Object> prototype; Handle<JSObject> json_object = - factory()->NewJSObject(object_constructor()); + factory()->NewJSObject(object_constructor(), pretenure_); ASSERT_EQ(c0_, '{'); AdvanceSkipWhitespace(); @@ -335,29 +338,24 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() { c0_ = '"'; #endif - Handle<String> key = ParseJsonSymbol(); + Handle<String> key = ParseJsonInternalizedString(); if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter(); AdvanceSkipWhitespace(); Handle<Object> value = ParseJsonValue(); if (value.is_null()) return ReportUnexpectedCharacter(); - if (key->Equals(isolate()->heap()->Proto_symbol())) { - prototype = value; + if (JSObject::TryTransitionToField(json_object, key)) { + int index = json_object->LastAddedFieldIndex(); + json_object->FastPropertyAtPut(index, *value); } else { - if (JSObject::TryTransitionToField(json_object, key)) { - int index = json_object->LastAddedFieldIndex(); - json_object->FastPropertyAtPut(index, *value); - } else { - JSObject::SetLocalPropertyIgnoreAttributes( - json_object, key, value, NONE); - } + JSObject::SetLocalPropertyIgnoreAttributes( + json_object, key, value, NONE); } } while (MatchSkipWhiteSpace(',')); if (c0_ != '}') { return ReportUnexpectedCharacter(); } - if (!prototype.is_null()) SetPrototype(json_object, prototype); } AdvanceSkipWhitespace(); return json_object; @@ -384,11 +382,12 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() { AdvanceSkipWhitespace(); // Allocate a fixed array with all the elements. 
Handle<FixedArray> fast_elements = - factory()->NewFixedArray(elements.length()); + factory()->NewFixedArray(elements.length(), pretenure_); for (int i = 0, n = elements.length(); i < n; i++) { fast_elements->set(i, *elements[i]); } - return factory()->NewJSArrayWithElements(fast_elements); + return factory()->NewJSArrayWithElements( + fast_elements, FAST_ELEMENTS, pretenure_); } @@ -437,25 +436,25 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() { int length = position_ - beg_pos; double number; if (seq_ascii) { - Vector<const char> chars(seq_source_->GetChars() + beg_pos, length); + Vector<const uint8_t> chars(seq_source_->GetChars() + beg_pos, length); number = StringToDouble(isolate()->unicode_cache(), - chars, + Vector<const char>::cast(chars), NO_FLAGS, // Hex, octal or trailing junk. OS::nan_value()); } else { - Vector<char> buffer = Vector<char>::New(length); + Vector<uint8_t> buffer = Vector<uint8_t>::New(length); String::WriteToFlat(*source_, buffer.start(), beg_pos, position_); - Vector<const char> result = - Vector<const char>(reinterpret_cast<const char*>(buffer.start()), - length); + Vector<const uint8_t> result = + Vector<const uint8_t>(buffer.start(), length); number = StringToDouble(isolate()->unicode_cache(), - result, - NO_FLAGS, // Hex, octal or trailing junk. - 0.0); + // TODO(dcarney): Convert StringToDouble to uint_t. + Vector<const char>::cast(result), + NO_FLAGS, // Hex, octal or trailing junk. + 0.0); buffer.Dispose(); } SkipWhitespace(); - return factory()->NewNumber(number); + return factory()->NewNumber(number, pretenure_); } @@ -468,21 +467,27 @@ inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) { } template <> -inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) { - seq_str->SeqAsciiStringSet(i, c); +inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) { + seq_str->SeqOneByteStringSet(i, c); } template <typename StringType> -inline Handle<StringType> NewRawString(Factory* factory, int length); +inline Handle<StringType> NewRawString(Factory* factory, + int length, + PretenureFlag pretenure); template <> -inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length) { - return factory->NewRawTwoByteString(length, NOT_TENURED); +inline Handle<SeqTwoByteString> NewRawString(Factory* factory, + int length, + PretenureFlag pretenure) { + return factory->NewRawTwoByteString(length, pretenure); } template <> -inline Handle<SeqAsciiString> NewRawString(Factory* factory, int length) { - return factory->NewRawAsciiString(length, NOT_TENURED); +inline Handle<SeqOneByteString> NewRawString(Factory* factory, + int length, + PretenureFlag pretenure) { + return factory->NewRawOneByteString(length, pretenure); } @@ -496,7 +501,8 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString( int count = end - start; int max_length = count + source_length_ - position_; int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count)); - Handle<StringType> seq_str = NewRawString<StringType>(factory(), length); + Handle<StringType> seq_str = + NewRawString<StringType>(factory(), length, pretenure_); // Copy prefix into seq_str. SinkChar* dest = seq_str->GetChars(); String::WriteToFlat(*prefix, dest, start, end); @@ -515,11 +521,11 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString( // in the ASCII sink. 
if (sizeof(SinkChar) == kUC16Size || seq_ascii || - c0_ <= kMaxAsciiCharCode) { + c0_ <= String::kMaxOneByteCharCode) { SeqStringSet(seq_str, count++, c0_); Advance(); } else { - // StringType is SeqAsciiString and we just read a non-ASCII char. + // StringType is SeqOneByteString and we just read a non-ASCII char. return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count); } } else { @@ -555,11 +561,12 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString( } value = value * 16 + digit; } - if (sizeof(SinkChar) == kUC16Size || value <= kMaxAsciiCharCode) { + if (sizeof(SinkChar) == kUC16Size || + value <= String::kMaxOneByteCharCode) { SeqStringSet(seq_str, count++, value); break; } else { - // StringType is SeqAsciiString and we just read a non-ASCII char. + // StringType is SeqOneByteString and we just read a non-ASCII char. position_ -= 6; // Rewind position_ to \ in \uxxxx. Advance(); return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, @@ -594,19 +601,19 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString( template <bool seq_ascii> -template <bool is_symbol> +template <bool is_internalized> Handle<String> JsonParser<seq_ascii>::ScanJsonString() { ASSERT_EQ('"', c0_); Advance(); if (c0_ == '"') { AdvanceSkipWhitespace(); - return Handle<String>(isolate()->heap()->empty_string()); + return factory()->empty_string(); } - if (seq_ascii && is_symbol) { - // Fast path for existing symbols. If the the string being parsed is not - // a known symbol, contains backslashes or unexpectedly reaches the end of - // string, return with an empty handle. + if (seq_ascii && is_internalized) { + // Fast path for existing internalized strings. If the the string being + // parsed is not a known internalized string, contains backslashes or + // unexpectedly reaches the end of string, return with an empty handle. uint32_t running_hash = isolate()->heap()->HashSeed(); int position = position_; uc32 c0 = c0_; @@ -615,40 +622,50 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() { c0_ = c0; int beg_pos = position_; position_ = position; - return SlowScanJsonString<SeqAsciiString, char>(source_, - beg_pos, - position_); + return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, + beg_pos, + position_); } if (c0 < 0x20) return Handle<String>::null(); - running_hash = StringHasher::AddCharacterCore(running_hash, c0); + if (static_cast<uint32_t>(c0) > + unibrow::Utf16::kMaxNonSurrogateCharCode) { + running_hash = + StringHasher::AddCharacterCore(running_hash, + unibrow::Utf16::LeadSurrogate(c0)); + running_hash = + StringHasher::AddCharacterCore(running_hash, + unibrow::Utf16::TrailSurrogate(c0)); + } else { + running_hash = StringHasher::AddCharacterCore(running_hash, c0); + } position++; if (position >= source_length_) return Handle<String>::null(); - c0 = seq_source_->SeqAsciiStringGet(position); + c0 = seq_source_->SeqOneByteStringGet(position); } while (c0 != '"'); int length = position - position_; uint32_t hash = (length <= String::kMaxHashCalcLength) ? 
StringHasher::GetHashCore(running_hash) : length; - Vector<const char> string_vector( + Vector<const uint8_t> string_vector( seq_source_->GetChars() + position_, length); - SymbolTable* symbol_table = isolate()->heap()->symbol_table(); - uint32_t capacity = symbol_table->Capacity(); - uint32_t entry = SymbolTable::FirstProbe(hash, capacity); + StringTable* string_table = isolate()->heap()->string_table(); + uint32_t capacity = string_table->Capacity(); + uint32_t entry = StringTable::FirstProbe(hash, capacity); uint32_t count = 1; while (true) { - Object* element = symbol_table->KeyAt(entry); - if (element == isolate()->heap()->raw_unchecked_undefined_value()) { + Object* element = string_table->KeyAt(entry); + if (element == isolate()->heap()->undefined_value()) { // Lookup failure. break; } - if (element != isolate()->heap()->raw_unchecked_the_hole_value() && - String::cast(element)->IsAsciiEqualTo(string_vector)) { + if (element != isolate()->heap()->the_hole_value() && + String::cast(element)->IsOneByteEqualTo(string_vector)) { // Lookup success, update the current position. position_ = position; // Advance past the last '"'. AdvanceSkipWhitespace(); - return Handle<String>(String::cast(element)); + return Handle<String>(String::cast(element), isolate()); } - entry = SymbolTable::NextProbe(entry, count++, capacity); + entry = StringTable::NextProbe(entry, count++, capacity); } } @@ -658,7 +675,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() { // Check for control character (0x00-0x1f) or unterminated string (<0). if (c0_ < 0x20) return Handle<String>::null(); if (c0_ != '\\') { - if (seq_ascii || c0_ <= kMaxAsciiCharCode) { + if (seq_ascii || c0_ <= String::kMaxOneByteCharCode) { Advance(); } else { return SlowScanJsonString<SeqTwoByteString, uc16>(source_, @@ -666,20 +683,18 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() { position_); } } else { - return SlowScanJsonString<SeqAsciiString, char>(source_, - beg_pos, - position_); + return SlowScanJsonString<SeqOneByteString, uint8_t>(source_, + beg_pos, + position_); } } while (c0_ != '"'); int length = position_ - beg_pos; Handle<String> result; - if (seq_ascii && is_symbol) { - result = factory()->LookupAsciiSymbol(seq_source_, - beg_pos, - length); + if (seq_ascii && is_internalized) { + result = factory()->InternalizeOneByteString(seq_source_, beg_pos, length); } else { - result = factory()->NewRawAsciiString(length); - char* dest = SeqAsciiString::cast(*result)->GetChars(); + result = factory()->NewRawOneByteString(length, pretenure_); + uint8_t* dest = SeqOneByteString::cast(*result)->GetChars(); String::WriteToFlat(*source_, dest, beg_pos, position_); } ASSERT_EQ('"', c0_); diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h new file mode 100644 index 0000000000..ad9ef3ddb7 --- /dev/null +++ b/deps/v8/src/json-stringifier.h @@ -0,0 +1,791 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_JSON_STRINGIFIER_H_ +#define V8_JSON_STRINGIFIER_H_ + +#include "v8.h" +#include "v8utils.h" +#include "v8conversions.h" + +namespace v8 { +namespace internal { + +class BasicJsonStringifier BASE_EMBEDDED { + public: + explicit BasicJsonStringifier(Isolate* isolate); + + MaybeObject* Stringify(Handle<Object> object); + + private: + static const int kInitialPartLength = 32; + static const int kMaxPartLength = 16 * 1024; + static const int kPartLengthGrowthFactor = 2; + + enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW }; + + void Extend(); + + void ChangeEncoding(); + + void ShrinkCurrentPart(); + + template <bool is_ascii, typename Char> + INLINE(void Append_(Char c)); + + template <bool is_ascii, typename Char> + INLINE(void Append_(const Char* chars)); + + INLINE(void Append(uint8_t c)) { + if (is_ascii_) { + Append_<true>(c); + } else { + Append_<false>(c); + } + } + + INLINE(void AppendAscii(const char* chars)) { + if (is_ascii_) { + Append_<true>(reinterpret_cast<const uint8_t*>(chars)); + } else { + Append_<false>(reinterpret_cast<const uint8_t*>(chars)); + } + } + + Handle<Object> ApplyToJsonFunction(Handle<Object> object, + Handle<Object> key); + + Result SerializeGeneric(Handle<Object> object, + Handle<Object> key, + bool deferred_comma, + bool deferred_key); + + // Entry point to serialize the object. + INLINE(Result SerializeObject(Handle<Object> obj)) { + return Serialize_<false>(obj, false, factory_->empty_string()); + } + + // Serialize an array element. + // The index may serve as argument for the toJSON function. + INLINE(Result SerializeElement(Isolate* isolate, + Handle<Object> object, + int i)) { + return Serialize_<false>(object, + false, + Handle<Object>(Smi::FromInt(i), isolate)); + } + + // Serialize a object property. + // The key may or may not be serialized depending on the property. + // The key may also serve as argument for the toJSON function. 
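The Result values declared above encode how a value that cannot be serialized propagates to its caller: further down, SerializeJSArray writes "null" for an element that comes back UNCHANGED, while SerializeJSObject simply skips the property, which is why the key is deferred until the value is known to serialize. A minimal standalone sketch of that calling convention, with illustrative types rather than V8's:

#include <optional>
#include <string>
#include <utility>
#include <vector>

enum class Result { kUnchanged, kSuccess };

// std::nullopt stands in for values JSON cannot represent (undefined,
// functions), i.e. the UNCHANGED case above.
Result SerializeValue(const std::optional<int>& v, std::string* out) {
  if (!v.has_value()) return Result::kUnchanged;
  *out += std::to_string(*v);
  return Result::kSuccess;
}

// Array elements: an unserializable element still occupies its slot.
std::string SerializeArray(const std::vector<std::optional<int>>& elements) {
  std::string out = "[";
  for (size_t i = 0; i < elements.size(); ++i) {
    if (i > 0) out += ',';
    if (SerializeValue(elements[i], &out) == Result::kUnchanged) out += "null";
  }
  return out + "]";
}

// Object properties: the key is only written once the value has succeeded.
std::string SerializeObject(
    const std::vector<std::pair<std::string, std::optional<int>>>& properties) {
  std::string out = "{";
  bool comma = false;
  for (const auto& property : properties) {
    std::string value;
    if (SerializeValue(property.second, &value) == Result::kUnchanged) continue;
    if (comma) out += ',';
    out += '"' + property.first + "\":" + value;
    comma = true;
  }
  return out + "}";
}

int main() {
  bool ok = SerializeArray({1, std::nullopt, 3}) == "[1,null,3]" &&
            SerializeObject({{"a", 1}, {"b", std::nullopt}}) == "{\"a\":1}";
  return ok ? 0 : 1;
}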
+ INLINE(Result SerializeProperty(Handle<Object> object, + bool deferred_comma, + Handle<String> deferred_key)) { + ASSERT(!deferred_key.is_null()); + return Serialize_<true>(object, deferred_comma, deferred_key); + } + + template <bool deferred_string_key> + Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key); + + void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) { + if (deferred_comma) Append(','); + SerializeString(Handle<String>::cast(deferred_key)); + Append(':'); + } + + Result SerializeSmi(Smi* object); + + Result SerializeDouble(double number); + INLINE(Result SerializeHeapNumber(Handle<HeapNumber> object)) { + return SerializeDouble(object->value()); + } + + Result SerializeJSValue(Handle<JSValue> object); + + INLINE(Result SerializeJSArray(Handle<JSArray> object)); + INLINE(Result SerializeJSObject(Handle<JSObject> object)); + + Result SerializeJSArraySlow(Handle<JSArray> object, int length); + + void SerializeString(Handle<String> object); + + template <typename SrcChar, typename DestChar> + INLINE(void SerializeStringUnchecked_(const SrcChar* src, + DestChar* dest, + int length)); + + template <bool is_ascii, typename Char> + INLINE(void SerializeString_(Handle<String> string)); + + template <typename Char> + INLINE(bool DoNotEscape(Char c)); + + template <typename Char> + INLINE(Vector<const Char> GetCharVector(Handle<String> string)); + + Result StackPush(Handle<Object> object); + void StackPop(); + + INLINE(Handle<String> accumulator()) { + return Handle<String>(String::cast(accumulator_store_->value()), isolate_); + } + + INLINE(void set_accumulator(Handle<String> string)) { + return accumulator_store_->set_value(*string); + } + + Isolate* isolate_; + Factory* factory_; + // We use a value wrapper for the string accumulator to keep the + // (indirect) handle to it in the outermost handle scope. + Handle<JSValue> accumulator_store_; + Handle<String> current_part_; + Handle<String> tojson_string_; + Handle<JSArray> stack_; + int current_index_; + int part_length_; + bool is_ascii_; + + static const int kJsonEscapeTableEntrySize = 8; + static const char* const JsonEscapeTable; +}; + + +// Translation table to escape ASCII characters. +// Table entries start at a multiple of 8 and are null-terminated. 
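The table defined below packs one escape string per character code into fixed eight-byte, NUL-terminated slots (kJsonEscapeTableEntrySize), so the serializer can find an entry with a single multiplication and copy until the terminator, with no length field. A standalone sketch of that layout and lookup, using a three-entry toy table rather than the full 256-entry one:

#include <cstdint>
#include <string>

constexpr int kEntrySize = 8;  // each escape string starts at c * kEntrySize

// Append the escape sequence for character code `c` from a flat table of
// fixed-width, NUL-terminated slots.
void AppendEscaped(const char* table, uint8_t c, std::string* out) {
  for (const char* p = table + c * kEntrySize; *p != '\0'; ++p) {
    out->push_back(*p);
  }
}

int main() {
  // Toy table covering character codes 0, 1 and 2 only; each slot holds six
  // escape characters, a NUL terminator and one byte of padding.
  static const char kToyTable[] =
      "\\u0000\0 "
      "\\u0001\0 "
      "\\u0002\0 ";
  std::string out;
  AppendEscaped(kToyTable, 1, &out);
  return out == "\\u0001" ? 0 : 1;  // the second slot, without its padding
}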
+const char* const BasicJsonStringifier::JsonEscapeTable = + "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 " + "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 " + "\\b\0 \\t\0 \\n\0 \\u000b\0 " + "\\f\0 \\r\0 \\u000e\0 \\u000f\0 " + "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 " + "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 " + "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 " + "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 " + " \0 !\0 \\\"\0 #\0 " + "$\0 %\0 &\0 '\0 " + "(\0 )\0 *\0 +\0 " + ",\0 -\0 .\0 /\0 " + "0\0 1\0 2\0 3\0 " + "4\0 5\0 6\0 7\0 " + "8\0 9\0 :\0 ;\0 " + "<\0 =\0 >\0 ?\0 " + "@\0 A\0 B\0 C\0 " + "D\0 E\0 F\0 G\0 " + "H\0 I\0 J\0 K\0 " + "L\0 M\0 N\0 O\0 " + "P\0 Q\0 R\0 S\0 " + "T\0 U\0 V\0 W\0 " + "X\0 Y\0 Z\0 [\0 " + "\\\\\0 ]\0 ^\0 _\0 " + "`\0 a\0 b\0 c\0 " + "d\0 e\0 f\0 g\0 " + "h\0 i\0 j\0 k\0 " + "l\0 m\0 n\0 o\0 " + "p\0 q\0 r\0 s\0 " + "t\0 u\0 v\0 w\0 " + "x\0 y\0 z\0 {\0 " + "|\0 }\0 ~\0 \177\0 " + "\200\0 \201\0 \202\0 \203\0 " + "\204\0 \205\0 \206\0 \207\0 " + "\210\0 \211\0 \212\0 \213\0 " + "\214\0 \215\0 \216\0 \217\0 " + "\220\0 \221\0 \222\0 \223\0 " + "\224\0 \225\0 \226\0 \227\0 " + "\230\0 \231\0 \232\0 \233\0 " + "\234\0 \235\0 \236\0 \237\0 " + "\240\0 \241\0 \242\0 \243\0 " + "\244\0 \245\0 \246\0 \247\0 " + "\250\0 \251\0 \252\0 \253\0 " + "\254\0 \255\0 \256\0 \257\0 " + "\260\0 \261\0 \262\0 \263\0 " + "\264\0 \265\0 \266\0 \267\0 " + "\270\0 \271\0 \272\0 \273\0 " + "\274\0 \275\0 \276\0 \277\0 " + "\300\0 \301\0 \302\0 \303\0 " + "\304\0 \305\0 \306\0 \307\0 " + "\310\0 \311\0 \312\0 \313\0 " + "\314\0 \315\0 \316\0 \317\0 " + "\320\0 \321\0 \322\0 \323\0 " + "\324\0 \325\0 \326\0 \327\0 " + "\330\0 \331\0 \332\0 \333\0 " + "\334\0 \335\0 \336\0 \337\0 " + "\340\0 \341\0 \342\0 \343\0 " + "\344\0 \345\0 \346\0 \347\0 " + "\350\0 \351\0 \352\0 \353\0 " + "\354\0 \355\0 \356\0 \357\0 " + "\360\0 \361\0 \362\0 \363\0 " + "\364\0 \365\0 \366\0 \367\0 " + "\370\0 \371\0 \372\0 \373\0 " + "\374\0 \375\0 \376\0 \377\0 "; + +BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate) + : isolate_(isolate), current_index_(0), is_ascii_(true) { + factory_ = isolate_->factory(); + accumulator_store_ = Handle<JSValue>::cast( + factory_->ToObject(factory_->empty_string())); + part_length_ = kInitialPartLength; + current_part_ = factory_->NewRawOneByteString(kInitialPartLength); + tojson_string_ = + factory_->InternalizeOneByteString(STATIC_ASCII_VECTOR("toJSON")); + stack_ = factory_->NewJSArray(8); +} + + +MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) { + switch (SerializeObject(object)) { + case UNCHANGED: + return isolate_->heap()->undefined_value(); + case SUCCESS: + ShrinkCurrentPart(); + return *factory_->NewConsString(accumulator(), current_part_); + case CIRCULAR: + return isolate_->Throw(*factory_->NewTypeError( + "circular_structure", HandleVector<Object>(NULL, 0))); + case STACK_OVERFLOW: + return isolate_->StackOverflow(); + default: + return Failure::Exception(); + } +} + + +template <bool is_ascii, typename Char> +void BasicJsonStringifier::Append_(Char c) { + if (is_ascii) { + SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet( + current_index_++, c); + } else { + SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet( + current_index_++, c); + } + if (current_index_ == part_length_) Extend(); +} + + +template <bool is_ascii, typename Char> +void BasicJsonStringifier::Append_(const Char* chars) { + for ( ; *chars != '\0'; chars++) Append_<is_ascii, Char>(*chars); +} + + +Handle<Object> BasicJsonStringifier::ApplyToJsonFunction( + 
Handle<Object> object, Handle<Object> key) { + LookupResult lookup(isolate_); + JSObject::cast(*object)->LookupRealNamedProperty(*tojson_string_, &lookup); + if (!lookup.IsProperty()) return object; + PropertyAttributes attr; + Handle<Object> fun = + Object::GetProperty(object, object, &lookup, tojson_string_, &attr); + if (!fun->IsJSFunction()) return object; + + // Call toJSON function. + if (key->IsSmi()) key = factory_->NumberToString(key); + Handle<Object> argv[] = { key }; + bool has_exception = false; + HandleScope scope(isolate_); + object = Execution::Call(fun, object, 1, argv, &has_exception); + // Return empty handle to signal an exception. + if (has_exception) return Handle<Object>::null(); + return scope.CloseAndEscape(object); +} + + +BasicJsonStringifier::Result BasicJsonStringifier::StackPush( + Handle<Object> object) { + StackLimitCheck check(isolate_); + if (check.HasOverflowed()) return STACK_OVERFLOW; + + int length = Smi::cast(stack_->length())->value(); + FixedArray* elements = FixedArray::cast(stack_->elements()); + for (int i = 0; i < length; i++) { + if (elements->get(i) == *object) { + return CIRCULAR; + } + } + stack_->EnsureSize(length + 1); + FixedArray::cast(stack_->elements())->set(length, *object); + stack_->set_length(Smi::FromInt(length + 1)); + return SUCCESS; +} + + +void BasicJsonStringifier::StackPop() { + int length = Smi::cast(stack_->length())->value(); + stack_->set_length(Smi::FromInt(length - 1)); +} + + +template <bool deferred_string_key> +BasicJsonStringifier::Result BasicJsonStringifier::Serialize_( + Handle<Object> object, bool comma, Handle<Object> key) { + if (object->IsJSObject()) { + object = ApplyToJsonFunction(object, key); + if (object.is_null()) return EXCEPTION; + } + + if (object->IsSmi()) { + if (deferred_string_key) SerializeDeferredKey(comma, key); + return SerializeSmi(Smi::cast(*object)); + } + + switch (HeapObject::cast(*object)->map()->instance_type()) { + case HEAP_NUMBER_TYPE: + if (deferred_string_key) SerializeDeferredKey(comma, key); + return SerializeHeapNumber(Handle<HeapNumber>::cast(object)); + case ODDBALL_TYPE: + switch (Oddball::cast(*object)->kind()) { + case Oddball::kFalse: + if (deferred_string_key) SerializeDeferredKey(comma, key); + AppendAscii("false"); + return SUCCESS; + case Oddball::kTrue: + if (deferred_string_key) SerializeDeferredKey(comma, key); + AppendAscii("true"); + return SUCCESS; + case Oddball::kNull: + if (deferred_string_key) SerializeDeferredKey(comma, key); + AppendAscii("null"); + return SUCCESS; + default: + return UNCHANGED; + } + case JS_ARRAY_TYPE: + if (deferred_string_key) SerializeDeferredKey(comma, key); + return SerializeJSArray(Handle<JSArray>::cast(object)); + case JS_VALUE_TYPE: + if (deferred_string_key) SerializeDeferredKey(comma, key); + return SerializeJSValue(Handle<JSValue>::cast(object)); + case JS_FUNCTION_TYPE: + return UNCHANGED; + default: + if (object->IsString()) { + if (deferred_string_key) SerializeDeferredKey(comma, key); + SerializeString(Handle<String>::cast(object)); + return SUCCESS; + } else if (object->IsJSObject()) { + if (deferred_string_key) SerializeDeferredKey(comma, key); + return SerializeJSObject(Handle<JSObject>::cast(object)); + } else { + return SerializeGeneric(object, key, comma, deferred_string_key); + } + } +} + + +BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric( + Handle<Object> object, + Handle<Object> key, + bool deferred_comma, + bool deferred_key) { + Handle<JSObject> 
builtins(isolate_->native_context()->builtins()); + Handle<JSFunction> builtin = + Handle<JSFunction>::cast(GetProperty(builtins, "JSONSerializeAdapter")); + + Handle<Object> argv[] = { key, object }; + bool has_exception = false; + Handle<Object> result = + Execution::Call(builtin, object, 2, argv, &has_exception); + if (has_exception) return EXCEPTION; + if (result->IsUndefined()) return UNCHANGED; + if (deferred_key) { + if (key->IsSmi()) key = factory_->NumberToString(key); + SerializeDeferredKey(deferred_comma, key); + } + + Handle<String> result_string = Handle<String>::cast(result); + // Shrink current part, attach it to the accumulator, also attach the result + // string to the accumulator, and allocate a new part. + ShrinkCurrentPart(); // Shrink. + part_length_ = kInitialPartLength; // Allocate conservatively. + Extend(); // Attach current part and allocate new part. + // Attach result string to the accumulator. + set_accumulator(factory_->NewConsString(accumulator(), result_string)); + return SUCCESS; +} + + +BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue( + Handle<JSValue> object) { + bool has_exception = false; + String* class_name = object->class_name(); + if (class_name == isolate_->heap()->String_string()) { + Handle<Object> value = Execution::ToString(object, &has_exception); + if (has_exception) return EXCEPTION; + SerializeString(Handle<String>::cast(value)); + } else if (class_name == isolate_->heap()->Number_string()) { + Handle<Object> value = Execution::ToNumber(object, &has_exception); + if (has_exception) return EXCEPTION; + if (value->IsSmi()) return SerializeSmi(Smi::cast(*value)); + SerializeHeapNumber(Handle<HeapNumber>::cast(value)); + } else { + ASSERT(class_name == isolate_->heap()->Boolean_string()); + Object* value = JSValue::cast(*object)->value(); + ASSERT(value->IsBoolean()); + AppendAscii(value->IsTrue() ? 
"true" : "false"); + } + return SUCCESS; +} + + +BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) { + static const int kBufferSize = 100; + char chars[kBufferSize]; + Vector<char> buffer(chars, kBufferSize); + AppendAscii(IntToCString(object->value(), buffer)); + return SUCCESS; +} + + +BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble( + double number) { + if (isinf(number) || isnan(number)) { + AppendAscii("null"); + return SUCCESS; + } + static const int kBufferSize = 100; + char chars[kBufferSize]; + Vector<char> buffer(chars, kBufferSize); + AppendAscii(DoubleToCString(number, buffer)); + return SUCCESS; +} + + +BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray( + Handle<JSArray> object) { + HandleScope handle_scope(isolate_); + Result stack_push = StackPush(object); + if (stack_push != SUCCESS) return stack_push; + int length = Smi::cast(object->length())->value(); + Append('['); + switch (object->GetElementsKind()) { + case FAST_SMI_ELEMENTS: { + Handle<FixedArray> elements( + FixedArray::cast(object->elements()), isolate_); + for (int i = 0; i < length; i++) { + if (i > 0) Append(','); + SerializeSmi(Smi::cast(elements->get(i))); + } + break; + } + case FAST_DOUBLE_ELEMENTS: { + Handle<FixedDoubleArray> elements( + FixedDoubleArray::cast(object->elements()), isolate_); + for (int i = 0; i < length; i++) { + if (i > 0) Append(','); + SerializeDouble(elements->get_scalar(i)); + } + break; + } + case FAST_ELEMENTS: { + Handle<FixedArray> elements( + FixedArray::cast(object->elements()), isolate_); + for (int i = 0; i < length; i++) { + if (i > 0) Append(','); + Result result = + SerializeElement(isolate_, + Handle<Object>(elements->get(i), isolate_), + i); + if (result == SUCCESS) continue; + if (result == UNCHANGED) { + AppendAscii("null"); + } else { + return result; + } + } + break; + } + // TODO(yangguo): The FAST_HOLEY_* cases could be handled in a faster way. + // They resemble the non-holey cases except that a prototype chain lookup + // is necessary for holes. 
+ default: { + Result result = SerializeJSArraySlow(object, length); + if (result != SUCCESS) return result; + break; + } + } + Append(']'); + StackPop(); + current_part_ = handle_scope.CloseAndEscape(current_part_); + return SUCCESS; +} + + +BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow( + Handle<JSArray> object, int length) { + for (int i = 0; i < length; i++) { + if (i > 0) Append(','); + Handle<Object> element = Object::GetElement(object, i); + if (element->IsUndefined()) { + AppendAscii("null"); + } else { + Result result = SerializeElement(object->GetIsolate(), element, i); + if (result == SUCCESS) continue; + if (result == UNCHANGED) { + AppendAscii("null"); + } else { + return result; + } + } + } + return SUCCESS; +} + + +BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject( + Handle<JSObject> object) { + HandleScope handle_scope(isolate_); + Result stack_push = StackPush(object); + if (stack_push != SUCCESS) return stack_push; + if (object->IsJSGlobalProxy()) { + object = Handle<JSObject>( + JSObject::cast(object->GetPrototype()), isolate_); + ASSERT(object->IsGlobalObject()); + } + + Append('{'); + bool comma = false; + + if (object->HasFastProperties() && + !object->HasIndexedInterceptor() && + !object->HasNamedInterceptor() && + object->elements()->length() == 0) { + Handle<Map> map(object->map()); + for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) { + Handle<Name> name(map->instance_descriptors()->GetKey(i), isolate_); + // TODO(rossberg): Should this throw? + if (!name->IsString()) continue; + Handle<String> key = Handle<String>::cast(name); + PropertyDetails details = map->instance_descriptors()->GetDetails(i); + if (details.IsDontEnum() || details.IsDeleted()) continue; + Handle<Object> property; + if (details.type() == FIELD && *map == object->map()) { + property = Handle<Object>( + object->FastPropertyAt( + map->instance_descriptors()->GetFieldIndex(i)), + isolate_); + } else { + property = GetProperty(isolate_, object, key); + if (property.is_null()) return EXCEPTION; + } + Result result = SerializeProperty(property, comma, key); + if (!comma && result == SUCCESS) comma = true; + if (result >= EXCEPTION) return result; + } + } else { + bool has_exception = false; + Handle<FixedArray> contents = + GetKeysInFixedArrayFor(object, LOCAL_ONLY, &has_exception); + if (has_exception) return EXCEPTION; + + for (int i = 0; i < contents->length(); i++) { + Object* key = contents->get(i); + Handle<String> key_handle; + Handle<Object> property; + if (key->IsString()) { + key_handle = Handle<String>(String::cast(key), isolate_); + property = GetProperty(isolate_, object, key_handle); + } else { + ASSERT(key->IsNumber()); + key_handle = factory_->NumberToString(Handle<Object>(key, isolate_)); + uint32_t index; + if (key->IsSmi()) { + property = Object::GetElement(object, Smi::cast(key)->value()); + } else if (key_handle->AsArrayIndex(&index)) { + property = Object::GetElement(object, index); + } else { + property = GetProperty(isolate_, object, key_handle); + } + } + if (property.is_null()) return EXCEPTION; + Result result = SerializeProperty(property, comma, key_handle); + if (!comma && result == SUCCESS) comma = true; + if (result >= EXCEPTION) return result; + } + } + + Append('}'); + StackPop(); + current_part_ = handle_scope.CloseAndEscape(current_part_); + return SUCCESS; +} + + +void BasicJsonStringifier::ShrinkCurrentPart() { + ASSERT(current_index_ < part_length_); + current_part_ = Handle<String>( + 
SeqString::cast(*current_part_)->Truncate(current_index_), isolate_); +} + + +void BasicJsonStringifier::Extend() { + set_accumulator(factory_->NewConsString(accumulator(), current_part_)); + if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) { + part_length_ *= kPartLengthGrowthFactor; + } + if (is_ascii_) { + current_part_ = factory_->NewRawOneByteString(part_length_); + } else { + current_part_ = factory_->NewRawTwoByteString(part_length_); + } + current_index_ = 0; +} + + +void BasicJsonStringifier::ChangeEncoding() { + ShrinkCurrentPart(); + set_accumulator(factory_->NewConsString(accumulator(), current_part_)); + current_part_ = factory_->NewRawTwoByteString(part_length_); + current_index_ = 0; + is_ascii_ = false; +} + + +template <typename SrcChar, typename DestChar> +void BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src, + DestChar* dest, + int length) { + dest += current_index_; + DestChar* dest_start = dest; + + // Assert that uc16 character is not truncated down to 8 bit. + // The <uc16, char> version of this method must not be called. + ASSERT(sizeof(*dest) >= sizeof(*src)); + + for (int i = 0; i < length; i++) { + SrcChar c = src[i]; + if (DoNotEscape(c)) { + *(dest++) = static_cast<DestChar>(c); + } else { + const uint8_t* chars = reinterpret_cast<const uint8_t*>( + &JsonEscapeTable[c * kJsonEscapeTableEntrySize]); + while (*chars != '\0') *(dest++) = *(chars++); + } + } + + current_index_ += static_cast<int>(dest - dest_start); +} + + +template <bool is_ascii, typename Char> +void BasicJsonStringifier::SerializeString_(Handle<String> string) { + int length = string->length(); + Append_<is_ascii, char>('"'); + // We make a rough estimate to find out if the current string can be + // serialized without allocating a new string part. The worst case length of + // an escaped character is 6. Shifting the remainin string length right by 3 + // is a more pessimistic estimate, but faster to calculate. + + if (((part_length_ - current_index_) >> 3) > length) { + AssertNoAllocation no_allocation; + Vector<const Char> vector = GetCharVector<Char>(string); + if (is_ascii) { + SerializeStringUnchecked_( + vector.start(), + SeqOneByteString::cast(*current_part_)->GetChars(), + length); + } else { + SerializeStringUnchecked_( + vector.start(), + SeqTwoByteString::cast(*current_part_)->GetChars(), + length); + } + } else { + String* string_location = *string; + Vector<const Char> vector = GetCharVector<Char>(string); + for (int i = 0; i < length; i++) { + Char c = vector[i]; + if (DoNotEscape(c)) { + Append_<is_ascii, Char>(c); + } else { + Append_<is_ascii, uint8_t>(reinterpret_cast<const uint8_t*>( + &JsonEscapeTable[c * kJsonEscapeTableEntrySize])); + } + // If GC moved the string, we need to refresh the vector. 
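The fast-path test above, (part_length_ - current_index_) >> 3 > length, is intentionally cruder than the real bound: the longest escape produced by the table is six characters, but demanding more than eight spare output characters per input character costs only a shift and is strictly safer, so SerializeStringUnchecked_ can write without per-character bounds checks. A tiny standalone illustration of that trade-off:

#include <cstddef>

// Worst case an input character expands to six output characters; requiring
// more than eight spare output characters per input character is a stronger
// condition that needs only a shift to evaluate.
bool FitsWithoutChecks(size_t remaining_capacity, size_t input_length) {
  return (remaining_capacity >> 3) > input_length;
}

int main() {
  bool roomy = FitsWithoutChecks(900, 100);  // true: 900 > 8 * 100
  bool tight = FitsWithoutChecks(700, 100);  // false, although 700 covers the
                                             // true worst case of 6 * 100
  return (roomy && !tight) ? 0 : 1;
}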
+ if (*string != string_location) { + vector = GetCharVector<Char>(string); + string_location = *string; + } + } + } + + Append_<is_ascii, uint8_t>('"'); +} + + +template <> +bool BasicJsonStringifier::DoNotEscape(uint8_t c) { + return c >= '#' && c <= '~' && c != '\\'; +} + + +template <> +bool BasicJsonStringifier::DoNotEscape(uint16_t c) { + return c >= '#' && c != '\\' && c != 0x7f; +} + + +template <> +Vector<const uint8_t> BasicJsonStringifier::GetCharVector( + Handle<String> string) { + String::FlatContent flat = string->GetFlatContent(); + ASSERT(flat.IsAscii()); + return flat.ToOneByteVector(); +} + + +template <> +Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) { + String::FlatContent flat = string->GetFlatContent(); + ASSERT(flat.IsTwoByte()); + return flat.ToUC16Vector(); +} + + +void BasicJsonStringifier::SerializeString(Handle<String> object) { + FlattenString(object); + String::FlatContent flat = object->GetFlatContent(); + if (is_ascii_) { + if (flat.IsAscii()) { + SerializeString_<true, uint8_t>(object); + } else { + ChangeEncoding(); + SerializeString(object); + } + } else { + if (flat.IsAscii()) { + SerializeString_<false, uint8_t>(object); + } else { + SerializeString_<false, uc16>(object); + } + } +} + +} } // namespace v8::internal + +#endif // V8_JSON_STRINGIFIER_H_ diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js index 85224b0f05..e94d3c8e3e 100644 --- a/deps/v8/src/json.js +++ b/deps/v8/src/json.js @@ -178,141 +178,9 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) { } -function BasicSerializeArray(value, stack, builder) { - var len = value.length; - if (len == 0) { - builder.push("[]"); - return; - } - if (!%PushIfAbsent(stack, value)) { - throw MakeTypeError('circular_structure', $Array()); - } - builder.push("["); - var val = value[0]; - if (IS_STRING(val)) { - // First entry is a string. Remaining entries are likely to be strings too. - var array_string = %QuoteJSONStringArray(value); - if (!IS_UNDEFINED(array_string)) { - // array_string also includes bracket characters so we are done. - builder[builder.length - 1] = array_string; - stack.pop(); - return; - } else { - builder.push(%QuoteJSONString(val)); - for (var i = 1; i < len; i++) { - val = value[i]; - if (IS_STRING(val)) { - builder.push(%QuoteJSONStringComma(val)); - } else { - builder.push(","); - var before = builder.length; - BasicJSONSerialize(i, val, stack, builder); - if (before == builder.length) builder[before - 1] = ",null"; - } - } - } - } else if (IS_NUMBER(val)) { - // First entry is a number. Remaining entries are likely to be numbers too. 
- builder.push(JSON_NUMBER_TO_STRING(val)); - for (var i = 1; i < len; i++) { - builder.push(","); - val = value[i]; - if (IS_NUMBER(val)) { - builder.push(JSON_NUMBER_TO_STRING(val)); - } else { - var before = builder.length; - BasicJSONSerialize(i, val, stack, builder); - if (before == builder.length) builder[before - 1] = ",null"; - } - } - } else { - var before = builder.length; - BasicJSONSerialize(0, val, stack, builder); - if (before == builder.length) builder.push("null"); - for (var i = 1; i < len; i++) { - builder.push(","); - before = builder.length; - BasicJSONSerialize(i, value[i], stack, builder); - if (before == builder.length) builder[before - 1] = ",null"; - } - } - stack.pop(); - builder.push("]"); -} - - -function BasicSerializeObject(value, stack, builder) { - if (!%PushIfAbsent(stack, value)) { - throw MakeTypeError('circular_structure', $Array()); - } - builder.push("{"); - var first = true; - var keys = %ObjectKeys(value); - var len = keys.length; - for (var i = 0; i < len; i++) { - var p = keys[i]; - if (!first) { - builder.push(%QuoteJSONStringComma(p)); - } else { - builder.push(%QuoteJSONString(p)); - } - builder.push(":"); - var before = builder.length; - BasicJSONSerialize(p, value[p], stack, builder); - if (before == builder.length) { - builder.pop(); - builder.pop(); - } else { - first = false; - } - } - stack.pop(); - builder.push("}"); -} - - -function BasicJSONSerialize(key, value, stack, builder) { - if (IS_SPEC_OBJECT(value)) { - var toJSON = value.toJSON; - if (IS_SPEC_FUNCTION(toJSON)) { - value = %_CallFunction(value, ToString(key), toJSON); - } - } - if (IS_STRING(value)) { - builder.push(value !== "" ? %QuoteJSONString(value) : '""'); - } else if (IS_NUMBER(value)) { - builder.push(JSON_NUMBER_TO_STRING(value)); - } else if (IS_BOOLEAN(value)) { - builder.push(value ? "true" : "false"); - } else if (IS_NULL(value)) { - builder.push("null"); - } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) { - // Value is a non-callable object. - // Unwrap value if necessary - if (IS_NUMBER_WRAPPER(value)) { - value = ToNumber(value); - builder.push(JSON_NUMBER_TO_STRING(value)); - } else if (IS_STRING_WRAPPER(value)) { - builder.push(%QuoteJSONString(ToString(value))); - } else if (IS_BOOLEAN_WRAPPER(value)) { - builder.push(%_ValueOf(value) ? 
"true" : "false"); - } else if (IS_ARRAY(value)) { - BasicSerializeArray(value, stack, builder); - } else { - BasicSerializeObject(value, stack, builder); - } - } -} - - function JSONStringify(value, replacer, space) { if (%_ArgumentsLength() == 1) { - var builder = new InternalArray(); - BasicJSONSerialize('', value, new InternalArray(), builder); - if (builder.length == 0) return; - var result = %_FastAsciiArrayJoin(builder, ""); - if (!IS_UNDEFINED(result)) return result; - return %StringBuilderConcat(builder, builder.length, ""); + return %BasicJSONStringify(value); } if (IS_OBJECT(space)) { // Unwrap 'space' if it is wrapped @@ -325,10 +193,10 @@ function JSONStringify(value, replacer, space) { var gap; if (IS_NUMBER(space)) { space = MathMax(0, MathMin(ToInteger(space), 10)); - gap = SubString(" ", 0, space); + gap = %_SubString(" ", 0, space); } else if (IS_STRING(space)) { if (space.length > 10) { - gap = SubString(space, 0, 10); + gap = %_SubString(space, 0, 10); } else { gap = space; } @@ -338,6 +206,7 @@ function JSONStringify(value, replacer, space) { return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap); } + function SetUpJSON() { %CheckIsBootstrapping(); InstallFunctions($JSON, DONT_ENUM, $Array( @@ -346,4 +215,12 @@ function SetUpJSON() { )); } + +function JSONSerializeAdapter(key, object) { + var holder = {}; + holder[key] = object; + // No need to pass the actual holder since there is no replacer function. + return JSONSerialize(key, holder, void 0, new InternalArray(), "", ""); +} + SetUpJSON(); diff --git a/deps/v8/src/jsregexp-inl.h b/deps/v8/src/jsregexp-inl.h new file mode 100644 index 0000000000..3ef07d8c54 --- /dev/null +++ b/deps/v8/src/jsregexp-inl.h @@ -0,0 +1,106 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +#ifndef V8_JSREGEXP_INL_H_ +#define V8_JSREGEXP_INL_H_ + +#include "allocation.h" +#include "handles.h" +#include "heap.h" +#include "jsregexp.h" +#include "objects.h" + +namespace v8 { +namespace internal { + + +RegExpImpl::GlobalCache::~GlobalCache() { + // Deallocate the register array if we allocated it in the constructor + // (as opposed to using the existing jsregexp_static_offsets_vector). + if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) { + DeleteArray(register_array_); + } +} + + +int32_t* RegExpImpl::GlobalCache::FetchNext() { + current_match_index_++; + if (current_match_index_ >= num_matches_) { + // Current batch of results exhausted. + // Fail if last batch was not even fully filled. + if (num_matches_ < max_matches_) { + num_matches_ = 0; // Signal failed match. + return NULL; + } + + int32_t* last_match = + ®ister_array_[(current_match_index_ - 1) * registers_per_match_]; + int last_end_index = last_match[1]; + + if (regexp_->TypeTag() == JSRegExp::ATOM) { + num_matches_ = RegExpImpl::AtomExecRaw(regexp_, + subject_, + last_end_index, + register_array_, + register_array_size_); + } else { + int last_start_index = last_match[0]; + if (last_start_index == last_end_index) last_end_index++; + if (last_end_index > subject_->length()) { + num_matches_ = 0; // Signal failed match. + return NULL; + } + num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_, + subject_, + last_end_index, + register_array_, + register_array_size_); + } + + if (num_matches_ <= 0) return NULL; + current_match_index_ = 0; + return register_array_; + } else { + return ®ister_array_[current_match_index_ * registers_per_match_]; + } +} + + +int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() { + int index = current_match_index_ * registers_per_match_; + if (num_matches_ == 0) { + // After a failed match we shift back by one result. + index -= registers_per_match_; + } + return ®ister_array_[index]; +} + + +} } // namespace v8::internal + +#endif // V8_JSREGEXP_INL_H_ diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc index e59170d5a3..b490521bc0 100644 --- a/deps/v8/src/jsregexp.cc +++ b/deps/v8/src/jsregexp.cc @@ -32,6 +32,7 @@ #include "execution.h" #include "factory.h" #include "jsregexp.h" +#include "jsregexp-inl.h" #include "platform.h" #include "string-search.h" #include "runtime.h" @@ -269,7 +270,7 @@ static void SetAtomLastCapture(FixedArray* array, String* subject, int from, int to) { - NoHandleAllocation no_handles; + NoHandleAllocation no_handles(array->GetIsolate()); RegExpImpl::SetLastCaptureCount(array, 2); RegExpImpl::SetLastSubject(array, subject); RegExpImpl::SetLastInput(array, subject); @@ -309,16 +310,16 @@ int RegExpImpl::AtomExecRaw(Handle<JSRegExp> regexp, index = (needle_content.IsAscii() ? (subject_content.IsAscii() ? SearchString(isolate, - subject_content.ToAsciiVector(), - needle_content.ToAsciiVector(), + subject_content.ToOneByteVector(), + needle_content.ToOneByteVector(), index) : SearchString(isolate, subject_content.ToUC16Vector(), - needle_content.ToAsciiVector(), + needle_content.ToOneByteVector(), index)) : (subject_content.IsAscii() ? 
SearchString(isolate, - subject_content.ToAsciiVector(), + subject_content.ToOneByteVector(), needle_content.ToUC16Vector(), index) : SearchString(isolate, @@ -352,7 +353,7 @@ Handle<Object> RegExpImpl::AtomExec(Handle<JSRegExp> re, if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value(); ASSERT_EQ(res, RegExpImpl::RE_SUCCESS); - NoHandleAllocation no_handles; + NoHandleAllocation no_handles(isolate); FixedArray* array = FixedArray::cast(last_match_info->elements()); SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]); return last_match_info; @@ -529,7 +530,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp, if (!subject->IsFlat()) FlattenString(subject); // Check the asciiness of the underlying storage. - bool is_ascii = subject->IsAsciiRepresentationUnderneath(); + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1; #ifdef V8_INTERPRETED_REGEXP @@ -560,7 +561,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp, ASSERT(index <= subject->length()); ASSERT(subject->IsFlat()); - bool is_ascii = subject->IsAsciiRepresentationUnderneath(); + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); #ifndef V8_INTERPRETED_REGEXP ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2); @@ -596,7 +597,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp, // being internal and external, and even between being ASCII and UC16, // but the characters are always the same). IrregexpPrepare(regexp, subject); - is_ascii = subject->IsAsciiRepresentationUnderneath(); + is_ascii = subject->IsOneByteRepresentationUnderneath(); } while (true); UNREACHABLE(); return RE_EXCEPTION; @@ -686,6 +687,7 @@ Handle<JSArray> RegExpImpl::SetLastMatchInfo(Handle<JSArray> last_match_info, Handle<String> subject, int capture_count, int32_t* match) { + ASSERT(last_match_info->HasFastObjectElements()); int capture_register_count = (capture_count + 1) * 2; last_match_info->EnsureSize(capture_register_count + kLastMatchOverhead); AssertNoAllocation no_gc; @@ -760,68 +762,6 @@ RegExpImpl::GlobalCache::GlobalCache(Handle<JSRegExp> regexp, } -RegExpImpl::GlobalCache::~GlobalCache() { - // Deallocate the register array if we allocated it in the constructor - // (as opposed to using the existing jsregexp_static_offsets_vector). - if (register_array_size_ > Isolate::kJSRegexpStaticOffsetsVectorSize) { - DeleteArray(register_array_); - } -} - - -int32_t* RegExpImpl::GlobalCache::FetchNext() { - current_match_index_++; - if (current_match_index_ >= num_matches_) { - // Current batch of results exhausted. - // Fail if last batch was not even fully filled. - if (num_matches_ < max_matches_) { - num_matches_ = 0; // Signal failed match. - return NULL; - } - - int32_t* last_match = - ®ister_array_[(current_match_index_ - 1) * registers_per_match_]; - int last_end_index = last_match[1]; - - if (regexp_->TypeTag() == JSRegExp::ATOM) { - num_matches_ = RegExpImpl::AtomExecRaw(regexp_, - subject_, - last_end_index, - register_array_, - register_array_size_); - } else { - int last_start_index = last_match[0]; - if (last_start_index == last_end_index) last_end_index++; - if (last_end_index > subject_->length()) { - num_matches_ = 0; // Signal failed match. 
- return NULL; - } - num_matches_ = RegExpImpl::IrregexpExecRaw(regexp_, - subject_, - last_end_index, - register_array_, - register_array_size_); - } - - if (num_matches_ <= 0) return NULL; - current_match_index_ = 0; - return register_array_; - } else { - return ®ister_array_[current_match_index_ * registers_per_match_]; - } -} - - -int32_t* RegExpImpl::GlobalCache::LastSuccessfulMatch() { - int index = current_match_index_ * registers_per_match_; - if (num_matches_ == 0) { - // After a failed match we shift back by one result. - index -= registers_per_match_; - } - return ®ister_array_[index]; -} - - // ------------------------------------------------------------------- // Implementation of the Irregexp regular expression engine. // @@ -1681,7 +1621,7 @@ static int GetCaseIndependentLetters(Isolate* isolate, letters[0] = character; length = 1; } - if (!ascii_subject || character <= String::kMaxAsciiCharCode) { + if (!ascii_subject || character <= String::kMaxOneByteCharCode) { return length; } // The standard requires that non-ASCII characters cannot have ASCII @@ -1732,7 +1672,7 @@ static inline bool EmitAtomNonLetter(Isolate* isolate, bool checked = false; // We handle the length > 1 case in a later pass. if (length == 1) { - if (ascii && c > String::kMaxAsciiCharCodeU) { + if (ascii && c > String::kMaxOneByteCharCodeU) { // Can't match - see above. return false; // Bounds not checked. } @@ -1753,7 +1693,7 @@ static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler, Label* on_failure) { uc16 char_mask; if (ascii) { - char_mask = String::kMaxAsciiCharCode; + char_mask = String::kMaxOneByteCharCode; } else { char_mask = String::kMaxUtf16CodeUnit; } @@ -2007,7 +1947,7 @@ static void SplitSearchSpace(ZoneList<int>* ranges, // range with a single not-taken branch, speeding up this important // character range (even non-ASCII charset-based text has spaces and // punctuation). - if (*border - 1 > String::kMaxAsciiCharCode && // ASCII case. + if (*border - 1 > String::kMaxOneByteCharCode && // ASCII case. end_index - start_index > (*new_start_index - start_index) * 2 && last - first > kSize * 2 && binary_chop_index > *new_start_index && @@ -2211,7 +2151,7 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler, int max_char; if (ascii) { - max_char = String::kMaxAsciiCharCode; + max_char = String::kMaxOneByteCharCode; } else { max_char = String::kMaxUtf16CodeUnit; } @@ -2359,35 +2299,33 @@ RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler, int ActionNode::EatsAtLeast(int still_to_find, - int recursion_depth, + int budget, bool not_at_start) { - if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0; + if (budget <= 0) return 0; if (type_ == POSITIVE_SUBMATCH_SUCCESS) return 0; // Rewinds input! 
return on_success()->EatsAtLeast(still_to_find, - recursion_depth + 1, + budget - 1, not_at_start); } void ActionNode::FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { if (type_ == BEGIN_SUBMATCH) { bm->SetRest(offset); } else if (type_ != POSITIVE_SUBMATCH_SUCCESS) { - on_success()->FillInBMInfo( - offset, recursion_depth + 1, budget - 1, bm, not_at_start); + on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start); } SaveBMInfo(bm, not_at_start, offset); } int AssertionNode::EatsAtLeast(int still_to_find, - int recursion_depth, + int budget, bool not_at_start) { - if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0; + if (budget <= 0) return 0; // If we know we are not at the start and we are asked "how many characters // will you match if you succeed?" then we can answer anything since false // implies false. So lets just return the max answer (still_to_find) since @@ -2395,55 +2333,53 @@ int AssertionNode::EatsAtLeast(int still_to_find, // branches in the node graph. if (type() == AT_START && not_at_start) return still_to_find; return on_success()->EatsAtLeast(still_to_find, - recursion_depth + 1, + budget - 1, not_at_start); } void AssertionNode::FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { // Match the behaviour of EatsAtLeast on this node. if (type() == AT_START && not_at_start) return; - on_success()->FillInBMInfo( - offset, recursion_depth + 1, budget - 1, bm, not_at_start); + on_success()->FillInBMInfo(offset, budget - 1, bm, not_at_start); SaveBMInfo(bm, not_at_start, offset); } int BackReferenceNode::EatsAtLeast(int still_to_find, - int recursion_depth, + int budget, bool not_at_start) { - if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0; + if (budget <= 0) return 0; return on_success()->EatsAtLeast(still_to_find, - recursion_depth + 1, + budget - 1, not_at_start); } int TextNode::EatsAtLeast(int still_to_find, - int recursion_depth, + int budget, bool not_at_start) { int answer = Length(); if (answer >= still_to_find) return answer; - if (recursion_depth > RegExpCompiler::kMaxRecursion) return answer; + if (budget <= 0) return answer; // We are not at start after this node so we set the last argument to 'true'. return answer + on_success()->EatsAtLeast(still_to_find - answer, - recursion_depth + 1, + budget - 1, true); } int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find, - int recursion_depth, + int budget, bool not_at_start) { - if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0; + if (budget <= 0) return 0; // Alternative 0 is the negative lookahead, alternative 1 is what comes // afterwards. 
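These hunks replace the recursion_depth counter with a work budget: every node spends one unit before delegating to its successor, and ChoiceNode::EatsAtLeastHelper below divides what is left evenly among its alternatives, so the total analysis effort stays bounded no matter how the node graph branches. A standalone sketch of the same budgeting over a toy node graph (illustrative types, not V8's RegExpNode hierarchy):

#include <algorithm>
#include <vector>

// Toy pattern node: a choice node has alternatives, anything else consumes
// `length` characters and continues with `next`.
struct Node {
  int length;
  std::vector<Node*> alternatives;
  Node* next;
};

// Lower bound on characters consumed on success, limited by a work budget
// rather than a recursion depth; choice nodes split the budget between their
// alternatives so total work stays bounded.
int EatsAtLeast(const Node* node, int still_to_find, int budget) {
  if (node == nullptr || budget <= 0) return 0;
  if (!node->alternatives.empty()) {
    int per_branch = (budget - 1) / static_cast<int>(node->alternatives.size());
    int minimum = still_to_find;
    for (const Node* alternative : node->alternatives) {
      minimum = std::min(minimum, EatsAtLeast(alternative, still_to_find, per_branch));
      if (minimum == 0) break;  // cannot do worse than zero
    }
    return minimum;
  }
  int eaten = std::min(node->length, still_to_find);
  if (eaten >= still_to_find) return eaten;
  return eaten + EatsAtLeast(node->next, still_to_find - eaten, budget - 1);
}

int main() {
  Node tail{2, {}, nullptr};
  Node left{1, {}, &tail};     // consumes 1, then 2 more
  Node right{3, {}, nullptr};  // consumes 3
  Node choice{0, {&left, &right}, nullptr};
  return EatsAtLeast(&choice, 4, 10) == 3 ? 0 : 1;
}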
RegExpNode* node = alternatives_->at(1).node(); - return node->EatsAtLeast(still_to_find, recursion_depth + 1, not_at_start); + return node->EatsAtLeast(still_to_find, budget - 1, not_at_start); } @@ -2460,39 +2396,40 @@ void NegativeLookaheadChoiceNode::GetQuickCheckDetails( int ChoiceNode::EatsAtLeastHelper(int still_to_find, - int recursion_depth, + int budget, RegExpNode* ignore_this_node, bool not_at_start) { - if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0; + if (budget <= 0) return 0; int min = 100; int choice_count = alternatives_->length(); + budget = (budget - 1) / choice_count; for (int i = 0; i < choice_count; i++) { RegExpNode* node = alternatives_->at(i).node(); if (node == ignore_this_node) continue; - int node_eats_at_least = node->EatsAtLeast(still_to_find, - recursion_depth + 1, - not_at_start); + int node_eats_at_least = + node->EatsAtLeast(still_to_find, budget, not_at_start); if (node_eats_at_least < min) min = node_eats_at_least; + if (min == 0) return 0; } return min; } int LoopChoiceNode::EatsAtLeast(int still_to_find, - int recursion_depth, + int budget, bool not_at_start) { return EatsAtLeastHelper(still_to_find, - recursion_depth, + budget - 1, loop_node_, not_at_start); } int ChoiceNode::EatsAtLeast(int still_to_find, - int recursion_depth, + int budget, bool not_at_start) { return EatsAtLeastHelper(still_to_find, - recursion_depth, + budget, NULL, not_at_start); } @@ -2513,7 +2450,7 @@ bool QuickCheckDetails::Rationalize(bool asc) { bool found_useful_op = false; uint32_t char_mask; if (asc) { - char_mask = String::kMaxAsciiCharCode; + char_mask = String::kMaxOneByteCharCode; } else { char_mask = String::kMaxUtf16CodeUnit; } @@ -2522,7 +2459,7 @@ bool QuickCheckDetails::Rationalize(bool asc) { int char_shift = 0; for (int i = 0; i < characters_; i++) { Position* pos = &positions_[i]; - if ((pos->mask & String::kMaxAsciiCharCode) != 0) { + if ((pos->mask & String::kMaxOneByteCharCode) != 0) { found_useful_op = true; } mask_ |= (pos->mask & char_mask) << char_shift; @@ -2565,7 +2502,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler, // load so the value is already masked down. uint32_t char_mask; if (compiler->ascii()) { - char_mask = String::kMaxAsciiCharCode; + char_mask = String::kMaxOneByteCharCode; } else { char_mask = String::kMaxUtf16CodeUnit; } @@ -2575,7 +2512,7 @@ bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler, // For 2-character preloads in ASCII mode or 1-character preloads in // TWO_BYTE mode we also use a 16 bit load with zero extend. 
if (details->characters() == 2 && compiler->ascii()) { - if ((mask & 0x7f7f) == 0x7f7f) need_mask = false; + if ((mask & 0xffff) == 0xffff) need_mask = false; } else if (details->characters() == 1 && !compiler->ascii()) { if ((mask & 0xffff) == 0xffff) need_mask = false; } else { @@ -2617,7 +2554,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details, int characters = details->characters(); int char_mask; if (compiler->ascii()) { - char_mask = String::kMaxAsciiCharCode; + char_mask = String::kMaxOneByteCharCode; } else { char_mask = String::kMaxUtf16CodeUnit; } @@ -2834,24 +2771,41 @@ class VisitMarker { }; -RegExpNode* SeqRegExpNode::FilterASCII(int depth) { +RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) { if (info()->replacement_calculated) return replacement(); if (depth < 0) return this; ASSERT(!info()->visited); VisitMarker marker(info()); - return FilterSuccessor(depth - 1); + return FilterSuccessor(depth - 1, ignore_case); } -RegExpNode* SeqRegExpNode::FilterSuccessor(int depth) { - RegExpNode* next = on_success_->FilterASCII(depth - 1); +RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, bool ignore_case) { + RegExpNode* next = on_success_->FilterASCII(depth - 1, ignore_case); if (next == NULL) return set_replacement(NULL); on_success_ = next; return set_replacement(this); } -RegExpNode* TextNode::FilterASCII(int depth) { +// We need to check for the following characters: 0x39c 0x3bc 0x178. +static inline bool RangeContainsLatin1Equivalents(CharacterRange range) { + // TODO(dcarney): this could be a lot more efficient. + return range.Contains(0x39c) || + range.Contains(0x3bc) || range.Contains(0x178); +} + + +static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) { + for (int i = 0; i < ranges->length(); i++) { + // TODO(dcarney): this could be a lot more efficient. + if (RangeContainsLatin1Equivalents(ranges->at(i))) return true; + } + return false; +} + + +RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) { if (info()->replacement_calculated) return replacement(); if (depth < 0) return this; ASSERT(!info()->visited); @@ -2862,12 +2816,17 @@ RegExpNode* TextNode::FilterASCII(int depth) { if (elm.type == TextElement::ATOM) { Vector<const uc16> quarks = elm.data.u_atom->data(); for (int j = 0; j < quarks.length(); j++) { - // We don't need special handling for case independence - // because of the rule that case independence cannot make - // a non-ASCII character match an ASCII character. - if (quarks[j] > String::kMaxAsciiCharCode) { - return set_replacement(NULL); - } + uint16_t c = quarks[j]; + if (c <= String::kMaxOneByteCharCode) continue; + if (!ignore_case) return set_replacement(NULL); + // Here, we need to check for characters whose upper and lower cases + // are outside the Latin-1 range. + uint16_t converted = unibrow::Latin1::ConvertNonLatin1ToLatin1(c); + // Character is outside Latin-1 completely + if (converted == 0) return set_replacement(NULL); + // Convert quark to Latin-1 in place. + uint16_t* copy = const_cast<uint16_t*>(quarks.start()); + copy[j] = converted; } } else { ASSERT(elm.type == TextElement::CHAR_CLASS); @@ -2881,39 +2840,44 @@ RegExpNode* TextNode::FilterASCII(int depth) { if (cc->is_negated()) { if (range_count != 0 && ranges->at(0).from() == 0 && - ranges->at(0).to() >= String::kMaxAsciiCharCode) { + ranges->at(0).to() >= String::kMaxOneByteCharCode) { + // This will be handled in a later filter. 
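The ignore_case special-casing above exists because a handful of characters have case mates on the other side of the Latin-1 boundary, which is why the comment singles out 0x39c, 0x3bc and 0x178: a one-byte subject can still match them case-insensitively. A sketch of the kind of mapping unibrow::Latin1::ConvertNonLatin1ToLatin1 performs; the one-byte equivalents used here (µ U+00B5 and ÿ U+00FF) are the usual Unicode case pairings and are an assumption, not taken from this diff:

#include <cstdint>

// Map a code point outside Latin-1 to a case-insensitive Latin-1 equivalent,
// or 0 if there is none (in which case the text node can never match a
// one-byte subject).
uint16_t ToLatin1CaseEquivalent(uint16_t c) {
  switch (c) {
    case 0x039C:      // GREEK CAPITAL LETTER MU
    case 0x03BC:      // GREEK SMALL LETTER MU
      return 0x00B5;  // MICRO SIGN
    case 0x0178:      // LATIN CAPITAL LETTER Y WITH DIAERESIS
      return 0x00FF;  // LATIN SMALL LETTER Y WITH DIAERESIS
    default:
      return 0;
  }
}

int main() {
  return ToLatin1CaseEquivalent(0x03BC) == 0x00B5 ? 0 : 1;
}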
+ if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue; return set_replacement(NULL); } } else { if (range_count == 0 || - ranges->at(0).from() > String::kMaxAsciiCharCode) { + ranges->at(0).from() > String::kMaxOneByteCharCode) { + // This will be handled in a later filter. + if (ignore_case && RangesContainLatin1Equivalents(ranges)) continue; return set_replacement(NULL); } } } } - return FilterSuccessor(depth - 1); + return FilterSuccessor(depth - 1, ignore_case); } -RegExpNode* LoopChoiceNode::FilterASCII(int depth) { +RegExpNode* LoopChoiceNode::FilterASCII(int depth, bool ignore_case) { if (info()->replacement_calculated) return replacement(); if (depth < 0) return this; if (info()->visited) return this; { VisitMarker marker(info()); - RegExpNode* continue_replacement = continue_node_->FilterASCII(depth - 1); + RegExpNode* continue_replacement = + continue_node_->FilterASCII(depth - 1, ignore_case); // If we can't continue after the loop then there is no sense in doing the // loop. if (continue_replacement == NULL) return set_replacement(NULL); } - return ChoiceNode::FilterASCII(depth - 1); + return ChoiceNode::FilterASCII(depth - 1, ignore_case); } -RegExpNode* ChoiceNode::FilterASCII(int depth) { +RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) { if (info()->replacement_calculated) return replacement(); if (depth < 0) return this; if (info()->visited) return this; @@ -2932,7 +2896,8 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) { RegExpNode* survivor = NULL; for (int i = 0; i < choice_count; i++) { GuardedAlternative alternative = alternatives_->at(i); - RegExpNode* replacement = alternative.node()->FilterASCII(depth - 1); + RegExpNode* replacement = + alternative.node()->FilterASCII(depth - 1, ignore_case); ASSERT(replacement != this); // No missing EMPTY_MATCH_CHECK. if (replacement != NULL) { alternatives_->at(i).set_node(replacement); @@ -2952,7 +2917,7 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) { new(zone()) ZoneList<GuardedAlternative>(surviving, zone()); for (int i = 0; i < choice_count; i++) { RegExpNode* replacement = - alternatives_->at(i).node()->FilterASCII(depth - 1); + alternatives_->at(i).node()->FilterASCII(depth - 1, ignore_case); if (replacement != NULL) { alternatives_->at(i).set_node(replacement); new_alternatives->Add(alternatives_->at(i), zone()); @@ -2963,7 +2928,8 @@ RegExpNode* ChoiceNode::FilterASCII(int depth) { } -RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth) { +RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth, + bool ignore_case) { if (info()->replacement_calculated) return replacement(); if (depth < 0) return this; if (info()->visited) return this; @@ -2971,12 +2937,12 @@ RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth) { // Alternative 0 is the negative lookahead, alternative 1 is what comes // afterwards. RegExpNode* node = alternatives_->at(1).node(); - RegExpNode* replacement = node->FilterASCII(depth - 1); + RegExpNode* replacement = node->FilterASCII(depth - 1, ignore_case); if (replacement == NULL) return set_replacement(NULL); alternatives_->at(1).set_node(replacement); RegExpNode* neg_node = alternatives_->at(0).node(); - RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1); + RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1, ignore_case); // If the negative lookahead is always going to fail then // we don't need to check it. 
if (neg_replacement == NULL) return set_replacement(replacement); @@ -2999,19 +2965,15 @@ void LoopChoiceNode::GetQuickCheckDetails(QuickCheckDetails* details, void LoopChoiceNode::FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { - if (body_can_be_zero_length_ || - recursion_depth > RegExpCompiler::kMaxRecursion || - budget <= 0) { + if (body_can_be_zero_length_ || budget <= 0) { bm->SetRest(offset); SaveBMInfo(bm, not_at_start, offset); return; } - ChoiceNode::FillInBMInfo( - offset, recursion_depth + 1, budget - 1, bm, not_at_start); + ChoiceNode::FillInBMInfo(offset, budget - 1, bm, not_at_start); SaveBMInfo(bm, not_at_start, offset); } @@ -3108,12 +3070,13 @@ void AssertionNode::EmitBoundaryCheck(RegExpCompiler* compiler, Trace* trace) { BoyerMooreLookahead* lookahead = bm_info(not_at_start); if (lookahead == NULL) { int eats_at_least = - Min(kMaxLookaheadForBoyerMoore, - EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start)); + Min(kMaxLookaheadForBoyerMoore, EatsAtLeast(kMaxLookaheadForBoyerMoore, + kRecursionBudget, + not_at_start)); if (eats_at_least >= 1) { BoyerMooreLookahead* bm = new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone()); - FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start); + FillInBMInfo(0, kRecursionBudget, bm, not_at_start); if (bm->at(0)->is_non_word()) next_is_word_character = Trace::FALSE; if (bm->at(0)->is_word()) next_is_word_character = Trace::TRUE; } @@ -3299,7 +3262,7 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, switch (pass) { case NON_ASCII_MATCH: ASSERT(ascii); - if (quarks[j] > String::kMaxAsciiCharCode) { + if (quarks[j] > String::kMaxOneByteCharCode) { assembler->GoTo(backtrack); return; } @@ -3498,7 +3461,7 @@ RegExpNode* TextNode::GetSuccessorOfOmnivorousTextNode( if (ranges->length() != 1) return NULL; uint32_t max_char; if (compiler->ascii()) { - max_char = String::kMaxAsciiCharCode; + max_char = String::kMaxOneByteCharCode; } else { max_char = String::kMaxUtf16CodeUnit; } @@ -3698,7 +3661,7 @@ BoyerMooreLookahead::BoyerMooreLookahead( : length_(length), compiler_(compiler) { if (compiler->ascii()) { - max_char_ = String::kMaxAsciiCharCode; + max_char_ = String::kMaxOneByteCharCode; } else { max_char_ = String::kMaxUtf16CodeUnit; } @@ -4045,16 +4008,17 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) { ASSERT(trace->is_trivial()); // This is the case on LoopChoiceNodes. BoyerMooreLookahead* lookahead = bm_info(not_at_start); if (lookahead == NULL) { - eats_at_least = - Min(kMaxLookaheadForBoyerMoore, - EatsAtLeast(kMaxLookaheadForBoyerMoore, 0, not_at_start)); + eats_at_least = Min(kMaxLookaheadForBoyerMoore, + EatsAtLeast(kMaxLookaheadForBoyerMoore, + kRecursionBudget, + not_at_start)); if (eats_at_least >= 1) { BoyerMooreLookahead* bm = new(zone()) BoyerMooreLookahead(eats_at_least, compiler, zone()); GuardedAlternative alt0 = alternatives_->at(0); - alt0.node()->FillInBMInfo(0, 0, kFillInBMBudget, bm, not_at_start); + alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, not_at_start); skip_was_emitted = bm->EmitSkipInstructions(macro_assembler); } } else { @@ -4066,7 +4030,8 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) { if (eats_at_least == kEatsAtLeastNotYetInitialized) { // Save some time by looking at most one machine word ahead. - eats_at_least = EatsAtLeast(compiler->ascii() ? 4 : 2, 0, not_at_start); + eats_at_least = + EatsAtLeast(compiler->ascii() ? 
4 : 2, kRecursionBudget, not_at_start); } int preload_characters = CalculatePreloadCharacters(compiler, eats_at_least); @@ -5336,9 +5301,9 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges, Isolate* isolate = Isolate::Current(); uc16 bottom = from(); uc16 top = to(); - if (is_ascii) { - if (bottom > String::kMaxAsciiCharCode) return; - if (top > String::kMaxAsciiCharCode) top = String::kMaxAsciiCharCode; + if (is_ascii && !RangeContainsLatin1Equivalents(*this)) { + if (bottom > String::kMaxOneByteCharCode) return; + if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode; } unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth]; if (top == bottom) { @@ -5822,7 +5787,6 @@ void Analysis::VisitAssertion(AssertionNode* that) { void BackReferenceNode::FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { @@ -5838,7 +5802,6 @@ STATIC_ASSERT(BoyerMoorePositionInfo::kMapSize == void ChoiceNode::FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { @@ -5851,15 +5814,13 @@ void ChoiceNode::FillInBMInfo(int offset, SaveBMInfo(bm, not_at_start, offset); return; } - alt.node()->FillInBMInfo( - offset, recursion_depth + 1, budget, bm, not_at_start); + alt.node()->FillInBMInfo(offset, budget, bm, not_at_start); } SaveBMInfo(bm, not_at_start, offset); } void TextNode::FillInBMInfo(int initial_offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { @@ -5885,7 +5846,7 @@ void TextNode::FillInBMInfo(int initial_offset, int length = GetCaseIndependentLetters( ISOLATE, character, - bm->max_char() == String::kMaxAsciiCharCode, + bm->max_char() == String::kMaxOneByteCharCode, chars); for (int j = 0; j < length; j++) { bm->Set(offset, chars[j]); @@ -5916,7 +5877,6 @@ void TextNode::FillInBMInfo(int initial_offset, return; } on_success()->FillInBMInfo(offset, - recursion_depth + 1, budget - 1, bm, true); // Not at start after a text node. @@ -6099,10 +6059,12 @@ RegExpEngine::CompilationResult RegExpEngine::Compile( } } if (is_ascii) { - node = node->FilterASCII(RegExpCompiler::kMaxRecursion); + node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case); // Do it again to propagate the new nodes to places where they were not // put because they had not been calculated yet. - if (node != NULL) node = node->FilterASCII(RegExpCompiler::kMaxRecursion); + if (node != NULL) { + node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case); + } } if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone); diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h index 96825cef21..625f1925e3 100644 --- a/deps/v8/src/jsregexp.h +++ b/deps/v8/src/jsregexp.h @@ -153,17 +153,17 @@ class RegExpImpl { bool is_global, Isolate* isolate); - ~GlobalCache(); + INLINE(~GlobalCache()); // Fetch the next entry in the cache for global regexp match results. // This does not set the last match info. Upon failure, NULL is returned. // The cause can be checked with Result(). The previous // result is still in available in memory when a failure happens. 
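FetchNext, now inlined in jsregexp-inl.h above, serves global matches out of a batch of results in register_array_ and only reruns the engine when the batch is exhausted; crucially, if the previous match was empty it restarts one position further on, so a global scan over a pattern that can match the empty string still terminates. The same empty-match rule, sketched standalone with std::regex rather than Irregexp:

#include <iostream>
#include <regex>
#include <string>
#include <utility>
#include <vector>

// All matches of `re` in `subject` as [start, end) pairs. After an empty
// match the scan advances by one position, the same rule FetchNext applies
// when last_start_index == last_end_index.
std::vector<std::pair<size_t, size_t>> GlobalMatches(const std::string& subject,
                                                     const std::regex& re) {
  std::vector<std::pair<size_t, size_t>> matches;
  size_t pos = 0;
  while (pos <= subject.size()) {
    std::string tail = subject.substr(pos);
    std::smatch m;
    if (!std::regex_search(tail, m, re)) break;
    size_t start = pos + static_cast<size_t>(m.position(0));
    size_t end = start + static_cast<size_t>(m.length(0));
    matches.push_back({start, end});
    pos = (start == end) ? end + 1 : end;  // bump past an empty match
  }
  return matches;
}

int main() {
  // "a*" over "baab" yields "", "aa", "", "" at positions 0, 1, 3 and 4.
  for (const auto& m : GlobalMatches("baab", std::regex("a*"))) {
    std::cout << m.first << ".." << m.second << "\n";
  }
}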
- int32_t* FetchNext(); + INLINE(int32_t* FetchNext()); - int32_t* LastSuccessfulMatch(); + INLINE(int32_t* LastSuccessfulMatch()); - inline bool HasException() { return num_matches_ < 0; } + INLINE(bool HasException()) { return num_matches_ < 0; } private: int num_matches_; @@ -582,9 +582,7 @@ class RegExpNode: public ZoneObject { // used to indicate that we know we are not at the start of the input. In // this case anchored branches will always fail and can be ignored when // determining how many characters are consumed on success. - virtual int EatsAtLeast(int still_to_find, - int recursion_depth, - bool not_at_start) = 0; + virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start) = 0; // Emits some quick code that checks whether the preloaded characters match. // Falls through on certain failure, jumps to the label on possible success. // If the node cannot make a quick check it does nothing and returns false. @@ -616,9 +614,8 @@ class RegExpNode: public ZoneObject { // implementation. TODO(erikcorry): This should share more code with // EatsAtLeast, GetQuickCheckDetails. The budget argument is used to limit // the number of nodes we are willing to look at in order to create this data. - static const int kFillInBMBudget = 200; + static const int kRecursionBudget = 200; virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { @@ -628,7 +625,7 @@ class RegExpNode: public ZoneObject { // If we know that the input is ASCII then there are some nodes that can // never match. This method returns a node that can be substituted for // itself, or NULL if the node can never match. - virtual RegExpNode* FilterASCII(int depth) { return this; } + virtual RegExpNode* FilterASCII(int depth, bool ignore_case) { return this; } // Helper for FilterASCII. 
RegExpNode* replacement() { ASSERT(info()->replacement_calculated); @@ -723,19 +720,17 @@ class SeqRegExpNode: public RegExpNode { : RegExpNode(on_success->zone()), on_success_(on_success) { } RegExpNode* on_success() { return on_success_; } void set_on_success(RegExpNode* node) { on_success_ = node; } - virtual RegExpNode* FilterASCII(int depth); + virtual RegExpNode* FilterASCII(int depth, bool ignore_case); virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { - on_success_->FillInBMInfo( - offset, recursion_depth + 1, budget - 1, bm, not_at_start); + on_success_->FillInBMInfo(offset, budget - 1, bm, not_at_start); if (offset == 0) set_bm_info(not_at_start, bm); } protected: - RegExpNode* FilterSuccessor(int depth); + RegExpNode* FilterSuccessor(int depth, bool ignore_case); private: RegExpNode* on_success_; @@ -773,9 +768,7 @@ class ActionNode: public SeqRegExpNode { RegExpNode* on_success); virtual void Accept(NodeVisitor* visitor); virtual void Emit(RegExpCompiler* compiler, Trace* trace); - virtual int EatsAtLeast(int still_to_find, - int recursion_depth, - bool not_at_start); + virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start); virtual void GetQuickCheckDetails(QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in, @@ -784,7 +777,6 @@ class ActionNode: public SeqRegExpNode { details, compiler, filled_in, not_at_start); } virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start); @@ -843,9 +835,7 @@ class TextNode: public SeqRegExpNode { } virtual void Accept(NodeVisitor* visitor); virtual void Emit(RegExpCompiler* compiler, Trace* trace); - virtual int EatsAtLeast(int still_to_find, - int recursion_depth, - bool not_at_start); + virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start); virtual void GetQuickCheckDetails(QuickCheckDetails* details, RegExpCompiler* compiler, int characters_filled_in, @@ -856,12 +846,11 @@ class TextNode: public SeqRegExpNode { virtual RegExpNode* GetSuccessorOfOmnivorousTextNode( RegExpCompiler* compiler); virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start); void CalculateOffsets(); - virtual RegExpNode* FilterASCII(int depth); + virtual RegExpNode* FilterASCII(int depth, bool ignore_case); private: enum TextEmitPassType { @@ -911,15 +900,12 @@ class AssertionNode: public SeqRegExpNode { } virtual void Accept(NodeVisitor* visitor); virtual void Emit(RegExpCompiler* compiler, Trace* trace); - virtual int EatsAtLeast(int still_to_find, - int recursion_depth, - bool not_at_start); + virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start); virtual void GetQuickCheckDetails(QuickCheckDetails* details, RegExpCompiler* compiler, int filled_in, bool not_at_start); virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start); @@ -960,7 +946,6 @@ class BackReferenceNode: public SeqRegExpNode { return; } virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start); @@ -989,7 +974,6 @@ class EndNode: public RegExpNode { UNREACHABLE(); } virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { @@ -1075,11 +1059,9 @@ class ChoiceNode: public RegExpNode { ZoneList<GuardedAlternative>* alternatives() { return alternatives_; } 
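The jsregexp changes above replace the growing recursion_depth parameter with a budget that is decremented and checked on each recursive call (kRecursionBudget, then budget - 1 at every forwarding step). A minimal sketch of that pattern, using a hypothetical Node struct with a plain successor chain rather than V8's RegExpNode hierarchy:

#include <cstdio>

// Hypothetical node type; V8's real RegExpNode hierarchy is far richer.
struct Node {
  Node* next;   // successor in the graph, may be NULL
  int weight;   // work contributed by this node
};

// Walk the successor chain but give up once the budget runs out. Each step
// forwards budget - 1, mirroring the EatsAtLeast/FillInBMInfo calls above.
static int SumWithBudget(Node* node, int budget) {
  if (node == NULL || budget <= 0) return 0;   // budget exhausted: stop early
  return node->weight + SumWithBudget(node->next, budget - 1);
}

int main() {
  Node c = { NULL, 3 };
  Node b = { &c, 2 };
  Node a = { &b, 1 };
  // With a budget of 2 only a and b are visited, so this prints 3.
  std::printf("%d\n", SumWithBudget(&a, 2));
  return 0;
}

The top-level callers pick the limit (kRecursionBudget); interior nodes only forward budget - 1, so no per-node depth bookkeeping is needed.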
DispatchTable* GetTable(bool ignore_case); virtual void Emit(RegExpCompiler* compiler, Trace* trace); - virtual int EatsAtLeast(int still_to_find, - int recursion_depth, - bool not_at_start); + virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start); int EatsAtLeastHelper(int still_to_find, - int recursion_depth, + int budget, RegExpNode* ignore_this_node, bool not_at_start); virtual void GetQuickCheckDetails(QuickCheckDetails* details, @@ -1087,7 +1069,6 @@ class ChoiceNode: public RegExpNode { int characters_filled_in, bool not_at_start); virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start); @@ -1097,7 +1078,7 @@ class ChoiceNode: public RegExpNode { void set_not_at_start() { not_at_start_ = true; } void set_being_calculated(bool b) { being_calculated_ = b; } virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; } - virtual RegExpNode* FilterASCII(int depth); + virtual RegExpNode* FilterASCII(int depth, bool ignore_case); protected: int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative); @@ -1133,20 +1114,17 @@ class NegativeLookaheadChoiceNode: public ChoiceNode { AddAlternative(this_must_fail); AddAlternative(then_do_this); } - virtual int EatsAtLeast(int still_to_find, - int recursion_depth, - bool not_at_start); + virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start); virtual void GetQuickCheckDetails(QuickCheckDetails* details, RegExpCompiler* compiler, int characters_filled_in, bool not_at_start); virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start) { alternatives_->at(1).node()->FillInBMInfo( - offset, recursion_depth + 1, budget - 1, bm, not_at_start); + offset, budget - 1, bm, not_at_start); if (offset == 0) set_bm_info(not_at_start, bm); } // For a negative lookahead we don't emit the quick check for the @@ -1155,7 +1133,7 @@ class NegativeLookaheadChoiceNode: public ChoiceNode { // characters, but on a negative lookahead the negative branch did not take // part in that calculation (EatsAtLeast) so the assumptions don't hold. 
virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; } - virtual RegExpNode* FilterASCII(int depth); + virtual RegExpNode* FilterASCII(int depth, bool ignore_case); }; @@ -1169,15 +1147,12 @@ class LoopChoiceNode: public ChoiceNode { void AddLoopAlternative(GuardedAlternative alt); void AddContinueAlternative(GuardedAlternative alt); virtual void Emit(RegExpCompiler* compiler, Trace* trace); - virtual int EatsAtLeast(int still_to_find, - int recursion_depth, - bool not_at_start); + virtual int EatsAtLeast(int still_to_find, int budget, bool not_at_start); virtual void GetQuickCheckDetails(QuickCheckDetails* details, RegExpCompiler* compiler, int characters_filled_in, bool not_at_start); virtual void FillInBMInfo(int offset, - int recursion_depth, int budget, BoyerMooreLookahead* bm, bool not_at_start); @@ -1185,7 +1160,7 @@ class LoopChoiceNode: public ChoiceNode { RegExpNode* continue_node() { return continue_node_; } bool body_can_be_zero_length() { return body_can_be_zero_length_; } virtual void Accept(NodeVisitor* visitor); - virtual RegExpNode* FilterASCII(int depth); + virtual RegExpNode* FilterASCII(int depth, bool ignore_case); private: // AddAlternative is made private for loop nodes because alternatives diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h index 60a033df04..7a84313cd6 100644 --- a/deps/v8/src/list-inl.h +++ b/deps/v8/src/list-inl.h @@ -85,8 +85,9 @@ void List<T, P>::ResizeAddInternal(const T& element, P alloc) { template<typename T, class P> void List<T, P>::Resize(int new_capacity, P alloc) { + ASSERT_LE(length_, new_capacity); T* new_data = NewData(new_capacity, alloc); - memcpy(new_data, data_, capacity_ * sizeof(T)); + memcpy(new_data, data_, length_ * sizeof(T)); List<T, P>::DeleteData(data_); data_ = new_data; capacity_ = new_capacity; @@ -162,6 +163,14 @@ void List<T, P>::Rewind(int pos) { template<typename T, class P> +void List<T, P>::Trim(P alloc) { + if (length_ < capacity_ / 4) { + Resize(capacity_ / 2, alloc); + } +} + + +template<typename T, class P> void List<T, P>::Iterate(void (*callback)(T* x)) { for (int i = 0; i < length_; i++) callback(&data_[i]); } diff --git a/deps/v8/src/list.h b/deps/v8/src/list.h index 7fd4f5cd2d..43d982f687 100644 --- a/deps/v8/src/list.h +++ b/deps/v8/src/list.h @@ -149,6 +149,9 @@ class List { // Drop the last 'count' elements from the list. INLINE(void RewindBy(int count)) { Rewind(length_ - count); } + // Halve the capacity if fill level is less than a quarter. 
+ INLINE(void Trim(AllocationPolicy allocator = AllocationPolicy())); + bool Contains(const T& elm) const; int CountOccurrences(const T& elm, int start, int end) const; diff --git a/deps/v8/src/lithium-allocator-inl.h b/deps/v8/src/lithium-allocator-inl.h index 8f660ce0e0..a6d053aa72 100644 --- a/deps/v8/src/lithium-allocator-inl.h +++ b/deps/v8/src/lithium-allocator-inl.h @@ -99,6 +99,7 @@ bool InputIterator::Done() { return current_ >= limit_; } LOperand* InputIterator::Current() { ASSERT(!Done()); + ASSERT(instr_->InputAt(current_) != NULL); return instr_->InputAt(current_); } @@ -110,7 +111,9 @@ void InputIterator::Advance() { void InputIterator::SkipUninteresting() { - while (current_ < limit_ && instr_->InputAt(current_)->IsConstantOperand()) { + while (current_ < limit_) { + LOperand* current = instr_->InputAt(current_); + if (current != NULL && !current->IsConstantOperand()) break; ++current_; } } @@ -127,9 +130,11 @@ bool UseIterator::Done() { LOperand* UseIterator::Current() { ASSERT(!Done()); - return input_iterator_.Done() + LOperand* result = input_iterator_.Done() ? env_iterator_.Current() : input_iterator_.Current(); + ASSERT(result != NULL); + return result; } @@ -139,6 +144,21 @@ void UseIterator::Advance() { : input_iterator_.Advance(); } + +void LAllocator::SetLiveRangeAssignedRegister( + LiveRange* range, + int reg, + RegisterKind register_kind, + Zone* zone) { + if (register_kind == DOUBLE_REGISTERS) { + assigned_double_registers_->Add(reg); + } else { + assigned_registers_->Add(reg); + } + range->set_assigned_register(reg, register_kind, zone); +} + + } } // namespace v8::internal #endif // V8_LITHIUM_ALLOCATOR_INL_H_ diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc index 91a98112b6..7049a58fdf 100644 --- a/deps/v8/src/lithium-allocator.cc +++ b/deps/v8/src/lithium-allocator.cc @@ -196,6 +196,18 @@ UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial( } +UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial( + LifetimePosition start) { + UsePosition* pos = first_pos(); + UsePosition* prev = NULL; + while (pos != NULL && pos->pos().Value() < start.Value()) { + if (pos->RegisterIsBeneficial()) prev = pos; + pos = pos->next(); + } + return prev; +} + + UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) { UsePosition* pos = NextUsePosition(start); while (pos != NULL && !pos->RequiresRegister()) { @@ -206,9 +218,6 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) { bool LiveRange::CanBeSpilled(LifetimePosition pos) { - // TODO(kmillikin): Comment. Now. - if (pos.Value() <= Start().Value() && HasRegisterAssigned()) return false; - // We cannot spill a live range that has a use requiring a register // at the current or the immediate next position. 
UsePosition* use_pos = NextRegisterPosition(pos); @@ -606,7 +615,7 @@ void LAllocator::AddInitialIntervals(HBasicBlock* block, int LAllocator::FixedDoubleLiveRangeID(int index) { - return -index - 1 - Register::kNumAllocatableRegisters; + return -index - 1 - Register::kMaxNumAllocatableRegisters; } @@ -638,12 +647,12 @@ LOperand* LAllocator::AllocateFixed(LUnallocated* operand, LiveRange* LAllocator::FixedLiveRangeFor(int index) { - ASSERT(index < Register::kNumAllocatableRegisters); + ASSERT(index < Register::kMaxNumAllocatableRegisters); LiveRange* result = fixed_live_ranges_[index]; if (result == NULL) { result = new(zone_) LiveRange(FixedLiveRangeID(index), zone_); ASSERT(result->IsFixed()); - result->set_assigned_register(index, GENERAL_REGISTERS, zone_); + SetLiveRangeAssignedRegister(result, index, GENERAL_REGISTERS, zone_); fixed_live_ranges_[index] = result; } return result; @@ -651,12 +660,12 @@ LiveRange* LAllocator::FixedLiveRangeFor(int index) { LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) { - ASSERT(index < DoubleRegister::kNumAllocatableRegisters); + ASSERT(index < DoubleRegister::NumAllocatableRegisters()); LiveRange* result = fixed_double_live_ranges_[index]; if (result == NULL) { result = new(zone_) LiveRange(FixedDoubleLiveRangeID(index), zone_); ASSERT(result->IsFixed()); - result->set_assigned_register(index, DOUBLE_REGISTERS, zone_); + SetLiveRangeAssignedRegister(result, index, DOUBLE_REGISTERS, zone_); fixed_double_live_ranges_[index] = result; } return result; @@ -768,6 +777,7 @@ void LAllocator::AddConstraintsGapMove(int index, void LAllocator::MeetRegisterConstraints(HBasicBlock* block) { int start = block->first_instruction_index(); int end = block->last_instruction_index(); + if (start == -1) return; for (int i = start; i <= end; ++i) { if (IsGapAt(i)) { LInstruction* instr = NULL; @@ -841,8 +851,9 @@ void LAllocator::MeetConstraintsBetween(LInstruction* first, ASSERT(!cur_input->IsUsedAtStart()); LUnallocated* input_copy = cur_input->CopyUnconstrained(zone()); - cur_input->set_virtual_register(GetVirtualRegister()); + int vreg = GetVirtualRegister(); if (!AllocationOk()) return; + cur_input->set_virtual_register(vreg); if (RequiredRegisterKind(input_copy->virtual_register()) == DOUBLE_REGISTERS) { @@ -946,8 +957,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) { Define(curr_position, output, NULL); } - if (instr->IsMarkedAsCall()) { - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + if (instr->ClobbersRegisters()) { + for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) { if (output == NULL || !output->IsRegister() || output->index() != i) { LiveRange* range = FixedLiveRangeFor(i); @@ -958,8 +969,8 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) { } } - if (instr->IsMarkedAsCall()) { - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { + if (instr->ClobbersDoubleRegisters()) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { if (output == NULL || !output->IsDoubleRegister() || output->index() != i) { LiveRange* range = FixedDoubleLiveRangeFor(i); @@ -989,7 +1000,7 @@ void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) { for (TempIterator it(instr); !it.Done(); it.Advance()) { LOperand* temp = it.Current(); - if (instr->IsMarkedAsCall()) { + if (instr->ClobbersTemps()) { if (temp->IsRegister()) continue; if (temp->IsUnallocated()) { LUnallocated* temp_unalloc = LUnallocated::cast(temp); @@ -1065,6 
+1076,13 @@ void LAllocator::ResolvePhis(HBasicBlock* block) { bool LAllocator::Allocate(LChunk* chunk) { ASSERT(chunk_ == NULL); chunk_ = static_cast<LPlatformChunk*>(chunk); + assigned_registers_ = + new(zone()) BitVector(Register::NumAllocatableRegisters(), zone()); + assigned_registers_->Clear(); + assigned_double_registers_ = + new(zone()) BitVector(DoubleRegister::NumAllocatableRegisters(), + zone()); + assigned_double_registers_->Clear(); MeetRegisterConstraints(); if (!AllocationOk()) return false; ResolvePhis(); @@ -1324,8 +1342,14 @@ void LAllocator::BuildLiveRanges() { while (!iterator.Done()) { found = true; int operand_index = iterator.Current(); - PrintF("Function: %s\n", - *chunk_->info()->function()->debug_name()->ToCString()); + if (chunk_->info()->IsStub()) { + CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey(); + PrintF("Function: %s\n", CodeStub::MajorName(major_key, false)); + } else { + ASSERT(chunk_->info()->IsOptimizing()); + PrintF("Function: %s\n", + *chunk_->info()->function()->debug_name()->ToCString()); + } PrintF("Value %d used before first definition!\n", operand_index); LiveRange* range = LiveRangeFor(operand_index); PrintF("First use is at %d\n", range->first_pos()->pos().Value()); @@ -1411,7 +1435,7 @@ void LAllocator::PopulatePointerMaps() { LifetimePosition safe_point_pos = LifetimePosition::FromInstructionIndex(safe_point); LiveRange* cur = range; - while (cur != NULL && !cur->Covers(safe_point_pos.PrevInstruction())) { + while (cur != NULL && !cur->Covers(safe_point_pos)) { cur = cur->next(); } if (cur == NULL) continue; @@ -1471,14 +1495,14 @@ void LAllocator::ProcessOsrEntry() { void LAllocator::AllocateGeneralRegisters() { HPhase phase("L_Allocate general registers", this); - num_registers_ = Register::kNumAllocatableRegisters; + num_registers_ = Register::NumAllocatableRegisters(); AllocateRegisters(); } void LAllocator::AllocateDoubleRegisters() { HPhase phase("L_Allocate double registers", this); - num_registers_ = DoubleRegister::kNumAllocatableRegisters; + num_registers_ = DoubleRegister::NumAllocatableRegisters(); mode_ = DOUBLE_REGISTERS; AllocateRegisters(); } @@ -1502,7 +1526,7 @@ void LAllocator::AllocateRegisters() { ASSERT(inactive_live_ranges_.is_empty()); if (mode_ == DOUBLE_REGISTERS) { - for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { LiveRange* current = fixed_double_live_ranges_.at(i); if (current != NULL) { AddToInactive(current); @@ -1757,14 +1781,14 @@ void LAllocator::InactiveToActive(LiveRange* range) { // TryAllocateFreeReg and AllocateBlockedReg assume this // when allocating local arrays. 
-STATIC_ASSERT(DoubleRegister::kNumAllocatableRegisters >= - Register::kNumAllocatableRegisters); +STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >= + Register::kMaxNumAllocatableRegisters); bool LAllocator::TryAllocateFreeReg(LiveRange* current) { - LifetimePosition free_until_pos[DoubleRegister::kNumAllocatableRegisters]; + LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters]; - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) { free_until_pos[i] = LifetimePosition::MaxPosition(); } @@ -1801,7 +1825,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) { TraceAlloc("Assigning preferred reg %s to live range %d\n", RegisterName(register_index), current->id()); - current->set_assigned_register(register_index, mode_, zone_); + SetLiveRangeAssignedRegister(current, register_index, mode_, zone_); return true; } } @@ -1837,7 +1861,7 @@ bool LAllocator::TryAllocateFreeReg(LiveRange* current) { TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg), current->id()); - current->set_assigned_register(reg, mode_, zone_); + SetLiveRangeAssignedRegister(current, reg, mode_, zone_); return true; } @@ -1853,10 +1877,10 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { } - LifetimePosition use_pos[DoubleRegister::kNumAllocatableRegisters]; - LifetimePosition block_pos[DoubleRegister::kNumAllocatableRegisters]; + LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters]; + LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters]; - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition(); } @@ -1903,12 +1927,6 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { if (pos.Value() < register_use->pos().Value()) { // All registers are blocked before the first use that requires a register. // Spill starting part of live range up to that use. - // - // Corner case: the first use position is equal to the start of the range. - // In this case we have nothing to spill and SpillBetween will just return - // this range to the list of unhandled ones. This will lead to the infinite - // loop. - ASSERT(current->Start().Value() < register_use->pos().Value()); SpillBetween(current, current->Start(), register_use->pos()); return; } @@ -1919,6 +1937,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { LiveRange* tail = SplitBetween(current, current->Start(), block_pos[reg].InstructionStart()); + if (!AllocationOk()) return; AddToUnhandledSorted(tail); } @@ -1927,7 +1946,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg), current->id()); - current->set_assigned_register(reg, mode_, zone_); + SetLiveRangeAssignedRegister(current, reg, mode_, zone_); // This register was not free. Thus we need to find and spill // parts of active and inactive live regions that use the same register @@ -1936,6 +1955,39 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) { } +LifetimePosition LAllocator::FindOptimalSpillingPos(LiveRange* range, + LifetimePosition pos) { + HBasicBlock* block = GetBlock(pos.InstructionStart()); + HBasicBlock* loop_header = + block->IsLoopHeader() ? 
block : block->parent_loop_header(); + + if (loop_header == NULL) return pos; + + UsePosition* prev_use = + range->PreviousUsePositionRegisterIsBeneficial(pos); + + while (loop_header != NULL) { + // We are going to spill live range inside the loop. + // If possible try to move spilling position backwards to loop header. + // This will reduce number of memory moves on the back edge. + LifetimePosition loop_start = LifetimePosition::FromInstructionIndex( + loop_header->first_instruction_index()); + + if (range->Covers(loop_start)) { + if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) { + // No register beneficial use inside the loop before the pos. + pos = loop_start; + } + } + + // Try hoisting out to an outer loop. + loop_header = loop_header->parent_loop_header(); + } + + return pos; +} + + void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { ASSERT(current->HasRegisterAssigned()); int reg = current->assigned_register(); @@ -1944,11 +1996,13 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { LiveRange* range = active_live_ranges_[i]; if (range->assigned_register() == reg) { UsePosition* next_pos = range->NextRegisterPosition(current->Start()); + LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos); if (next_pos == NULL) { - SpillAfter(range, split_pos); + SpillAfter(range, spill_pos); } else { - SpillBetween(range, split_pos, next_pos->pos()); + SpillBetween(range, spill_pos, next_pos->pos()); } + if (!AllocationOk()) return; ActiveToHandled(range); --i; } @@ -1967,6 +2021,7 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) { next_intersection = Min(next_intersection, next_pos->pos()); SpillBetween(range, split_pos, next_intersection); } + if (!AllocationOk()) return; InactiveToHandled(range); --i; } @@ -1992,8 +2047,9 @@ LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) { ASSERT(pos.IsInstructionStart() || !chunk_->instructions()->at(pos.InstructionIndex())->IsControl()); - LiveRange* result = LiveRangeFor(GetVirtualRegister()); + int vreg = GetVirtualRegister(); if (!AllocationOk()) return NULL; + LiveRange* result = LiveRangeFor(vreg); range->SplitAt(pos, result, zone_); return result; } @@ -2058,7 +2114,7 @@ void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) { void LAllocator::SpillBetween(LiveRange* range, LifetimePosition start, LifetimePosition end) { - ASSERT(start.Value() < end.Value()); + CHECK(start.Value() < end.Value()); LiveRange* second_part = SplitRangeAt(range, start); if (!AllocationOk()) return; @@ -2070,6 +2126,7 @@ void LAllocator::SpillBetween(LiveRange* range, second_part, second_part->Start().InstructionEnd(), end.PrevInstruction().InstructionEnd()); + if (!AllocationOk()) return; ASSERT(third_part != second_part); diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h index 5b05263575..70f3182bef 100644 --- a/deps/v8/src/lithium-allocator.h +++ b/deps/v8/src/lithium-allocator.h @@ -311,6 +311,10 @@ class LiveRange: public ZoneObject { // Modifies internal state of live range! UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start); + // Returns use position for which register is beneficial in this live + // range and which precedes start. + UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start); + // Can this live range be spilled at this position. 
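FindOptimalSpillingPos above hoists a spill out of a loop: it walks the chain of enclosing loop headers and, when the live range covers a header's first instruction and no register-beneficial use sits between that header and the requested position, moves the spill up to the header so the back edge does not repeat the memory move. A simplified sketch of the same walk, with plain integer positions and a hypothetical Loop struct standing in for HBasicBlock and LiveRange:

#include <cstdio>

// Hypothetical, simplified stand-ins for V8's HBasicBlock/LiveRange.
struct Loop {
  int start;     // position of the loop header's first instruction
  Loop* parent;  // enclosing loop, or NULL
};

// pos: where we were about to spill; range_start: where the live range begins;
// last_beneficial_use: last position before pos where a register really helps.
static int FindOptimalSpillingPos(int pos, int range_start,
                                  int last_beneficial_use, Loop* loop) {
  while (loop != NULL) {
    // Hoist the spill to the loop header if the range is live there and no
    // register-beneficial use would be stranded between header and pos.
    if (range_start <= loop->start && last_beneficial_use < loop->start) {
      pos = loop->start;
    }
    loop = loop->parent;  // try the next enclosing loop
  }
  return pos;
}

int main() {
  Loop outer = { 10, NULL };
  Loop inner = { 20, &outer };
  // Range live from 5, last useful register use at 12, spill requested at 25:
  // the inner header (20) qualifies, the outer one (10) does not. Prints 20.
  std::printf("%d\n", FindOptimalSpillingPos(25, 5, 12, &inner));
  return 0;
}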
bool CanBeSpilled(LifetimePosition pos); @@ -399,40 +403,6 @@ class LiveRange: public ZoneObject { }; -class GrowableBitVector BASE_EMBEDDED { - public: - GrowableBitVector() : bits_(NULL) { } - - bool Contains(int value) const { - if (!InBitsRange(value)) return false; - return bits_->Contains(value); - } - - void Add(int value, Zone* zone) { - EnsureCapacity(value, zone); - bits_->Add(value); - } - - private: - static const int kInitialLength = 1024; - - bool InBitsRange(int value) const { - return bits_ != NULL && bits_->length() > value; - } - - void EnsureCapacity(int value, Zone* zone) { - if (InBitsRange(value)) return; - int new_length = bits_ == NULL ? kInitialLength : bits_->length(); - while (new_length <= value) new_length *= 2; - BitVector* new_bits = new(zone) BitVector(new_length, zone); - if (bits_ != NULL) new_bits->CopyFrom(*bits_); - bits_ = new_bits; - } - - BitVector* bits_; -}; - - class LAllocator BASE_EMBEDDED { public: LAllocator(int first_virtual_register, HGraph* graph); @@ -457,11 +427,14 @@ class LAllocator BASE_EMBEDDED { LPlatformChunk* chunk() const { return chunk_; } HGraph* graph() const { return graph_; } + Isolate* isolate() const { return graph_->isolate(); } Zone* zone() const { return zone_; } int GetVirtualRegister() { - if (next_virtual_register_ > LUnallocated::kMaxVirtualRegisters) { + if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) { allocation_ok_ = false; + // Maintain the invariant that we return something below the maximum. + return 0; } return next_virtual_register_++; } @@ -479,6 +452,13 @@ class LAllocator BASE_EMBEDDED { void Verify() const; #endif + BitVector* assigned_registers() { + return assigned_registers_; + } + BitVector* assigned_double_registers() { + return assigned_double_registers_; + } + private: void MeetRegisterConstraints(); void ResolvePhis(); @@ -563,6 +543,11 @@ class LAllocator BASE_EMBEDDED { void SplitAndSpillIntersecting(LiveRange* range); + // If we are trying to spill a range inside the loop try to + // hoist spill position out to the point just before the loop. + LifetimePosition FindOptimalSpillingPos(LiveRange* range, + LifetimePosition pos); + void Spill(LiveRange* range); bool IsBlockBoundary(LifetimePosition pos); @@ -571,6 +556,11 @@ class LAllocator BASE_EMBEDDED { HBasicBlock* block, HBasicBlock* pred); + inline void SetLiveRangeAssignedRegister(LiveRange* range, + int reg, + RegisterKind register_kind, + Zone* zone); + // Return parallel move that should be used to connect ranges split at the // given position. 
LParallelMove* GetConnectingParallelMove(LifetimePosition pos); @@ -608,9 +598,9 @@ class LAllocator BASE_EMBEDDED { ZoneList<LiveRange*> live_ranges_; // Lists of live ranges - EmbeddedVector<LiveRange*, Register::kNumAllocatableRegisters> + EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters> fixed_live_ranges_; - EmbeddedVector<LiveRange*, DoubleRegister::kNumAllocatableRegisters> + EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters> fixed_double_live_ranges_; ZoneList<LiveRange*> unhandled_live_ranges_; ZoneList<LiveRange*> active_live_ranges_; @@ -625,6 +615,9 @@ class LAllocator BASE_EMBEDDED { RegisterKind mode_; int num_registers_; + BitVector* assigned_registers_; + BitVector* assigned_double_registers_; + HGraph* graph_; bool has_osr_entry_; diff --git a/deps/v8/src/lithium.cc b/deps/v8/src/lithium.cc index eb2198d854..09c0f4405f 100644 --- a/deps/v8/src/lithium.cc +++ b/deps/v8/src/lithium.cc @@ -174,6 +174,9 @@ void LParallelMove::PrintDataTo(StringStream* stream) const { void LEnvironment::PrintTo(StringStream* stream) { stream->Add("[id=%d|", ast_id().ToInt()); + if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) { + stream->Add("deopt_id=%d|", deoptimization_index()); + } stream->Add("[parameters=%d|", parameter_count()); stream->Add("[arguments_stack_height=%d|", arguments_stack_height()); for (int i = 0; i < values_.length(); ++i) { @@ -257,6 +260,28 @@ int ElementsKindToShiftSize(ElementsKind elements_kind) { } +int StackSlotOffset(int index) { + if (index >= 0) { + // Local or spill slot. Skip the frame pointer, function, and + // context in the fixed part of the frame. + return -(index + 3) * kPointerSize; + } else { + // Incoming parameter. Skip the return address. + return -(index - 1) * kPointerSize; + } +} + + +LChunk::LChunk(CompilationInfo* info, HGraph* graph) + : spill_slot_count_(0), + info_(info), + graph_(graph), + instructions_(32, graph->zone()), + pointer_maps_(8, graph->zone()), + inlined_closures_(1, graph->zone()) { +} + + LLabel* LChunk::GetLabel(int block_id) const { HBasicBlock* block = graph_->blocks()->at(block_id); int first_instruction = block->first_instruction_index(); @@ -391,7 +416,7 @@ Representation LChunk::LookupLiteralRepresentation( LChunk* LChunk::NewChunk(HGraph* graph) { - NoHandleAllocation no_handles; + NoHandleAllocation no_handles(graph->isolate()); AssertNoAllocation no_gc; int values = graph->GetMaximumValueID(); @@ -410,12 +435,18 @@ LChunk* LChunk::NewChunk(HGraph* graph) { return NULL; } + chunk->set_allocated_double_registers( + allocator.assigned_double_registers()); + return chunk; } -Handle<Code> LChunk::Codegen() { +Handle<Code> LChunk::Codegen(Code::Kind kind) { MacroAssembler assembler(info()->isolate(), NULL, 0); + LOG_CODE_EVENT(info()->isolate(), + CodeStartLinePosInfoRecordEvent( + assembler.positions_recorder())); LCodeGen generator(this, &assembler, info()); MarkEmptyBlocks(); @@ -425,10 +456,18 @@ Handle<Code> LChunk::Codegen() { PrintF("Crankshaft Compiler - "); } CodeGenerator::MakeCodePrologue(info()); - Code::Flags flags = Code::ComputeFlags(Code::OPTIMIZED_FUNCTION); + Code::Flags flags = Code::ComputeFlags(kind); Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&assembler, flags, info()); generator.FinishCode(code); + + if (!code.is_null()) { + void* jit_handler_data = + assembler.positions_recorder()->DetachJITHandlerData(); + LOG_CODE_EVENT(info()->isolate(), + CodeEndLinePosInfoRecordEvent(*code, jit_handler_data)); + } + CodeGenerator::PrintCode(code, 
info()); return code; } @@ -436,4 +475,21 @@ Handle<Code> LChunk::Codegen() { } +void LChunk::set_allocated_double_registers(BitVector* allocated_registers) { + allocated_double_registers_ = allocated_registers; + BitVector* doubles = allocated_double_registers(); + BitVector::Iterator iterator(doubles); + while (!iterator.Done()) { + if (info()->saves_caller_doubles()) { + if (kDoubleSize == kPointerSize * 2) { + spill_slot_count_ += 2; + } else { + spill_slot_count_++; + } + } + iterator.Advance(); + } +} + + } } // namespace v8::internal diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h index 089926e71a..9d5b0b9eec 100644 --- a/deps/v8/src/lithium.h +++ b/deps/v8/src/lithium.h @@ -581,6 +581,7 @@ class ShallowIterator BASE_EMBEDDED { LOperand* Current() { ASSERT(!Done()); + ASSERT(env_->values()->at(current_) != NULL); return env_->values()->at(current_); } @@ -622,6 +623,7 @@ class DeepIterator BASE_EMBEDDED { LOperand* Current() { ASSERT(!current_iterator_.Done()); + ASSERT(current_iterator_.Current() != NULL); return current_iterator_.Current(); } @@ -661,6 +663,7 @@ class LChunk: public ZoneObject { int spill_slot_count() const { return spill_slot_count_; } CompilationInfo* info() const { return info_; } HGraph* graph() const { return graph_; } + Isolate* isolate() const { return graph_->isolate(); } const ZoneList<LInstruction*>* instructions() const { return &instructions_; } void AddGapMove(int index, LOperand* from, LOperand* to); LGap* GetGapAt(int index) const; @@ -682,22 +685,22 @@ class LChunk: public ZoneObject { Zone* zone() const { return info_->zone(); } - Handle<Code> Codegen(); + Handle<Code> Codegen(Code::Kind kind); + + void set_allocated_double_registers(BitVector* allocated_registers); + BitVector* allocated_double_registers() { + return allocated_double_registers_; + } protected: - LChunk(CompilationInfo* info, HGraph* graph) - : spill_slot_count_(0), - info_(info), - graph_(graph), - instructions_(32, graph->zone()), - pointer_maps_(8, graph->zone()), - inlined_closures_(1, graph->zone()) { } + LChunk(CompilationInfo* info, HGraph* graph); int spill_slot_count_; private: CompilationInfo* info_; HGraph* const graph_; + BitVector* allocated_double_registers_; ZoneList<LInstruction*> instructions_; ZoneList<LPointerMap*> pointer_maps_; ZoneList<Handle<JSFunction> > inlined_closures_; @@ -705,6 +708,14 @@ class LChunk: public ZoneObject { int ElementsKindToShiftSize(ElementsKind elements_kind); +int StackSlotOffset(int index); + +enum NumberUntagDMode { + NUMBER_CANDIDATE_IS_SMI, + NUMBER_CANDIDATE_IS_SMI_OR_HOLE, + NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE, + NUMBER_CANDIDATE_IS_ANY_TAGGED +}; } } // namespace v8::internal diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js index cfcdb818c9..451b146bde 100644 --- a/deps/v8/src/liveedit-debugger.js +++ b/deps/v8/src/liveedit-debugger.js @@ -76,7 +76,17 @@ Debug.LiveEdit = new function() { try { new_compile_info = GatherCompileInfo(new_source, script); } catch (e) { - throw new Failure("Failed to compile new version of script: " + e); + var failure = + new Failure("Failed to compile new version of script: " + e); + if (e instanceof SyntaxError) { + var details = { + type: "liveedit_compile_error", + syntaxErrorMessage: e.message + }; + CopyErrorPositionToDetails(e, details); + failure.details = details; + } + throw failure; } var root_new_node = BuildCodeInfoTree(new_compile_info); @@ -978,6 +988,31 @@ Debug.LiveEdit = new function() { return "LiveEdit Failure: " + this.message; }; + 
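The StackSlotOffset helper added to lithium.cc above turns a slot index into a frame-pointer-relative byte offset: non-negative indices are locals or spill slots below the fixed part of the frame (frame pointer, function, context), while negative indices are incoming parameters above the return address. A standalone worked example, assuming an illustrative 8-byte pointer size and offsets read relative to the frame pointer:

#include <cstdio>

static const int kPointerSize = 8;  // illustrative; the real value is per-platform

// Mirrors the helper added in lithium.cc: skip fp, function and context for
// locals/spill slots, and skip the return address for incoming parameters.
static int StackSlotOffset(int index) {
  if (index >= 0) {
    return -(index + 3) * kPointerSize;
  } else {
    return -(index - 1) * kPointerSize;
  }
}

int main() {
  std::printf("spill slot 0 -> %d\n", StackSlotOffset(0));   // -24
  std::printf("spill slot 1 -> %d\n", StackSlotOffset(1));   // -32
  std::printf("parameter -1 -> %d\n", StackSlotOffset(-1));  // +16
  return 0;
}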
function CopyErrorPositionToDetails(e, details) { + function createPositionStruct(script, position) { + if (position == -1) return; + var location = script.locationFromPosition(position, true); + if (location == null) return; + return { + line: location.line + 1, + column: location.column + 1, + position: position + }; + } + + if (!("scriptObject" in e) || !("startPosition" in e)) { + return; + } + + var script = e.scriptObject; + + var position_struct = { + start: createPositionStruct(script, e.startPosition), + end: createPositionStruct(script, e.endPosition) + }; + details.position = position_struct; + } + // A testing entry. function GetPcFromSourcePos(func, source_pos) { return %GetFunctionCodePositionFromSource(func, source_pos); diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc index 2a3aafc1f1..58c846a888 100644 --- a/deps/v8/src/liveedit.cc +++ b/deps/v8/src/liveedit.cc @@ -36,6 +36,7 @@ #include "debug.h" #include "deoptimizer.h" #include "global-handles.h" +#include "messages.h" #include "parser.h" #include "scopeinfo.h" #include "scopes.h" @@ -348,23 +349,26 @@ static void NarrowDownInput(SubrangableInput* input, // Each chunk is stored as 3 array elements: (pos1_begin, pos1_end, pos2_end). class CompareOutputArrayWriter { public: - CompareOutputArrayWriter() - : array_(FACTORY->NewJSArray(10)), current_size_(0) {} + explicit CompareOutputArrayWriter(Isolate* isolate) + : array_(isolate->factory()->NewJSArray(10)), current_size_(0) {} Handle<JSArray> GetResult() { return array_; } void WriteChunk(int char_pos1, int char_pos2, int char_len1, int char_len2) { + Isolate* isolate = array_->GetIsolate(); SetElementNonStrict(array_, - current_size_, - Handle<Object>(Smi::FromInt(char_pos1))); + current_size_, + Handle<Object>(Smi::FromInt(char_pos1), isolate)); SetElementNonStrict(array_, current_size_ + 1, - Handle<Object>(Smi::FromInt(char_pos1 + char_len1))); + Handle<Object>(Smi::FromInt(char_pos1 + char_len1), + isolate)); SetElementNonStrict(array_, current_size_ + 2, - Handle<Object>(Smi::FromInt(char_pos2 + char_len2))); + Handle<Object>(Smi::FromInt(char_pos2 + char_len2), + isolate)); current_size_ += 3; } @@ -526,7 +530,8 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput { TokenizingLineArrayCompareOutput(LineEndsWrapper line_ends1, LineEndsWrapper line_ends2, Handle<String> s1, Handle<String> s2) - : line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2), + : array_writer_(s1->GetIsolate()), + line_ends1_(line_ends1), line_ends2_(line_ends2), s1_(s1), s2_(s2), subrange_offset1_(0), subrange_offset2_(0) { } @@ -541,7 +546,7 @@ class TokenizingLineArrayCompareOutput : public SubrangableOutput { if (char_len1 < CHUNK_LEN_LIMIT && char_len2 < CHUNK_LEN_LIMIT) { // Chunk is small enough to conduct a nested token-level diff. 
- HandleScope subTaskScope; + HandleScope subTaskScope(s1_->GetIsolate()); TokensCompareInput tokens_input(s1_, char_pos1, char_len1, s2_, char_pos2, char_len2); @@ -619,7 +624,7 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) { // Unwraps JSValue object, returning its field "value" static Handle<Object> UnwrapJSValue(Handle<JSValue> jsValue) { - return Handle<Object>(jsValue->value()); + return Handle<Object>(jsValue->value(), jsValue->GetIsolate()); } @@ -670,6 +675,9 @@ class JSArrayBasedStruct { Handle<JSArray> GetJSArray() { return array_; } + Isolate* isolate() const { + return array_->GetIsolate(); + } protected: void SetField(int field_position, Handle<Object> value) { @@ -678,7 +686,7 @@ class JSArrayBasedStruct { void SetSmiValueField(int field_position, int value) { SetElementNonStrict(array_, field_position, - Handle<Smi>(Smi::FromInt(value))); + Handle<Smi>(Smi::FromInt(value), isolate())); } Object* GetField(int field_position) { return array_->GetElementNoExceptionThrown(field_position); @@ -703,12 +711,14 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> { : JSArrayBasedStruct<FunctionInfoWrapper>(array) { } void SetInitialProperties(Handle<String> name, int start_position, - int end_position, int param_num, int parent_index) { - HandleScope scope; + int end_position, int param_num, + int literal_count, int parent_index) { + HandleScope scope(isolate()); this->SetField(kFunctionNameOffset_, name); this->SetSmiValueField(kStartPositionOffset_, start_position); this->SetSmiValueField(kEndPositionOffset_, end_position); this->SetSmiValueField(kParamNumOffset_, param_num); + this->SetSmiValueField(kLiteralNumOffset_, literal_count); this->SetSmiValueField(kParentIndexOffset_, parent_index); } void SetFunctionCode(Handle<Code> function_code, @@ -726,6 +736,9 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> { Handle<JSValue> info_holder = WrapInJSValue(info); this->SetField(kSharedFunctionInfoOffset_, info_holder); } + int GetLiteralCount() { + return this->GetSmiValueField(kLiteralNumOffset_); + } int GetParentIndex() { return this->GetSmiValueField(kParentIndexOffset_); } @@ -759,7 +772,8 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> { static const int kOuterScopeInfoOffset_ = 6; static const int kParentIndexOffset_ = 7; static const int kSharedFunctionInfoOffset_ = 8; - static const int kSize_ = 9; + static const int kLiteralNumOffset_ = 9; + static const int kSize_ = 10; friend class JSArrayBasedStruct<FunctionInfoWrapper>; }; @@ -781,7 +795,7 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> { void SetProperties(Handle<String> name, int start_position, int end_position, Handle<SharedFunctionInfo> info) { - HandleScope scope; + HandleScope scope(isolate()); this->SetField(kFunctionNameOffset_, name); Handle<JSValue> info_holder = WrapInJSValue(info); this->SetField(kSharedInfoOffset_, info_holder); @@ -808,17 +822,18 @@ class SharedInfoWrapper : public JSArrayBasedStruct<SharedInfoWrapper> { class FunctionInfoListener { public: - FunctionInfoListener() { + explicit FunctionInfoListener(Isolate* isolate) { current_parent_index_ = -1; len_ = 0; - result_ = FACTORY->NewJSArray(10); + result_ = isolate->factory()->NewJSArray(10); } void FunctionStarted(FunctionLiteral* fun) { - HandleScope scope; + HandleScope scope(isolate()); FunctionInfoWrapper info = FunctionInfoWrapper::Create(); info.SetInitialProperties(fun->name(), 
fun->start_position(), fun->end_position(), fun->parameter_count(), + fun->materialized_literal_count(), current_parent_index_); current_parent_index_ = len_; SetElementNonStrict(result_, len_, info.GetJSArray()); @@ -826,7 +841,7 @@ class FunctionInfoListener { } void FunctionDone() { - HandleScope scope; + HandleScope scope(isolate()); FunctionInfoWrapper info = FunctionInfoWrapper::cast( result_->GetElementNoExceptionThrown(current_parent_index_)); @@ -839,7 +854,9 @@ class FunctionInfoListener { FunctionInfoWrapper info = FunctionInfoWrapper::cast( result_->GetElementNoExceptionThrown(current_parent_index_)); - info.SetFunctionCode(function_code, Handle<Object>(HEAP->null_value())); + info.SetFunctionCode(function_code, + Handle<Object>(isolate()->heap()->null_value(), + isolate())); } // Saves full information about a function: its code, its scope info @@ -853,20 +870,23 @@ class FunctionInfoListener { FunctionInfoWrapper::cast( result_->GetElementNoExceptionThrown(current_parent_index_)); info.SetFunctionCode(Handle<Code>(shared->code()), - Handle<Object>(shared->scope_info())); + Handle<Object>(shared->scope_info(), isolate())); info.SetSharedFunctionInfo(shared); - Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone)); + Handle<Object> scope_info_list(SerializeFunctionScope(scope, zone), + isolate()); info.SetOuterScopeInfo(scope_info_list); } Handle<JSArray> GetResult() { return result_; } private: + Isolate* isolate() const { return result_->GetIsolate(); } + Object* SerializeFunctionScope(Scope* scope, Zone* zone) { - HandleScope handle_scope; + HandleScope handle_scope(isolate()); - Handle<JSArray> scope_info_list = FACTORY->NewJSArray(10); + Handle<JSArray> scope_info_list = isolate()->factory()->NewJSArray(10); int scope_info_length = 0; // Saves some description of scope. It stores name and indexes of @@ -874,7 +894,7 @@ class FunctionInfoListener { // scopes of this chain. Scope* outer_scope = scope->outer_scope(); if (outer_scope == NULL) { - return HEAP->undefined_value(); + return isolate()->heap()->undefined_value(); } do { ZoneList<Variable*> stack_list(outer_scope->StackLocalCount(), zone); @@ -890,12 +910,13 @@ class FunctionInfoListener { SetElementNonStrict( scope_info_list, scope_info_length, - Handle<Smi>(Smi::FromInt(context_list[i]->index()))); + Handle<Smi>(Smi::FromInt(context_list[i]->index()), isolate())); scope_info_length++; } SetElementNonStrict(scope_info_list, scope_info_length, - Handle<Object>(HEAP->null_value())); + Handle<Object>(isolate()->heap()->null_value(), + isolate())); scope_info_length++; outer_scope = outer_scope->outer_scope(); @@ -914,20 +935,71 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script, Handle<String> source) { Isolate* isolate = Isolate::Current(); - FunctionInfoListener listener; - Handle<Object> original_source = Handle<Object>(script->source()); + FunctionInfoListener listener(isolate); + Handle<Object> original_source = + Handle<Object>(script->source(), isolate); script->set_source(*source); isolate->set_active_function_info_listener(&listener); - CompileScriptForTracker(isolate, script); + + { + // Creating verbose TryCatch from public API is currently the only way to + // force code save location. We do not use this the object directly. + v8::TryCatch try_catch; + try_catch.SetVerbose(true); + + // A logical 'try' section. + CompileScriptForTracker(isolate, script); + } + + // A logical 'catch' section. 
+ Handle<JSObject> rethrow_exception; + if (isolate->has_pending_exception()) { + Handle<Object> exception(isolate->pending_exception()->ToObjectChecked(), + isolate); + MessageLocation message_location = isolate->GetMessageLocation(); + + isolate->clear_pending_message(); + isolate->clear_pending_exception(); + + // If possible, copy positions from message object to exception object. + if (exception->IsJSObject() && !message_location.script().is_null()) { + rethrow_exception = Handle<JSObject>::cast(exception); + + Factory* factory = isolate->factory(); + Handle<String> start_pos_key = factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("startPosition")); + Handle<String> end_pos_key = factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("endPosition")); + Handle<String> script_obj_key = factory->InternalizeOneByteString( + STATIC_ASCII_VECTOR("scriptObject")); + Handle<Smi> start_pos( + Smi::FromInt(message_location.start_pos()), isolate); + Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate); + Handle<JSValue> script_obj = GetScriptWrapper(message_location.script()); + JSReceiver::SetProperty( + rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode); + JSReceiver::SetProperty( + rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode); + JSReceiver::SetProperty( + rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode); + } + } + + // A logical 'finally' section. isolate->set_active_function_info_listener(NULL); script->set_source(*original_source); - return *(listener.GetResult()); + if (rethrow_exception.is_null()) { + return *(listener.GetResult()); + } else { + isolate->Throw(*rethrow_exception); + return 0; + } } void LiveEdit::WrapSharedFunctionInfos(Handle<JSArray> array) { - HandleScope scope; + HandleScope scope(array->GetIsolate()); int len = GetArrayLength(array); for (int i = 0; i < len; i++) { Handle<SharedFunctionInfo> info( @@ -991,10 +1063,11 @@ static void ReplaceCodeObject(Handle<Code> original, // Since we are not in an incremental marking phase we can write pointers // to code objects (that are never in new space) without worrying about // write barriers. - HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, + Heap* heap = original->GetHeap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "liveedit.cc ReplaceCodeObject"); - ASSERT(!HEAP->InNewSpace(*substitution)); + ASSERT(!heap->InNewSpace(*substitution)); AssertNoAllocation no_allocations_please; @@ -1003,17 +1076,140 @@ static void ReplaceCodeObject(Handle<Code> original, // Iterate over all roots. Stack frames may have pointer into original code, // so temporary replace the pointers with offset numbers // in prologue/epilogue. - HEAP->IterateRoots(&visitor, VISIT_ALL); + heap->IterateRoots(&visitor, VISIT_ALL); // Now iterate over all pointers of all objects, including code_target // implicit pointers. - HeapIterator iterator; + HeapIterator iterator(heap); for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { obj->Iterate(&visitor); } } +// Patch function literals. +// Name 'literals' is a misnomer. Rather it's a cache for complex object +// boilerplates and for a native context. We must clean cached values. +// Additionally we may need to allocate a new array if number of literals +// changed. 
+class LiteralFixer { + public: + static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper, + Handle<SharedFunctionInfo> shared_info, + Isolate* isolate) { + int new_literal_count = compile_info_wrapper->GetLiteralCount(); + if (new_literal_count > 0) { + new_literal_count += JSFunction::kLiteralsPrefixSize; + } + int old_literal_count = shared_info->num_literals(); + + if (old_literal_count == new_literal_count) { + // If literal count didn't change, simply go over all functions + // and clear literal arrays. + ClearValuesVisitor visitor; + IterateJSFunctions(*shared_info, &visitor); + } else { + // When literal count changes, we have to create new array instances. + // Since we cannot create instances when iterating heap, we should first + // collect all functions and fix their literal arrays. + Handle<FixedArray> function_instances = + CollectJSFunctions(shared_info, isolate); + for (int i = 0; i < function_instances->length(); i++) { + Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i))); + Handle<FixedArray> old_literals(fun->literals()); + Handle<FixedArray> new_literals = + isolate->factory()->NewFixedArray(new_literal_count); + if (new_literal_count > 0) { + Handle<Context> native_context; + if (old_literals->length() > + JSFunction::kLiteralNativeContextIndex) { + native_context = Handle<Context>( + JSFunction::NativeContextFromLiterals(fun->literals())); + } else { + native_context = Handle<Context>(fun->context()->native_context()); + } + new_literals->set(JSFunction::kLiteralNativeContextIndex, + *native_context); + } + fun->set_literals(*new_literals); + } + + shared_info->set_num_literals(new_literal_count); + } + } + + private: + // Iterates all function instances in the HEAP that refers to the + // provided shared_info. + template<typename Visitor> + static void IterateJSFunctions(SharedFunctionInfo* shared_info, + Visitor* visitor) { + AssertNoAllocation no_allocations_please; + + HeapIterator iterator(shared_info->GetHeap()); + for (HeapObject* obj = iterator.next(); obj != NULL; + obj = iterator.next()) { + if (obj->IsJSFunction()) { + JSFunction* function = JSFunction::cast(obj); + if (function->shared() == shared_info) { + visitor->visit(function); + } + } + } + } + + // Finds all instances of JSFunction that refers to the provided shared_info + // and returns array with them. + static Handle<FixedArray> CollectJSFunctions( + Handle<SharedFunctionInfo> shared_info, Isolate* isolate) { + CountVisitor count_visitor; + count_visitor.count = 0; + IterateJSFunctions(*shared_info, &count_visitor); + int size = count_visitor.count; + + Handle<FixedArray> result = isolate->factory()->NewFixedArray(size); + if (size > 0) { + CollectVisitor collect_visitor(result); + IterateJSFunctions(*shared_info, &collect_visitor); + } + return result; + } + + class ClearValuesVisitor { + public: + void visit(JSFunction* fun) { + FixedArray* literals = fun->literals(); + int len = literals->length(); + for (int j = JSFunction::kLiteralsPrefixSize; j < len; j++) { + literals->set_undefined(j); + } + } + }; + + class CountVisitor { + public: + void visit(JSFunction* fun) { + count++; + } + int count; + }; + + class CollectVisitor { + public: + explicit CollectVisitor(Handle<FixedArray> output) + : m_output(output), m_pos(0) {} + + void visit(JSFunction* fun) { + m_output->set(m_pos, fun); + m_pos++; + } + private: + Handle<FixedArray> m_output; + int m_pos; + }; +}; + + // Check whether the code is natural function code (not a lazy-compile stub // code). 
static bool IsJSFunctionCode(Code* code) { @@ -1044,23 +1240,15 @@ static bool IsInlined(JSFunction* function, SharedFunctionInfo* candidate) { } -class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor { +class DependentFunctionFilter : public OptimizedFunctionFilter { public: - explicit DependentFunctionsDeoptimizingVisitor( + explicit DependentFunctionFilter( SharedFunctionInfo* function_info) : function_info_(function_info) {} - virtual void EnterContext(Context* context) { - } - - virtual void VisitFunction(JSFunction* function) { - if (function->shared() == function_info_ || - IsInlined(function, function_info_)) { - Deoptimizer::DeoptimizeFunction(function); - } - } - - virtual void LeaveContext(Context* context) { + virtual bool TakeFunction(JSFunction* function) { + return (function->shared() == function_info_ || + IsInlined(function, function_info_)); } private: @@ -1071,18 +1259,19 @@ class DependentFunctionsDeoptimizingVisitor : public OptimizedFunctionVisitor { static void DeoptimizeDependentFunctions(SharedFunctionInfo* function_info) { AssertNoAllocation no_allocation; - DependentFunctionsDeoptimizingVisitor visitor(function_info); - Deoptimizer::VisitAllOptimizedFunctions(&visitor); + DependentFunctionFilter filter(function_info); + Deoptimizer::DeoptimizeAllFunctionsWith(function_info->GetIsolate(), &filter); } MaybeObject* LiveEdit::ReplaceFunctionCode( Handle<JSArray> new_compile_info_array, Handle<JSArray> shared_info_array) { - HandleScope scope; + Isolate* isolate = Isolate::Current(); + HandleScope scope(isolate); if (!SharedInfoWrapper::IsInstance(shared_info_array)) { - return Isolate::Current()->ThrowIllegalOperation(); + return isolate->ThrowIllegalOperation(); } FunctionInfoWrapper compile_info_wrapper(new_compile_info_array); @@ -1090,7 +1279,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode( Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo(); - HEAP->EnsureHeapIsIterable(); + isolate->heap()->EnsureHeapIsIterable(); if (IsJSFunctionCode(shared_info->code())) { Handle<Code> code = compile_info_wrapper.GetFunctionCode(); @@ -1113,32 +1302,34 @@ MaybeObject* LiveEdit::ReplaceFunctionCode( shared_info->set_start_position(start_position); shared_info->set_end_position(end_position); + LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate); + shared_info->set_construct_stub( - Isolate::Current()->builtins()->builtin( - Builtins::kJSConstructStubGeneric)); + isolate->builtins()->builtin(Builtins::kJSConstructStubGeneric)); DeoptimizeDependentFunctions(*shared_info); - Isolate::Current()->compilation_cache()->Remove(shared_info); + isolate->compilation_cache()->Remove(shared_info); - return HEAP->undefined_value(); + return isolate->heap()->undefined_value(); } MaybeObject* LiveEdit::FunctionSourceUpdated( Handle<JSArray> shared_info_array) { - HandleScope scope; + Isolate* isolate = shared_info_array->GetIsolate(); + HandleScope scope(isolate); if (!SharedInfoWrapper::IsInstance(shared_info_array)) { - return Isolate::Current()->ThrowIllegalOperation(); + return isolate->ThrowIllegalOperation(); } SharedInfoWrapper shared_info_wrapper(shared_info_array); Handle<SharedFunctionInfo> shared_info = shared_info_wrapper.GetInfo(); DeoptimizeDependentFunctions(*shared_info); - Isolate::Current()->compilation_cache()->Remove(shared_info); + isolate->compilation_cache()->Remove(shared_info); - return HEAP->undefined_value(); + return isolate->heap()->undefined_value(); } @@ -1287,7 +1478,9 @@ static Handle<Code> 
PatchPositionsInCode( continue; } } - buffer_writer.Write(it.rinfo()); + if (RelocInfo::IsRealRelocMode(rinfo->rmode())) { + buffer_writer.Write(it.rinfo()); + } } } @@ -1371,15 +1564,16 @@ static Handle<Script> CreateScriptCopy(Handle<Script> original) { Object* LiveEdit::ChangeScriptSource(Handle<Script> original_script, Handle<String> new_source, Handle<Object> old_script_name) { + Isolate* isolate = original_script->GetIsolate(); Handle<Object> old_script_object; if (old_script_name->IsString()) { Handle<Script> old_script = CreateScriptCopy(original_script); old_script->set_name(String::cast(*old_script_name)); old_script_object = old_script; - Isolate::Current()->debugger()->OnAfterCompile( + isolate->debugger()->OnAfterCompile( old_script, Debugger::SEND_WHEN_DEBUGGING); } else { - old_script_object = Handle<Object>(HEAP->null_value()); + old_script_object = isolate->factory()->null_value(); } original_script->set_source(*new_source); @@ -1425,6 +1619,7 @@ static bool CheckActivation(Handle<JSArray> shared_info_array, Handle<JSFunction> function( JSFunction::cast(JavaScriptFrame::cast(frame)->function())); + Isolate* isolate = shared_info_array->GetIsolate(); int len = GetArrayLength(shared_info_array); for (int i = 0; i < len; i++) { Object* element = shared_info_array->GetElementNoExceptionThrown(i); @@ -1434,7 +1629,8 @@ static bool CheckActivation(Handle<JSArray> shared_info_array, UnwrapSharedFunctionInfoFromJSValue(jsvalue); if (function->shared() == *shared || IsInlined(*function, *shared)) { - SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status))); + SetElementNonStrict(result, i, Handle<Smi>(Smi::FromInt(status), + isolate)); return true; } } @@ -1487,7 +1683,7 @@ static const char* DropFrames(Vector<StackFrame*> frames, Code* pre_top_frame_code = pre_top_frame->LookupCode(); bool frame_has_padding; if (pre_top_frame_code->is_inline_cache_stub() && - pre_top_frame_code->ic_state() == DEBUG_BREAK) { + pre_top_frame_code->is_debug_break()) { // OK, we can drop inline cache calls. *mode = Debug::FRAME_DROPPED_IN_IC_CALL; frame_has_padding = Debug::FramePaddingLayout::kIsSupported; @@ -1637,7 +1833,7 @@ static const char* DropActivationsInActiveThreadImpl( Isolate* isolate = Isolate::Current(); Debug* debug = isolate->debug(); ZoneScope scope(zone, DELETE_ON_EXIT); - Vector<StackFrame*> frames = CreateStackMap(zone); + Vector<StackFrame*> frames = CreateStackMap(isolate, zone); int top_frame_index = -1; @@ -1739,6 +1935,7 @@ static const char* DropActivationsInActiveThread( return message; } + Isolate* isolate = shared_info_array->GetIsolate(); int array_len = GetArrayLength(shared_info_array); // Replace "blocked on active" with "replaced on active" status. @@ -1746,7 +1943,7 @@ static const char* DropActivationsInActiveThread( if (result->GetElement(i) == Smi::FromInt(LiveEdit::FUNCTION_BLOCKED_ON_ACTIVE_STACK)) { Handle<Object> replaced( - Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK)); + Smi::FromInt(LiveEdit::FUNCTION_REPLACED_ON_ACTIVE_STACK), isolate); SetElementNonStrict(result, i, replaced); } } @@ -1781,16 +1978,17 @@ class InactiveThreadActivationsChecker : public ThreadVisitor { Handle<JSArray> LiveEdit::CheckAndDropActivations( Handle<JSArray> shared_info_array, bool do_drop, Zone* zone) { + Isolate* isolate = shared_info_array->GetIsolate(); int len = GetArrayLength(shared_info_array); - Handle<JSArray> result = FACTORY->NewJSArray(len); + Handle<JSArray> result = isolate->factory()->NewJSArray(len); // Fill the default values. 
for (int i = 0; i < len; i++) { SetElementNonStrict( result, i, - Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH))); + Handle<Smi>(Smi::FromInt(FUNCTION_AVAILABLE_FOR_PATCH), isolate)); } diff --git a/deps/v8/src/liveobjectlist-inl.h b/deps/v8/src/liveobjectlist-inl.h deleted file mode 100644 index 2bc2296e29..0000000000 --- a/deps/v8/src/liveobjectlist-inl.h +++ /dev/null @@ -1,126 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_LIVEOBJECTLIST_INL_H_ -#define V8_LIVEOBJECTLIST_INL_H_ - -#include "v8.h" - -#include "liveobjectlist.h" - -namespace v8 { -namespace internal { - -#ifdef LIVE_OBJECT_LIST - -void LiveObjectList::GCEpilogue() { - if (!NeedLOLProcessing()) return; - GCEpiloguePrivate(); -} - - -void LiveObjectList::GCPrologue() { - if (!NeedLOLProcessing()) return; -#ifdef VERIFY_LOL - if (FLAG_verify_lol) { - Verify(); - } -#endif -} - - -void LiveObjectList::IterateElements(ObjectVisitor* v) { - if (!NeedLOLProcessing()) return; - IterateElementsPrivate(v); -} - - -void LiveObjectList::ProcessNonLive(HeapObject* obj) { - // Only do work if we have at least one list to process. - if (last()) DoProcessNonLive(obj); -} - - -void LiveObjectList::UpdateReferencesForScavengeGC() { - if (LiveObjectList::NeedLOLProcessing()) { - UpdateLiveObjectListVisitor update_visitor; - LiveObjectList::IterateElements(&update_visitor); - } -} - - -LiveObjectList* LiveObjectList::FindLolForId(int id, - LiveObjectList* start_lol) { - if (id != 0) { - LiveObjectList* lol = start_lol; - while (lol != NULL) { - if (lol->id() == id) { - return lol; - } - lol = lol->prev_; - } - } - return NULL; -} - - -// Iterates the elements in every lol and returns the one that matches the -// specified key. If no matching element is found, then it returns NULL. 
-template <typename T> -inline LiveObjectList::Element* -LiveObjectList::FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key) { - LiveObjectList* lol = last(); - while (lol != NULL) { - Element* elements = lol->elements_; - for (int i = 0; i < lol->obj_count_; i++) { - Element* element = &elements[i]; - if (GetValue(element) == key) { - return element; - } - } - lol = lol->prev_; - } - return NULL; -} - - -inline int LiveObjectList::GetElementId(LiveObjectList::Element* element) { - return element->id_; -} - - -inline HeapObject* -LiveObjectList::GetElementObj(LiveObjectList::Element* element) { - return element->obj_; -} - -#endif // LIVE_OBJECT_LIST - -} } // namespace v8::internal - -#endif // V8_LIVEOBJECTLIST_INL_H_ - diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc deleted file mode 100644 index 6b89cf6839..0000000000 --- a/deps/v8/src/liveobjectlist.cc +++ /dev/null @@ -1,2631 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
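The FindElementFor<T> template that closes the removed liveobjectlist-inl.h above is a small generic lookup: it walks the chain of captured lists from newest to oldest and compares a key extracted from each element by a caller-supplied getter, which is how GetObj() (lookup by id) and GetObjId() (lookup by object pointer) later share a single search routine. A standalone sketch of the same idea, using simplified stand-in types rather than the real LiveObjectList internals:

    #include <cstdio>

    // Simplified stand-ins for LiveObjectList::Element and the list chain.
    struct Element { int id_; const void* obj_; };
    struct Lol {
      Element* elements_;
      int obj_count_;
      Lol* prev_;  // next-older captured list
    };

    // Generic search: GetValue extracts the key each element is compared on.
    template <typename T>
    Element* FindElementFor(Lol* newest, T (*GetValue)(Element*), T key) {
      for (Lol* lol = newest; lol != nullptr; lol = lol->prev_) {
        for (int i = 0; i < lol->obj_count_; i++) {
          Element* element = &lol->elements_[i];
          if (GetValue(element) == key) return element;
        }
      }
      return nullptr;
    }

    static int GetElementId(Element* element) { return element->id_; }
    static const void* GetElementObj(Element* element) { return element->obj_; }

    int main() {
      int dummy_a = 0, dummy_b = 0;
      Element elements[] = {{1, &dummy_a}, {2, &dummy_b}};
      Lol list = {elements, 2, nullptr};
      // Look up by element id (as GetObj(obj_id) does) ...
      Element* by_id = FindElementFor<int>(&list, GetElementId, 2);
      // ... or by object pointer (as GetObjId(obj) does).
      Element* by_obj = FindElementFor<const void*>(&list, GetElementObj, &dummy_a);
      std::printf("%d %d\n", by_id ? by_id->id_ : 0, by_obj ? by_obj->id_ : 0);
      return 0;
    }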
- -#ifdef LIVE_OBJECT_LIST - -#include <ctype.h> -#include <stdlib.h> - -#include "v8.h" - -#include "checks.h" -#include "global-handles.h" -#include "heap.h" -#include "inspector.h" -#include "isolate.h" -#include "list-inl.h" -#include "liveobjectlist-inl.h" -#include "string-stream.h" -#include "v8utils.h" -#include "v8conversions.h" - -namespace v8 { -namespace internal { - - -typedef int (*RawComparer)(const void*, const void*); - - -#ifdef CHECK_ALL_OBJECT_TYPES - -#define DEBUG_LIVE_OBJECT_TYPES(v) \ - v(Smi, "unexpected: Smi") \ - \ - v(CodeCache, "unexpected: CodeCache") \ - v(BreakPointInfo, "unexpected: BreakPointInfo") \ - v(DebugInfo, "unexpected: DebugInfo") \ - v(TypeSwitchInfo, "unexpected: TypeSwitchInfo") \ - v(SignatureInfo, "unexpected: SignatureInfo") \ - v(Script, "unexpected: Script") \ - v(ObjectTemplateInfo, "unexpected: ObjectTemplateInfo") \ - v(FunctionTemplateInfo, "unexpected: FunctionTemplateInfo") \ - v(CallHandlerInfo, "unexpected: CallHandlerInfo") \ - v(InterceptorInfo, "unexpected: InterceptorInfo") \ - v(AccessCheckInfo, "unexpected: AccessCheckInfo") \ - v(AccessorInfo, "unexpected: AccessorInfo") \ - v(ExternalTwoByteString, "unexpected: ExternalTwoByteString") \ - v(ExternalAsciiString, "unexpected: ExternalAsciiString") \ - v(ExternalString, "unexpected: ExternalString") \ - v(SeqTwoByteString, "unexpected: SeqTwoByteString") \ - v(SeqAsciiString, "unexpected: SeqAsciiString") \ - v(SeqString, "unexpected: SeqString") \ - v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \ - v(NativeContext, "unexpected: NativeContext") \ - v(MapCache, "unexpected: MapCache") \ - v(CodeCacheHashTable, "unexpected: CodeCacheHashTable") \ - v(CompilationCacheTable, "unexpected: CompilationCacheTable") \ - v(SymbolTable, "unexpected: SymbolTable") \ - v(Dictionary, "unexpected: Dictionary") \ - v(HashTable, "unexpected: HashTable") \ - v(DescriptorArray, "unexpected: DescriptorArray") \ - v(ExternalFloatArray, "unexpected: ExternalFloatArray") \ - v(ExternalUnsignedIntArray, "unexpected: ExternalUnsignedIntArray") \ - v(ExternalIntArray, "unexpected: ExternalIntArray") \ - v(ExternalUnsignedShortArray, "unexpected: ExternalUnsignedShortArray") \ - v(ExternalShortArray, "unexpected: ExternalShortArray") \ - v(ExternalUnsignedByteArray, "unexpected: ExternalUnsignedByteArray") \ - v(ExternalByteArray, "unexpected: ExternalByteArray") \ - v(JSValue, "unexpected: JSValue") - -#else -#define DEBUG_LIVE_OBJECT_TYPES(v) -#endif - - -#define FOR_EACH_LIVE_OBJECT_TYPE(v) \ - DEBUG_LIVE_OBJECT_TYPES(v) \ - \ - v(JSArray, "JSArray") \ - v(JSRegExp, "JSRegExp") \ - v(JSFunction, "JSFunction") \ - v(JSGlobalObject, "JSGlobal") \ - v(JSBuiltinsObject, "JSBuiltins") \ - v(GlobalObject, "Global") \ - v(JSGlobalProxy, "JSGlobalProxy") \ - v(JSObject, "JSObject") \ - \ - v(Context, "meta: Context") \ - v(ByteArray, "meta: ByteArray") \ - v(ExternalPixelArray, "meta: PixelArray") \ - v(ExternalArray, "meta: ExternalArray") \ - v(FixedArray, "meta: FixedArray") \ - v(String, "String") \ - v(HeapNumber, "HeapNumber") \ - \ - v(Code, "meta: Code") \ - v(Map, "meta: Map") \ - v(Oddball, "Oddball") \ - v(Foreign, "meta: Foreign") \ - v(SharedFunctionInfo, "meta: SharedFunctionInfo") \ - v(Struct, "meta: Struct") \ - \ - v(HeapObject, "HeapObject") - - -enum /* LiveObjectType */ { -#define DECLARE_OBJECT_TYPE_ENUM(type, name) kType##type, - FOR_EACH_LIVE_OBJECT_TYPE(DECLARE_OBJECT_TYPE_ENUM) - kInvalidLiveObjType, - kNumberOfTypes -#undef DECLARE_OBJECT_TYPE_ENUM -}; - - 
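The FOR_EACH_LIVE_OBJECT_TYPE list above is an X-macro: the removed code expands it once to build the kType* enum here and again inside GetObjectTypeDesc() just below to build a parallel name table, so the type tags and their printable names cannot drift apart. A minimal, self-contained sketch of that pattern follows; the two-entry demo list and all Demo* names are made up for illustration, the real list is the one above.

    #include <cassert>
    #include <cstdio>

    // Hypothetical two-entry stand-in for FOR_EACH_LIVE_OBJECT_TYPE.
    #define FOR_EACH_DEMO_TYPE(v) \
      v(JSArray, "JSArray")       \
      v(String, "String")

    // First expansion: an enum tag per entry.
    enum DemoType {
    #define DECLARE_ENUM(type, name) kDemo##type,
      FOR_EACH_DEMO_TYPE(DECLARE_ENUM)
    #undef DECLARE_ENUM
      kDemoInvalid,
      kDemoNumberOfTypes
    };

    // Second expansion: a name table indexed by the same enum.
    static const char* DemoTypeName(DemoType type) {
      static const char* const names[kDemoNumberOfTypes] = {
    #define DECLARE_NAME(type, name) name,
        FOR_EACH_DEMO_TYPE(DECLARE_NAME)
    #undef DECLARE_NAME
        "invalid"
      };
      assert(type < kDemoNumberOfTypes);
      return names[type];
    }

    int main() {
      std::printf("%s\n", DemoTypeName(kDemoJSArray));  // prints "JSArray"
      return 0;
    }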
-LiveObjectType GetObjectType(HeapObject* heap_obj) { - // TODO(mlam): investigate usint Map::instance_type() instead. -#define CHECK_FOR_OBJECT_TYPE(type, name) \ - if (heap_obj->Is##type()) return kType##type; - FOR_EACH_LIVE_OBJECT_TYPE(CHECK_FOR_OBJECT_TYPE) -#undef CHECK_FOR_OBJECT_TYPE - - UNREACHABLE(); - return kInvalidLiveObjType; -} - - -inline const char* GetObjectTypeDesc(LiveObjectType type) { - static const char* const name[kNumberOfTypes] = { - #define DEFINE_OBJECT_TYPE_NAME(type, name) name, - FOR_EACH_LIVE_OBJECT_TYPE(DEFINE_OBJECT_TYPE_NAME) - "invalid" - #undef DEFINE_OBJECT_TYPE_NAME - }; - ASSERT(type < kNumberOfTypes); - return name[type]; -} - - -const char* GetObjectTypeDesc(HeapObject* heap_obj) { - LiveObjectType type = GetObjectType(heap_obj); - return GetObjectTypeDesc(type); -} - - -bool IsOfType(LiveObjectType type, HeapObject* obj) { - // Note: there are types that are more general (e.g. JSObject) that would - // have passed the Is##type_() test for more specialized types (e.g. - // JSFunction). If we find a more specialized match but we're looking for - // the general type, then we should reject the ones that matches the - // specialized type. -#define CHECK_OBJECT_TYPE(type_, name) \ - if (obj->Is##type_()) return (type == kType##type_); - - FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE) -#undef CHECK_OBJECT_TYPE - - return false; -} - - -const AllocationSpace kInvalidSpace = static_cast<AllocationSpace>(-1); - -static AllocationSpace FindSpaceFor(String* space_str) { - SmartArrayPointer<char> s = - space_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - - const char* key_str = *s; - switch (key_str[0]) { - case 'c': - if (strcmp(key_str, "cell") == 0) return CELL_SPACE; - if (strcmp(key_str, "code") == 0) return CODE_SPACE; - break; - case 'l': - if (strcmp(key_str, "lo") == 0) return LO_SPACE; - break; - case 'm': - if (strcmp(key_str, "map") == 0) return MAP_SPACE; - break; - case 'n': - if (strcmp(key_str, "new") == 0) return NEW_SPACE; - break; - case 'o': - if (strcmp(key_str, "old-pointer") == 0) return OLD_POINTER_SPACE; - if (strcmp(key_str, "old-data") == 0) return OLD_DATA_SPACE; - break; - } - return kInvalidSpace; -} - - -static bool InSpace(AllocationSpace space, HeapObject* heap_obj) { - Heap* heap = ISOLATE->heap(); - if (space != LO_SPACE) { - return heap->InSpace(heap_obj, space); - } - - // This is an optimization to speed up the check for an object in the LO - // space by exclusion because we know that all object pointers passed in - // here are guaranteed to be in the heap. Hence, it is safe to infer - // using an exclusion test. - // Note: calling Heap::InSpace(heap_obj, LO_SPACE) is too slow for our - // filters. 
- int first_space = static_cast<int>(FIRST_SPACE); - int last_space = static_cast<int>(LO_SPACE); - for (int sp = first_space; sp < last_space; sp++) { - if (heap->InSpace(heap_obj, static_cast<AllocationSpace>(sp))) { - return false; - } - } - SLOW_ASSERT(heap->InSpace(heap_obj, LO_SPACE)); - return true; -} - - -static LiveObjectType FindTypeFor(String* type_str) { - SmartArrayPointer<char> s = - type_str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - -#define CHECK_OBJECT_TYPE(type_, name) { \ - const char* type_desc = GetObjectTypeDesc(kType##type_); \ - const char* key_str = *s; \ - if (strstr(type_desc, key_str) != NULL) return kType##type_; \ - } - FOR_EACH_LIVE_OBJECT_TYPE(CHECK_OBJECT_TYPE) -#undef CHECK_OBJECT_TYPE - - return kInvalidLiveObjType; -} - - -class LolFilter { - public: - explicit LolFilter(Handle<JSObject> filter_obj); - - inline bool is_active() const { return is_active_; } - inline bool Matches(HeapObject* obj) { - return !is_active() || MatchesSlow(obj); - } - - private: - void InitTypeFilter(Handle<JSObject> filter_obj); - void InitSpaceFilter(Handle<JSObject> filter_obj); - void InitPropertyFilter(Handle<JSObject> filter_obj); - bool MatchesSlow(HeapObject* obj); - - bool is_active_; - LiveObjectType type_; - AllocationSpace space_; - Handle<String> prop_; -}; - - -LolFilter::LolFilter(Handle<JSObject> filter_obj) - : is_active_(false), - type_(kInvalidLiveObjType), - space_(kInvalidSpace), - prop_() { - if (filter_obj.is_null()) return; - - InitTypeFilter(filter_obj); - InitSpaceFilter(filter_obj); - InitPropertyFilter(filter_obj); -} - - -void LolFilter::InitTypeFilter(Handle<JSObject> filter_obj) { - Handle<String> type_sym = FACTORY->LookupAsciiSymbol("type"); - MaybeObject* maybe_result = filter_obj->GetProperty(*type_sym); - Object* type_obj; - if (maybe_result->ToObject(&type_obj)) { - if (type_obj->IsString()) { - String* type_str = String::cast(type_obj); - type_ = FindTypeFor(type_str); - if (type_ != kInvalidLiveObjType) { - is_active_ = true; - } - } - } -} - - -void LolFilter::InitSpaceFilter(Handle<JSObject> filter_obj) { - Handle<String> space_sym = FACTORY->LookupAsciiSymbol("space"); - MaybeObject* maybe_result = filter_obj->GetProperty(*space_sym); - Object* space_obj; - if (maybe_result->ToObject(&space_obj)) { - if (space_obj->IsString()) { - String* space_str = String::cast(space_obj); - space_ = FindSpaceFor(space_str); - if (space_ != kInvalidSpace) { - is_active_ = true; - } - } - } -} - - -void LolFilter::InitPropertyFilter(Handle<JSObject> filter_obj) { - Handle<String> prop_sym = FACTORY->LookupAsciiSymbol("prop"); - MaybeObject* maybe_result = filter_obj->GetProperty(*prop_sym); - Object* prop_obj; - if (maybe_result->ToObject(&prop_obj)) { - if (prop_obj->IsString()) { - prop_ = Handle<String>(String::cast(prop_obj)); - is_active_ = true; - } - } -} - - -bool LolFilter::MatchesSlow(HeapObject* obj) { - if ((type_ != kInvalidLiveObjType) && !IsOfType(type_, obj)) { - return false; // Fail because obj is not of the type of interest. - } - if ((space_ != kInvalidSpace) && !InSpace(space_, obj)) { - return false; // Fail because obj is not in the space of interest. - } - if (!prop_.is_null() && obj->IsJSObject()) { - LookupResult result; - obj->Lookup(*prop_, &result); - if (!result.IsProperty()) { - return false; // Fail because obj does not have the property of interest. 
- } - } - return true; -} - - -class LolIterator { - public: - LolIterator(LiveObjectList* older, LiveObjectList* newer) - : older_(older), - newer_(newer), - curr_(0), - elements_(0), - count_(0), - index_(0) { } - - inline void Init() { - SetCurrent(newer_); - // If the elements_ list is empty, then move on to the next list as long - // as we're not at the last list (indicated by done()). - while ((elements_ == NULL) && !Done()) { - SetCurrent(curr_->prev_); - } - } - - inline bool Done() const { - return (curr_ == older_); - } - - // Object level iteration. - inline void Next() { - index_++; - if (index_ >= count_) { - // Iterate backwards until we get to the oldest list. - while (!Done()) { - SetCurrent(curr_->prev_); - // If we have elements to process, we're good to go. - if (elements_ != NULL) break; - - // Else, we should advance to the next older list. - } - } - } - - inline int Id() const { - return elements_[index_].id_; - } - inline HeapObject* Obj() const { - return elements_[index_].obj_; - } - - inline int LolObjCount() const { - if (curr_ != NULL) return curr_->obj_count_; - return 0; - } - - protected: - inline void SetCurrent(LiveObjectList* new_curr) { - curr_ = new_curr; - if (curr_ != NULL) { - elements_ = curr_->elements_; - count_ = curr_->obj_count_; - index_ = 0; - } - } - - LiveObjectList* older_; - LiveObjectList* newer_; - LiveObjectList* curr_; - LiveObjectList::Element* elements_; - int count_; - int index_; -}; - - -class LolForwardIterator : public LolIterator { - public: - LolForwardIterator(LiveObjectList* first, LiveObjectList* last) - : LolIterator(first, last) { - } - - inline void Init() { - SetCurrent(older_); - // If the elements_ list is empty, then move on to the next list as long - // as we're not at the last list (indicated by Done()). - while ((elements_ == NULL) && !Done()) { - SetCurrent(curr_->next_); - } - } - - inline bool Done() const { - return (curr_ == newer_); - } - - // Object level iteration. - inline void Next() { - index_++; - if (index_ >= count_) { - // Done with current list. Move on to the next. - while (!Done()) { // If not at the last list already, ... - SetCurrent(curr_->next_); - // If we have elements to process, we're good to go. - if (elements_ != NULL) break; - - // Else, we should advance to the next list. - } - } - } -}; - - -// Minimizes the white space in a string. Tabs and newlines are replaced -// with a space where appropriate. -static int CompactString(char* str) { - char* src = str; - char* dst = str; - char prev_ch = 0; - while (*dst != '\0') { - char ch = *src++; - // We will treat non-ASCII chars as '?'. - if ((ch & 0x80) != 0) { - ch = '?'; - } - // Compact contiguous whitespace chars into a single ' '. - if (isspace(ch)) { - if (prev_ch != ' ') *dst++ = ' '; - prev_ch = ' '; - continue; - } - *dst++ = ch; - prev_ch = ch; - } - return (dst - str); -} - - -// Generates a custom description based on the specific type of -// object we're looking at. We only generate specialized -// descriptions where we can. In all other cases, we emit the -// generic info. 
-static void GenerateObjectDesc(HeapObject* obj, - char* buffer, - int buffer_size) { - Vector<char> buffer_v(buffer, buffer_size); - ASSERT(obj != NULL); - if (obj->IsJSArray()) { - JSArray* jsarray = JSArray::cast(obj); - double length = jsarray->length()->Number(); - OS::SNPrintF(buffer_v, - "%p <%s> len %g", - reinterpret_cast<void*>(obj), - GetObjectTypeDesc(obj), - length); - - } else if (obj->IsString()) { - String* str = String::cast(obj); - // Only grab up to 160 chars in case they are double byte. - // We'll only dump 80 of them after we compact them. - const int kMaxCharToDump = 80; - const int kMaxBufferSize = kMaxCharToDump * 2; - SmartArrayPointer<char> str_sp = str->ToCString(DISALLOW_NULLS, - ROBUST_STRING_TRAVERSAL, - 0, - kMaxBufferSize); - char* str_cstr = *str_sp; - int length = CompactString(str_cstr); - OS::SNPrintF(buffer_v, - "%p <%s> '%.80s%s'", - reinterpret_cast<void*>(obj), - GetObjectTypeDesc(obj), - str_cstr, - (length > kMaxCharToDump) ? "..." : ""); - - } else if (obj->IsJSFunction() || obj->IsSharedFunctionInfo()) { - SharedFunctionInfo* sinfo; - if (obj->IsJSFunction()) { - JSFunction* func = JSFunction::cast(obj); - sinfo = func->shared(); - } else { - sinfo = SharedFunctionInfo::cast(obj); - } - - String* name = sinfo->DebugName(); - SmartArrayPointer<char> name_sp = - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - char* name_cstr = *name_sp; - - HeapStringAllocator string_allocator; - StringStream stream(&string_allocator); - sinfo->SourceCodePrint(&stream, 50); - SmartArrayPointer<const char> source_sp = stream.ToCString(); - const char* source_cstr = *source_sp; - - OS::SNPrintF(buffer_v, - "%p <%s> '%s' %s", - reinterpret_cast<void*>(obj), - GetObjectTypeDesc(obj), - name_cstr, - source_cstr); - - } else if (obj->IsFixedArray()) { - FixedArray* fixed = FixedArray::cast(obj); - - OS::SNPrintF(buffer_v, - "%p <%s> len %d", - reinterpret_cast<void*>(obj), - GetObjectTypeDesc(obj), - fixed->length()); - - } else { - OS::SNPrintF(buffer_v, - "%p <%s>", - reinterpret_cast<void*>(obj), - GetObjectTypeDesc(obj)); - } -} - - -// Utility function for filling in a line of detail in a verbose dump. 
-static bool AddObjDetail(Handle<FixedArray> arr, - int index, - int obj_id, - Handle<HeapObject> target, - const char* desc_str, - Handle<String> id_sym, - Handle<String> desc_sym, - Handle<String> size_sym, - Handle<JSObject> detail, - Handle<String> desc, - Handle<Object> error) { - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - detail = factory->NewJSObject(isolate->object_function()); - if (detail->IsFailure()) { - error = detail; - return false; - } - - int size = 0; - char buffer[512]; - if (desc_str == NULL) { - ASSERT(!target.is_null()); - HeapObject* obj = *target; - GenerateObjectDesc(obj, buffer, sizeof(buffer)); - desc_str = buffer; - size = obj->Size(); - } - desc = factory->NewStringFromAscii(CStrVector(desc_str)); - if (desc->IsFailure()) { - error = desc; - return false; - } - - { MaybeObject* maybe_result = detail->SetProperty(*id_sym, - Smi::FromInt(obj_id), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return false; - } - { MaybeObject* maybe_result = detail->SetProperty(*desc_sym, - *desc, - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return false; - } - { MaybeObject* maybe_result = detail->SetProperty(*size_sym, - Smi::FromInt(size), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return false; - } - - arr->set(index, *detail); - return true; -} - - -class DumpWriter { - public: - virtual ~DumpWriter() {} - - virtual void ComputeTotalCountAndSize(LolFilter* filter, - int* count, - int* size) = 0; - virtual bool Write(Handle<FixedArray> elements_arr, - int start, - int dump_limit, - LolFilter* filter, - Handle<Object> error) = 0; -}; - - -class LolDumpWriter: public DumpWriter { - public: - LolDumpWriter(LiveObjectList* older, LiveObjectList* newer) - : older_(older), newer_(newer) { - } - - void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) { - *count = 0; - *size = 0; - - LolIterator it(older_, newer_); - for (it.Init(); !it.Done(); it.Next()) { - HeapObject* heap_obj = it.Obj(); - if (!filter->Matches(heap_obj)) { - continue; - } - - *size += heap_obj->Size(); - (*count)++; - } - } - - bool Write(Handle<FixedArray> elements_arr, - int start, - int dump_limit, - LolFilter* filter, - Handle<Object> error) { - // The lols are listed in latest to earliest. We want to dump from - // earliest to latest. So, compute the last element to start with. - int index = 0; - int count = 0; - - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - - // Prefetch some needed symbols. - Handle<String> id_sym = factory->LookupAsciiSymbol("id"); - Handle<String> desc_sym = factory->LookupAsciiSymbol("desc"); - Handle<String> size_sym = factory->LookupAsciiSymbol("size"); - - // Fill the array with the lol object details. - Handle<JSObject> detail; - Handle<String> desc; - Handle<HeapObject> target; - - LiveObjectList* first_lol = (older_ != NULL) ? - older_->next_ : LiveObjectList::first_; - LiveObjectList* last_lol = (newer_ != NULL) ? newer_->next_ : NULL; - - LolForwardIterator it(first_lol, last_lol); - for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) { - HeapObject* heap_obj = it.Obj(); - - // Skip objects that have been filtered out. - if (!filter->Matches(heap_obj)) { - continue; - } - - // Only report objects that are in the section of interest. 
- if (count >= start) { - target = Handle<HeapObject>(heap_obj); - bool success = AddObjDetail(elements_arr, - index++, - it.Id(), - target, - NULL, - id_sym, - desc_sym, - size_sym, - detail, - desc, - error); - if (!success) return false; - } - count++; - } - return true; - } - - private: - LiveObjectList* older_; - LiveObjectList* newer_; -}; - - -class RetainersDumpWriter: public DumpWriter { - public: - RetainersDumpWriter(Handle<HeapObject> target, - Handle<JSObject> instance_filter, - Handle<JSFunction> args_function) - : target_(target), - instance_filter_(instance_filter), - args_function_(args_function) { - } - - void ComputeTotalCountAndSize(LolFilter* filter, int* count, int* size) { - Handle<FixedArray> retainers_arr; - Handle<Object> error; - - *size = -1; - LiveObjectList::GetRetainers(target_, - instance_filter_, - retainers_arr, - 0, - Smi::kMaxValue, - count, - filter, - NULL, - *args_function_, - error); - } - - bool Write(Handle<FixedArray> elements_arr, - int start, - int dump_limit, - LolFilter* filter, - Handle<Object> error) { - int dummy; - int count; - - // Fill the retainer objects. - count = LiveObjectList::GetRetainers(target_, - instance_filter_, - elements_arr, - start, - dump_limit, - &dummy, - filter, - NULL, - *args_function_, - error); - if (count < 0) { - return false; - } - return true; - } - - private: - Handle<HeapObject> target_; - Handle<JSObject> instance_filter_; - Handle<JSFunction> args_function_; -}; - - -class LiveObjectSummary { - public: - explicit LiveObjectSummary(LolFilter* filter) - : total_count_(0), - total_size_(0), - found_root_(false), - found_weak_root_(false), - filter_(filter) { - memset(counts_, 0, sizeof(counts_[0]) * kNumberOfEntries); - memset(sizes_, 0, sizeof(sizes_[0]) * kNumberOfEntries); - } - - void Add(HeapObject* heap_obj) { - int size = heap_obj->Size(); - LiveObjectType type = GetObjectType(heap_obj); - ASSERT(type != kInvalidLiveObjType); - counts_[type]++; - sizes_[type] += size; - total_count_++; - total_size_ += size; - } - - void set_found_root() { found_root_ = true; } - void set_found_weak_root() { found_weak_root_ = true; } - - inline int Count(LiveObjectType type) { - return counts_[type]; - } - inline int Size(LiveObjectType type) { - return sizes_[type]; - } - inline int total_count() { - return total_count_; - } - inline int total_size() { - return total_size_; - } - inline bool found_root() { - return found_root_; - } - inline bool found_weak_root() { - return found_weak_root_; - } - int GetNumberOfEntries() { - int entries = 0; - for (int i = 0; i < kNumberOfEntries; i++) { - if (counts_[i]) entries++; - } - return entries; - } - - inline LolFilter* filter() { return filter_; } - - static const int kNumberOfEntries = kNumberOfTypes; - - private: - int counts_[kNumberOfEntries]; - int sizes_[kNumberOfEntries]; - int total_count_; - int total_size_; - bool found_root_; - bool found_weak_root_; - - LolFilter* filter_; -}; - - -// Abstraction for a summary writer. -class SummaryWriter { - public: - virtual ~SummaryWriter() {} - virtual void Write(LiveObjectSummary* summary) = 0; -}; - - -// A summary writer for filling in a summary of lol lists and diffs. -class LolSummaryWriter: public SummaryWriter { - public: - LolSummaryWriter(LiveObjectList* older_lol, - LiveObjectList* newer_lol) - : older_(older_lol), newer_(newer_lol) { - } - - void Write(LiveObjectSummary* summary) { - LolFilter* filter = summary->filter(); - - // Fill the summary with the lol object details. 
- LolIterator it(older_, newer_); - for (it.Init(); !it.Done(); it.Next()) { - HeapObject* heap_obj = it.Obj(); - if (!filter->Matches(heap_obj)) { - continue; - } - summary->Add(heap_obj); - } - } - - private: - LiveObjectList* older_; - LiveObjectList* newer_; -}; - - -// A summary writer for filling in a retainers list. -class RetainersSummaryWriter: public SummaryWriter { - public: - RetainersSummaryWriter(Handle<HeapObject> target, - Handle<JSObject> instance_filter, - Handle<JSFunction> args_function) - : target_(target), - instance_filter_(instance_filter), - args_function_(args_function) { - } - - void Write(LiveObjectSummary* summary) { - Handle<FixedArray> retainers_arr; - Handle<Object> error; - int dummy_total_count; - LiveObjectList::GetRetainers(target_, - instance_filter_, - retainers_arr, - 0, - Smi::kMaxValue, - &dummy_total_count, - summary->filter(), - summary, - *args_function_, - error); - } - - private: - Handle<HeapObject> target_; - Handle<JSObject> instance_filter_; - Handle<JSFunction> args_function_; -}; - - -uint32_t LiveObjectList::next_element_id_ = 1; -int LiveObjectList::list_count_ = 0; -int LiveObjectList::last_id_ = 0; -LiveObjectList* LiveObjectList::first_ = NULL; -LiveObjectList* LiveObjectList::last_ = NULL; - - -LiveObjectList::LiveObjectList(LiveObjectList* prev, int capacity) - : prev_(prev), - next_(NULL), - capacity_(capacity), - obj_count_(0) { - elements_ = NewArray<Element>(capacity); - id_ = ++last_id_; - - list_count_++; -} - - -LiveObjectList::~LiveObjectList() { - DeleteArray<Element>(elements_); - delete prev_; -} - - -int LiveObjectList::GetTotalObjCountAndSize(int* size_p) { - int size = 0; - int count = 0; - LiveObjectList* lol = this; - do { - // Only compute total size if requested i.e. when size_p is not null. - if (size_p != NULL) { - Element* elements = lol->elements_; - for (int i = 0; i < lol->obj_count_; i++) { - HeapObject* heap_obj = elements[i].obj_; - size += heap_obj->Size(); - } - } - count += lol->obj_count_; - lol = lol->prev_; - } while (lol != NULL); - - if (size_p != NULL) { - *size_p = size; - } - return count; -} - - -// Adds an object to the lol. -// Returns true if successful, else returns false. -bool LiveObjectList::Add(HeapObject* obj) { - // If the object is already accounted for in the prev list which we inherit - // from, then no need to add it to this list. - if ((prev() != NULL) && (prev()->Find(obj) != NULL)) { - return true; - } - ASSERT(obj_count_ <= capacity_); - if (obj_count_ == capacity_) { - // The heap must have grown and we have more objects than capacity to store - // them. - return false; // Fail this addition. - } - Element& element = elements_[obj_count_++]; - element.id_ = next_element_id_++; - element.obj_ = obj; - return true; -} - - -// Comparator used for sorting and searching the lol. -int LiveObjectList::CompareElement(const Element* a, const Element* b) { - const HeapObject* obj1 = a->obj_; - const HeapObject* obj2 = b->obj_; - // For lol elements, it doesn't matter which comes first if 2 elements point - // to the same object (which gets culled later). Hence, we only care about - // the the greater than / less than relationships. - return (obj1 > obj2) ? 1 : (obj1 == obj2) ? 0 : -1; -} - - -// Looks for the specified object in the lol, and returns its element if found. 
-LiveObjectList::Element* LiveObjectList::Find(HeapObject* obj) { - LiveObjectList* lol = this; - Element key; - Element* result = NULL; - - key.obj_ = obj; - // Iterate through the chain of lol's to look for the object. - while ((result == NULL) && (lol != NULL)) { - result = reinterpret_cast<Element*>( - bsearch(&key, lol->elements_, lol->obj_count_, - sizeof(Element), - reinterpret_cast<RawComparer>(CompareElement))); - lol = lol->prev_; - } - return result; -} - - -// "Nullifies" (convert the HeapObject* into an SMI) so that it will get cleaned -// up in the GCEpilogue, while preserving the sort order of the lol. -// NOTE: the lols need to be already sorted before NullifyMostRecent() is -// called. -void LiveObjectList::NullifyMostRecent(HeapObject* obj) { - LiveObjectList* lol = last(); - Element key; - Element* result = NULL; - - key.obj_ = obj; - // Iterate through the chain of lol's to look for the object. - while (lol != NULL) { - result = reinterpret_cast<Element*>( - bsearch(&key, lol->elements_, lol->obj_count_, - sizeof(Element), - reinterpret_cast<RawComparer>(CompareElement))); - if (result != NULL) { - // Since there may be more than one (we are nullifying dup's after all), - // find the first in the current lol, and nullify that. The lol should - // be sorted already to make this easy (see the use of SortAll()). - int i = result - lol->elements_; - - // NOTE: we sort the lol in increasing order. So, if an object has been - // "nullified" (its lowest bit will be cleared to make it look like an - // SMI), it would/should show up before the equivalent dups that have not - // yet been "nullified". Hence, we should be searching backwards for the - // first occurence of a matching object and nullify that instance. This - // will ensure that we preserve the expected sorting order. - for (i--; i > 0; i--) { - Element* element = &lol->elements_[i]; - HeapObject* curr_obj = element->obj_; - if (curr_obj != obj) { - break; // No more matches. Let's move on. - } - result = element; // Let this earlier match be the result. - } - - // Nullify the object. - NullifyNonLivePointer(&result->obj_); - return; - } - lol = lol->prev_; - } -} - - -// Sorts the lol. -void LiveObjectList::Sort() { - if (obj_count_ > 0) { - Vector<Element> elements_v(elements_, obj_count_); - elements_v.Sort(CompareElement); - } -} - - -// Sorts all captured lols starting from the latest. -void LiveObjectList::SortAll() { - LiveObjectList* lol = last(); - while (lol != NULL) { - lol->Sort(); - lol = lol->prev_; - } -} - - -// Counts the number of objects in the heap. -static int CountHeapObjects() { - int count = 0; - // Iterate over all the heap spaces and count the number of objects. - HeapIterator iterator; - HeapObject* heap_obj = NULL; - while ((heap_obj = iterator.next()) != NULL) { - count++; - } - return count; -} - - -// Captures a current snapshot of all objects in the heap. -MaybeObject* LiveObjectList::Capture() { - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - HandleScope scope(isolate); - - // Count the number of objects in the heap. - int total_count = CountHeapObjects(); - int count = total_count; - int size = 0; - - LiveObjectList* last_lol = last(); - if (last_lol != NULL) { - count -= last_lol->TotalObjCount(); - } - - LiveObjectList* lol; - - // Create a lol large enough to track all the objects. - lol = new LiveObjectList(last_lol, count); - if (lol == NULL) { - return NULL; // No memory to proceed. 
- } - - // The HeapIterator needs to be in its own scope because it disables - // allocation, and we need allocate below. - { - // Iterate over all the heap spaces and add the objects. - HeapIterator iterator; - HeapObject* heap_obj = NULL; - bool failed = false; - while (!failed && (heap_obj = iterator.next()) != NULL) { - failed = !lol->Add(heap_obj); - size += heap_obj->Size(); - } - ASSERT(!failed); - - lol->Sort(); - - // Add the current lol to the list of lols. - if (last_ != NULL) { - last_->next_ = lol; - } else { - first_ = lol; - } - last_ = lol; - -#ifdef VERIFY_LOL - if (FLAG_verify_lol) { - Verify(true); - } -#endif - } - - Handle<String> id_sym = factory->LookupAsciiSymbol("id"); - Handle<String> count_sym = factory->LookupAsciiSymbol("count"); - Handle<String> size_sym = factory->LookupAsciiSymbol("size"); - - Handle<JSObject> result = factory->NewJSObject(isolate->object_function()); - if (result->IsFailure()) return Object::cast(*result); - - { MaybeObject* maybe_result = result->SetProperty(*id_sym, - Smi::FromInt(lol->id()), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - } - { MaybeObject* maybe_result = result->SetProperty(*count_sym, - Smi::FromInt(total_count), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - } - { MaybeObject* maybe_result = result->SetProperty(*size_sym, - Smi::FromInt(size), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - } - - return *result; -} - - -// Delete doesn't actually deletes an lol. It just marks it as invisible since -// its contents are considered to be part of subsequent lists as well. The -// only time we'll actually delete the lol is when we Reset() or if the lol is -// invisible, and its element count reaches 0. -bool LiveObjectList::Delete(int id) { - LiveObjectList* lol = last(); - while (lol != NULL) { - if (lol->id() == id) { - break; - } - lol = lol->prev_; - } - - // If no lol is found for this id, then we fail to delete. - if (lol == NULL) return false; - - // Else, mark the lol as invisible i.e. id == 0. - lol->id_ = 0; - list_count_--; - ASSERT(list_count_ >= 0); - if (lol->obj_count_ == 0) { - // Point the next lol's prev to this lol's prev. - LiveObjectList* next = lol->next_; - LiveObjectList* prev = lol->prev_; - // Point next's prev to prev. - if (next != NULL) { - next->prev_ = lol->prev_; - } else { - last_ = lol->prev_; - } - // Point prev's next to next. - if (prev != NULL) { - prev->next_ = lol->next_; - } else { - first_ = lol->next_; - } - - lol->prev_ = NULL; - lol->next_ = NULL; - - // Delete this now empty and invisible lol. - delete lol; - } - - // Just in case we've marked everything invisible, then clean up completely. - if (list_count_ == 0) { - Reset(); - } - - return true; -} - - -MaybeObject* LiveObjectList::Dump(int older_id, - int newer_id, - int start_idx, - int dump_limit, - Handle<JSObject> filter_obj) { - if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) { - return Failure::Exception(); // Fail: 0 is not a valid lol id. - } - if (newer_id < older_id) { - // They are not in the expected order. Swap them. - int temp = older_id; - older_id = newer_id; - newer_id = temp; - } - - LiveObjectList* newer_lol = FindLolForId(newer_id, last()); - LiveObjectList* older_lol = FindLolForId(older_id, newer_lol); - - // If the id is defined, and we can't find a LOL for it, then we have an - // invalid id. 
- if ((newer_id != 0) && (newer_lol == NULL)) { - return Failure::Exception(); // Fail: the newer lol id is invalid. - } - if ((older_id != 0) && (older_lol == NULL)) { - return Failure::Exception(); // Fail: the older lol id is invalid. - } - - LolFilter filter(filter_obj); - LolDumpWriter writer(older_lol, newer_lol); - return DumpPrivate(&writer, start_idx, dump_limit, &filter); -} - - -MaybeObject* LiveObjectList::DumpPrivate(DumpWriter* writer, - int start, - int dump_limit, - LolFilter* filter) { - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - - HandleScope scope(isolate); - - // Calculate the number of entries of the dump. - int count = -1; - int size = -1; - writer->ComputeTotalCountAndSize(filter, &count, &size); - - // Adjust for where to start the dump. - if ((start < 0) || (start >= count)) { - return Failure::Exception(); // invalid start. - } - - int remaining_count = count - start; - if (dump_limit > remaining_count) { - dump_limit = remaining_count; - } - - // Allocate an array to hold the result. - Handle<FixedArray> elements_arr = factory->NewFixedArray(dump_limit); - if (elements_arr->IsFailure()) return Object::cast(*elements_arr); - - // Fill in the dump. - Handle<Object> error; - bool success = writer->Write(elements_arr, - start, - dump_limit, - filter, - error); - if (!success) return Object::cast(*error); - - MaybeObject* maybe_result; - - // Allocate the result body. - Handle<JSObject> body = factory->NewJSObject(isolate->object_function()); - if (body->IsFailure()) return Object::cast(*body); - - // Set the updated body.count. - Handle<String> count_sym = factory->LookupAsciiSymbol("count"); - maybe_result = body->SetProperty(*count_sym, - Smi::FromInt(count), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - // Set the updated body.size if appropriate. - if (size >= 0) { - Handle<String> size_sym = factory->LookupAsciiSymbol("size"); - maybe_result = body->SetProperty(*size_sym, - Smi::FromInt(size), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - } - - // Set body.first_index. - Handle<String> first_sym = factory->LookupAsciiSymbol("first_index"); - maybe_result = body->SetProperty(*first_sym, - Smi::FromInt(start), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - // Allocate the JSArray of the elements. - Handle<JSObject> elements = factory->NewJSObject(isolate->array_function()); - if (elements->IsFailure()) return Object::cast(*elements); - - maybe_result = Handle<JSArray>::cast(elements)->SetContent(*elements_arr); - if (maybe_result->IsFailure()) return maybe_result; - - // Set body.elements. - Handle<String> elements_sym = factory->LookupAsciiSymbol("elements"); - maybe_result = body->SetProperty(*elements_sym, - *elements, - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - return *body; -} - - -MaybeObject* LiveObjectList::Summarize(int older_id, - int newer_id, - Handle<JSObject> filter_obj) { - if ((older_id < 0) || (newer_id < 0) || (last() == NULL)) { - return Failure::Exception(); // Fail: 0 is not a valid lol id. - } - if (newer_id < older_id) { - // They are not in the expected order. Swap them. 
- int temp = older_id; - older_id = newer_id; - newer_id = temp; - } - - LiveObjectList* newer_lol = FindLolForId(newer_id, last()); - LiveObjectList* older_lol = FindLolForId(older_id, newer_lol); - - // If the id is defined, and we can't find a LOL for it, then we have an - // invalid id. - if ((newer_id != 0) && (newer_lol == NULL)) { - return Failure::Exception(); // Fail: the newer lol id is invalid. - } - if ((older_id != 0) && (older_lol == NULL)) { - return Failure::Exception(); // Fail: the older lol id is invalid. - } - - LolFilter filter(filter_obj); - LolSummaryWriter writer(older_lol, newer_lol); - return SummarizePrivate(&writer, &filter, false); -} - - -// Creates a summary report for the debugger. -// Note: the SummaryWriter takes care of iterating over objects and filling in -// the summary. -MaybeObject* LiveObjectList::SummarizePrivate(SummaryWriter* writer, - LolFilter* filter, - bool is_tracking_roots) { - HandleScope scope; - MaybeObject* maybe_result; - - LiveObjectSummary summary(filter); - writer->Write(&summary); - - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - - // The result body will look like this: - // body: { - // count: <total_count>, - // size: <total_size>, - // found_root: <boolean>, // optional. - // found_weak_root: <boolean>, // optional. - // summary: [ - // { - // desc: "<object type name>", - // count: <count>, - // size: size - // }, - // ... - // ] - // } - - // Prefetch some needed symbols. - Handle<String> desc_sym = factory->LookupAsciiSymbol("desc"); - Handle<String> count_sym = factory->LookupAsciiSymbol("count"); - Handle<String> size_sym = factory->LookupAsciiSymbol("size"); - Handle<String> summary_sym = factory->LookupAsciiSymbol("summary"); - - // Allocate the summary array. - int entries_count = summary.GetNumberOfEntries(); - Handle<FixedArray> summary_arr = - factory->NewFixedArray(entries_count); - if (summary_arr->IsFailure()) return Object::cast(*summary_arr); - - int idx = 0; - for (int i = 0; i < LiveObjectSummary::kNumberOfEntries; i++) { - // Allocate the summary record. - Handle<JSObject> detail = factory->NewJSObject(isolate->object_function()); - if (detail->IsFailure()) return Object::cast(*detail); - - // Fill in the summary record. - LiveObjectType type = static_cast<LiveObjectType>(i); - int count = summary.Count(type); - if (count) { - const char* desc_cstr = GetObjectTypeDesc(type); - Handle<String> desc = factory->LookupAsciiSymbol(desc_cstr); - int size = summary.Size(type); - - maybe_result = detail->SetProperty(*desc_sym, - *desc, - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - maybe_result = detail->SetProperty(*count_sym, - Smi::FromInt(count), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - maybe_result = detail->SetProperty(*size_sym, - Smi::FromInt(size), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - summary_arr->set(idx++, *detail); - } - } - - // Wrap the summary fixed array in a JS array. - Handle<JSObject> summary_obj = - factory->NewJSObject(isolate->array_function()); - if (summary_obj->IsFailure()) return Object::cast(*summary_obj); - - maybe_result = Handle<JSArray>::cast(summary_obj)->SetContent(*summary_arr); - if (maybe_result->IsFailure()) return maybe_result; - - // Create the body object. - Handle<JSObject> body = factory->NewJSObject(isolate->object_function()); - if (body->IsFailure()) return Object::cast(*body); - - // Fill out the body object. 
- int total_count = summary.total_count(); - int total_size = summary.total_size(); - maybe_result = body->SetProperty(*count_sym, - Smi::FromInt(total_count), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - maybe_result = body->SetProperty(*size_sym, - Smi::FromInt(total_size), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - if (is_tracking_roots) { - int found_root = summary.found_root(); - int found_weak_root = summary.found_weak_root(); - Handle<String> root_sym = factory->LookupAsciiSymbol("found_root"); - Handle<String> weak_root_sym = - factory->LookupAsciiSymbol("found_weak_root"); - maybe_result = body->SetProperty(*root_sym, - Smi::FromInt(found_root), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - maybe_result = body->SetProperty(*weak_root_sym, - Smi::FromInt(found_weak_root), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - } - - maybe_result = body->SetProperty(*summary_sym, - *summary_obj, - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - return *body; -} - - -// Returns an array listing the captured lols. -// Note: only dumps the section starting at start_idx and only up to -// dump_limit entries. -MaybeObject* LiveObjectList::Info(int start_idx, int dump_limit) { - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - - HandleScope scope(isolate); - MaybeObject* maybe_result; - - int total_count = LiveObjectList::list_count(); - int dump_count = total_count; - - // Adjust for where to start the dump. - if (total_count == 0) { - start_idx = 0; // Ensure this to get an empty list. - } else if ((start_idx < 0) || (start_idx >= total_count)) { - return Failure::Exception(); // invalid start. - } - dump_count -= start_idx; - - // Adjust for the dump limit. - if (dump_count > dump_limit) { - dump_count = dump_limit; - } - - // Allocate an array to hold the result. - Handle<FixedArray> list = factory->NewFixedArray(dump_count); - if (list->IsFailure()) return Object::cast(*list); - - // Prefetch some needed symbols. - Handle<String> id_sym = factory->LookupAsciiSymbol("id"); - Handle<String> count_sym = factory->LookupAsciiSymbol("count"); - Handle<String> size_sym = factory->LookupAsciiSymbol("size"); - - // Fill the array with the lol details. - int idx = 0; - LiveObjectList* lol = first_; - while ((lol != NULL) && (idx < start_idx)) { // Skip tail entries. - if (lol->id() != 0) { - idx++; - } - lol = lol->next(); - } - idx = 0; - while ((lol != NULL) && (dump_limit != 0)) { - if (lol->id() != 0) { - int count; - int size; - count = lol->GetTotalObjCountAndSize(&size); - - Handle<JSObject> detail = - factory->NewJSObject(isolate->object_function()); - if (detail->IsFailure()) return Object::cast(*detail); - - maybe_result = detail->SetProperty(*id_sym, - Smi::FromInt(lol->id()), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - maybe_result = detail->SetProperty(*count_sym, - Smi::FromInt(count), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - maybe_result = detail->SetProperty(*size_sym, - Smi::FromInt(size), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - list->set(idx++, *detail); - dump_limit--; - } - lol = lol->next(); - } - - // Return the result as a JS array. 
- Handle<JSObject> lols = factory->NewJSObject(isolate->array_function()); - - maybe_result = Handle<JSArray>::cast(lols)->SetContent(*list); - if (maybe_result->IsFailure()) return maybe_result; - - Handle<JSObject> result = factory->NewJSObject(isolate->object_function()); - if (result->IsFailure()) return Object::cast(*result); - - maybe_result = result->SetProperty(*count_sym, - Smi::FromInt(total_count), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - Handle<String> first_sym = factory->LookupAsciiSymbol("first_index"); - maybe_result = result->SetProperty(*first_sym, - Smi::FromInt(start_idx), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - Handle<String> lists_sym = factory->LookupAsciiSymbol("lists"); - maybe_result = result->SetProperty(*lists_sym, - *lols, - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - return *result; -} - - -// Deletes all captured lols. -void LiveObjectList::Reset() { - LiveObjectList* lol = last(); - // Just delete the last. Each lol will delete it's prev automatically. - delete lol; - - next_element_id_ = 1; - list_count_ = 0; - last_id_ = 0; - first_ = NULL; - last_ = NULL; -} - - -// Gets the object for the specified obj id. -Object* LiveObjectList::GetObj(int obj_id) { - Element* element = FindElementFor<int>(GetElementId, obj_id); - if (element != NULL) { - return Object::cast(element->obj_); - } - return HEAP->undefined_value(); -} - - -// Gets the obj id for the specified address if valid. -int LiveObjectList::GetObjId(Object* obj) { - // Make a heap object pointer from the address. - HeapObject* hobj = HeapObject::cast(obj); - Element* element = FindElementFor<HeapObject*>(GetElementObj, hobj); - if (element != NULL) { - return element->id_; - } - return 0; // Invalid address. -} - - -// Gets the obj id for the specified address if valid. -Object* LiveObjectList::GetObjId(Handle<String> address) { - SmartArrayPointer<char> addr_str = - address->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - - Isolate* isolate = Isolate::Current(); - - // Extract the address value from the string. - int value = - static_cast<int>(StringToInt(isolate->unicode_cache(), *address, 16)); - Object* obj = reinterpret_cast<Object*>(value); - return Smi::FromInt(GetObjId(obj)); -} - - -// Helper class for copying HeapObjects. -class LolVisitor: public ObjectVisitor { - public: - LolVisitor(HeapObject* target, Handle<HeapObject> handle_to_skip) - : target_(target), handle_to_skip_(handle_to_skip), found_(false) {} - - void VisitPointer(Object** p) { CheckPointer(p); } - - void VisitPointers(Object** start, Object** end) { - // Check all HeapObject pointers in [start, end). - for (Object** p = start; !found() && p < end; p++) CheckPointer(p); - } - - inline bool found() const { return found_; } - inline bool reset() { return found_ = false; } - - private: - inline void CheckPointer(Object** p) { - Object* object = *p; - if (HeapObject::cast(object) == target_) { - // We may want to skip this handle because the handle may be a local - // handle in a handle scope in one of our callers. Once we return, - // that handle will be popped. Hence, we don't want to count it as - // a root that would have kept the target object alive. - if (!handle_to_skip_.is_null() && - handle_to_skip_.location() == reinterpret_cast<HeapObject**>(p)) { - return; // Skip this handle. 
- } - found_ = true; - } - } - - HeapObject* target_; - Handle<HeapObject> handle_to_skip_; - bool found_; -}; - - -inline bool AddRootRetainerIfFound(const LolVisitor& visitor, - LolFilter* filter, - LiveObjectSummary* summary, - void (*SetRootFound)(LiveObjectSummary* s), - int start, - int dump_limit, - int* total_count, - Handle<FixedArray> retainers_arr, - int* count, - int* index, - const char* root_name, - Handle<String> id_sym, - Handle<String> desc_sym, - Handle<String> size_sym, - Handle<Object> error) { - HandleScope scope; - - // Scratch handles. - Handle<JSObject> detail; - Handle<String> desc; - Handle<HeapObject> retainer; - - if (visitor.found()) { - if (!filter->is_active()) { - (*total_count)++; - if (summary) { - SetRootFound(summary); - } else if ((*total_count > start) && ((*index) < dump_limit)) { - (*count)++; - if (!retainers_arr.is_null()) { - return AddObjDetail(retainers_arr, - (*index)++, - 0, - retainer, - root_name, - id_sym, - desc_sym, - size_sym, - detail, - desc, - error); - } - } - } - } - return true; -} - - -inline void SetFoundRoot(LiveObjectSummary* summary) { - summary->set_found_root(); -} - - -inline void SetFoundWeakRoot(LiveObjectSummary* summary) { - summary->set_found_weak_root(); -} - - -int LiveObjectList::GetRetainers(Handle<HeapObject> target, - Handle<JSObject> instance_filter, - Handle<FixedArray> retainers_arr, - int start, - int dump_limit, - int* total_count, - LolFilter* filter, - LiveObjectSummary* summary, - JSFunction* arguments_function, - Handle<Object> error) { - HandleScope scope; - - // Scratch handles. - Handle<JSObject> detail; - Handle<String> desc; - Handle<HeapObject> retainer; - - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - - // Prefetch some needed symbols. - Handle<String> id_sym = factory->LookupAsciiSymbol("id"); - Handle<String> desc_sym = factory->LookupAsciiSymbol("desc"); - Handle<String> size_sym = factory->LookupAsciiSymbol("size"); - - NoHandleAllocation ha; - int count = 0; - int index = 0; - Handle<JSObject> last_obj; - - *total_count = 0; - - // Iterate roots. - LolVisitor lol_visitor(*target, target); - isolate->heap()->IterateStrongRoots(&lol_visitor, VISIT_ALL); - if (!AddRootRetainerIfFound(lol_visitor, - filter, - summary, - SetFoundRoot, - start, - dump_limit, - total_count, - retainers_arr, - &count, - &index, - "<root>", - id_sym, - desc_sym, - size_sym, - error)) { - return -1; - } - - lol_visitor.reset(); - isolate->heap()->IterateWeakRoots(&lol_visitor, VISIT_ALL); - if (!AddRootRetainerIfFound(lol_visitor, - filter, - summary, - SetFoundWeakRoot, - start, - dump_limit, - total_count, - retainers_arr, - &count, - &index, - "<weak root>", - id_sym, - desc_sym, - size_sym, - error)) { - return -1; - } - - // Iterate the live object lists. - LolIterator it(NULL, last()); - for (it.Init(); !it.Done() && (index < dump_limit); it.Next()) { - HeapObject* heap_obj = it.Obj(); - - // Only look at all JSObjects. - if (heap_obj->IsJSObject()) { - // Skip context extension objects and argument arrays as these are - // checked in the context of functions using them. - JSObject* obj = JSObject::cast(heap_obj); - if (obj->IsJSContextExtensionObject() || - obj->map()->constructor() == arguments_function) { - continue; - } - - // Check if the JS object has a reference to the object looked for. - if (obj->ReferencesObject(*target)) { - // Check instance filter if supplied. This is normally used to avoid - // references from mirror objects (see Runtime_IsInPrototypeChain). 
- if (!instance_filter->IsUndefined()) { - Object* V = obj; - while (true) { - Object* prototype = V->GetPrototype(); - if (prototype->IsNull()) { - break; - } - if (*instance_filter == prototype) { - obj = NULL; // Don't add this object. - break; - } - V = prototype; - } - } - - if (obj != NULL) { - // Skip objects that have been filtered out. - if (filter->Matches(heap_obj)) { - continue; - } - - // Valid reference found add to instance array if supplied an update - // count. - last_obj = Handle<JSObject>(obj); - (*total_count)++; - - if (summary != NULL) { - summary->Add(heap_obj); - } else if ((*total_count > start) && (index < dump_limit)) { - count++; - if (!retainers_arr.is_null()) { - retainer = Handle<HeapObject>(heap_obj); - bool success = AddObjDetail(retainers_arr, - index++, - it.Id(), - retainer, - NULL, - id_sym, - desc_sym, - size_sym, - detail, - desc, - error); - if (!success) return -1; - } - } - } - } - } - } - - // Check for circular reference only. This can happen when the object is only - // referenced from mirrors and has a circular reference in which case the - // object is not really alive and would have been garbage collected if not - // referenced from the mirror. - - if (*total_count == 1 && !last_obj.is_null() && *last_obj == *target) { - count = 0; - *total_count = 0; - } - - return count; -} - - -MaybeObject* LiveObjectList::GetObjRetainers(int obj_id, - Handle<JSObject> instance_filter, - bool verbose, - int start, - int dump_limit, - Handle<JSObject> filter_obj) { - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - Heap* heap = isolate->heap(); - - HandleScope scope(isolate); - - // Get the target object. - HeapObject* heap_obj = HeapObject::cast(GetObj(obj_id)); - if (heap_obj == heap->undefined_value()) { - return heap_obj; - } - - Handle<HeapObject> target = Handle<HeapObject>(heap_obj); - - // Get the constructor function for context extension and arguments array. - JSObject* arguments_boilerplate = - isolate->context()->native_context()->arguments_boilerplate(); - JSFunction* arguments_function = - JSFunction::cast(arguments_boilerplate->map()->constructor()); - - Handle<JSFunction> args_function = Handle<JSFunction>(arguments_function); - LolFilter filter(filter_obj); - - if (!verbose) { - RetainersSummaryWriter writer(target, instance_filter, args_function); - return SummarizePrivate(&writer, &filter, true); - - } else { - RetainersDumpWriter writer(target, instance_filter, args_function); - Object* body_obj; - MaybeObject* maybe_result = - DumpPrivate(&writer, start, dump_limit, &filter); - if (!maybe_result->ToObject(&body_obj)) { - return maybe_result; - } - - // Set body.id. 
- Handle<JSObject> body = Handle<JSObject>(JSObject::cast(body_obj)); - Handle<String> id_sym = factory->LookupAsciiSymbol("id"); - maybe_result = body->SetProperty(*id_sym, - Smi::FromInt(obj_id), - NONE, - kNonStrictMode); - if (maybe_result->IsFailure()) return maybe_result; - - return *body; - } -} - - -Object* LiveObjectList::PrintObj(int obj_id) { - Object* obj = GetObj(obj_id); - if (!obj) { - return HEAP->undefined_value(); - } - - EmbeddedVector<char, 128> temp_filename; - static int temp_count = 0; - const char* path_prefix = "."; - - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - Heap* heap = isolate->heap(); - - if (FLAG_lol_workdir) { - path_prefix = FLAG_lol_workdir; - } - OS::SNPrintF(temp_filename, "%s/lol-print-%d", path_prefix, ++temp_count); - - FILE* f = OS::FOpen(temp_filename.start(), "w+"); - - PrintF(f, "@%d ", LiveObjectList::GetObjId(obj)); -#ifdef OBJECT_PRINT -#ifdef INSPECTOR - Inspector::DumpObjectType(f, obj); -#endif // INSPECTOR - PrintF(f, "\n"); - obj->Print(f); -#else // !OBJECT_PRINT - obj->ShortPrint(f); -#endif // !OBJECT_PRINT - PrintF(f, "\n"); - Flush(f); - fclose(f); - - // Create a string from the temp_file. - // Note: the mmapped resource will take care of closing the file. - MemoryMappedExternalResource* resource = - new MemoryMappedExternalResource(temp_filename.start(), true); - if (resource->exists() && !resource->is_empty()) { - ASSERT(resource->IsAscii()); - Handle<String> dump_string = - factory->NewExternalStringFromAscii(resource); - heap->external_string_table()->AddString(*dump_string); - return *dump_string; - } else { - delete resource; - } - return HEAP->undefined_value(); -} - - -class LolPathTracer: public PathTracer { - public: - LolPathTracer(FILE* out, - Object* search_target, - WhatToFind what_to_find) - : PathTracer(search_target, what_to_find, VISIT_ONLY_STRONG), out_(out) {} - - private: - void ProcessResults(); - - FILE* out_; -}; - - -void LolPathTracer::ProcessResults() { - if (found_target_) { - PrintF(out_, "=====================================\n"); - PrintF(out_, "==== Path to object ====\n"); - PrintF(out_, "=====================================\n\n"); - - ASSERT(!object_stack_.is_empty()); - Object* prev = NULL; - for (int i = 0, index = 0; i < object_stack_.length(); i++) { - Object* obj = object_stack_[i]; - - // Skip this object if it is basically the internals of the - // previous object (which would have dumped its details already). - if (prev && prev->IsJSObject() && - (obj != search_target_)) { - JSObject* jsobj = JSObject::cast(prev); - if (obj->IsFixedArray() && - jsobj->properties() == FixedArray::cast(obj)) { - // Skip this one because it would have been printed as the - // properties of the last object already. - continue; - } else if (obj->IsHeapObject() && - jsobj->elements() == HeapObject::cast(obj)) { - // Skip this one because it would have been printed as the - // elements of the last object already. - continue; - } - } - - // Print a connecting arrow. - if (i > 0) PrintF(out_, "\n |\n |\n V\n\n"); - - // Print the object index. 
- PrintF(out_, "[%d] ", ++index); - - // Print the LOL object ID: - int id = LiveObjectList::GetObjId(obj); - if (id > 0) PrintF(out_, "@%d ", id); - -#ifdef OBJECT_PRINT -#ifdef INSPECTOR - Inspector::DumpObjectType(out_, obj); -#endif // INSPECTOR - PrintF(out_, "\n"); - obj->Print(out_); -#else // !OBJECT_PRINT - obj->ShortPrint(out_); - PrintF(out_, "\n"); -#endif // !OBJECT_PRINT - Flush(out_); - } - PrintF(out_, "\n"); - PrintF(out_, "=====================================\n\n"); - Flush(out_); - } -} - - -Object* LiveObjectList::GetPathPrivate(HeapObject* obj1, HeapObject* obj2) { - EmbeddedVector<char, 128> temp_filename; - static int temp_count = 0; - const char* path_prefix = "."; - - if (FLAG_lol_workdir) { - path_prefix = FLAG_lol_workdir; - } - OS::SNPrintF(temp_filename, "%s/lol-getpath-%d", path_prefix, ++temp_count); - - FILE* f = OS::FOpen(temp_filename.start(), "w+"); - - Isolate* isolate = Isolate::Current(); - Factory* factory = isolate->factory(); - Heap* heap = isolate->heap(); - - // Save the previous verbosity. - bool prev_verbosity = FLAG_use_verbose_printer; - FLAG_use_verbose_printer = false; - - // Dump the paths. - { - // The tracer needs to be scoped because its usage asserts no allocation, - // and we need to allocate the result string below. - LolPathTracer tracer(f, obj2, LolPathTracer::FIND_FIRST); - - bool found = false; - if (obj1 == NULL) { - // Check for ObjectGroups that references this object. - // TODO(mlam): refactor this to be more modular. - { - List<ObjectGroup*>* groups = isolate->global_handles()->object_groups(); - for (int i = 0; i < groups->length(); i++) { - ObjectGroup* group = groups->at(i); - if (group == NULL) continue; - - bool found_group = false; - for (size_t j = 0; j < group->length_; j++) { - Object* object = *(group->objects_[j]); - HeapObject* hobj = HeapObject::cast(object); - if (obj2 == hobj) { - found_group = true; - break; - } - } - - if (found_group) { - PrintF(f, - "obj %p is a member of object group %p {\n", - reinterpret_cast<void*>(obj2), - reinterpret_cast<void*>(group)); - for (size_t j = 0; j < group->length_; j++) { - Object* object = *(group->objects_[j]); - if (!object->IsHeapObject()) continue; - - HeapObject* hobj = HeapObject::cast(object); - int id = GetObjId(hobj); - if (id != 0) { - PrintF(f, " @%d:", id); - } else { - PrintF(f, " <no id>:"); - } - - char buffer[512]; - GenerateObjectDesc(hobj, buffer, sizeof(buffer)); - PrintF(f, " %s", buffer); - if (hobj == obj2) { - PrintF(f, " <==="); - } - PrintF(f, "\n"); - } - PrintF(f, "}\n"); - } - } - } - - PrintF(f, "path from roots to obj %p\n", reinterpret_cast<void*>(obj2)); - heap->IterateRoots(&tracer, VISIT_ONLY_STRONG); - found = tracer.found(); - - if (!found) { - PrintF(f, " No paths found. Checking symbol tables ...\n"); - SymbolTable* symbol_table = HEAP->raw_unchecked_symbol_table(); - tracer.VisitPointers(reinterpret_cast<Object**>(&symbol_table), - reinterpret_cast<Object**>(&symbol_table)+1); - found = tracer.found(); - if (!found) { - symbol_table->IteratePrefix(&tracer); - found = tracer.found(); - } - } - - if (!found) { - PrintF(f, " No paths found. Checking weak roots ...\n"); - // Check weak refs next. 
- isolate->global_handles()->IterateWeakRoots(&tracer); - found = tracer.found(); - } - - } else { - PrintF(f, "path from obj %p to obj %p:\n", - reinterpret_cast<void*>(obj1), reinterpret_cast<void*>(obj2)); - tracer.TracePathFrom(reinterpret_cast<Object**>(&obj1)); - found = tracer.found(); - } - - if (!found) { - PrintF(f, " No paths found\n\n"); - } - } - - // Flush and clean up the dumped file. - Flush(f); - fclose(f); - - // Restore the previous verbosity. - FLAG_use_verbose_printer = prev_verbosity; - - // Create a string from the temp_file. - // Note: the mmapped resource will take care of closing the file. - MemoryMappedExternalResource* resource = - new MemoryMappedExternalResource(temp_filename.start(), true); - if (resource->exists() && !resource->is_empty()) { - ASSERT(resource->IsAscii()); - Handle<String> path_string = - factory->NewExternalStringFromAscii(resource); - heap->external_string_table()->AddString(*path_string); - return *path_string; - } else { - delete resource; - } - return heap->undefined_value(); -} - - -Object* LiveObjectList::GetPath(int obj_id1, - int obj_id2, - Handle<JSObject> instance_filter) { - HandleScope scope; - - // Get the target object. - HeapObject* obj1 = NULL; - if (obj_id1 != 0) { - obj1 = HeapObject::cast(GetObj(obj_id1)); - if (obj1 == HEAP->undefined_value()) { - return obj1; - } - } - - HeapObject* obj2 = HeapObject::cast(GetObj(obj_id2)); - if (obj2 == HEAP->undefined_value()) { - return obj2; - } - - return GetPathPrivate(obj1, obj2); -} - - -void LiveObjectList::DoProcessNonLive(HeapObject* obj) { - // We should only be called if we have at least one lol to search. - ASSERT(last() != NULL); - Element* element = last()->Find(obj); - if (element != NULL) { - NullifyNonLivePointer(&element->obj_); - } -} - - -void LiveObjectList::IterateElementsPrivate(ObjectVisitor* v) { - LiveObjectList* lol = last(); - while (lol != NULL) { - Element* elements = lol->elements_; - int count = lol->obj_count_; - for (int i = 0; i < count; i++) { - HeapObject** p = &elements[i].obj_; - v->VisitPointer(reinterpret_cast<Object** >(p)); - } - lol = lol->prev_; - } -} - - -// Purpose: Called by GCEpilogue to purge duplicates. Not to be called by -// anyone else. -void LiveObjectList::PurgeDuplicates() { - bool is_sorted = false; - LiveObjectList* lol = last(); - if (!lol) { - return; // Nothing to purge. - } - - int total_count = lol->TotalObjCount(); - if (!total_count) { - return; // Nothing to purge. - } - - Element* elements = NewArray<Element>(total_count); - int count = 0; - - // Copy all the object elements into a consecutive array. - while (lol) { - memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element)); - count += lol->obj_count_; - lol = lol->prev_; - } - qsort(elements, total_count, sizeof(Element), - reinterpret_cast<RawComparer>(CompareElement)); - - ASSERT(count == total_count); - - // Iterate over all objects in the consolidated list and check for dups. - total_count--; - for (int i = 0; i < total_count; ) { - Element* curr = &elements[i]; - HeapObject* curr_obj = curr->obj_; - int j = i+1; - bool done = false; - - while (!done && (j < total_count)) { - // Process if the element's object is still live after the current GC. - // Non-live objects will be converted to SMIs i.e. not HeapObjects. - if (curr_obj->IsHeapObject()) { - Element* next = &elements[j]; - HeapObject* next_obj = next->obj_; - if (next_obj->IsHeapObject()) { - if (curr_obj != next_obj) { - done = true; - continue; // Live object but no match. Move on. 
- } - - // NOTE: we've just GCed the LOLs. Hence, they are no longer sorted. - // Since we detected at least one need to search for entries, we'll - // sort it to enable the use of NullifyMostRecent() below. We only - // need to sort it once (except for one exception ... see below). - if (!is_sorted) { - SortAll(); - is_sorted = true; - } - - // We have a match. Need to nullify the most recent ref to this - // object. We'll keep the oldest ref: - // Note: we will nullify the element record in the LOL - // database, not in the local sorted copy of the elements. - NullifyMostRecent(curr_obj); - } - } - // Either the object was already marked for purging, or we just marked - // it. Either way, if there's more than one dup, then we need to check - // the next element for another possible dup against the current as well - // before we move on. So, here we go. - j++; - } - - // We can move on to checking the match on the next element. - i = j; - } - - DeleteArray<Element>(elements); -} - - -// Purpose: Purges dead objects and resorts the LOLs. -void LiveObjectList::GCEpiloguePrivate() { - // Note: During the GC, ConsStrings may be collected and pointers may be - // forwarded to its constituent string. As a result, we may find dupes of - // objects references in the LOL list. - // Another common way we get dups is that free chunks that have been swept - // in the oldGen heap may be kept as ByteArray objects in a free list. - // - // When we promote live objects from the youngGen, the object may be moved - // to the start of these free chunks. Since there is no free or move event - // for the free chunks, their addresses will show up 2 times: once for their - // original free ByteArray selves, and once for the newly promoted youngGen - // object. Hence, we can get a duplicate address in the LOL again. - // - // We need to eliminate these dups because the LOL implementation expects to - // only have at most one unique LOL reference to any object at any time. - PurgeDuplicates(); - - // After the GC, sweep away all free'd Elements and compact. - LiveObjectList* prev = NULL; - LiveObjectList* next = NULL; - - // Iterating from the youngest lol to the oldest lol. - for (LiveObjectList* lol = last(); lol; lol = prev) { - Element* elements = lol->elements_; - prev = lol->prev(); // Save the prev. - - // Remove any references to collected objects. - int i = 0; - while (i < lol->obj_count_) { - Element& element = elements[i]; - if (!element.obj_->IsHeapObject()) { - // If the HeapObject address was converted into a SMI, then this - // is a dead object. Copy the last element over this one. - element = elements[lol->obj_count_ - 1]; - lol->obj_count_--; - // We've just moved the last element into this index. We'll revisit - // this index again. Hence, no need to increment the iterator. - } else { - i++; // Look at the next element next. - } - } - - int new_count = lol->obj_count_; - - // Check if there are any more elements to keep after purging the dead ones. - if (new_count == 0) { - DeleteArray<Element>(elements); - lol->elements_ = NULL; - lol->capacity_ = 0; - ASSERT(lol->obj_count_ == 0); - - // If the list is also invisible, the clean up the list as well. - if (lol->id_ == 0) { - // Point the next lol's prev to this lol's prev. - if (next) { - next->prev_ = lol->prev_; - } else { - last_ = lol->prev_; - } - - // Delete this now empty and invisible lol. - delete lol; - - // Don't point the next to this lol since it is now deleted. - // Leave the next pointer pointing to the current lol. 
- continue; - } - - } else { - // If the obj_count_ is less than the capacity and the difference is - // greater than a specified threshold, then we should shrink the list. - int diff = lol->capacity_ - new_count; - const int kMaxUnusedSpace = 64; - if (diff > kMaxUnusedSpace) { // Threshold for shrinking. - // Shrink the list. - Element* new_elements = NewArray<Element>(new_count); - memcpy(new_elements, elements, new_count * sizeof(Element)); - - DeleteArray<Element>(elements); - lol->elements_ = new_elements; - lol->capacity_ = new_count; - } - ASSERT(lol->obj_count_ == new_count); - - lol->Sort(); // We've moved objects. Re-sort in case. - } - - // Save the next (for the previous link) in case we need it later. - next = lol; - } - -#ifdef VERIFY_LOL - if (FLAG_verify_lol) { - Verify(); - } -#endif -} - - -#ifdef VERIFY_LOL -void LiveObjectList::Verify(bool match_heap_exactly) { - OS::Print("Verifying the LiveObjectList database:\n"); - - LiveObjectList* lol = last(); - if (lol == NULL) { - OS::Print(" No lol database to verify\n"); - return; - } - - OS::Print(" Preparing the lol database ...\n"); - int total_count = lol->TotalObjCount(); - - Element* elements = NewArray<Element>(total_count); - int count = 0; - - // Copy all the object elements into a consecutive array. - OS::Print(" Copying the lol database ...\n"); - while (lol != NULL) { - memcpy(&elements[count], lol->elements_, lol->obj_count_ * sizeof(Element)); - count += lol->obj_count_; - lol = lol->prev_; - } - qsort(elements, total_count, sizeof(Element), - reinterpret_cast<RawComparer>(CompareElement)); - - ASSERT(count == total_count); - - // Iterate over all objects in the heap and check for: - // 1. object in LOL but not in heap i.e. error. - // 2. object in heap but not in LOL (possibly not an error). Usually - // just means that we don't have the a capture of the latest heap. - // That is unless we did this verify immediately after a capture, - // and specified match_heap_exactly = true. - - int number_of_heap_objects = 0; - int number_of_matches = 0; - int number_not_in_heap = total_count; - int number_not_in_lol = 0; - - OS::Print(" Start verify ...\n"); - OS::Print(" Verifying ..."); - Flush(); - HeapIterator iterator; - HeapObject* heap_obj = NULL; - while ((heap_obj = iterator.next()) != NULL) { - number_of_heap_objects++; - - // Check if the heap_obj is in the lol. - Element key; - key.obj_ = heap_obj; - - Element* result = reinterpret_cast<Element*>( - bsearch(&key, elements, total_count, sizeof(Element), - reinterpret_cast<RawComparer>(CompareElement))); - - if (result != NULL) { - number_of_matches++; - number_not_in_heap--; - // Mark it as found by changing it into a SMI (mask off low bit). - // Note: we cannot use HeapObject::cast() here because it asserts that - // the HeapObject bit is set on the address, but we're unsetting it on - // purpose here for our marking. - result->obj_ = reinterpret_cast<HeapObject*>(heap_obj->address()); - - } else { - number_not_in_lol++; - if (match_heap_exactly) { - OS::Print("heap object %p NOT in lol database\n", heap_obj); - } - } - // Show some sign of life. - if (number_of_heap_objects % 1000 == 0) { - OS::Print("."); - fflush(stdout); - } - } - OS::Print("\n"); - - // Reporting lol objects not found in the heap. 
- if (number_not_in_heap) { - int found = 0; - for (int i = 0; (i < total_count) && (found < number_not_in_heap); i++) { - Element& element = elements[i]; - if (element.obj_->IsHeapObject()) { - OS::Print("lol database object [%d of %d] %p NOT in heap\n", - i, total_count, element.obj_); - found++; - } - } - } - - DeleteArray<Element>(elements); - - OS::Print("number of objects in lol database %d\n", total_count); - OS::Print("number of heap objects .......... %d\n", number_of_heap_objects); - OS::Print("number of matches ............... %d\n", number_of_matches); - OS::Print("number NOT in heap .............. %d\n", number_not_in_heap); - OS::Print("number NOT in lol database ...... %d\n", number_not_in_lol); - - if (number_of_matches != total_count) { - OS::Print(" *** ERROR: " - "NOT all lol database objects match heap objects.\n"); - } - if (number_not_in_heap != 0) { - OS::Print(" *** ERROR: %d lol database objects not found in heap.\n", - number_not_in_heap); - } - if (match_heap_exactly) { - if (!(number_not_in_lol == 0)) { - OS::Print(" *** ERROR: %d heap objects NOT found in lol database.\n", - number_not_in_lol); - } - } - - ASSERT(number_of_matches == total_count); - ASSERT(number_not_in_heap == 0); - ASSERT(number_not_in_lol == (number_of_heap_objects - total_count)); - if (match_heap_exactly) { - ASSERT(total_count == number_of_heap_objects); - ASSERT(number_not_in_lol == 0); - } - - OS::Print(" Verify the lol database is sorted ...\n"); - lol = last(); - while (lol != NULL) { - Element* elements = lol->elements_; - for (int i = 0; i < lol->obj_count_ - 1; i++) { - if (elements[i].obj_ >= elements[i+1].obj_) { - OS::Print(" *** ERROR: lol %p obj[%d] %p > obj[%d] %p\n", - lol, i, elements[i].obj_, i+1, elements[i+1].obj_); - } - } - lol = lol->prev_; - } - - OS::Print(" DONE verifying.\n\n\n"); -} - - -void LiveObjectList::VerifyNotInFromSpace() { - OS::Print("VerifyNotInFromSpace() ...\n"); - LolIterator it(NULL, last()); - Heap* heap = ISOLATE->heap(); - int i = 0; - for (it.Init(); !it.Done(); it.Next()) { - HeapObject* heap_obj = it.Obj(); - if (heap->InFromSpace(heap_obj)) { - OS::Print(" ERROR: VerifyNotInFromSpace: [%d] obj %p in From space %p\n", - i++, heap_obj, Heap::new_space()->FromSpaceStart()); - } - } -} -#endif // VERIFY_LOL - - -} } // namespace v8::internal - -#endif // LIVE_OBJECT_LIST diff --git a/deps/v8/src/liveobjectlist.h b/deps/v8/src/liveobjectlist.h deleted file mode 100644 index 1aa9196051..0000000000 --- a/deps/v8/src/liveobjectlist.h +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright 2011 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -#ifndef V8_LIVEOBJECTLIST_H_ -#define V8_LIVEOBJECTLIST_H_ - -#include "v8.h" - -#include "checks.h" -#include "heap.h" -#include "objects.h" -#include "globals.h" - -namespace v8 { -namespace internal { - -#ifdef LIVE_OBJECT_LIST - -#ifdef DEBUG -// The following symbol when defined enables thorough verification of lol data. -// FLAG_verify_lol will also need to set to true to enable the verification. -#define VERIFY_LOL -#endif - - -typedef int LiveObjectType; -class LolFilter; -class LiveObjectSummary; -class DumpWriter; -class SummaryWriter; - - -// The LiveObjectList is both a mechanism for tracking a live capture of -// objects in the JS heap, as well as is the data structure which represents -// each of those captures. Unlike a snapshot, the lol is live. For example, -// if an object in a captured lol dies and is collected by the GC, the lol -// will reflect that the object is no longer available. The term -// LiveObjectList (and lol) is used to describe both the mechanism and the -// data structure depending on context of use. -// -// In captured lols, objects are tracked using their address and an object id. -// The object id is unique. Once assigned to an object, the object id can never -// be assigned to another object. That is unless all captured lols are deleted -// which allows the user to start over with a fresh set of lols and object ids. -// The uniqueness of the object ids allows the user to track specific objects -// and inspect its longevity while debugging JS code in execution. -// -// The lol comes with utility functions to capture, dump, summarize, and diff -// captured lols amongst other functionality. These functionality are -// accessible via the v8 debugger interface. -class LiveObjectList { - public: - inline static void GCEpilogue(); - inline static void GCPrologue(); - inline static void IterateElements(ObjectVisitor* v); - inline static void ProcessNonLive(HeapObject* obj); - inline static void UpdateReferencesForScavengeGC(); - - // Note: LOLs can be listed by calling Dump(0, <lol id>), and 2 LOLs can be - // compared/diff'ed using Dump(<lol id1>, <lol id2>, ...). This will yield - // a verbose dump of all the objects in the resultant lists. - // Similarly, a summarized result of a LOL listing or a diff can be - // attained using the Summarize(0, <lol id>) and Summarize(<lol id1, - // <lol id2>, ...) respectively. 
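As a concrete illustration of the workflow sketched in the comment above (the facility this change deletes), the debugger-facing calls composed roughly as follows; the lol ids, limits and the empty filter handle are placeholders rather than values taken from this change:

    LiveObjectList::Capture();                         // say this becomes lol 1
    // ... execute some JavaScript ...
    LiveObjectList::Capture();                         // say this becomes lol 2

    // Verbose listing of lol 2: Dump(0, <lol id>, start_idx, dump_limit, filter).
    LiveObjectList::Dump(0, 2, 0, 100, Handle<JSObject>());

    // Summarized diff of lol 1 vs lol 2.
    LiveObjectList::Summarize(1, 2, Handle<JSObject>());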
- - static MaybeObject* Capture(); - static bool Delete(int id); - static MaybeObject* Dump(int id1, - int id2, - int start_idx, - int dump_limit, - Handle<JSObject> filter_obj); - static MaybeObject* Info(int start_idx, int dump_limit); - static MaybeObject* Summarize(int id1, int id2, Handle<JSObject> filter_obj); - - static void Reset(); - static Object* GetObj(int obj_id); - static int GetObjId(Object* obj); - static Object* GetObjId(Handle<String> address); - static MaybeObject* GetObjRetainers(int obj_id, - Handle<JSObject> instance_filter, - bool verbose, - int start, - int count, - Handle<JSObject> filter_obj); - - static Object* GetPath(int obj_id1, - int obj_id2, - Handle<JSObject> instance_filter); - static Object* PrintObj(int obj_id); - - private: - struct Element { - int id_; - HeapObject* obj_; - }; - - explicit LiveObjectList(LiveObjectList* prev, int capacity); - ~LiveObjectList(); - - static void GCEpiloguePrivate(); - static void IterateElementsPrivate(ObjectVisitor* v); - - static void DoProcessNonLive(HeapObject* obj); - - static int CompareElement(const Element* a, const Element* b); - - static Object* GetPathPrivate(HeapObject* obj1, HeapObject* obj2); - - static int GetRetainers(Handle<HeapObject> target, - Handle<JSObject> instance_filter, - Handle<FixedArray> retainers_arr, - int start, - int dump_limit, - int* total_count, - LolFilter* filter, - LiveObjectSummary* summary, - JSFunction* arguments_function, - Handle<Object> error); - - static MaybeObject* DumpPrivate(DumpWriter* writer, - int start, - int dump_limit, - LolFilter* filter); - static MaybeObject* SummarizePrivate(SummaryWriter* writer, - LolFilter* filter, - bool is_tracking_roots); - - static bool NeedLOLProcessing() { return (last() != NULL); } - static void NullifyNonLivePointer(HeapObject** p) { - // Mask out the low bit that marks this as a heap object. We'll use this - // cleared bit as an indicator that this pointer needs to be collected. - // - // Meanwhile, we still preserve its approximate value so that we don't - // have to resort the elements list all the time. - // - // Note: Doing so also makes this HeapObject* look like an SMI. Hence, - // GC pointer updater will ignore it when it gets scanned. - *p = reinterpret_cast<HeapObject*>((*p)->address()); - } - - LiveObjectList* prev() { return prev_; } - LiveObjectList* next() { return next_; } - int id() { return id_; } - - static int list_count() { return list_count_; } - static LiveObjectList* last() { return last_; } - - inline static LiveObjectList* FindLolForId(int id, LiveObjectList* start_lol); - int TotalObjCount() { return GetTotalObjCountAndSize(NULL); } - int GetTotalObjCountAndSize(int* size_p); - - bool Add(HeapObject* obj); - Element* Find(HeapObject* obj); - static void NullifyMostRecent(HeapObject* obj); - void Sort(); - static void SortAll(); - - static void PurgeDuplicates(); // Only to be called by GCEpilogue. - -#ifdef VERIFY_LOL - static void Verify(bool match_heap_exactly = false); - static void VerifyNotInFromSpace(); -#endif - - // Iterates the elements in every lol and returns the one that matches the - // specified key. If no matching element is found, then it returns NULL. - template <typename T> - inline static LiveObjectList::Element* - FindElementFor(T (*GetValue)(LiveObjectList::Element*), T key); - - inline static int GetElementId(Element* element); - inline static HeapObject* GetElementObj(Element* element); - - // Instance fields. 
- LiveObjectList* prev_; - LiveObjectList* next_; - int id_; - int capacity_; - int obj_count_; - Element* elements_; - - // Statics for managing all the lists. - static uint32_t next_element_id_; - static int list_count_; - static int last_id_; - static LiveObjectList* first_; - static LiveObjectList* last_; - - friend class LolIterator; - friend class LolForwardIterator; - friend class LolDumpWriter; - friend class RetainersDumpWriter; - friend class RetainersSummaryWriter; - friend class UpdateLiveObjectListVisitor; -}; - - -// Helper class for updating the LiveObjectList HeapObject pointers. -class UpdateLiveObjectListVisitor: public ObjectVisitor { - public: - void VisitPointer(Object** p) { UpdatePointer(p); } - - void VisitPointers(Object** start, Object** end) { - // Copy all HeapObject pointers in [start, end). - for (Object** p = start; p < end; p++) UpdatePointer(p); - } - - private: - // Based on Heap::ScavengeObject() but only does forwarding of pointers - // to live new space objects, and not actually keep them alive. - void UpdatePointer(Object** p) { - Object* object = *p; - if (!HEAP->InNewSpace(object)) return; - - HeapObject* heap_obj = HeapObject::cast(object); - ASSERT(HEAP->InFromSpace(heap_obj)); - - // We use the first word (where the map pointer usually is) of a heap - // object to record the forwarding pointer. A forwarding pointer can - // point to an old space, the code space, or the to space of the new - // generation. - MapWord first_word = heap_obj->map_word(); - - // If the first word is a forwarding address, the object has already been - // copied. - if (first_word.IsForwardingAddress()) { - *p = first_word.ToForwardingAddress(); - return; - - // Else, it's a dead object. - } else { - LiveObjectList::NullifyNonLivePointer(reinterpret_cast<HeapObject**>(p)); - } - } -}; - - -#else // !LIVE_OBJECT_LIST - - -class LiveObjectList { - public: - inline static void GCEpilogue() {} - inline static void GCPrologue() {} - inline static void IterateElements(ObjectVisitor* v) {} - inline static void ProcessNonLive(HeapObject* obj) {} - inline static void UpdateReferencesForScavengeGC() {} - - inline static MaybeObject* Capture() { return HEAP->undefined_value(); } - inline static bool Delete(int id) { return false; } - inline static MaybeObject* Dump(int id1, - int id2, - int start_idx, - int dump_limit, - Handle<JSObject> filter_obj) { - return HEAP->undefined_value(); - } - inline static MaybeObject* Info(int start_idx, int dump_limit) { - return HEAP->undefined_value(); - } - inline static MaybeObject* Summarize(int id1, - int id2, - Handle<JSObject> filter_obj) { - return HEAP->undefined_value(); - } - - inline static void Reset() {} - inline static Object* GetObj(int obj_id) { return HEAP->undefined_value(); } - inline static Object* GetObjId(Handle<String> address) { - return HEAP->undefined_value(); - } - inline static MaybeObject* GetObjRetainers(int obj_id, - Handle<JSObject> instance_filter, - bool verbose, - int start, - int count, - Handle<JSObject> filter_obj) { - return HEAP->undefined_value(); - } - - inline static Object* GetPath(int obj_id1, - int obj_id2, - Handle<JSObject> instance_filter) { - return HEAP->undefined_value(); - } - inline static Object* PrintObj(int obj_id) { return HEAP->undefined_value(); } -}; - - -#endif // LIVE_OBJECT_LIST - -} } // namespace v8::internal - -#endif // V8_LIVEOBJECTLIST_H_ diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc index 7bd7baa2d8..45ac403a6e 100644 --- a/deps/v8/src/log-utils.cc +++ 
b/deps/v8/src/log-utils.cc @@ -67,6 +67,7 @@ void Log::Initialize() { FLAG_log_suspect = true; FLAG_log_handles = true; FLAG_log_regexp = true; + FLAG_log_internal_timer_events = true; } // --prof implies --log-code. @@ -80,7 +81,8 @@ void Log::Initialize() { bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect - || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof; + || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof + || FLAG_log_internal_timer_events; // If we're logging anything, we need to open the log file. if (open_log_file) { @@ -105,6 +107,9 @@ void Log::Initialize() { // one character so we can escape the loop properly. p--; break; + case 'p': + stream.Add("%d", OS::GetCurrentProcessId()); + break; case 't': { // %t expands to the current time in milliseconds. double time = OS::TimeCurrentMillis(); @@ -257,10 +262,10 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) { if (len > 0x1000) len = 0x1000; if (show_impl_info) { - Append(str->IsAsciiRepresentation() ? 'a' : '2'); + Append(str->IsOneByteRepresentation() ? 'a' : '2'); if (StringShape(str).IsExternal()) Append('e'); - if (StringShape(str).IsSymbol()) + if (StringShape(str).IsInternalized()) Append('#'); Append(":%i:", str->length()); } diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc index b049ffe4eb..e9ef382152 100644 --- a/deps/v8/src/log.cc +++ b/deps/v8/src/log.cc @@ -44,37 +44,6 @@ namespace v8 { namespace internal { -// -// Sliding state window. Updates counters to keep track of the last -// window of kBufferSize states. This is useful to track where we -// spent our time. -// -class SlidingStateWindow { - public: - explicit SlidingStateWindow(Isolate* isolate); - ~SlidingStateWindow(); - void AddState(StateTag state); - - private: - static const int kBufferSize = 256; - Counters* counters_; - int current_index_; - bool is_full_; - byte buffer_[kBufferSize]; - - - void IncrementStateCounter(StateTag state) { - counters_->state_counters(state)->Increment(); - } - - - void DecrementStateCounter(StateTag state) { - counters_->state_counters(state)->Decrement(); - } -}; - - -// // The Profiler samples pc and sp values for the main thread. // Each sample is appended to a circular buffer. // An independent thread removes data and writes it to the log. @@ -189,24 +158,12 @@ class Ticker: public Sampler { public: Ticker(Isolate* isolate, int interval): Sampler(isolate, interval), - window_(NULL), profiler_(NULL) {} ~Ticker() { if (IsActive()) Stop(); } virtual void Tick(TickSample* sample) { if (profiler_) profiler_->Insert(sample); - if (window_) window_->AddState(sample->state); - } - - void SetWindow(SlidingStateWindow* window) { - window_ = window; - if (!IsActive()) Start(); - } - - void ClearWindow() { - window_ = NULL; - if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop(); } void SetProfiler(Profiler* profiler) { @@ -219,7 +176,7 @@ class Ticker: public Sampler { void ClearProfiler() { DecreaseProfilingDepth(); profiler_ = NULL; - if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop(); + if (IsActive()) Stop(); } protected: @@ -228,42 +185,11 @@ class Ticker: public Sampler { } private: - SlidingStateWindow* window_; Profiler* profiler_; }; // -// SlidingStateWindow implementation. 
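For context on the log-utils.cc hunk above: a '%p' in the log file name now expands to the current process id, complementing the existing '%t' expansion to the current time in milliseconds. A hypothetical invocation (the --prof and --logfile flag spellings are assumed here, and the expanded values are examples only):

    d8 --prof --logfile='v8-%p-%t.log'    # could produce e.g. v8-12345-1352822624573.log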
-// -SlidingStateWindow::SlidingStateWindow(Isolate* isolate) - : counters_(isolate->counters()), current_index_(0), is_full_(false) { - for (int i = 0; i < kBufferSize; i++) { - buffer_[i] = static_cast<byte>(OTHER); - } - isolate->logger()->ticker_->SetWindow(this); -} - - -SlidingStateWindow::~SlidingStateWindow() { - LOGGER->ticker_->ClearWindow(); -} - - -void SlidingStateWindow::AddState(StateTag state) { - if (is_full_) { - DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_])); - } else if (current_index_ == kBufferSize - 1) { - is_full_ = true; - } - buffer_[current_index_] = static_cast<byte>(state); - IncrementStateCounter(state); - ASSERT(IsPowerOf2(kBufferSize)); - current_index_ = (current_index_ + 1) & (kBufferSize - 1); -} - - -// // Profiler implementation. // Profiler::Profiler(Isolate* isolate) @@ -283,11 +209,7 @@ void Profiler::Engage() { if (engaged_) return; engaged_ = true; - // TODO(mnaganov): This is actually "Chromium" mode. Flags need to be revised. - // http://code.google.com/p/v8/issues/detail?id=487 - if (!FLAG_prof_lazy) { - OS::LogSharedLibraryAddresses(); - } + OS::LogSharedLibraryAddresses(); // Start thread processing the profiler buffer. running_ = true; @@ -458,7 +380,10 @@ class Logger::NameBuffer { if (str == NULL) return; if (str->HasOnlyAsciiChars()) { int utf8_length = Min(str->length(), kUtf8BufferSize - utf8_pos_); - String::WriteToFlat(str, utf8_buffer_ + utf8_pos_, 0, utf8_length); + String::WriteToFlat(str, + reinterpret_cast<uint8_t*>(utf8_buffer_ + utf8_pos_), + 0, + utf8_length); utf8_pos_ += utf8_length; return; } @@ -467,7 +392,7 @@ class Logger::NameBuffer { int previous = unibrow::Utf16::kNoPreviousCharacter; for (int i = 0; i < uc16_length && utf8_pos_ < kUtf8BufferSize; ++i) { uc16 c = utf16_buffer[i]; - if (c <= String::kMaxAsciiCharCodeU) { + if (c <= unibrow::Utf8::kMaxOneByteChar) { utf8_buffer_[utf8_pos_++] = static_cast<char>(c); } else { int char_length = unibrow::Utf8::Length(c, previous); @@ -502,6 +427,14 @@ class Logger::NameBuffer { } } + void AppendHex(uint32_t n) { + Vector<char> buffer(utf8_buffer_ + utf8_pos_, kUtf8BufferSize - utf8_pos_); + int size = OS::SNPrintF(buffer, "%x", n); + if (size > 0 && utf8_pos_ + size <= kUtf8BufferSize) { + utf8_pos_ += size; + } + } + const char* get() { return utf8_buffer_; } int size() const { return utf8_pos_; } @@ -515,10 +448,10 @@ class Logger::NameBuffer { }; -Logger::Logger() - : ticker_(NULL), +Logger::Logger(Isolate* isolate) + : isolate_(isolate), + ticker_(NULL), profiler_(NULL), - sliding_state_window_(NULL), log_events_(NULL), logging_nesting_(0), cpu_profiler_nesting_(0), @@ -531,7 +464,8 @@ Logger::Logger() prev_sp_(NULL), prev_function_(NULL), prev_to_(NULL), - prev_code_(NULL) { + prev_code_(NULL), + epoch_(0) { } @@ -543,12 +477,17 @@ Logger::~Logger() { void Logger::IssueCodeAddedEvent(Code* code, + Script* script, const char* name, size_t name_len) { JitCodeEvent event; + memset(&event, 0, sizeof(event)); event.type = JitCodeEvent::CODE_ADDED; event.code_start = code->instruction_start(); event.code_len = code->instruction_size(); + Handle<Script> script_handle = + script != NULL ? 
Handle<Script>(script) : Handle<Script>(); + event.script = v8::Handle<v8::Script>(ToApi<v8::Script>(script_handle)); event.name.str = name; event.name.len = name_len; @@ -587,6 +526,40 @@ void Logger::IssueCodeRemovedEvent(Address from) { code_event_handler_(&event); } +void Logger::IssueAddCodeLinePosInfoEvent( + void* jit_handler_data, + int pc_offset, + int position, + JitCodeEvent::PositionType position_type) { + JitCodeEvent event; + memset(&event, 0, sizeof(event)); + event.type = JitCodeEvent::CODE_ADD_LINE_POS_INFO; + event.user_data = jit_handler_data; + event.line_info.offset = pc_offset; + event.line_info.pos = position; + event.line_info.position_type = position_type; + + code_event_handler_(&event); +} + +void* Logger::IssueStartCodePosInfoEvent() { + JitCodeEvent event; + memset(&event, 0, sizeof(event)); + event.type = JitCodeEvent::CODE_START_LINE_INFO_RECORDING; + + code_event_handler_(&event); + return event.user_data; +} + +void Logger::IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data) { + JitCodeEvent event; + memset(&event, 0, sizeof(event)); + event.type = JitCodeEvent::CODE_END_LINE_INFO_RECORDING; + event.code_start = code->instruction_start(); + event.user_data = jit_handler_data; + + code_event_handler_(&event); +} #define DECLARE_EVENT(ignore1, name) name, static const char* const kLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = { @@ -670,6 +643,8 @@ void Logger::ApiNamedSecurityCheck(Object* key) { SmartArrayPointer<char> str = String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); ApiEvent("api,check-security,\"%s\"\n", *str); + } else if (key->IsSymbol()) { + ApiEvent("api,check-security,symbol(hash %x)\n", Symbol::cast(key)->Hash()); } else if (key->IsUndefined()) { ApiEvent("api,check-security,undefined\n"); } else { @@ -704,6 +679,43 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path, } +void Logger::TimerEvent(StartEnd se, const char* name) { + if (!log_->IsEnabled()) return; + ASSERT(FLAG_log_internal_timer_events); + LogMessageBuilder msg(this); + int since_epoch = static_cast<int>(OS::Ticks() - epoch_); + const char* format = (se == START) ? 
"timer-event-start,\"%s\",%ld\n" + : "timer-event-end,\"%s\",%ld\n"; + msg.Append(format, name, since_epoch); + msg.WriteToLogFile(); +} + + +void Logger::EnterExternal() { + LOG(ISOLATE, TimerEvent(START, TimerEventScope::v8_external)); +} + + +void Logger::LeaveExternal() { + LOG(ISOLATE, TimerEvent(END, TimerEventScope::v8_external)); +} + + +void Logger::TimerEventScope::LogTimerEvent(StartEnd se) { + LOG(isolate_, TimerEvent(se, name_)); +} + + +const char* Logger::TimerEventScope::v8_recompile_synchronous = + "V8.RecompileSynchronous"; +const char* Logger::TimerEventScope::v8_recompile_parallel = + "V8.RecompileParallel"; +const char* Logger::TimerEventScope::v8_compile_full_code = + "V8.CompileFullCode"; +const char* Logger::TimerEventScope::v8_execute = "V8.Execute"; +const char* Logger::TimerEventScope::v8_external = "V8.External"; + + void Logger::LogRegExpSource(Handle<JSRegExp> regexp) { // Prints "/" + re.source + "/" + // (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"") @@ -756,9 +768,11 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) { } -void Logger::LogRuntime(Vector<const char> format, JSArray* args) { +void Logger::LogRuntime(Isolate* isolate, + Vector<const char> format, + JSArray* args) { if (!log_->IsEnabled() || !FLAG_log_runtime) return; - HandleScope scope; + HandleScope scope(isolate); LogMessageBuilder msg(this); for (int i = 0; i < format.length(); i++) { char c = format[i]; @@ -809,14 +823,19 @@ void Logger::ApiIndexedSecurityCheck(uint32_t index) { void Logger::ApiNamedPropertyAccess(const char* tag, JSObject* holder, Object* name) { - ASSERT(name->IsString()); + ASSERT(name->IsName()); if (!log_->IsEnabled() || !FLAG_log_api) return; String* class_name_obj = holder->class_name(); SmartArrayPointer<char> class_name = class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - SmartArrayPointer<char> property_name = - String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name); + if (name->IsString()) { + SmartArrayPointer<char> property_name = + String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, *class_name, *property_name); + } else { + uint32_t hash = Symbol::cast(name)->Hash(); + ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, *class_name, hash); + } } void Logger::ApiIndexedPropertyAccess(const char* tag, @@ -870,41 +889,41 @@ void Logger::DeleteEventStatic(const char* name, void* object) { LOGGER->DeleteEvent(name, object); } -void Logger::CallbackEventInternal(const char* prefix, const char* name, +void Logger::CallbackEventInternal(const char* prefix, Name* name, Address entry_point) { if (!log_->IsEnabled() || !FLAG_log_code) return; LogMessageBuilder msg(this); - msg.Append("%s,%s,", + msg.Append("%s,%s,-3,", kLogEventsNames[CODE_CREATION_EVENT], kLogEventsNames[CALLBACK_TAG]); msg.AppendAddress(entry_point); - msg.Append(",1,\"%s%s\"", prefix, name); + if (name->IsString()) { + SmartArrayPointer<char> str = + String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + msg.Append(",1,\"%s%s\"", prefix, *str); + } else { + msg.Append(",1,symbol(hash %x)", prefix, Name::cast(name)->Hash()); + } msg.Append('\n'); msg.WriteToLogFile(); } -void Logger::CallbackEvent(String* name, Address entry_point) { +void Logger::CallbackEvent(Name* name, Address entry_point) { if (!log_->IsEnabled() || !FLAG_log_code) return; - SmartArrayPointer<char> str 
= - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - CallbackEventInternal("", *str, entry_point); + CallbackEventInternal("", name, entry_point); } -void Logger::GetterCallbackEvent(String* name, Address entry_point) { +void Logger::GetterCallbackEvent(Name* name, Address entry_point) { if (!log_->IsEnabled() || !FLAG_log_code) return; - SmartArrayPointer<char> str = - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - CallbackEventInternal("get ", *str, entry_point); + CallbackEventInternal("get ", name, entry_point); } -void Logger::SetterCallbackEvent(String* name, Address entry_point) { +void Logger::SetterCallbackEvent(Name* name, Address entry_point) { if (!log_->IsEnabled() || !FLAG_log_code) return; - SmartArrayPointer<char> str = - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - CallbackEventInternal("set ", *str, entry_point); + CallbackEventInternal("set ", name, entry_point); } @@ -919,7 +938,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, name_buffer_->AppendBytes(comment); } if (code_event_handler_ != NULL) { - IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size()); + IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size()); } if (!log_->IsEnabled()) return; if (FLAG_ll_prof) { @@ -930,9 +949,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, } if (!FLAG_log_code) return; LogMessageBuilder msg(this); - msg.Append("%s,%s,", + msg.Append("%s,%s,%d,", kLogEventsNames[CODE_CREATION_EVENT], - kLogEventsNames[tag]); + kLogEventsNames[tag], + code->kind()); msg.AppendAddress(code->address()); msg.Append(",%d,\"", code->ExecutableSize()); for (const char* p = comment; *p != '\0'; p++) { @@ -949,16 +969,22 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, - String* name) { + Name* name) { if (!is_logging_code_events()) return; if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) { name_buffer_->Reset(); name_buffer_->AppendBytes(kLogEventsNames[tag]); name_buffer_->AppendByte(':'); - name_buffer_->AppendString(name); + if (name->IsString()) { + name_buffer_->AppendString(String::cast(name)); + } else { + name_buffer_->AppendBytes("symbol(hash "); + name_buffer_->AppendHex(Name::cast(name)->Hash()); + name_buffer_->AppendByte(')'); + } } if (code_event_handler_ != NULL) { - IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size()); + IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size()); } if (!log_->IsEnabled()) return; if (FLAG_ll_prof) { @@ -969,13 +995,19 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, } if (!FLAG_log_code) return; LogMessageBuilder msg(this); - msg.Append("%s,%s,", + msg.Append("%s,%s,%d,", kLogEventsNames[CODE_CREATION_EVENT], - kLogEventsNames[tag]); + kLogEventsNames[tag], + code->kind()); msg.AppendAddress(code->address()); - msg.Append(",%d,\"", code->ExecutableSize()); - msg.AppendDetailed(name, false); - msg.Append('"'); + msg.Append(",%d,", code->ExecutableSize()); + if (name->IsString()) { + msg.Append('"'); + msg.AppendDetailed(String::cast(name), false); + msg.Append('"'); + } else { + msg.Append("symbol(hash %x)", Name::cast(name)->Hash()); + } msg.Append('\n'); msg.WriteToLogFile(); } @@ -994,17 +1026,28 @@ static const char* ComputeMarker(Code* code) { void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - String* name) { + Name* name) { if (!is_logging_code_events()) return; if (FLAG_ll_prof || 
Serializer::enabled() || code_event_handler_ != NULL) { name_buffer_->Reset(); name_buffer_->AppendBytes(kLogEventsNames[tag]); name_buffer_->AppendByte(':'); name_buffer_->AppendBytes(ComputeMarker(code)); - name_buffer_->AppendString(name); + if (name->IsString()) { + name_buffer_->AppendString(String::cast(name)); + } else { + name_buffer_->AppendBytes("symbol(hash "); + name_buffer_->AppendHex(Name::cast(name)->Hash()); + name_buffer_->AppendByte(')'); + } } if (code_event_handler_ != NULL) { - IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size()); + Script* script = + shared->script()->IsScript() ? Script::cast(shared->script()) : NULL; + IssueCodeAddedEvent(code, + script, + name_buffer_->get(), + name_buffer_->size()); } if (!log_->IsEnabled()) return; if (FLAG_ll_prof) { @@ -1019,13 +1062,20 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, return; LogMessageBuilder msg(this); - SmartArrayPointer<char> str = - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - msg.Append("%s,%s,", + msg.Append("%s,%s,%d,", kLogEventsNames[CODE_CREATION_EVENT], - kLogEventsNames[tag]); + kLogEventsNames[tag], + code->kind()); msg.AppendAddress(code->address()); - msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str); + msg.Append(",%d,", code->ExecutableSize()); + if (name->IsString()) { + SmartArrayPointer<char> str = + String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + msg.Append("\"%s\"", *str); + } else { + msg.Append("symbol(hash %x)", Name::cast(name)->Hash()); + } + msg.Append(','); msg.AppendAddress(shared->address()); msg.Append(",%s", ComputeMarker(code)); msg.Append('\n'); @@ -1039,7 +1089,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - String* source, int line) { + Name* source, int line) { if (!is_logging_code_events()) return; if (FLAG_ll_prof || Serializer::enabled() || code_event_handler_ != NULL) { name_buffer_->Reset(); @@ -1048,12 +1098,23 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, name_buffer_->AppendBytes(ComputeMarker(code)); name_buffer_->AppendString(shared->DebugName()); name_buffer_->AppendByte(' '); - name_buffer_->AppendString(source); + if (source->IsString()) { + name_buffer_->AppendString(String::cast(source)); + } else { + name_buffer_->AppendBytes("symbol(hash "); + name_buffer_->AppendHex(Name::cast(source)->Hash()); + name_buffer_->AppendByte(')'); + } name_buffer_->AppendByte(':'); name_buffer_->AppendInt(line); } if (code_event_handler_ != NULL) { - IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size()); + Script* script = + shared->script()->IsScript() ? 
Script::cast(shared->script()) : NULL; + IssueCodeAddedEvent(code, + script, + name_buffer_->get(), + name_buffer_->size()); } if (!log_->IsEnabled()) return; if (FLAG_ll_prof) { @@ -1066,17 +1127,20 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, LogMessageBuilder msg(this); SmartArrayPointer<char> name = shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - SmartArrayPointer<char> sourcestr = - source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); - msg.Append("%s,%s,", + msg.Append("%s,%s,%d,", kLogEventsNames[CODE_CREATION_EVENT], - kLogEventsNames[tag]); + kLogEventsNames[tag], + code->kind()); msg.AppendAddress(code->address()); - msg.Append(",%d,\"%s %s:%d\",", - code->ExecutableSize(), - *name, - *sourcestr, - line); + msg.Append(",%d,\"%s ", code->ExecutableSize(), *name); + if (source->IsString()) { + SmartArrayPointer<char> sourcestr = + String::cast(source)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); + msg.Append("%s", *sourcestr); + } else { + msg.Append("symbol(hash %x)", Name::cast(source)->Hash()); + } + msg.Append(":%d\",", line); msg.AppendAddress(shared->address()); msg.Append(",%s", ComputeMarker(code)); msg.Append('\n'); @@ -1093,7 +1157,7 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) { name_buffer_->AppendInt(args_count); } if (code_event_handler_ != NULL) { - IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size()); + IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size()); } if (!log_->IsEnabled()) return; if (FLAG_ll_prof) { @@ -1104,9 +1168,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) { } if (!FLAG_log_code) return; LogMessageBuilder msg(this); - msg.Append("%s,%s,", + msg.Append("%s,%s,%d,", kLogEventsNames[CODE_CREATION_EVENT], - kLogEventsNames[tag]); + kLogEventsNames[tag], + code->kind()); msg.AppendAddress(code->address()); msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count); msg.Append('\n'); @@ -1130,7 +1195,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) { name_buffer_->AppendString(source); } if (code_event_handler_ != NULL) { - IssueCodeAddedEvent(code, name_buffer_->get(), name_buffer_->size()); + IssueCodeAddedEvent(code, NULL, name_buffer_->get(), name_buffer_->size()); } if (!log_->IsEnabled()) return; if (FLAG_ll_prof) { @@ -1141,7 +1206,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) { } if (!FLAG_log_code) return; LogMessageBuilder msg(this); - msg.Append("%s,%s,", + msg.Append("%s,%s,-2,", kLogEventsNames[CODE_CREATION_EVENT], kLogEventsNames[REG_EXP_TAG]); msg.AppendAddress(code->address()); @@ -1174,6 +1239,40 @@ void Logger::CodeDeleteEvent(Address from) { DeleteEventInternal(CODE_DELETE_EVENT, from); } +void Logger::CodeLinePosInfoAddPositionEvent(void* jit_handler_data, + int pc_offset, + int position) { + if (code_event_handler_ != NULL) { + IssueAddCodeLinePosInfoEvent(jit_handler_data, + pc_offset, + position, + JitCodeEvent::POSITION); + } +} + +void Logger::CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data, + int pc_offset, + int position) { + if (code_event_handler_ != NULL) { + IssueAddCodeLinePosInfoEvent(jit_handler_data, + pc_offset, + position, + JitCodeEvent::STATEMENT_POSITION); + } +} + +void Logger::CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder) { + if (code_event_handler_ != NULL) { + pos_recorder->AttachJITHandlerData(IssueStartCodePosInfoEvent()); + } +} + +void 
Logger::CodeEndLinePosInfoRecordEvent(Code* code, + void* jit_handler_data) { + if (code_event_handler_ != NULL) { + IssueEndCodePosInfoEvent(code, jit_handler_data); + } +} void Logger::SnapshotPositionEvent(Address addr, int pos) { if (!log_->IsEnabled()) return; @@ -1245,18 +1344,22 @@ void Logger::ResourceEvent(const char* name, const char* tag) { } -void Logger::SuspectReadEvent(String* name, Object* obj) { +void Logger::SuspectReadEvent(Name* name, Object* obj) { if (!log_->IsEnabled() || !FLAG_log_suspect) return; LogMessageBuilder msg(this); String* class_name = obj->IsJSObject() ? JSObject::cast(obj)->class_name() - : HEAP->empty_string(); + : isolate_->heap()->empty_string(); msg.Append("suspect-read,"); msg.Append(class_name); msg.Append(','); - msg.Append('"'); - msg.Append(name); - msg.Append('"'); + if (name->IsString()) { + msg.Append('"'); + msg.Append(String::cast(name)); + msg.Append('"'); + } else { + msg.Append("symbol(hash %x)", Name::cast(name)->Hash()); + } msg.Append('\n'); msg.WriteToLogFile(); } @@ -1321,6 +1424,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) { msg.AppendAddress(sample->pc); msg.Append(','); msg.AppendAddress(sample->sp); + msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_)); if (sample->has_external_callback) { msg.Append(",1,"); msg.AppendAddress(sample->external_callback); @@ -1353,9 +1457,7 @@ void Logger::PauseProfiler() { if (--cpu_profiler_nesting_ == 0) { profiler_->pause(); if (FLAG_prof_lazy) { - if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) { - ticker_->Stop(); - } + ticker_->Stop(); FLAG_log_code = false; LOG(ISOLATE, UncheckedStringEvent("profiler", "pause")); } @@ -1376,9 +1478,7 @@ void Logger::ResumeProfiler() { FLAG_log_code = true; LogCompiledFunctions(); LogAccessorCallbacks(); - if (!FLAG_sliding_state_window && !ticker_->IsActive()) { - ticker_->Start(); - } + if (!ticker_->IsActive()) ticker_->Start(); } profiler_->resume(); } @@ -1430,9 +1530,10 @@ class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor { }; -static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis, +static int EnumerateCompiledFunctions(Heap* heap, + Handle<SharedFunctionInfo>* sfis, Handle<Code>* code_objects) { - HeapIterator iterator; + HeapIterator iterator(heap); AssertNoAllocation no_alloc; int compiled_funcs_count = 0; @@ -1458,7 +1559,7 @@ static int EnumerateCompiledFunctions(Handle<SharedFunctionInfo>* sfis, EnumerateOptimizedFunctionsVisitor visitor(sfis, code_objects, &compiled_funcs_count); - Deoptimizer::VisitAllOptimizedFunctions(&visitor); + Deoptimizer::VisitAllOptimizedFunctions(heap->isolate(), &visitor); return compiled_funcs_count; } @@ -1477,6 +1578,7 @@ void Logger::LogCodeObject(Object* object) { case Code::BINARY_OP_IC: // fall through case Code::COMPARE_IC: // fall through case Code::TO_BOOLEAN_IC: // fall through + case Code::COMPILED_STUB: // fall through case Code::STUB: description = CodeStub::MajorName(CodeStub::GetMajorKey(code_object), true); @@ -1513,7 +1615,7 @@ void Logger::LogCodeObject(Object* object) { tag = Logger::KEYED_CALL_IC_TAG; break; } - PROFILE(ISOLATE, CodeCreateEvent(tag, code_object, description)); + PROFILE(isolate_, CodeCreateEvent(tag, code_object, description)); } } @@ -1597,9 +1699,10 @@ void Logger::LowLevelLogWriteBytes(const char* bytes, int size) { void Logger::LogCodeObjects() { - HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, + Heap* heap = isolate_->heap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, 
"Logger::LogCodeObjects"); - HeapIterator iterator; + HeapIterator iterator(heap); AssertNoAllocation no_alloc; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { if (obj->IsCode()) LogCodeObject(obj); @@ -1616,20 +1719,20 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared, Handle<String> script_name(String::cast(script->name())); int line_num = GetScriptLineNumber(script, shared->start_position()); if (line_num > 0) { - PROFILE(ISOLATE, + PROFILE(isolate_, CodeCreateEvent( Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script), *code, *shared, *script_name, line_num + 1)); } else { // Can't distinguish eval and script here, so always use Script. - PROFILE(ISOLATE, + PROFILE(isolate_, CodeCreateEvent( Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script), *code, *shared, *script_name)); } } else { - PROFILE(ISOLATE, + PROFILE(isolate_, CodeCreateEvent( Logger::ToNativeByScript(Logger::LAZY_COMPILE_TAG, *script), *code, *shared, *func_name)); @@ -1642,10 +1745,10 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared, CallHandlerInfo* call_data = CallHandlerInfo::cast(raw_call_data); Object* callback_obj = call_data->callback(); Address entry_point = v8::ToCData<Address>(callback_obj); - PROFILE(ISOLATE, CallbackEvent(*func_name, entry_point)); + PROFILE(isolate_, CallbackEvent(*func_name, entry_point)); } } else { - PROFILE(ISOLATE, + PROFILE(isolate_, CodeCreateEvent( Logger::LAZY_COMPILE_TAG, *code, *shared, *func_name)); } @@ -1653,13 +1756,14 @@ void Logger::LogExistingFunction(Handle<SharedFunctionInfo> shared, void Logger::LogCompiledFunctions() { - HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, + Heap* heap = isolate_->heap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "Logger::LogCompiledFunctions"); - HandleScope scope; - const int compiled_funcs_count = EnumerateCompiledFunctions(NULL, NULL); + HandleScope scope(isolate_); + const int compiled_funcs_count = EnumerateCompiledFunctions(heap, NULL, NULL); ScopedVector< Handle<SharedFunctionInfo> > sfis(compiled_funcs_count); ScopedVector< Handle<Code> > code_objects(compiled_funcs_count); - EnumerateCompiledFunctions(sfis.start(), code_objects.start()); + EnumerateCompiledFunctions(heap, sfis.start(), code_objects.start()); // During iteration, there can be heap allocation due to // GetScriptLineNumber call. 
@@ -1673,22 +1777,23 @@ void Logger::LogCompiledFunctions() { void Logger::LogAccessorCallbacks() { - HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, + Heap* heap = isolate_->heap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "Logger::LogAccessorCallbacks"); - HeapIterator iterator; + HeapIterator iterator(heap); AssertNoAllocation no_alloc; for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) { - if (!obj->IsAccessorInfo()) continue; - AccessorInfo* ai = AccessorInfo::cast(obj); - if (!ai->name()->IsString()) continue; - String* name = String::cast(ai->name()); + if (!obj->IsExecutableAccessorInfo()) continue; + ExecutableAccessorInfo* ai = ExecutableAccessorInfo::cast(obj); + if (!ai->name()->IsName()) continue; Address getter_entry = v8::ToCData<Address>(ai->getter()); + Name* name = Name::cast(ai->name()); if (getter_entry != 0) { - PROFILE(ISOLATE, GetterCallbackEvent(name, getter_entry)); + PROFILE(isolate_, GetterCallbackEvent(name, getter_entry)); } Address setter_entry = v8::ToCData<Address>(ai->setter()); if (setter_entry != 0) { - PROFILE(ISOLATE, SetterCallbackEvent(name, setter_entry)); + PROFILE(isolate_, SetterCallbackEvent(name, setter_entry)); } } } @@ -1721,13 +1826,10 @@ bool Logger::SetUp() { Isolate* isolate = Isolate::Current(); ticker_ = new Ticker(isolate, kSamplingIntervalMs); - if (FLAG_sliding_state_window && sliding_state_window_ == NULL) { - sliding_state_window_ = new SlidingStateWindow(isolate); - } - bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect - || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof; + || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof + || FLAG_log_internal_timer_events; if (start_logging) { logging_nesting_ = 1; @@ -1745,6 +1847,8 @@ bool Logger::SetUp() { } } + if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks(); + return true; } @@ -1754,7 +1858,7 @@ void Logger::SetCodeEventHandler(uint32_t options, code_event_handler_ = event_handler; if (code_event_handler_ != NULL && (options & kJitCodeEventEnumExisting)) { - HandleScope scope; + HandleScope scope(Isolate::Current()); LogCodeObjects(); LogCompiledFunctions(); } @@ -1788,9 +1892,6 @@ FILE* Logger::TearDown() { profiler_ = NULL; } - delete sliding_state_window_; - sliding_state_window_ = NULL; - delete ticker_; ticker_ = NULL; @@ -1798,22 +1899,6 @@ FILE* Logger::TearDown() { } -void Logger::EnableSlidingStateWindow() { - // If the ticker is NULL, Logger::SetUp has not been called yet. In - // that case, we set the sliding_state_window flag so that the - // sliding window computation will be started when Logger::SetUp is - // called. - if (ticker_ == NULL) { - FLAG_sliding_state_window = true; - return; - } - // Otherwise, if the sliding state window computation has not been - // started we do it now. - if (sliding_state_window_ == NULL) { - sliding_state_window_ = new SlidingStateWindow(Isolate::Current()); - } -} - // Protects the state below. 
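For context on what SetCodeEventHandler and the new line-position events feed, an embedder would typically hook them roughly as sketched below. The exact constant and field names (v8::JitCodeEvent, kJitCodeEventEnumExisting, name.str/name.len) are assumptions drawn from the public v8.h of this era, not something this diff guarantees.

```cpp
#include <cstdio>
#include <v8.h>

// Hypothetical embedder callback; it only reacts to CODE_ADDED events.
static void OnJitCodeEvent(const v8::JitCodeEvent* event) {
  if (event->type != v8::JitCodeEvent::CODE_ADDED) return;
  std::fprintf(stderr, "code added: %.*s at %p (%d bytes)\n",
               static_cast<int>(event->name.len), event->name.str,
               event->code_start, static_cast<int>(event->code_len));
}

// With kJitCodeEventEnumExisting, already-existing code objects are replayed
// first (via LogCodeObjects / LogCompiledFunctions) before new ones arrive.
void InstallJitCodeHandler() {
  v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting, OnJitCodeEvent);
}
```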
static Mutex* active_samplers_mutex = NULL; diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h index 33f359a7f9..5c121bc316 100644 --- a/deps/v8/src/log.h +++ b/deps/v8/src/log.h @@ -74,8 +74,9 @@ namespace internal { class LogMessageBuilder; class Profiler; class Semaphore; -class SlidingStateWindow; class Ticker; +class Isolate; +class PositionsRecorder; #undef LOG #define LOG(isolate, Call) \ @@ -127,16 +128,18 @@ class Ticker; V(EVAL_TAG, "Eval") \ V(FUNCTION_TAG, "Function") \ V(KEYED_LOAD_IC_TAG, "KeyedLoadIC") \ - V(KEYED_LOAD_MEGAMORPHIC_IC_TAG, "KeyedLoadMegamorphicIC") \ + V(KEYED_LOAD_POLYMORPHIC_IC_TAG, "KeyedLoadPolymorphicIC") \ V(KEYED_EXTERNAL_ARRAY_LOAD_IC_TAG, "KeyedExternalArrayLoadIC") \ V(KEYED_STORE_IC_TAG, "KeyedStoreIC") \ - V(KEYED_STORE_MEGAMORPHIC_IC_TAG, "KeyedStoreMegamorphicIC") \ + V(KEYED_STORE_POLYMORPHIC_IC_TAG, "KeyedStorePolymorphicIC") \ V(KEYED_EXTERNAL_ARRAY_STORE_IC_TAG, "KeyedExternalArrayStoreIC") \ V(LAZY_COMPILE_TAG, "LazyCompile") \ V(LOAD_IC_TAG, "LoadIC") \ + V(LOAD_POLYMORPHIC_IC_TAG, "LoadPolymorphicIC") \ V(REG_EXP_TAG, "RegExp") \ V(SCRIPT_TAG, "Script") \ V(STORE_IC_TAG, "StoreIC") \ + V(STORE_POLYMORPHIC_IC_TAG, "StorePolymorphicIC") \ V(STUB_TAG, "Stub") \ V(NATIVE_FUNCTION_TAG, "Function") \ V(NATIVE_LAZY_COMPILE_TAG, "LazyCompile") \ @@ -174,9 +177,6 @@ class Logger { // leaving the file open. FILE* TearDown(); - // Enable the computation of a sliding window of states. - void EnableSlidingStateWindow(); - // Emits an event with a string value -> (name, value). void StringEvent(const char* name, const char* value); @@ -204,7 +204,7 @@ class Logger { // Emits an event that an undefined property was read from an // object. - void SuspectReadEvent(String* name, Object* obj); + void SuspectReadEvent(Name* name, Object* obj); // Emits an event when a message is put on or read from a debugging queue. // DebugTag lets us put a call-site specific label on the event. @@ -225,22 +225,22 @@ class Logger { // ==== Events logged by --log-code. ==== // Emits a code event for a callback function. - void CallbackEvent(String* name, Address entry_point); - void GetterCallbackEvent(String* name, Address entry_point); - void SetterCallbackEvent(String* name, Address entry_point); + void CallbackEvent(Name* name, Address entry_point); + void GetterCallbackEvent(Name* name, Address entry_point); + void SetterCallbackEvent(Name* name, Address entry_point); // Emits a code create event. void CodeCreateEvent(LogEventsAndTags tag, Code* code, const char* source); void CodeCreateEvent(LogEventsAndTags tag, - Code* code, String* name); + Code* code, Name* name); void CodeCreateEvent(LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - String* name); + Name* name); void CodeCreateEvent(LogEventsAndTags tag, Code* code, SharedFunctionInfo* shared, - String* source, int line); + Name* source, int line); void CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count); void CodeMovingGCEvent(); // Emits a code create event for a RegExp. @@ -249,6 +249,19 @@ class Logger { void CodeMoveEvent(Address from, Address to); // Emits a code delete event. void CodeDeleteEvent(Address from); + // Emits a code line info add event with Postion type. + void CodeLinePosInfoAddPositionEvent(void* jit_handler_data, + int pc_offset, + int position); + // Emits a code line info add event with StatementPostion type. 
+ void CodeLinePosInfoAddStatementPositionEvent(void* jit_handler_data, + int pc_offset, + int position); + // Emits a code line info start to record event + void CodeStartLinePosInfoRecordEvent(PositionsRecorder* pos_recorder); + // Emits a code line info finish record event. + // It's the callee's responsibility to dispose the parameter jit_handler_data. + void CodeEndLinePosInfoRecordEvent(Code* code, void* jit_handler_data); void SharedFunctionInfoMoveEvent(Address from, Address to); @@ -275,18 +288,54 @@ class Logger { uintptr_t start, uintptr_t end); + // ==== Events logged by --log-timer-events. ==== + enum StartEnd { START, END }; + + void TimerEvent(StartEnd se, const char* name); + + static void EnterExternal(); + static void LeaveExternal(); + + class TimerEventScope { + public: + TimerEventScope(Isolate* isolate, const char* name) + : isolate_(isolate), name_(name) { + if (FLAG_log_internal_timer_events) LogTimerEvent(START); + } + + ~TimerEventScope() { + if (FLAG_log_internal_timer_events) LogTimerEvent(END); + } + + void LogTimerEvent(StartEnd se); + + static const char* v8_recompile_synchronous; + static const char* v8_recompile_parallel; + static const char* v8_compile_full_code; + static const char* v8_execute; + static const char* v8_external; + + private: + Isolate* isolate_; + const char* name_; + }; + // ==== Events logged by --log-regexp ==== // Regexp compilation and execution events. void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache); // Log an event reported from generated code - void LogRuntime(Vector<const char> format, JSArray* args); + void LogRuntime(Isolate* isolate, Vector<const char> format, JSArray* args); bool is_logging() { return logging_nesting_ > 0; } + bool is_code_event_handler_enabled() { + return code_event_handler_ != NULL; + } + bool is_logging_code_events() { return is_logging() || code_event_handler_ != NULL; } @@ -326,20 +375,28 @@ class Logger { class NameBuffer; class NameMap; - Logger(); + explicit Logger(Isolate* isolate); ~Logger(); // Issue code notifications. - void IssueCodeAddedEvent(Code* code, const char* name, size_t name_len); + void IssueCodeAddedEvent(Code* code, + Script* script, + const char* name, + size_t name_len); void IssueCodeMovedEvent(Address from, Address to); void IssueCodeRemovedEvent(Address from); - + void IssueAddCodeLinePosInfoEvent(void* jit_handler_data, + int pc_offset, + int position, + JitCodeEvent::PositionType position_Type); + void* IssueStartCodePosInfoEvent(); + void IssueEndCodePosInfoEvent(Code* code, void* jit_handler_data); // Emits the profiler's first message. void ProfilerBeginEvent(); // Emits callback event messages. void CallbackEventInternal(const char* prefix, - const char* name, + Name* name, Address entry_point); // Internal configurable move event. @@ -393,6 +450,8 @@ class Logger { // Returns whether profiler's sampler is active. bool IsProfilerSamplerActive(); + Isolate* isolate_; + // The sampler used by the profiler and the sliding state window. Ticker* ticker_; @@ -401,10 +460,6 @@ class Logger { // of samples. Profiler* profiler_; - // SlidingStateWindow instance keeping a sliding window of the most - // recent VM states. - SlidingStateWindow* sliding_state_window_; - // An array of log events names. 
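The TimerEventScope declared above is an RAII guard: its constructor logs a START event and its destructor logs the matching END, so the pair cannot get out of sync on early returns. A self-contained sketch of the idiom, with purely illustrative names:

```cpp
#include <cstdio>

// Generic sketch of the same RAII idiom: begin in the constructor,
// matching end in the destructor.
class ScopedTimerEvent {
 public:
  explicit ScopedTimerEvent(const char* name) : name_(name) {
    std::printf("timer-event-start,\"%s\"\n", name_);
  }
  ~ScopedTimerEvent() {
    std::printf("timer-event-end,\"%s\"\n", name_);
  }
 private:
  const char* name_;
};

int main() {
  ScopedTimerEvent timer("V8.Execute");  // hypothetical event name
  // ... the work being timed runs while 'timer' is alive ...
  return 0;
}
```

In the diff itself, Logger::TimerEventScope plays this role with the predefined names (v8_execute, v8_recompile_synchronous, and so on) and only writes anything when FLAG_log_internal_timer_events is set.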
const char* const* log_events_; @@ -415,7 +470,6 @@ class Logger { friend class LogMessageBuilder; friend class TimeLog; friend class Profiler; - friend class SlidingStateWindow; friend class StackTracer; friend class VMState; @@ -449,6 +503,8 @@ class Logger { // Logger::FunctionCreateEvent(...) Address prev_code_; + int64_t epoch_; + friend class CpuProfiler; }; diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h index 11e2217e07..55dccfa950 100644 --- a/deps/v8/src/macro-assembler.h +++ b/deps/v8/src/macro-assembler.h @@ -36,6 +36,25 @@ enum InvokeFlag { }; +// Flags used for the AllocateInNewSpace functions. +enum AllocationFlags { + // No special flags. + NO_ALLOCATION_FLAGS = 0, + // Return the pointer to the allocated already tagged as a heap object. + TAG_OBJECT = 1 << 0, + // The content of the result register already contains the allocation top in + // new space. + RESULT_CONTAINS_TOP = 1 << 1, + // Specify that the requested size of the space to allocate is specified in + // words instead of bytes. + SIZE_IN_WORDS = 1 << 2, + // Align the allocation to a multiple of kDoubleSize + DOUBLE_ALIGNMENT = 1 << 3, + // Directly allocate in old pointer space + PRETENURE_OLD_POINTER_SPACE = 1 << 4 +}; + + // Invalid depth in prototype chain. const int kInvalidProtoDepth = -1; @@ -151,6 +170,26 @@ class Comment { #endif // DEBUG + +class AllocationUtils { + public: + static ExternalReference GetAllocationTopReference( + Isolate* isolate, AllocationFlags flags) { + return ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) ? + ExternalReference::old_pointer_space_allocation_top_address(isolate) : + ExternalReference::new_space_allocation_top_address(isolate); + } + + + static ExternalReference GetAllocationLimitReference( + Isolate* isolate, AllocationFlags flags) { + return ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) ? + ExternalReference::old_pointer_space_allocation_limit_address(isolate) : + ExternalReference::new_space_allocation_limit_address(isolate); + } +}; + + } } // namespace v8::internal #endif // V8_MACRO_ASSEMBLER_H_ diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py index 08fa82e686..291a898f53 100644 --- a/deps/v8/src/macros.py +++ b/deps/v8/src/macros.py @@ -32,6 +32,8 @@ const NONE = 0; const READ_ONLY = 1; const DONT_ENUM = 2; const DONT_DELETE = 4; +const NEW_ONE_BYTE_STRING = true; +const NEW_TWO_BYTE_STRING = false; # Constants used for getter and setter operations. 
const GETTER = 0; @@ -97,6 +99,7 @@ macro IS_UNDEFINED(arg) = (typeof(arg) === 'undefined'); macro IS_NUMBER(arg) = (typeof(arg) === 'number'); macro IS_STRING(arg) = (typeof(arg) === 'string'); macro IS_BOOLEAN(arg) = (typeof(arg) === 'boolean'); +macro IS_SYMBOL(arg) = (%_IsSymbol(arg)); macro IS_OBJECT(arg) = (%_IsObject(arg)); macro IS_ARRAY(arg) = (%_IsArray(arg)); macro IS_FUNCTION(arg) = (%_IsFunction(arg)); diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc index 24730c6c0a..7503f24cb6 100644 --- a/deps/v8/src/mark-compact.cc +++ b/deps/v8/src/mark-compact.cc @@ -36,11 +36,12 @@ #include "heap-profiler.h" #include "ic-inl.h" #include "incremental-marking.h" -#include "liveobjectlist-inl.h" #include "mark-compact.h" +#include "marking-thread.h" #include "objects-visiting.h" #include "objects-visiting-inl.h" #include "stub-cache.h" +#include "sweeper-thread.h" namespace v8 { namespace internal { @@ -62,8 +63,11 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT sweep_precisely_(false), reduce_memory_footprint_(false), abort_incremental_marking_(false), + marking_parity_(ODD_MARKING_PARITY), compacting_(false), was_marked_incrementally_(false), + sweeping_pending_(false), + sequential_sweeping_(false), tracer_(NULL), migration_slots_buffer_(NULL), heap_(NULL), @@ -82,6 +86,16 @@ class VerifyMarkingVisitor: public ObjectVisitor { } } } + + void VisitEmbeddedPointer(RelocInfo* rinfo) { + ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); + if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps || + rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION || + !rinfo->target_object()->IsMap() || + !Map::cast(rinfo->target_object())->CanTransition()) { + VisitPointer(rinfo->target_object_address()); + } + } }; @@ -275,7 +289,8 @@ class VerifyNativeContextSeparationVisitor: public ObjectVisitor { case TYPE_FEEDBACK_INFO_TYPE: object->Iterate(this); break; - case ACCESSOR_INFO_TYPE: + case DECLARED_ACCESSOR_INFO_TYPE: + case EXECUTABLE_ACCESSOR_INFO_TYPE: case BYTE_ARRAY_TYPE: case CALL_HANDLER_INFO_TYPE: case CODE_TYPE: @@ -382,7 +397,7 @@ void MarkCompactCollector::CollectGarbage() { MarkLiveObjects(); ASSERT(heap_->incremental_marking()->IsStopped()); - if (FLAG_collect_maps) ClearNonLiveTransitions(); + if (FLAG_collect_maps) ClearNonLiveReferences(); ClearWeakMaps(); @@ -402,8 +417,25 @@ void MarkCompactCollector::CollectGarbage() { } #endif +#ifdef VERIFY_HEAP + if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_optimized_code && + heap()->weak_embedded_maps_verification_enabled()) { + VerifyWeakEmbeddedMapsInOptimizedCode(); + } + if (FLAG_collect_maps && FLAG_omit_prototype_checks_for_leaf_maps) { + VerifyOmittedPrototypeChecks(); + } +#endif + Finish(); + if (marking_parity_ == EVEN_MARKING_PARITY) { + marking_parity_ = ODD_MARKING_PARITY; + } else { + ASSERT(marking_parity_ == ODD_MARKING_PARITY); + marking_parity_ = EVEN_MARKING_PARITY; + } + tracer_ = NULL; } @@ -446,6 +478,30 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() { CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes()); } } + + +void MarkCompactCollector::VerifyWeakEmbeddedMapsInOptimizedCode() { + HeapObjectIterator code_iterator(heap()->code_space()); + for (HeapObject* obj = code_iterator.Next(); + obj != NULL; + obj = code_iterator.Next()) { + Code* code = Code::cast(obj); + if (code->kind() != Code::OPTIMIZED_FUNCTION) continue; + if (code->marked_for_deoptimization()) continue; + code->VerifyEmbeddedMapsDependency(); + } +} + + +void 
MarkCompactCollector::VerifyOmittedPrototypeChecks() { + HeapObjectIterator iterator(heap()->map_space()); + for (HeapObject* obj = iterator.Next(); + obj != NULL; + obj = iterator.Next()) { + Map* map = Map::cast(obj); + map->VerifyOmittedPrototypeChecks(); + } +} #endif // VERIFY_HEAP @@ -480,11 +536,69 @@ void MarkCompactCollector::ClearMarkbits() { MarkBit mark_bit = Marking::MarkBitFrom(obj); mark_bit.Clear(); mark_bit.Next().Clear(); + Page::FromAddress(obj->address())->ResetProgressBar(); Page::FromAddress(obj->address())->ResetLiveBytes(); } } +void MarkCompactCollector::StartSweeperThreads() { + sweeping_pending_ = true; + for (int i = 0; i < FLAG_sweeper_threads; i++) { + isolate()->sweeper_threads()[i]->StartSweeping(); + } +} + + +void MarkCompactCollector::WaitUntilSweepingCompleted() { + ASSERT(sweeping_pending_ == true); + for (int i = 0; i < FLAG_sweeper_threads; i++) { + isolate()->sweeper_threads()[i]->WaitForSweeperThread(); + } + sweeping_pending_ = false; + StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); + StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); + heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); + heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); +} + + +intptr_t MarkCompactCollector:: + StealMemoryFromSweeperThreads(PagedSpace* space) { + intptr_t freed_bytes = 0; + for (int i = 0; i < FLAG_sweeper_threads; i++) { + freed_bytes += isolate()->sweeper_threads()[i]->StealMemory(space); + } + space->AddToAccountingStats(freed_bytes); + space->DecrementUnsweptFreeBytes(freed_bytes); + return freed_bytes; +} + + +bool MarkCompactCollector::AreSweeperThreadsActivated() { + return isolate()->sweeper_threads() != NULL; +} + + +bool MarkCompactCollector::IsConcurrentSweepingInProgress() { + return sweeping_pending_; +} + + +void MarkCompactCollector::MarkInParallel() { + for (int i = 0; i < FLAG_marking_threads; i++) { + isolate()->marking_threads()[i]->StartMarking(); + } +} + + +void MarkCompactCollector::WaitUntilMarkingCompleted() { + for (int i = 0; i < FLAG_marking_threads; i++) { + isolate()->marking_threads()[i]->WaitForMarkingThread(); + } +} + + bool Marking::TransferMark(Address old_start, Address new_start) { // This is only used when resizing an object. ASSERT(MemoryChunk::FromAddress(old_start) == @@ -787,6 +901,11 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { ASSERT(!FLAG_never_compact || !FLAG_always_compact); + if (IsConcurrentSweepingInProgress()) { + // Instead of waiting we could also abort the sweeper threads here. + WaitUntilSweepingCompleted(); + } + // Clear marking bits if incremental marking is aborted. if (was_marked_incrementally_ && abort_incremental_marking_) { heap()->incremental_marking()->Abort(); @@ -801,7 +920,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { StartCompaction(NON_INCREMENTAL_COMPACTION); } - PagedSpaces spaces; + PagedSpaces spaces(heap()); for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) { @@ -816,6 +935,14 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) { } +class DeoptimizeMarkedCodeFilter : public OptimizedFunctionFilter { + public: + virtual bool TakeFunction(JSFunction* function) { + return function->code()->marked_for_deoptimization(); + } +}; + + void MarkCompactCollector::Finish() { #ifdef DEBUG ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); @@ -825,9 +952,10 @@ void MarkCompactCollector::Finish() { // force lazy re-initialization of it. 
This must be done after the // GC, because it relies on the new address of certain old space // objects (empty string, illegal builtin). - heap()->isolate()->stub_cache()->Clear(); + isolate()->stub_cache()->Clear(); - heap()->external_string_table_.CleanUp(); + DeoptimizeMarkedCodeFilter filter; + Deoptimizer::DeoptimizeAllFunctionsWith(isolate(), &filter); } @@ -876,8 +1004,8 @@ void CodeFlusher::ProcessJSFunctionCandidates() { if (!code_mark.Get()) { shared->set_code(lazy_compile); candidate->set_code(lazy_compile); - } else if (code == lazy_compile) { - candidate->set_code(lazy_compile); + } else { + candidate->set_code(code); } // We are in the middle of a GC cycle so the write barrier in the code @@ -926,6 +1054,103 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() { } +void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) { + // Make sure previous flushing decisions are revisited. + isolate_->heap()->incremental_marking()->RecordWrites(shared_info); + + SharedFunctionInfo* candidate = shared_function_info_candidates_head_; + SharedFunctionInfo* next_candidate; + if (candidate == shared_info) { + next_candidate = GetNextCandidate(shared_info); + shared_function_info_candidates_head_ = next_candidate; + ClearNextCandidate(shared_info); + } else { + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + + if (next_candidate == shared_info) { + next_candidate = GetNextCandidate(shared_info); + SetNextCandidate(candidate, next_candidate); + ClearNextCandidate(shared_info); + break; + } + + candidate = next_candidate; + } + } +} + + +void CodeFlusher::EvictCandidate(JSFunction* function) { + ASSERT(!function->next_function_link()->IsUndefined()); + Object* undefined = isolate_->heap()->undefined_value(); + + // Make sure previous flushing decisions are revisited. 
+ isolate_->heap()->incremental_marking()->RecordWrites(function); + isolate_->heap()->incremental_marking()->RecordWrites(function->shared()); + + JSFunction* candidate = jsfunction_candidates_head_; + JSFunction* next_candidate; + if (candidate == function) { + next_candidate = GetNextCandidate(function); + jsfunction_candidates_head_ = next_candidate; + ClearNextCandidate(function, undefined); + } else { + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + + if (next_candidate == function) { + next_candidate = GetNextCandidate(function); + SetNextCandidate(candidate, next_candidate); + ClearNextCandidate(function, undefined); + break; + } + + candidate = next_candidate; + } + } +} + + +void CodeFlusher::EvictJSFunctionCandidates() { + JSFunction* candidate = jsfunction_candidates_head_; + JSFunction* next_candidate; + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + EvictCandidate(candidate); + candidate = next_candidate; + } + ASSERT(jsfunction_candidates_head_ == NULL); +} + + +void CodeFlusher::EvictSharedFunctionInfoCandidates() { + SharedFunctionInfo* candidate = shared_function_info_candidates_head_; + SharedFunctionInfo* next_candidate; + while (candidate != NULL) { + next_candidate = GetNextCandidate(candidate); + EvictCandidate(candidate); + candidate = next_candidate; + } + ASSERT(shared_function_info_candidates_head_ == NULL); +} + + +void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) { + Heap* heap = isolate_->heap(); + + JSFunction** slot = &jsfunction_candidates_head_; + JSFunction* candidate = jsfunction_candidates_head_; + while (candidate != NULL) { + if (heap->InFromSpace(candidate)) { + v->VisitPointer(reinterpret_cast<Object**>(slot)); + } + candidate = GetNextCandidate(*slot); + slot = GetNextCandidateSlot(*slot); + } +} + + MarkCompactCollector::~MarkCompactCollector() { if (code_flusher_ != NULL) { delete code_flusher_; @@ -935,7 +1160,7 @@ MarkCompactCollector::~MarkCompactCollector() { static inline HeapObject* ShortCircuitConsString(Object** p) { - // Optimization: If the heap object pointed to by p is a non-symbol + // Optimization: If the heap object pointed to by p is a non-internalized // cons string whose right substring is HEAP->empty_string, update // it in place to its left substring. Return the updated value. // @@ -943,7 +1168,7 @@ static inline HeapObject* ShortCircuitConsString(Object** p) { // (i.e., the left substring of a cons string is always a heap object). // // The check performed is: - // object->IsConsString() && !object->IsSymbol() && + // object->IsConsString() && !object->IsInternalizedString() && // (ConsString::cast(object)->second() == HEAP->empty_string()) // except the maps for the object and its possible substrings might be // marked. @@ -1054,9 +1279,9 @@ class MarkCompactMarkingVisitor // Visit all unmarked objects pointed to by [start, end). // Returns false if the operation fails (lack of stack space). - static inline bool VisitUnmarkedObjects(Heap* heap, + INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start, - Object** end) { + Object** end)) { // Return false is we are close to the stack limit. 
StackLimitCheck check(heap->isolate()); if (check.HasOverflowed()) return false; @@ -1320,10 +1545,10 @@ class MarkCompactMarkingVisitor::ObjectStatsTracker< static inline void Visit(Map* map, HeapObject* obj) { Heap* heap = map->GetHeap(); FixedArray* fixed_array = FixedArray::cast(obj); - if (fixed_array == heap->symbol_table()) { + if (fixed_array == heap->string_table()) { heap->RecordObjectStats( FIXED_ARRAY_TYPE, - SYMBOL_TABLE_SUB_TYPE, + STRING_TABLE_SUB_TYPE, fixed_array->Size()); } ObjectStatsVisitBase(kVisitFixedArray, map, obj); @@ -1430,21 +1655,13 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate, void MarkCompactCollector::PrepareForCodeFlushing() { ASSERT(heap() == Isolate::Current()->heap()); - // TODO(1609) Currently incremental marker does not support code flushing. - if (!FLAG_flush_code || was_marked_incrementally_) { - EnableCodeFlushing(false); - return; - } - -#ifdef ENABLE_DEBUGGER_SUPPORT - if (heap()->isolate()->debug()->IsLoaded() || - heap()->isolate()->debug()->has_break_points()) { - EnableCodeFlushing(false); - return; + // Enable code flushing for non-incremental cycles. + if (FLAG_flush_code && !FLAG_flush_code_incrementally) { + EnableCodeFlushing(!was_marked_incrementally_); } -#endif - EnableCodeFlushing(true); + // If code flushing is disabled, there is no need to prepare for it. + if (!is_code_flushing_enabled()) return; // Ensure that empty descriptor array is marked. Method MarkDescriptorArray // relies on it being marked before any other descriptor array. @@ -1512,10 +1729,10 @@ class RootMarkingVisitor : public ObjectVisitor { }; -// Helper class for pruning the symbol table. -class SymbolTableCleaner : public ObjectVisitor { +// Helper class for pruning the string table. +class StringTableCleaner : public ObjectVisitor { public: - explicit SymbolTableCleaner(Heap* heap) + explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) { } virtual void VisitPointers(Object** start, Object** end) { @@ -1524,8 +1741,8 @@ class SymbolTableCleaner : public ObjectVisitor { Object* o = *p; if (o->IsHeapObject() && !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { - // Check if the symbol being pruned is an external symbol. We need to - // delete the associated external data as this symbol is going away. + // Check if the internalized string being pruned is external. We need to + // delete the associated external data as this string is going away. // Since no objects have yet been moved we can safely access the map of // the object. @@ -1615,10 +1832,10 @@ static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque, Page* p) { for (; cell_index < last_cell_index; cell_index++, cell_base += 32 * kPointerSize) { - ASSERT((unsigned)cell_index == - Bitmap::IndexToCell( - Bitmap::CellAlignIndex( - p->AddressToMarkbitIndex(cell_base)))); + ASSERT(static_cast<unsigned>(cell_index) == + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(cell_base)))); const MarkBit::CellType current_cell = cells[cell_index]; if (current_cell == 0) continue; @@ -1675,14 +1892,24 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) { } -void MarkCompactCollector::MarkSymbolTable() { - SymbolTable* symbol_table = heap()->symbol_table(); - // Mark the symbol table itself. 
- MarkBit symbol_table_mark = Marking::MarkBitFrom(symbol_table); - SetMark(symbol_table, symbol_table_mark); +bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap, + Object** p) { + Object* o = *p; + ASSERT(o->IsHeapObject()); + HeapObject* heap_object = HeapObject::cast(o); + MarkBit mark = Marking::MarkBitFrom(heap_object); + return !mark.Get(); +} + + +void MarkCompactCollector::MarkStringTable() { + StringTable* string_table = heap()->string_table(); + // Mark the string table itself. + MarkBit string_table_mark = Marking::MarkBitFrom(string_table); + SetMark(string_table, string_table_mark); // Explicitly mark the prefix. MarkingVisitor marker(heap()); - symbol_table->IteratePrefix(&marker); + string_table->IteratePrefix(&marker); ProcessMarkingDeque(); } @@ -1692,8 +1919,8 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { // etc., and all objects reachable from them. heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG); - // Handle the symbol table specially. - MarkSymbolTable(); + // Handle the string table specially. + MarkStringTable(); // There may be overflowed objects in the heap. Visit them now. while (marking_deque_.overflowed()) { @@ -1703,57 +1930,9 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) { } -void MarkCompactCollector::MarkObjectGroups() { - List<ObjectGroup*>* object_groups = - heap()->isolate()->global_handles()->object_groups(); - - int last = 0; - for (int i = 0; i < object_groups->length(); i++) { - ObjectGroup* entry = object_groups->at(i); - ASSERT(entry != NULL); - - Object*** objects = entry->objects_; - bool group_marked = false; - for (size_t j = 0; j < entry->length_; j++) { - Object* object = *objects[j]; - if (object->IsHeapObject()) { - HeapObject* heap_object = HeapObject::cast(object); - MarkBit mark = Marking::MarkBitFrom(heap_object); - if (mark.Get()) { - group_marked = true; - break; - } - } - } - - if (!group_marked) { - (*object_groups)[last++] = entry; - continue; - } - - // An object in the group is marked, so mark as grey all white heap - // objects in the group. - for (size_t j = 0; j < entry->length_; ++j) { - Object* object = *objects[j]; - if (object->IsHeapObject()) { - HeapObject* heap_object = HeapObject::cast(object); - MarkBit mark = Marking::MarkBitFrom(heap_object); - MarkObject(heap_object, mark); - } - } - - // Once the entire group has been colored grey, set the object group - // to NULL so it won't be processed again. - entry->Dispose(); - object_groups->at(i) = NULL; - } - object_groups->Rewind(last); -} - - void MarkCompactCollector::MarkImplicitRefGroups() { List<ImplicitRefGroup*>* ref_groups = - heap()->isolate()->global_handles()->implicit_ref_groups(); + isolate()->global_handles()->implicit_ref_groups(); int last = 0; for (int i = 0; i < ref_groups->length(); i++) { @@ -1869,11 +2048,12 @@ void MarkCompactCollector::ProcessMarkingDeque() { } -void MarkCompactCollector::ProcessExternalMarking() { +void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) { bool work_to_do = true; ASSERT(marking_deque_.IsEmpty()); while (work_to_do) { - MarkObjectGroups(); + isolate()->global_handles()->IterateObjectGroups( + visitor, &IsUnmarkedHeapObjectWithHeap); MarkImplicitRefGroups(); work_to_do = !marking_deque_.IsEmpty(); ProcessMarkingDeque(); @@ -1886,7 +2066,7 @@ void MarkCompactCollector::MarkLiveObjects() { // The recursive GC marker detects when it is nearing stack overflow, // and switches to a different marking system. 
JS interrupts interfere // with the C stack limit check. - PostponeInterruptsScope postpone(heap()->isolate()); + PostponeInterruptsScope postpone(isolate()); bool incremental_marking_overflowed = false; IncrementalMarking* incremental_marking = heap_->incremental_marking(); @@ -1952,7 +2132,7 @@ void MarkCompactCollector::MarkLiveObjects() { // The objects reachable from the roots are marked, yet unreachable // objects are unmarked. Mark objects reachable due to host // application specific logic. - ProcessExternalMarking(); + ProcessExternalMarking(&root_visitor); // The objects reachable from the roots or object groups are marked, // yet unreachable objects are unmarked. Mark objects reachable @@ -1971,28 +2151,29 @@ void MarkCompactCollector::MarkLiveObjects() { // Repeat host application specific marking to mark unmarked objects // reachable from the weak roots. - ProcessExternalMarking(); + ProcessExternalMarking(&root_visitor); AfterMarking(); } void MarkCompactCollector::AfterMarking() { - // Object literal map caches reference symbols (cache keys) and maps + // Object literal map caches reference strings (cache keys) and maps // (cache values). At this point still useful maps have already been // marked. Mark the keys for the alive values before we process the - // symbol table. + // string table. ProcessMapCaches(); - // Prune the symbol table removing all symbols only pointed to by the - // symbol table. Cannot use symbol_table() here because the symbol + // Prune the string table removing all strings only pointed to by the + // string table. Cannot use string_table() here because the string // table is marked. - SymbolTable* symbol_table = heap()->symbol_table(); - SymbolTableCleaner v(heap()); - symbol_table->IterateElements(&v); - symbol_table->ElementsRemoved(v.PointersRemoved()); + StringTable* string_table = heap()->string_table(); + StringTableCleaner v(heap()); + string_table->IterateElements(&v); + string_table->ElementsRemoved(v.PointersRemoved()); heap()->external_string_table_.Iterate(&v); heap()->external_string_table_.CleanUp(); + heap()->error_object_list_.RemoveUnmarked(heap()); // Process the weak references. MarkCompactWeakObjectRetainer mark_compact_object_retainer; @@ -2005,9 +2186,11 @@ void MarkCompactCollector::AfterMarking() { // Flush code from collected candidates. if (is_code_flushing_enabled()) { code_flusher_->ProcessCandidates(); - // TODO(1609) Currently incremental marker does not support code flushing, - // we need to disable it before incremental marking steps for next cycle. - EnableCodeFlushing(false); + // If incremental marker does not support code flushing, we need to + // disable it before incremental marking steps for next cycle. + if (FLAG_flush_code && !FLAG_flush_code_incrementally) { + EnableCodeFlushing(false); + } } if (!FLAG_watch_ic_patching) { @@ -2090,7 +2273,7 @@ void MarkCompactCollector::ReattachInitialMaps() { } -void MarkCompactCollector::ClearNonLiveTransitions() { +void MarkCompactCollector::ClearNonLiveReferences() { HeapObjectIterator map_iterator(heap()->map_space()); // Iterate over the map space, setting map transitions that go from // a marked map to an unmarked map to null transitions. This action @@ -2102,9 +2285,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() { if (map->IsFreeSpace()) continue; ASSERT(map->IsMap()); - // Only JSObject and subtypes have map transitions and back pointers. 
- STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE); - if (map->instance_type() < FIRST_JS_OBJECT_TYPE) continue; + if (!map->CanTransition()) continue; if (map_mark.Get() && map->attached_to_shared_function_info()) { @@ -2116,6 +2297,12 @@ void MarkCompactCollector::ClearNonLiveTransitions() { ClearNonLivePrototypeTransitions(map); ClearNonLiveMapTransitions(map, map_mark); + + if (map_mark.Get()) { + ClearNonLiveDependentCode(map); + } else { + ClearAndDeoptimizeDependentCode(map); + } } } @@ -2184,6 +2371,57 @@ void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map, } +void MarkCompactCollector::ClearAndDeoptimizeDependentCode(Map* map) { + AssertNoAllocation no_allocation_scope; + DependentCode* entries = map->dependent_code(); + DependentCode::GroupStartIndexes starts(entries); + int number_of_entries = starts.number_of_entries(); + if (number_of_entries == 0) return; + for (int i = 0; i < number_of_entries; i++) { + Code* code = entries->code_at(i); + if (IsMarked(code) && !code->marked_for_deoptimization()) { + code->set_marked_for_deoptimization(true); + } + entries->clear_code_at(i); + } + map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array())); +} + + +void MarkCompactCollector::ClearNonLiveDependentCode(Map* map) { + AssertNoAllocation no_allocation_scope; + DependentCode* entries = map->dependent_code(); + DependentCode::GroupStartIndexes starts(entries); + int number_of_entries = starts.number_of_entries(); + if (number_of_entries == 0) return; + int new_number_of_entries = 0; + // Go through all groups, remove dead codes and compact. + for (int g = 0; g < DependentCode::kGroupCount; g++) { + int group_number_of_entries = 0; + for (int i = starts.at(g); i < starts.at(g + 1); i++) { + Code* code = entries->code_at(i); + if (IsMarked(code) && !code->marked_for_deoptimization()) { + if (new_number_of_entries + group_number_of_entries != i) { + entries->set_code_at(new_number_of_entries + + group_number_of_entries, code); + } + Object** slot = entries->code_slot_at(new_number_of_entries + + group_number_of_entries); + RecordSlot(slot, slot, code); + group_number_of_entries++; + } + } + entries->set_number_of_entries( + static_cast<DependentCode::DependencyGroup>(g), + group_number_of_entries); + new_number_of_entries += group_number_of_entries; + } + for (int i = new_number_of_entries; i < number_of_entries; i++) { + entries->clear_code_at(i); + } +} + + void MarkCompactCollector::ProcessWeakMaps() { Object* weak_map_obj = encountered_weak_maps(); while (weak_map_obj != Smi::FromInt(0)) { @@ -2282,7 +2520,7 @@ void MarkCompactCollector::MigrateObject(Address dst, } } } else if (dest == CODE_SPACE) { - PROFILE(heap()->isolate(), CodeMoveEvent(src, dst)); + PROFILE(isolate(), CodeMoveEvent(src, dst)); heap()->MoveBlock(dst, src, size); SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_, @@ -2334,6 +2572,16 @@ class PointersUpdatingVisitor: public ObjectVisitor { } } + void VisitCodeAgeSequence(RelocInfo* rinfo) { + ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); + Object* stub = rinfo->code_age_stub(); + ASSERT(stub != NULL); + VisitPointer(&stub); + if (stub != rinfo->code_age_stub()) { + rinfo->set_code_age_stub(Code::cast(stub)); + } + } + void VisitDebugTarget(RelocInfo* rinfo) { ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) && rinfo->IsPatchedReturnSequence()) || @@ -2505,9 +2753,6 @@ void MarkCompactCollector::EvacuateNewSpace() { size, NEW_SPACE); } else { - // Process the dead object before we write a NULL into its header. 
- LiveObjectList::ProcessNonLive(object); - // Mark dead objects in the new space with null in their map field. Memory::Address_at(object->address()) = NULL; } @@ -2540,10 +2785,10 @@ void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { for (; cell_index < last_cell_index; cell_index++, cell_base += 32 * kPointerSize) { - ASSERT((unsigned)cell_index == - Bitmap::IndexToCell( - Bitmap::CellAlignIndex( - p->AddressToMarkbitIndex(cell_base)))); + ASSERT(static_cast<unsigned>(cell_index) == + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(cell_base)))); if (cells[cell_index] == 0) continue; int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); @@ -2596,6 +2841,7 @@ void MarkCompactCollector::EvacuatePages() { slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address()); page->ClearEvacuationCandidate(); page->SetFlag(Page::RESCAN_ON_EVACUATION); + page->InsertAfter(static_cast<PagedSpace*>(page->owner())->anchor()); } return; } @@ -2686,6 +2932,11 @@ static void SweepPrecisely(PagedSpace* space, space->identity() == CODE_SPACE); ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); + double start_time = 0.0; + if (FLAG_print_cumulative_gc_stat) { + start_time = OS::TimeCurrentMillis(); + } + MarkBit::CellType* cells = p->markbits()->cells(); p->MarkSweptPrecisely(); @@ -2713,10 +2964,10 @@ static void SweepPrecisely(PagedSpace* space, for (; cell_index < last_cell_index; cell_index++, object_address += 32 * kPointerSize) { - ASSERT((unsigned)cell_index == - Bitmap::IndexToCell( - Bitmap::CellAlignIndex( - p->AddressToMarkbitIndex(object_address)))); + ASSERT(static_cast<unsigned>(cell_index) == + Bitmap::IndexToCell( + Bitmap::CellAlignIndex( + p->AddressToMarkbitIndex(object_address)))); int live_objects = MarkWordToObjectStarts(cells[cell_index], offsets); int live_index = 0; for ( ; live_objects != 0; live_objects--) { @@ -2751,6 +3002,9 @@ static void SweepPrecisely(PagedSpace* space, space->Free(free_start, static_cast<int>(p->area_end() - free_start)); } p->ResetLiveBytes(); + if (FLAG_print_cumulative_gc_stat) { + space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time); + } } @@ -2907,7 +3161,6 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS); // Update roots. heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); - LiveObjectList::IterateElements(&updating_visitor); } { GCTracer::Scope gc_scope(tracer_, @@ -2978,7 +3231,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { switch (space->identity()) { case OLD_DATA_SPACE: - SweepConservatively(space, p); + SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); break; case OLD_POINTER_SPACE: SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( @@ -3014,12 +3267,15 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { // Update pointer from the native contexts list. updating_visitor.VisitPointer(heap_->native_contexts_list_address()); - heap_->symbol_table()->Iterate(&updating_visitor); + heap_->string_table()->Iterate(&updating_visitor); // Update pointers from external string table. heap_->UpdateReferencesInExternalStringTable( &UpdateReferenceInExternalStringTableEntry); + // Update pointers in the new error object list. + heap_->error_object_list()->UpdateReferences(); + if (!FLAG_watch_ic_patching) { // Update JSFunction pointers from the runtime profiler. 
heap()->isolate()->runtime_profiler()->UpdateSamplesAfterCompact( @@ -3043,6 +3299,23 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_); ASSERT(migration_slots_buffer_ == NULL); +} + + +void MarkCompactCollector::UnlinkEvacuationCandidates() { + int npages = evacuation_candidates_.length(); + for (int i = 0; i < npages; i++) { + Page* p = evacuation_candidates_[i]; + if (!p->IsEvacuationCandidate()) continue; + p->Unlink(); + p->ClearSweptPrecisely(); + p->ClearSweptConservatively(); + } +} + + +void MarkCompactCollector::ReleaseEvacuationCandidates() { + int npages = evacuation_candidates_.length(); for (int i = 0; i < npages; i++) { Page* p = evacuation_candidates_[i]; if (!p->IsEvacuationCandidate()) continue; @@ -3051,10 +3324,11 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { p->set_scan_on_scavenge(false); slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address()); p->ResetLiveBytes(); - space->ReleasePage(p); + space->ReleasePage(p, false); } evacuation_candidates_.Rewind(0); compacting_ = false; + heap()->FreeQueuedChunks(); } @@ -3334,6 +3608,33 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { } +template<MarkCompactCollector::SweepingParallelism mode> +static intptr_t Free(PagedSpace* space, + FreeList* free_list, + Address start, + int size) { + if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) { + return space->Free(start, size); + } else { + return size - free_list->Free(start, size); + } +} + + +// Force instantiation of templatized SweepConservatively method for +// SWEEP_SEQUENTIALLY mode. +template intptr_t MarkCompactCollector:: + SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>( + PagedSpace*, FreeList*, Page*); + + +// Force instantiation of templatized SweepConservatively method for +// SWEEP_IN_PARALLEL mode. +template intptr_t MarkCompactCollector:: + SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>( + PagedSpace*, FreeList*, Page*); + + // Sweeps a space conservatively. After this has been done the larger free // spaces have been put on the free list and the smaller ones have been // ignored and left untouched. A free space is always either ignored or put @@ -3341,8 +3642,16 @@ static inline Address StartOfLiveObject(Address block_address, uint32_t cell) { // because it means that any FreeSpace maps left actually describe a region of // memory that can be ignored when scanning. Dead objects other than free // spaces will not contain the free space map. 
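Before the conservative sweeper itself, a brief aside on the Free<mode> helper and the forced instantiations introduced just above: they use a non-type template parameter to choose, at compile time, between returning memory to the space directly and pushing it onto a thread-private free list. A self-contained sketch of that dispatch pattern; the two branch bodies are placeholders, not the real accounting logic.

```cpp
enum SweepingParallelism { SWEEP_SEQUENTIALLY, SWEEP_IN_PARALLEL };

// 'mode' is a compile-time constant, so each instantiation keeps one branch.
template <SweepingParallelism mode>
int FreeBlock(int* sequential_total, int* parallel_total, int size) {
  if (mode == SWEEP_SEQUENTIALLY) {
    *sequential_total += size;  // e.g. account directly on the space
  } else {
    *parallel_total += size;    // e.g. hand the block to a private free list
  }
  return size;
}

// Explicit instantiations, mirroring how the .cc file forces the
// SWEEP_SEQUENTIALLY and SWEEP_IN_PARALLEL variants of SweepConservatively.
template int FreeBlock<SWEEP_SEQUENTIALLY>(int*, int*, int);
template int FreeBlock<SWEEP_IN_PARALLEL>(int*, int*, int);

int main() {
  int seq = 0, par = 0;
  FreeBlock<SWEEP_SEQUENTIALLY>(&seq, &par, 32);
  FreeBlock<SWEEP_IN_PARALLEL>(&seq, &par, 64);
  return (seq == 32 && par == 64) ? 0 : 1;
}
```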
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { +template<MarkCompactCollector::SweepingParallelism mode> +intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, + FreeList* free_list, + Page* p) { ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); + ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && + free_list != NULL) || + (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY && + free_list == NULL)); + MarkBit::CellType* cells = p->markbits()->cells(); p->MarkSweptConservatively(); @@ -3369,8 +3678,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { } size_t size = block_address - p->area_start(); if (cell_index == last_cell_index) { - freed_bytes += static_cast<int>(space->Free(p->area_start(), - static_cast<int>(size))); + freed_bytes += Free<mode>(space, free_list, p->area_start(), + static_cast<int>(size)); ASSERT_EQ(0, p->LiveBytes()); return freed_bytes; } @@ -3379,8 +3688,9 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { Address free_end = StartOfLiveObject(block_address, cells[cell_index]); // Free the first free space. size = free_end - p->area_start(); - freed_bytes += space->Free(p->area_start(), - static_cast<int>(size)); + freed_bytes += Free<mode>(space, free_list, p->area_start(), + static_cast<int>(size)); + // The start of the current free area is represented in undigested form by // the address of the last 32-word section that contained a live object and // the marking bitmap for that cell, which describes where the live object @@ -3409,8 +3719,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { // so now we need to find the start of the first live object at the // end of the free space. free_end = StartOfLiveObject(block_address, cell); - freed_bytes += space->Free(free_start, - static_cast<int>(free_end - free_start)); + freed_bytes += Free<mode>(space, free_list, free_start, + static_cast<int>(free_end - free_start)); } } // Update our undigested record of where the current free area started. @@ -3424,8 +3734,8 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { // Handle the free space at the end of the page. 
if (block_address - free_start > 32 * kPointerSize) { free_start = DigestFreeStart(free_start, free_start_cell); - freed_bytes += space->Free(free_start, - static_cast<int>(block_address - free_start)); + freed_bytes += Free<mode>(space, free_list, free_start, + static_cast<int>(block_address - free_start)); } p->ResetLiveBytes(); @@ -3433,34 +3743,48 @@ intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { } +void MarkCompactCollector::SweepInParallel(PagedSpace* space, + FreeList* private_free_list, + FreeList* free_list) { + PageIterator it(space); + while (it.has_next()) { + Page* p = it.next(); + + if (p->TryParallelSweeping()) { + SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p); + free_list->Concatenate(private_free_list); + } + } +} + + void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { space->set_was_swept_conservatively(sweeper == CONSERVATIVE || - sweeper == LAZY_CONSERVATIVE); - + sweeper == LAZY_CONSERVATIVE || + sweeper == PARALLEL_CONSERVATIVE || + sweeper == CONCURRENT_CONSERVATIVE); space->ClearStats(); PageIterator it(space); - intptr_t freed_bytes = 0; int pages_swept = 0; - intptr_t newspace_size = space->heap()->new_space()->Size(); bool lazy_sweeping_active = false; bool unused_page_present = false; + bool parallel_sweeping_active = false; while (it.has_next()) { Page* p = it.next(); + ASSERT(p->parallel_sweeping() == 0); + ASSERT(!p->IsEvacuationCandidate()); + // Clear sweeping flags indicating that marking bits are still intact. p->ClearSweptPrecisely(); p->ClearSweptConservatively(); - if (p->IsEvacuationCandidate()) { - ASSERT(evacuation_candidates_.length() > 0); - continue; - } - if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { // Will be processed in EvacuateNewSpaceAndCandidates. + ASSERT(evacuation_candidates_.length() > 0); continue; } @@ -3474,46 +3798,58 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { // Adjust unswept free bytes because releasing a page expects said // counter to be accurate for unswept pages. 
space->IncreaseUnsweptFreeBytes(p); - space->ReleasePage(p); + space->ReleasePage(p, true); continue; } unused_page_present = true; } - if (lazy_sweeping_active) { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n", - reinterpret_cast<intptr_t>(p)); - } - space->IncreaseUnsweptFreeBytes(p); - continue; - } - switch (sweeper) { case CONSERVATIVE: { if (FLAG_gc_verbose) { PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", reinterpret_cast<intptr_t>(p)); } - SweepConservatively(space, p); + SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); pages_swept++; break; } case LAZY_CONSERVATIVE: { - if (FLAG_gc_verbose) { - PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n", - reinterpret_cast<intptr_t>(p)); - } - freed_bytes += SweepConservatively(space, p); - pages_swept++; - if (freed_bytes > 2 * newspace_size) { + if (lazy_sweeping_active) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " lazily postponed.\n", + reinterpret_cast<intptr_t>(p)); + } + space->IncreaseUnsweptFreeBytes(p); + } else { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", + reinterpret_cast<intptr_t>(p)); + } + SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); + pages_swept++; space->SetPagesToSweep(p->next_page()); lazy_sweeping_active = true; + } + break; + } + case CONCURRENT_CONSERVATIVE: + case PARALLEL_CONSERVATIVE: { + if (!parallel_sweeping_active) { + if (FLAG_gc_verbose) { + PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", + reinterpret_cast<intptr_t>(p)); + } + SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); + pages_swept++; + parallel_sweeping_active = true; } else { if (FLAG_gc_verbose) { - PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n", - freed_bytes); + PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", + reinterpret_cast<intptr_t>(p)); } + p->set_parallel_sweeping(1); + space->IncreaseUnsweptFreeBytes(p); } break; } @@ -3554,16 +3890,34 @@ void MarkCompactCollector::SweepSpaces() { #endif SweeperType how_to_sweep = FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; + if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; + if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE; if (sweep_precisely_) how_to_sweep = PRECISE; + + // Unlink evacuation candidates before sweeper threads access the list of + // pages to avoid race condition. + UnlinkEvacuationCandidates(); + // Noncompacting collections simply sweep the spaces to clear the mark // bits and free the nonlive blocks (for old and map spaces). We sweep // the map space last because freeing non-live maps overwrites them and // the other spaces rely on possibly non-live maps to get the sizes for // non-live objects. + SequentialSweepingScope scope(this); SweepSpace(heap()->old_pointer_space(), how_to_sweep); SweepSpace(heap()->old_data_space(), how_to_sweep); + if (how_to_sweep == PARALLEL_CONSERVATIVE || + how_to_sweep == CONCURRENT_CONSERVATIVE) { + // TODO(hpayer): fix race with concurrent sweeper + StartSweeperThreads(); + } + + if (how_to_sweep == PARALLEL_CONSERVATIVE) { + WaitUntilSweepingCompleted(); + } + RemoveDeadInvalidatedCode(); SweepSpace(heap()->code_space(), PRECISE); @@ -3578,15 +3932,26 @@ void MarkCompactCollector::SweepSpaces() { // Deallocate unmarked objects and clear marked bits for marked objects. heap_->lo_space()->FreeUnmarkedObjects(); + + // Deallocate evacuated candidate pages. 
+ ReleaseEvacuationCandidates(); } void MarkCompactCollector::EnableCodeFlushing(bool enable) { +#ifdef ENABLE_DEBUGGER_SUPPORT + if (isolate()->debug()->IsLoaded() || + isolate()->debug()->has_break_points()) { + enable = false; + } +#endif + if (enable) { if (code_flusher_ != NULL) return; - code_flusher_ = new CodeFlusher(heap()->isolate()); + code_flusher_ = new CodeFlusher(isolate()); } else { if (code_flusher_ == NULL) return; + code_flusher_->EvictAllCandidates(); delete code_flusher_; code_flusher_ = NULL; } @@ -3609,6 +3974,11 @@ void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, } +Isolate* MarkCompactCollector::isolate() const { + return heap_->isolate(); +} + + void MarkCompactCollector::Initialize() { MarkCompactMarkingVisitor::Initialize(); IncrementalMarking::Initialize(); @@ -3690,7 +4060,7 @@ void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) { void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) { ASSERT(heap()->gc_state() == Heap::MARK_COMPACT); if (is_compacting()) { - Code* host = heap()->isolate()->inner_pointer_to_code_cache()-> + Code* host = isolate()->inner_pointer_to_code_cache()-> GcSafeFindCodeForInnerPointer(pc); MarkBit mark_bit = Marking::MarkBitFrom(host); if (Marking::IsBlack(mark_bit)) { diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h index 7c648000de..cbc8f410c6 100644 --- a/deps/v8/src/mark-compact.h +++ b/deps/v8/src/mark-compact.h @@ -53,59 +53,59 @@ class Marking { : heap_(heap) { } - static inline MarkBit MarkBitFrom(Address addr); + INLINE(static MarkBit MarkBitFrom(Address addr)); - static inline MarkBit MarkBitFrom(HeapObject* obj) { + INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) { return MarkBitFrom(reinterpret_cast<Address>(obj)); } // Impossible markbits: 01 static const char* kImpossibleBitPattern; - static inline bool IsImpossible(MarkBit mark_bit) { + INLINE(static bool IsImpossible(MarkBit mark_bit)) { return !mark_bit.Get() && mark_bit.Next().Get(); } // Black markbits: 10 - this is required by the sweeper. static const char* kBlackBitPattern; - static inline bool IsBlack(MarkBit mark_bit) { + INLINE(static bool IsBlack(MarkBit mark_bit)) { return mark_bit.Get() && !mark_bit.Next().Get(); } // White markbits: 00 - this is required by the mark bit clearer. 
static const char* kWhiteBitPattern; - static inline bool IsWhite(MarkBit mark_bit) { + INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); } // Grey markbits: 11 static const char* kGreyBitPattern; - static inline bool IsGrey(MarkBit mark_bit) { + INLINE(static bool IsGrey(MarkBit mark_bit)) { return mark_bit.Get() && mark_bit.Next().Get(); } - static inline void MarkBlack(MarkBit mark_bit) { + INLINE(static void MarkBlack(MarkBit mark_bit)) { mark_bit.Set(); mark_bit.Next().Clear(); } - static inline void BlackToGrey(MarkBit markbit) { + INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); } - static inline void WhiteToGrey(MarkBit markbit) { + INLINE(static void WhiteToGrey(MarkBit markbit)) { markbit.Set(); markbit.Next().Set(); } - static inline void GreyToBlack(MarkBit markbit) { + INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); } - static inline void BlackToGrey(HeapObject* obj) { + INLINE(static void BlackToGrey(HeapObject* obj)) { BlackToGrey(MarkBitFrom(obj)); } - static inline void AnyToGrey(MarkBit markbit) { + INLINE(static void AnyToGrey(MarkBit markbit)) { markbit.Set(); markbit.Next().Set(); } @@ -194,7 +194,7 @@ class MarkingDeque { // Push the (marked) object on the marking stack if there is room, // otherwise mark the object as overflowed and wait for a rescan of the // heap. - inline void PushBlack(HeapObject* object) { + INLINE(void PushBlack(HeapObject* object)) { ASSERT(object->IsHeapObject()); if (IsFull()) { Marking::BlackToGrey(object); @@ -206,7 +206,7 @@ class MarkingDeque { } } - inline void PushGrey(HeapObject* object) { + INLINE(void PushGrey(HeapObject* object)) { ASSERT(object->IsHeapObject()); if (IsFull()) { SetOverflowed(); @@ -216,7 +216,7 @@ class MarkingDeque { } } - inline HeapObject* Pop() { + INLINE(HeapObject* Pop()) { ASSERT(!IsEmpty()); top_ = ((top_ - 1) & mask_); HeapObject* object = array_[top_]; @@ -224,7 +224,7 @@ class MarkingDeque { return object; } - inline void UnshiftGrey(HeapObject* object) { + INLINE(void UnshiftGrey(HeapObject* object)) { ASSERT(object->IsHeapObject()); if (IsFull()) { SetOverflowed(); @@ -366,10 +366,10 @@ class SlotsBuffer { return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold; } - static bool AddTo(SlotsBufferAllocator* allocator, - SlotsBuffer** buffer_address, - ObjectSlot slot, - AdditionMode mode) { + INLINE(static bool AddTo(SlotsBufferAllocator* allocator, + SlotsBuffer** buffer_address, + ObjectSlot slot, + AdditionMode mode)) { SlotsBuffer* buffer = *buffer_address; if (buffer == NULL || buffer->IsFull()) { if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) { @@ -420,25 +420,45 @@ class CodeFlusher { shared_function_info_candidates_head_(NULL) {} void AddCandidate(SharedFunctionInfo* shared_info) { - SetNextCandidate(shared_info, shared_function_info_candidates_head_); - shared_function_info_candidates_head_ = shared_info; + if (GetNextCandidate(shared_info) == NULL) { + SetNextCandidate(shared_info, shared_function_info_candidates_head_); + shared_function_info_candidates_head_ = shared_info; + } } void AddCandidate(JSFunction* function) { ASSERT(function->code() == function->shared()->code()); - ASSERT(function->next_function_link()->IsUndefined()); - SetNextCandidate(function, jsfunction_candidates_head_); - jsfunction_candidates_head_ = function; + if (GetNextCandidate(function)->IsUndefined()) { + SetNextCandidate(function, jsfunction_candidates_head_); + jsfunction_candidates_head_ = function; + } 
} + void EvictCandidate(SharedFunctionInfo* shared_info); + void EvictCandidate(JSFunction* function); + void ProcessCandidates() { ProcessSharedFunctionInfoCandidates(); ProcessJSFunctionCandidates(); } + void EvictAllCandidates() { + EvictJSFunctionCandidates(); + EvictSharedFunctionInfoCandidates(); + } + + void IteratePointersToFromSpace(ObjectVisitor* v); + private: void ProcessJSFunctionCandidates(); void ProcessSharedFunctionInfoCandidates(); + void EvictJSFunctionCandidates(); + void EvictSharedFunctionInfoCandidates(); + + static JSFunction** GetNextCandidateSlot(JSFunction* candidate) { + return reinterpret_cast<JSFunction**>( + HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset)); + } static JSFunction* GetNextCandidate(JSFunction* candidate) { Object* next_candidate = candidate->next_function_link(); @@ -560,6 +580,7 @@ class MarkCompactCollector { static inline bool IsMarked(Object* obj); inline Heap* heap() const { return heap_; } + inline Isolate* isolate() const; CodeFlusher* code_flusher() { return code_flusher_; } inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; } @@ -568,18 +589,30 @@ class MarkCompactCollector { enum SweeperType { CONSERVATIVE, LAZY_CONSERVATIVE, + PARALLEL_CONSERVATIVE, + CONCURRENT_CONSERVATIVE, PRECISE }; + enum SweepingParallelism { + SWEEP_SEQUENTIALLY, + SWEEP_IN_PARALLEL + }; + #ifdef VERIFY_HEAP void VerifyMarkbitsAreClean(); static void VerifyMarkbitsAreClean(PagedSpace* space); static void VerifyMarkbitsAreClean(NewSpace* space); + void VerifyWeakEmbeddedMapsInOptimizedCode(); + void VerifyOmittedPrototypeChecks(); #endif // Sweep a single page from the given space conservatively. // Return a number of reclaimed bytes. - static intptr_t SweepConservatively(PagedSpace* space, Page* p); + template<SweepingParallelism type> + static intptr_t SweepConservatively(PagedSpace* space, + FreeList* free_list, + Page* p); INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) { return Page::FromAddress(reinterpret_cast<Address>(anchor))-> @@ -596,7 +629,7 @@ class MarkCompactCollector { IsEvacuationCandidate(); } - void EvictEvacuationCandidate(Page* page) { + INLINE(void EvictEvacuationCandidate(Page* page)) { if (FLAG_trace_fragmentation) { PrintF("Page %p is too popular. Disabling evacuation.\n", reinterpret_cast<void*>(page)); @@ -639,8 +672,38 @@ class MarkCompactCollector { void ClearMarkbits(); + bool abort_incremental_marking() const { return abort_incremental_marking_; } + bool is_compacting() const { return compacting_; } + MarkingParity marking_parity() { return marking_parity_; } + + // Concurrent and parallel sweeping support. + void SweepInParallel(PagedSpace* space, + FreeList* private_free_list, + FreeList* free_list); + + void WaitUntilSweepingCompleted(); + + intptr_t StealMemoryFromSweeperThreads(PagedSpace* space); + + bool AreSweeperThreadsActivated(); + + bool IsConcurrentSweepingInProgress(); + + void set_sequential_sweeping(bool sequential_sweeping) { + sequential_sweeping_ = sequential_sweeping; + } + + bool sequential_sweeping() const { + return sequential_sweeping_; + } + + // Parallel marking support. 
+ void MarkInParallel(); + + void WaitUntilMarkingCompleted(); + private: MarkCompactCollector(); ~MarkCompactCollector(); @@ -649,6 +712,10 @@ class MarkCompactCollector { void RemoveDeadInvalidatedCode(); void ProcessInvalidatedCode(ObjectVisitor* visitor); + void UnlinkEvacuationCandidates(); + void ReleaseEvacuationCandidates(); + + void StartSweeperThreads(); #ifdef DEBUG enum CollectorState { @@ -673,12 +740,19 @@ class MarkCompactCollector { bool abort_incremental_marking_; + MarkingParity marking_parity_; + // True if we are collecting slots to perform evacuation from evacuation // candidates. bool compacting_; bool was_marked_incrementally_; + // True if concurrent or parallel sweeping is currently in progress. + bool sweeping_pending_; + + bool sequential_sweeping_; + // A pointer to the current stack-allocated GC tracer object during a full // collection (NULL before and after). GCTracer* tracer_; @@ -727,13 +801,9 @@ class MarkCompactCollector { // Mark the heap roots and all objects reachable from them. void MarkRoots(RootMarkingVisitor* visitor); - // Mark the symbol table specially. References to symbols from the - // symbol table are weak. - void MarkSymbolTable(); - - // Mark objects in object groups that have at least one object in the - // group marked. - void MarkObjectGroups(); + // Mark the string table specially. References to internalized strings from + // the string table are weak. + void MarkStringTable(); // Mark objects in implicit references groups if their parent object // is marked. @@ -741,7 +811,7 @@ class MarkCompactCollector { // Mark all objects which are reachable due to host application // logic like object groups or implicit references' groups. - void ProcessExternalMarking(); + void ProcessExternalMarking(RootMarkingVisitor* visitor); // Mark objects reachable (transitively) from objects in the marking stack // or overflowed in the heap. @@ -765,13 +835,17 @@ class MarkCompactCollector { // Callback function for telling whether the object *p is an unmarked // heap object. static bool IsUnmarkedHeapObject(Object** p); + static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p); // Map transitions from a live map to a dead map must be killed. // We replace them with a null descriptor, with the same key. - void ClearNonLiveTransitions(); + void ClearNonLiveReferences(); void ClearNonLivePrototypeTransitions(Map* map); void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark); + void ClearAndDeoptimizeDependentCode(Map* map); + void ClearNonLiveDependentCode(Map* map); + // Marking detaches initial maps from SharedFunctionInfo objects // to make this reference weak. We need to reattach initial maps // back after collection. This is either done during @@ -834,6 +908,22 @@ class MarkCompactCollector { }; +class SequentialSweepingScope BASE_EMBEDDED { + public: + explicit SequentialSweepingScope(MarkCompactCollector *collector) : + collector_(collector) { + collector_->set_sequential_sweeping(true); + } + + ~SequentialSweepingScope() { + collector_->set_sequential_sweeping(false); + } + + private: + MarkCompactCollector* collector_; +}; + + const char* AllocationSpaceName(AllocationSpace space); } } // namespace v8::internal diff --git a/deps/v8/src/marking-thread.cc b/deps/v8/src/marking-thread.cc new file mode 100644 index 0000000000..ac64381268 --- /dev/null +++ b/deps/v8/src/marking-thread.cc @@ -0,0 +1,85 @@ +// Copyright 2013 the V8 project authors. All rights reserved. 
+// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "marking-thread.h" + +#include "v8.h" + +#include "isolate.h" +#include "v8threads.h" + +namespace v8 { +namespace internal { + +MarkingThread::MarkingThread(Isolate* isolate) + : Thread("MarkingThread"), + isolate_(isolate), + heap_(isolate->heap()), + start_marking_semaphore_(OS::CreateSemaphore(0)), + end_marking_semaphore_(OS::CreateSemaphore(0)), + stop_semaphore_(OS::CreateSemaphore(0)) { + NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false)); + id_ = NoBarrier_AtomicIncrement(&id_counter_, 1); +} + + +Atomic32 MarkingThread::id_counter_ = -1; + + +void MarkingThread::Run() { + Isolate::SetIsolateThreadLocals(isolate_, NULL); + + while (true) { + start_marking_semaphore_->Wait(); + + if (Acquire_Load(&stop_thread_)) { + stop_semaphore_->Signal(); + return; + } + + end_marking_semaphore_->Signal(); + } +} + + +void MarkingThread::Stop() { + Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); + start_marking_semaphore_->Signal(); + stop_semaphore_->Wait(); +} + + +void MarkingThread::StartMarking() { + start_marking_semaphore_->Signal(); +} + + +void MarkingThread::WaitForMarkingThread() { + end_marking_semaphore_->Wait(); +} + +} } // namespace v8::internal diff --git a/deps/v8/src/inspector.cc b/deps/v8/src/marking-thread.h index 833d338439..9efa3af132 100644 --- a/deps/v8/src/inspector.cc +++ b/deps/v8/src/marking-thread.h @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2013 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,39 +25,47 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+#ifndef V8_MARKING_THREAD_H_ +#define V8_MARKING_THREAD_H_ -#include "v8.h" -#include "inspector.h" +#include "atomicops.h" +#include "flags.h" +#include "platform.h" +#include "v8utils.h" +#include "spaces.h" + +#include "heap.h" namespace v8 { namespace internal { -#ifdef INSPECTOR - -//============================================================================ -// The Inspector. +class MarkingThread : public Thread { + public: + explicit MarkingThread(Isolate* isolate); -void Inspector::DumpObjectType(FILE* out, Object* obj, bool print_more) { - // Dump the object pointer. - OS::FPrint(out, "%p:", reinterpret_cast<void*>(obj)); - if (obj->IsHeapObject()) { - HeapObject* hobj = HeapObject::cast(obj); - OS::FPrint(out, " size %d :", hobj->Size()); - } + void Run(); + void Stop(); + void StartMarking(); + void WaitForMarkingThread(); - // Dump each object classification that matches this object. -#define FOR_EACH_TYPE(type) \ - if (obj->Is##type()) { \ - OS::FPrint(out, " %s", #type); \ + ~MarkingThread() { + delete start_marking_semaphore_; + delete end_marking_semaphore_; + delete stop_semaphore_; } - OBJECT_TYPE_LIST(FOR_EACH_TYPE) - HEAP_OBJECT_TYPE_LIST(FOR_EACH_TYPE) -#undef FOR_EACH_TYPE -} - -#endif // INSPECTOR + private: + Isolate* isolate_; + Heap* heap_; + Semaphore* start_marking_semaphore_; + Semaphore* end_marking_semaphore_; + Semaphore* stop_semaphore_; + volatile AtomicWord stop_thread_; + int id_; + static Atomic32 id_counter_; +}; } } // namespace v8::internal +#endif // V8_MARKING_THREAD_H_ diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js index aee56af4f9..0e02541025 100644 --- a/deps/v8/src/math.js +++ b/deps/v8/src/math.js @@ -37,7 +37,7 @@ var $abs = MathAbs; function MathConstructor() {} %FunctionSetInstanceClassName(MathConstructor, 'Math'); var $Math = new MathConstructor(); -$Math.__proto__ = $Object.prototype; +%SetPrototype($Math, $Object.prototype); %SetProperty(global, "Math", $Math, DONT_ENUM); // ECMA 262 - 15.8.2.1 @@ -131,19 +131,16 @@ function MathMax(arg1, arg2) { // length == 2 // All comparisons failed, one of the arguments must be NaN. return 0/0; // Compiler constant-folds this to NaN. } - if (length == 0) { - return -1/0; // Compiler constant-folds this to -Infinity. - } - var r = arg1; - if (!IS_NUMBER(r)) r = NonNumberToNumber(r); - if (NUMBER_IS_NAN(r)) return r; - for (var i = 1; i < length; i++) { + var r = -1/0; // Compiler constant-folds this to -Infinity. + for (var i = 0; i < length; i++) { var n = %_Arguments(i); if (!IS_NUMBER(n)) n = NonNumberToNumber(n); - if (NUMBER_IS_NAN(n)) return n; // Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be // a Smi or heap number. - if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n; + if (NUMBER_IS_NAN(n) || n > r || + (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) { + r = n; + } } return r; } @@ -164,19 +161,16 @@ function MathMin(arg1, arg2) { // length == 2 // All comparisons failed, one of the arguments must be NaN. return 0/0; // Compiler constant-folds this to NaN. } - if (length == 0) { - return 1/0; // Compiler constant-folds this to Infinity. - } - var r = arg1; - if (!IS_NUMBER(r)) r = NonNumberToNumber(r); - if (NUMBER_IS_NAN(r)) return r; - for (var i = 1; i < length; i++) { + var r = 1/0; // Compiler constant-folds this to Infinity. + for (var i = 0; i < length; i++) { var n = %_Arguments(i); if (!IS_NUMBER(n)) n = NonNumberToNumber(n); - if (NUMBER_IS_NAN(n)) return n; // Make sure -0 is considered less than +0. 
-0 is never a Smi, +0 can be a // Smi or a heap number. - if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n; + if (NUMBER_IS_NAN(n) || n < r || + (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) { + r = n; + } } return r; } diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc index a041770d12..de18a4b1aa 100644 --- a/deps/v8/src/messages.cc +++ b/deps/v8/src/messages.cc @@ -38,14 +38,15 @@ namespace internal { // If no message listeners have been registered this one is called // by default. -void MessageHandler::DefaultMessageReport(const MessageLocation* loc, +void MessageHandler::DefaultMessageReport(Isolate* isolate, + const MessageLocation* loc, Handle<Object> message_obj) { - SmartArrayPointer<char> str = GetLocalizedMessage(message_obj); + SmartArrayPointer<char> str = GetLocalizedMessage(isolate, message_obj); if (loc == NULL) { PrintF("%s\n", *str); } else { - HandleScope scope; - Handle<Object> data(loc->script()->name()); + HandleScope scope(isolate); + Handle<Object> data(loc->script()->name(), isolate); SmartArrayPointer<char> data_str; if (data->IsString()) data_str = Handle<String>::cast(data)->ToCString(DISALLOW_NULLS); @@ -61,7 +62,7 @@ Handle<JSMessageObject> MessageHandler::MakeMessageObject( Vector< Handle<Object> > args, Handle<String> stack_trace, Handle<JSArray> stack_frames) { - Handle<String> type_handle = FACTORY->LookupAsciiSymbol(type); + Handle<String> type_handle = FACTORY->InternalizeUtf8String(type); Handle<FixedArray> arguments_elements = FACTORY->NewFixedArray(args.length()); for (int i = 0; i < args.length(); i++) { @@ -112,7 +113,7 @@ void MessageHandler::ReportMessage(Isolate* isolate, if (isolate->has_pending_exception()) { isolate->pending_exception()->ToObject(&exception_object); } - Handle<Object> exception_handle(exception_object); + Handle<Object> exception_handle(exception_object, isolate); Isolate::ExceptionScope exception_scope(isolate); isolate->clear_pending_exception(); @@ -124,7 +125,7 @@ void MessageHandler::ReportMessage(Isolate* isolate, v8::NeanderArray global_listeners(FACTORY->message_listeners()); int global_length = global_listeners.length(); if (global_length == 0) { - DefaultMessageReport(loc, message); + DefaultMessageReport(isolate, loc, message); if (isolate->has_scheduled_exception()) { isolate->clear_scheduled_exception(); } @@ -152,25 +153,30 @@ void MessageHandler::ReportMessage(Isolate* isolate, } -Handle<String> MessageHandler::GetMessage(Handle<Object> data) { - Handle<String> fmt_str = FACTORY->LookupAsciiSymbol("FormatMessage"); +Handle<String> MessageHandler::GetMessage(Isolate* isolate, + Handle<Object> data) { + Factory* factory = isolate->factory(); + Handle<String> fmt_str = + factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage")); Handle<JSFunction> fun = Handle<JSFunction>( JSFunction::cast( - Isolate::Current()->js_builtins_object()-> + isolate->js_builtins_object()-> GetPropertyNoExceptionThrown(*fmt_str))); - Handle<Object> argv[] = { data }; + Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data); + Handle<Object> argv[] = { Handle<Object>(message->type(), isolate), + Handle<Object>(message->arguments(), isolate) }; bool caught_exception; Handle<Object> result = Execution::TryCall(fun, - Isolate::Current()->js_builtins_object(), + isolate->js_builtins_object(), ARRAY_SIZE(argv), argv, &caught_exception); if (caught_exception || !result->IsString()) { - return FACTORY->LookupAsciiSymbol("<error>"); + return 
factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>")); } Handle<String> result_string = Handle<String>::cast(result); // A string that has been obtained from JS code in this way is @@ -184,9 +190,10 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) { SmartArrayPointer<char> MessageHandler::GetLocalizedMessage( + Isolate* isolate, Handle<Object> data) { - HandleScope scope; - return GetMessage(data)->ToCString(DISALLOW_NULLS); + HandleScope scope(isolate); + return GetMessage(isolate, data)->ToCString(DISALLOW_NULLS); } diff --git a/deps/v8/src/messages.h b/deps/v8/src/messages.h index 358509ec3b..3361abe231 100644 --- a/deps/v8/src/messages.h +++ b/deps/v8/src/messages.h @@ -102,10 +102,12 @@ class MessageHandler { MessageLocation* loc, Handle<Object> message); - static void DefaultMessageReport(const MessageLocation* loc, + static void DefaultMessageReport(Isolate* isolate, + const MessageLocation* loc, Handle<Object> message_obj); - static Handle<String> GetMessage(Handle<Object> data); - static SmartArrayPointer<char> GetLocalizedMessage(Handle<Object> data); + static Handle<String> GetMessage(Isolate* isolate, Handle<Object> data); + static SmartArrayPointer<char> GetLocalizedMessage(Isolate* isolate, + Handle<Object> data); }; } } // namespace v8::internal diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js index fe894b578f..7353444b4b 100644 --- a/deps/v8/src/messages.js +++ b/deps/v8/src/messages.js @@ -26,18 +26,137 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // ------------------------------------------------------------------- -// -// If this object gets passed to an error constructor the error will -// get an accessor for .message that constructs a descriptive error -// message on access. -var kAddMessageAccessorsMarker = { }; - -// This will be lazily initialized when first needed (and forcibly -// overwritten even though it's const). 
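The hunks below replace the lazily initialized message dictionary with a plain kMessages object literal of template arrays, and FormatString(format, args) now receives the argument vector directly instead of a message object. Each template is a list of literal fragments interleaved with "%0".."%3" placeholders. A minimal standalone sketch of the substitution scheme (formatMessage is a hypothetical stand-in; the real FormatString additionally routes each argument through NoSideEffectToString):

// Each kMessages entry is an array of literal fragments and "%N" placeholders, e.g.
//   non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"]
function formatMessage(template, args) {
  var result = "";
  for (var i = 0; i < template.length; i++) {
    var part = template[i];
    if (part.length == 2 && part.charAt(0) == "%") {
      result += args[+part.charAt(1)];  // substitute %0..%3
    } else {
      result += part;                   // literal fragment
    }
  }
  return result;
}
// formatMessage(["Cannot read property '", "%0", "' of ", "%1"], ["foo", "undefined"])
//   => "Cannot read property 'foo' of undefined"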
-var kMessages = 0; -function FormatString(format, message) { - var args = %MessageGetArguments(message); +var kMessages = { + // Error + cyclic_proto: ["Cyclic __proto__ value"], + code_gen_from_strings: ["%0"], + // TypeError + unexpected_token: ["Unexpected token ", "%0"], + unexpected_token_number: ["Unexpected number"], + unexpected_token_string: ["Unexpected string"], + unexpected_token_identifier: ["Unexpected identifier"], + unexpected_reserved: ["Unexpected reserved word"], + unexpected_strict_reserved: ["Unexpected strict mode reserved word"], + unexpected_eos: ["Unexpected end of input"], + malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"], + unterminated_regexp: ["Invalid regular expression: missing /"], + regexp_flags: ["Cannot supply flags when constructing one RegExp from another"], + incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"], + invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"], + invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"], + invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"], + invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"], + multiple_defaults_in_switch: ["More than one default clause in switch statement"], + newline_after_throw: ["Illegal newline after throw"], + redeclaration: ["%0", " '", "%1", "' has already been declared"], + no_catch_or_finally: ["Missing catch or finally after try"], + unknown_label: ["Undefined label '", "%0", "'"], + uncaught_exception: ["Uncaught ", "%0"], + stack_trace: ["Stack Trace:\n", "%0"], + called_non_callable: ["%0", " is not a function"], + undefined_method: ["Object ", "%1", " has no method '", "%0", "'"], + property_not_function: ["Property '", "%0", "' of object ", "%1", " is not a function"], + cannot_convert_to_primitive: ["Cannot convert object to primitive value"], + not_constructor: ["%0", " is not a constructor"], + not_defined: ["%0", " is not defined"], + non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"], + non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"], + non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"], + with_expression: ["%0", " has no properties"], + illegal_invocation: ["Illegal invocation"], + no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"], + apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"], + apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"], + invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"], + instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"], + instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"], + null_to_object: ["Cannot convert null to object"], + reduce_no_initial: ["Reduce of empty array with no initial value"], + getter_must_be_callable: ["Getter must be a function: ", "%0"], + setter_must_be_callable: ["Setter must be a function: ", "%0"], + value_and_accessor: ["Invalid property. 
A property cannot both have accessors and be writable or have a value, ", "%0"], + proto_object_or_null: ["Object prototype may only be an Object or null"], + property_desc_object: ["Property description must be an object: ", "%0"], + redefine_disallowed: ["Cannot redefine property: ", "%0"], + define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."], + non_extensible_proto: ["%0", " is not extensible"], + handler_non_object: ["Proxy.", "%0", " called with non-object as handler"], + proto_non_object: ["Proxy.", "%0", " called with non-object as prototype"], + trap_function_expected: ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"], + handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"], + handler_trap_must_be_callable: ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"], + handler_returned_false: ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"], + handler_returned_undefined: ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"], + proxy_prop_not_configurable: ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"], + proxy_non_object_prop_names: ["Trap '", "%1", "' returned non-object ", "%0"], + proxy_repeated_prop_name: ["Trap '", "%1", "' returned repeated property name '", "%2", "'"], + invalid_weakmap_key: ["Invalid value used as weak map key"], + not_date_object: ["this is not a Date object."], + observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"], + observe_non_function: ["Object.", "%0", " cannot deliver to non-function"], + observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"], + observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"], + observe_notify_non_notifier: ["notify called on non-notifier object"], + // RangeError + invalid_array_length: ["Invalid array length"], + stack_overflow: ["Maximum call stack size exceeded"], + invalid_time_value: ["Invalid time value"], + // SyntaxError + unable_to_parse: ["Parse error"], + invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"], + invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"], + illegal_break: ["Illegal break statement"], + illegal_continue: ["Illegal continue statement"], + illegal_return: ["Illegal return statement"], + illegal_let: ["Illegal let declaration outside extended mode"], + error_loading_debugger: ["Error loading debugger"], + no_input_to_regexp: ["No input to ", "%0"], + invalid_json: ["String '", "%0", "' is not valid JSON"], + circular_structure: ["Converting circular structure to JSON"], + called_on_non_object: ["%0", " called on non-object"], + called_on_null_or_undefined: ["%0", " called on null or undefined"], + array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"], + object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"], + illegal_access: ["Illegal access"], + invalid_preparser_data: ["Invalid preparser data for function ", "%0"], + strict_mode_with: ["Strict mode code may not include a with statement"], + strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"], + too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"], + too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"], + too_many_variables: ["Too many variables declared (only 131071 allowed)"], + strict_param_name: ["Parameter name 
eval or arguments is not allowed in strict mode"], + strict_param_dupe: ["Strict mode function may not have duplicate parameter names"], + strict_var_name: ["Variable name may not be eval or arguments in strict mode"], + strict_function_name: ["Function name may not be eval or arguments in strict mode"], + strict_octal_literal: ["Octal literals are not allowed in strict mode."], + strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"], + accessor_data_property: ["Object literal may not have data and accessor property with the same name"], + accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"], + strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"], + strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"], + strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"], + strict_reserved_word: ["Use of future reserved word in strict mode"], + strict_delete: ["Delete of an unqualified identifier in strict mode."], + strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"], + strict_const: ["Use of const in strict mode."], + strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ], + strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"], + strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"], + strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"], + strict_caller: ["Illegal access to a strict mode caller function."], + unprotected_let: ["Illegal let declaration in unprotected statement context."], + unprotected_const: ["Illegal const declaration in unprotected statement context."], + cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"], + redef_external_array_element: ["Cannot redefine a property of an object with external array elements"], + harmony_const_assign: ["Assignment to constant variable."], + invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"], + module_type_error: ["Module '", "%0", "' used improperly"], + module_export_undefined: ["Export '", "%0", "' is not defined in module"], +}; + + +function FormatString(format, args) { var result = ""; var arg_num = 0; for (var i = 0; i < format.length; i++) { @@ -48,7 +167,7 @@ function FormatString(format, message) { if (arg_num < 4) { // str is one of %0, %1, %2 or %3. try { - str = ToDetailString(args[arg_num]); + str = NoSideEffectToString(args[arg_num]); } catch (e) { if (%IsJSModule(args[arg_num])) str = "module"; @@ -65,6 +184,27 @@ function FormatString(format, message) { } +function NoSideEffectToString(obj) { + if (IS_STRING(obj)) return obj; + if (IS_NUMBER(obj)) return %_NumberToString(obj); + if (IS_BOOLEAN(obj)) return x ? 
'true' : 'false'; + if (IS_UNDEFINED(obj)) return 'undefined'; + if (IS_NULL(obj)) return 'null'; + if (IS_FUNCTION(obj)) return %_CallFunction(obj, FunctionToString); + if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) { + var constructor = %GetDataProperty(obj, "constructor"); + if (typeof constructor == "function") { + var constructorName = constructor.name; + if (IS_STRING(constructorName) && constructorName !== "") { + return "#<" + constructorName + ">"; + } + } + } + if (IsNativeErrorObject(obj)) return %_CallFunction(obj, ErrorToString); + return %_CallFunction(obj, ObjectToString); +} + + // To check if something is a native error we need to check the // concrete native error types. It is not sufficient to use instanceof // since it possible to create an object that has Error.prototype on @@ -112,13 +252,8 @@ function ToDetailString(obj) { function MakeGenericError(constructor, type, args) { - if (IS_UNDEFINED(args)) { - args = []; - } - var e = new constructor(kAddMessageAccessorsMarker); - e.type = type; - e.arguments = args; - return e; + if (IS_UNDEFINED(args)) args = []; + return new constructor(FormatMessage(type, args)); } @@ -135,156 +270,10 @@ function MakeGenericError(constructor, type, args) { // Helper functions; called from the runtime system. -function FormatMessage(message) { - if (kMessages === 0) { - var messagesDictionary = [ - // Error - "cyclic_proto", ["Cyclic __proto__ value"], - "code_gen_from_strings", ["%0"], - // TypeError - "unexpected_token", ["Unexpected token ", "%0"], - "unexpected_token_number", ["Unexpected number"], - "unexpected_token_string", ["Unexpected string"], - "unexpected_token_identifier", ["Unexpected identifier"], - "unexpected_reserved", ["Unexpected reserved word"], - "unexpected_strict_reserved", ["Unexpected strict mode reserved word"], - "unexpected_eos", ["Unexpected end of input"], - "malformed_regexp", ["Invalid regular expression: /", "%0", "/: ", "%1"], - "unterminated_regexp", ["Invalid regular expression: missing /"], - "regexp_flags", ["Cannot supply flags when constructing one RegExp from another"], - "incompatible_method_receiver", ["Method ", "%0", " called on incompatible receiver ", "%1"], - "invalid_lhs_in_assignment", ["Invalid left-hand side in assignment"], - "invalid_lhs_in_for_in", ["Invalid left-hand side in for-in"], - "invalid_lhs_in_postfix_op", ["Invalid left-hand side expression in postfix operation"], - "invalid_lhs_in_prefix_op", ["Invalid left-hand side expression in prefix operation"], - "multiple_defaults_in_switch", ["More than one default clause in switch statement"], - "newline_after_throw", ["Illegal newline after throw"], - "redeclaration", ["%0", " '", "%1", "' has already been declared"], - "no_catch_or_finally", ["Missing catch or finally after try"], - "unknown_label", ["Undefined label '", "%0", "'"], - "uncaught_exception", ["Uncaught ", "%0"], - "stack_trace", ["Stack Trace:\n", "%0"], - "called_non_callable", ["%0", " is not a function"], - "undefined_method", ["Object ", "%1", " has no method '", "%0", "'"], - "property_not_function", ["Property '", "%0", "' of object ", "%1", " is not a function"], - "cannot_convert_to_primitive", ["Cannot convert object to primitive value"], - "not_constructor", ["%0", " is not a constructor"], - "not_defined", ["%0", " is not defined"], - "non_object_property_load", ["Cannot read property '", "%0", "' of ", "%1"], - "non_object_property_store", ["Cannot set property '", "%0", "' of ", "%1"], - "non_object_property_call", 
["Cannot call method '", "%0", "' of ", "%1"], - "with_expression", ["%0", " has no properties"], - "illegal_invocation", ["Illegal invocation"], - "no_setter_in_callback", ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"], - "apply_non_function", ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"], - "apply_wrong_args", ["Function.prototype.apply: Arguments list has wrong type"], - "invalid_in_operator_use", ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"], - "instanceof_function_expected", ["Expecting a function in instanceof check, but got ", "%0"], - "instanceof_nonobject_proto", ["Function has non-object prototype '", "%0", "' in instanceof check"], - "null_to_object", ["Cannot convert null to object"], - "reduce_no_initial", ["Reduce of empty array with no initial value"], - "getter_must_be_callable", ["Getter must be a function: ", "%0"], - "setter_must_be_callable", ["Setter must be a function: ", "%0"], - "value_and_accessor", ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"], - "proto_object_or_null", ["Object prototype may only be an Object or null"], - "property_desc_object", ["Property description must be an object: ", "%0"], - "redefine_disallowed", ["Cannot redefine property: ", "%0"], - "define_disallowed", ["Cannot define property:", "%0", ", object is not extensible."], - "non_extensible_proto", ["%0", " is not extensible"], - "handler_non_object", ["Proxy.", "%0", " called with non-object as handler"], - "proto_non_object", ["Proxy.", "%0", " called with non-object as prototype"], - "trap_function_expected", ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"], - "handler_trap_missing", ["Proxy handler ", "%0", " has no '", "%1", "' trap"], - "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"], - "handler_returned_false", ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"], - "handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"], - "proxy_prop_not_configurable", ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"], - "proxy_non_object_prop_names", ["Trap '", "%1", "' returned non-object ", "%0"], - "proxy_repeated_prop_name", ["Trap '", "%1", "' returned repeated property name '", "%2", "'"], - "invalid_weakmap_key", ["Invalid value used as weak map key"], - "not_date_object", ["this is not a Date object."], - // RangeError - "invalid_array_length", ["Invalid array length"], - "stack_overflow", ["Maximum call stack size exceeded"], - "invalid_time_value", ["Invalid time value"], - // SyntaxError - "unable_to_parse", ["Parse error"], - "invalid_regexp_flags", ["Invalid flags supplied to RegExp constructor '", "%0", "'"], - "invalid_regexp", ["Invalid RegExp pattern /", "%0", "/"], - "illegal_break", ["Illegal break statement"], - "illegal_continue", ["Illegal continue statement"], - "illegal_return", ["Illegal return statement"], - "illegal_let", ["Illegal let declaration outside extended mode"], - "error_loading_debugger", ["Error loading debugger"], - "no_input_to_regexp", ["No input to ", "%0"], - "invalid_json", ["String '", "%0", "' is not valid JSON"], - "circular_structure", ["Converting circular structure to JSON"], - "called_on_non_object", ["%0", " called on non-object"], - "called_on_null_or_undefined", ["%0", " called on null or undefined"], - 
"array_indexof_not_defined", ["Array.getIndexOf: Argument undefined"], - "object_not_extensible", ["Can't add property ", "%0", ", object is not extensible"], - "illegal_access", ["Illegal access"], - "invalid_preparser_data", ["Invalid preparser data for function ", "%0"], - "strict_mode_with", ["Strict mode code may not include a with statement"], - "strict_catch_variable", ["Catch variable may not be eval or arguments in strict mode"], - "too_many_arguments", ["Too many arguments in function call (only 32766 allowed)"], - "too_many_parameters", ["Too many parameters in function definition (only 32766 allowed)"], - "too_many_variables", ["Too many variables declared (only 131071 allowed)"], - "strict_param_name", ["Parameter name eval or arguments is not allowed in strict mode"], - "strict_param_dupe", ["Strict mode function may not have duplicate parameter names"], - "strict_var_name", ["Variable name may not be eval or arguments in strict mode"], - "strict_function_name", ["Function name may not be eval or arguments in strict mode"], - "strict_octal_literal", ["Octal literals are not allowed in strict mode."], - "strict_duplicate_property", ["Duplicate data property in object literal not allowed in strict mode"], - "accessor_data_property", ["Object literal may not have data and accessor property with the same name"], - "accessor_get_set", ["Object literal may not have multiple get/set accessors with the same name"], - "strict_lhs_assignment", ["Assignment to eval or arguments is not allowed in strict mode"], - "strict_lhs_postfix", ["Postfix increment/decrement may not have eval or arguments operand in strict mode"], - "strict_lhs_prefix", ["Prefix increment/decrement may not have eval or arguments operand in strict mode"], - "strict_reserved_word", ["Use of future reserved word in strict mode"], - "strict_delete", ["Delete of an unqualified identifier in strict mode."], - "strict_delete_property", ["Cannot delete property '", "%0", "' of ", "%1"], - "strict_const", ["Use of const in strict mode."], - "strict_function", ["In strict mode code, functions can only be declared at top level or immediately within another function." 
], - "strict_read_only_property", ["Cannot assign to read only property '", "%0", "' of ", "%1"], - "strict_cannot_assign", ["Cannot assign to read only '", "%0", "' in strict mode"], - "strict_poison_pill", ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"], - "strict_caller", ["Illegal access to a strict mode caller function."], - "unprotected_let", ["Illegal let declaration in unprotected statement context."], - "unprotected_const", ["Illegal const declaration in unprotected statement context."], - "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"], - "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"], - "harmony_const_assign", ["Assignment to constant variable."], - "invalid_module_path", ["Module does not export '", "%0", "', or export is not itself a module"], - "module_type_error", ["Module '", "%0", "' used improperly"], - "module_export_undefined", ["Export '", "%0", "' is not defined in module"], - ]; - var messages = { __proto__ : null }; - for (var i = 0; i < messagesDictionary.length; i += 2) { - var key = messagesDictionary[i]; - var format = messagesDictionary[i + 1]; - - for (var j = 0; j < format.length; j++) { - %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j], - DONT_DELETE | READ_ONLY | DONT_ENUM); - } - %IgnoreAttributesAndSetProperty(format, 'length', format.length, - DONT_DELETE | READ_ONLY | DONT_ENUM); - %PreventExtensions(format); - %IgnoreAttributesAndSetProperty(messages, - key, - format, - DONT_DELETE | DONT_ENUM | READ_ONLY); - } - %PreventExtensions(messages); - %IgnoreAttributesAndSetProperty(builtins, "kMessages", - messages, - DONT_DELETE | DONT_ENUM | READ_ONLY); - } - var message_type = %MessageGetType(message); - var format = kMessages[message_type]; - if (!format) return "<unknown message " + message_type + ">"; - return FormatString(format, message); +function FormatMessage(type, args) { + var format = kMessages[type]; + if (!format) return "<unknown message " + type + ">"; + return FormatString(format, args); } @@ -573,7 +562,7 @@ function ScriptNameOrSourceURL() { %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo); if (match) { this.cachedNameOrSourceURL = - SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]); + %_SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]); } } return this.cachedNameOrSourceURL; @@ -762,29 +751,6 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) { // ---------------------------------------------------------------------------- // Error implementation -// Defines accessors for a property that is calculated the first time -// the property is read. -function DefineOneShotAccessor(obj, name, fun) { - // Note that the accessors consistently operate on 'obj', not 'this'. - // Since the object may occur in someone else's prototype chain we - // can't rely on 'this' being the same as 'obj'. 
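The DefineOneShotAccessor helper removed in this hunk (the rest of its body follows just below) implemented a self-replacing lazy property: the first read invokes a factory and caches its result, and any write disarms the factory; the patch inlines this behavior into dedicated 'stack' getters instead. Roughly the same pattern expressed with the public Object.defineProperty API rather than V8's internal %DefineOrRedefineAccessorProperty (a sketch under that assumption, not the deleted code itself):

function defineLazyProperty(obj, name, factory) {
  var value;
  var pending = factory;            // cleared after the first read or any write
  Object.defineProperty(obj, name, {
    get: function() {
      if (pending == null) return value;
      value = pending(obj);         // computed once, on first access; operates on obj, not this
      pending = null;
      return value;
    },
    set: function(v) {              // an explicit write wins over the factory
      pending = null;
      value = v;
    },
    enumerable: false,
    configurable: true
  });
}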
- var value; - var value_factory = fun; - var getter = function() { - if (value_factory == null) { - return value; - } - value = value_factory(obj); - value_factory = null; - return value; - }; - var setter = function(v) { - value_factory = null; - value = v; - }; - %DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM); -} - function CallSite(receiver, fun, pos) { this.receiver = receiver; this.fun = fun; @@ -854,7 +820,8 @@ function CallSiteGetMethodName() { %_CallFunction(this.receiver, ownName, ObjectLookupSetter) === this.fun || - this.receiver[ownName] === this.fun)) { + (IS_OBJECT(this.receiver) && + %GetDataProperty(this.receiver, ownName) === this.fun))) { // To handle DontEnum properties we guess that the method has // the same name as the function. return ownName; @@ -863,8 +830,8 @@ function CallSiteGetMethodName() { for (var prop in this.receiver) { if (%_CallFunction(this.receiver, prop, ObjectLookupGetter) === this.fun || %_CallFunction(this.receiver, prop, ObjectLookupSetter) === this.fun || - (!%_CallFunction(this.receiver, prop, ObjectLookupGetter) && - this.receiver[prop] === this.fun)) { + (IS_OBJECT(this.receiver) && + %GetDataProperty(this.receiver, prop) === this.fun)) { // If we find more than one match bail out to avoid confusion. if (name) { return null; @@ -917,10 +884,10 @@ function CallSiteGetPosition() { } function CallSiteIsConstructor() { - var constructor = this.receiver ? this.receiver.constructor : null; - if (!constructor) { - return false; - } + var receiver = this.receiver; + var constructor = + IS_OBJECT(receiver) ? %GetDataProperty(receiver, "constructor") : null; + if (!constructor) return false; return this.fun === constructor; } @@ -967,12 +934,14 @@ function CallSiteToString() { var typeName = GetTypeName(this, true); var methodName = this.getMethodName(); if (functionName) { - if (typeName && functionName.indexOf(typeName) != 0) { + if (typeName && + %_CallFunction(functionName, typeName, StringIndexOf) != 0) { line += typeName + "."; } line += functionName; - if (methodName && functionName.lastIndexOf("." + methodName) != - functionName.length - methodName.length - 1) { + if (methodName && + (%_CallFunction(functionName, "." 
+ methodName, StringIndexOf) != + functionName.length - methodName.length - 1)) { line += " [as " + methodName + "]"; } } else { @@ -1050,17 +1019,37 @@ function FormatEvalOrigin(script) { return eval_origin; } -function FormatStackTrace(error, frames) { - var lines = []; + +function FormatErrorString(error) { try { - lines.push(error.toString()); + return %_CallFunction(error, ErrorToString); } catch (e) { try { - lines.push("<error: " + e + ">"); + return "<error: " + e + ">"; } catch (ee) { - lines.push("<error>"); + return "<error>"; } } +} + + +function GetStackFrames(raw_stack) { + var frames = new InternalArray(); + for (var i = 0; i < raw_stack.length; i += 4) { + var recv = raw_stack[i]; + var fun = raw_stack[i + 1]; + var code = raw_stack[i + 2]; + var pc = raw_stack[i + 3]; + var pos = %FunctionGetPositionForOffset(code, pc); + frames.push(new CallSite(recv, fun, pos)); + } + return frames; +} + + +function FormatStackTrace(error_string, frames) { + var lines = new InternalArray(); + lines.push(error_string); for (var i = 0; i < frames.length; i++) { var frame = frames[i]; var line; @@ -1076,25 +1065,9 @@ function FormatStackTrace(error, frames) { } lines.push(" at " + line); } - return lines.join("\n"); + return %_CallFunction(lines, "\n", ArrayJoin); } -function FormatRawStackTrace(error, raw_stack) { - var frames = [ ]; - for (var i = 0; i < raw_stack.length; i += 4) { - var recv = raw_stack[i]; - var fun = raw_stack[i + 1]; - var code = raw_stack[i + 2]; - var pc = raw_stack[i + 3]; - var pos = %FunctionGetPositionForOffset(code, pc); - frames.push(new CallSite(recv, fun, pos)); - } - if (IS_FUNCTION($Error.prepareStackTrace)) { - return $Error.prepareStackTrace(error, frames); - } else { - return FormatStackTrace(error, frames); - } -} function GetTypeName(obj, requireConstructor) { var constructor = obj.receiver.constructor; @@ -1110,18 +1083,58 @@ function GetTypeName(obj, requireConstructor) { return constructorName; } + +// Flag to prevent recursive call of Error.prepareStackTrace. +var formatting_custom_stack_trace = false; + + function captureStackTrace(obj, cons_opt) { var stackTraceLimit = $Error.stackTraceLimit; if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return; if (stackTraceLimit < 0 || stackTraceLimit > 10000) { stackTraceLimit = 10000; } - var raw_stack = %CollectStackTrace(obj, - cons_opt ? cons_opt : captureStackTrace, - stackTraceLimit); - DefineOneShotAccessor(obj, 'stack', function (obj) { - return FormatRawStackTrace(obj, raw_stack); - }); + var stack = %CollectStackTrace(obj, + cons_opt ? cons_opt : captureStackTrace, + stackTraceLimit); + + // Don't be lazy if the error stack formatting is custom (observable). + if (IS_FUNCTION($Error.prepareStackTrace) && !formatting_custom_stack_trace) { + var array = []; + %MoveArrayContents(GetStackFrames(stack), array); + formatting_custom_stack_trace = true; + try { + obj.stack = $Error.prepareStackTrace(obj, array); + } catch (e) { + throw e; // The custom formatting function threw. Rethrow. + } finally { + formatting_custom_stack_trace = false; + } + return; + } + + var error_string = FormatErrorString(obj); + // Note that 'obj' and 'this' maybe different when called on objects that + // have the error object on its prototype chain. The getter replaces itself + // with a data property as soon as the stack trace has been formatted. + // The getter must not change the object layout as it may be called after GC. 
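With this change, captureStackTrace formats the stack eagerly through $Error.prepareStackTrace when a user-supplied formatter is installed (custom formatting is observable), and otherwise falls back to the lazy 'stack' getter defined just below. From script, the public hook is used roughly like this (a usage sketch; the callback body and output format are illustrative, not part of the patch):

// prepareStackTrace receives the error object and an array of CallSite objects.
Error.prepareStackTrace = function(error, callSites) {
  return callSites.map(function(site) {
    return site.getFunctionName() + " (" +
           site.getFileName() + ":" + site.getLineNumber() + ")";
  }).join("\n");
};

function fail() { throw new Error("boom"); }
try {
  fail();
} catch (e) {
  console.log(e.stack);  // formatted by prepareStackTrace instead of the default formatter
}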
+ var getter = function() { + if (IS_STRING(stack)) return stack; + // Stack is still a raw array awaiting to be formatted. + stack = FormatStackTrace(error_string, GetStackFrames(stack)); + // Release context value. + error_string = void 0; + return stack; + }; + %MarkOneShotGetter(getter); + + // The 'stack' property of the receiver is set as data property. If + // the receiver is the same as holder, this accessor pair is replaced. + var setter = function(v) { + %DefineOrRedefineDataProperty(this, 'stack', v, NONE); + }; + + %DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM); } @@ -1160,15 +1173,7 @@ function SetUpError() { // object. This avoids going through getters and setters defined // on prototype objects. %IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM); - %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM); - %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM); - if (m === kAddMessageAccessorsMarker) { - // DefineOneShotAccessor always inserts a message property and - // ignores setters. - DefineOneShotAccessor(this, 'message', function (obj) { - return FormatMessage(%NewMessageObject(obj.type, obj.arguments)); - }); - } else if (!IS_UNDEFINED(m)) { + if (!IS_UNDEFINED(m)) { %IgnoreAttributesAndSetProperty( this, 'message', ToString(m), DONT_ENUM); } @@ -1203,7 +1208,7 @@ var cyclic_error_marker = new $Object(); function GetPropertyWithoutInvokingMonkeyGetters(error, name) { // Climb the prototype chain until we find the holder. while (error && !%HasLocalProperty(error, name)) { - error = error.__proto__; + error = %GetPrototype(error); } if (error === null) return void 0; if (!IS_OBJECT(error)) return error[name]; @@ -1227,15 +1232,9 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) { function ErrorToStringDetectCycle(error) { if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker; try { - var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type"); var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name"); name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name); var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message"); - var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty); - if (type && !hasMessage) { - var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments"); - message = FormatMessage(%NewMessageObject(type, args)); - } message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message); if (name === "") return message; if (message === "") return name; @@ -1267,4 +1266,46 @@ InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]); // Boilerplate for exceptions for stack overflows. Used from // Isolate::StackOverflow(). -var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []); +function SetUpStackOverflowBoilerplate() { + var boilerplate = MakeRangeError('stack_overflow', []); + + // The raw stack trace is stored as hidden property of the copy of this + // boilerplate error object. Note that the receiver 'this' may not be that + // error object copy, but can be found on the prototype chain of 'this'. + // When the stack trace is formatted, this accessor property is replaced by + // a data property. + var error_string = boilerplate.name + ": " + boilerplate.message; + + // The getter must not change the object layout as it may be called after GC. 
+ function getter() { + var holder = this; + while (!IS_ERROR(holder)) { + holder = %GetPrototype(holder); + if (holder == null) return MakeSyntaxError('illegal_access', []); + } + var stack = %GetOverflowedStackTrace(holder); + if (IS_STRING(stack)) return stack; + if (IS_ARRAY(stack)) { + var result = FormatStackTrace(error_string, GetStackFrames(stack)); + %SetOverflowedStackTrace(holder, result); + return result; + } + return void 0; + } + %MarkOneShotGetter(getter); + + // The 'stack' property of the receiver is set as data property. If + // the receiver is the same as holder, this accessor pair is replaced. + function setter(v) { + %DefineOrRedefineDataProperty(this, 'stack', v, NONE); + // Release the stack trace that is stored as hidden property, if exists. + %SetOverflowedStackTrace(this, void 0); + } + + %DefineOrRedefineAccessorProperty( + boilerplate, 'stack', getter, setter, DONT_ENUM); + + return boilerplate; +} + +var kStackOverflowBoilerplate = SetUpStackOverflowBoilerplate(); diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h index 3e726a7545..9c9f611ed0 100644 --- a/deps/v8/src/mips/assembler-mips-inl.h +++ b/deps/v8/src/mips/assembler-mips-inl.h @@ -1,3 +1,4 @@ + // Copyright (c) 1994-2006 Sun Microsystems Inc. // All Rights Reserved. // @@ -65,7 +66,7 @@ Operand::Operand(const ExternalReference& f) { Operand::Operand(Smi* value) { rm_ = no_reg; imm32_ = reinterpret_cast<intptr_t>(value); - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } @@ -79,9 +80,36 @@ bool Operand::is_reg() const { } +int Register::NumAllocatableRegisters() { + if (CpuFeatures::IsSupported(FPU)) { + return kMaxNumAllocatableRegisters; + } else { + return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble; + } +} + + +int DoubleRegister::NumRegisters() { + if (CpuFeatures::IsSupported(FPU)) { + return FPURegister::kMaxNumRegisters; + } else { + return 1; + } +} + + +int DoubleRegister::NumAllocatableRegisters() { + if (CpuFeatures::IsSupported(FPU)) { + return FPURegister::kMaxNumAllocatableRegisters; + } else { + return 1; + } +} + + int FPURegister::ToAllocationIndex(FPURegister reg) { ASSERT(reg.code() % 2 == 0); - ASSERT(reg.code() / 2 < kNumAllocatableRegisters); + ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters); ASSERT(reg.is_valid()); ASSERT(!reg.is(kDoubleRegZero)); ASSERT(!reg.is(kLithiumScratchDouble)); @@ -111,14 +139,14 @@ void RelocInfo::apply(intptr_t delta) { Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); return Assembler::target_address_at(pc_); } Address RelocInfo::target_address_address() { ASSERT(IsCodeTarget(rmode_) || - rmode_ == RUNTIME_ENTRY || + IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); // Read the address of the word containing the target_address in an @@ -146,7 +174,7 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); Assembler::set_target_address_at(pc_, target); if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); @@ -203,6 +231,19 @@ Address* RelocInfo::target_reference_address() { } +Address RelocInfo::target_runtime_entry(Assembler* origin) { + ASSERT(IsRuntimeEntry(rmode_)); + return 
target_address(); +} + + +void RelocInfo::set_target_runtime_entry(Address target, + WriteBarrierMode mode) { + ASSERT(IsRuntimeEntry(rmode_)); + if (target_address() != target) set_target_address(target, mode); +} + + Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = Memory::Address_at(pc_); @@ -231,6 +272,24 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell, } +static const int kNoCodeAgeSequenceLength = 7; + +Code* RelocInfo::code_age_stub() { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + return Code::GetCodeFromTargetAddress( + Memory::Address_at(pc_ + Assembler::kInstrSize * + (kNoCodeAgeSequenceLength - 1))); +} + + +void RelocInfo::set_code_age_stub(Code* stub) { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Memory::Address_at(pc_ + Assembler::kInstrSize * + (kNoCodeAgeSequenceLength - 1)) = + stub->instruction_start(); +} + + Address RelocInfo::call_address() { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); @@ -302,6 +361,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { visitor->VisitGlobalPropertyCell(this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + visitor->VisitCodeAgeSequence(this); #ifdef ENABLE_DEBUGGER_SUPPORT // TODO(isolates): Get a cached isolate below. } else if (((RelocInfo::IsJSReturn(mode) && @@ -311,7 +372,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { Isolate::Current()->debug()->has_break_points()) { visitor->VisitDebugTarget(this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (RelocInfo::IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } } @@ -328,6 +389,8 @@ void RelocInfo::Visit(Heap* heap) { StaticVisitor::VisitGlobalPropertyCell(heap, this); } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + StaticVisitor::VisitCodeAgeSequence(heap, this); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -336,7 +399,7 @@ void RelocInfo::Visit(Heap* heap) { IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (RelocInfo::IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } } diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc index a4563a64f3..4c11c7f549 100644 --- a/deps/v8/src/mips/assembler-mips.cc +++ b/deps/v8/src/mips/assembler-mips.cc @@ -47,7 +47,13 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif unsigned CpuFeatures::supported_ = 0; -unsigned CpuFeatures::found_by_runtime_probing_ = 0; +unsigned CpuFeatures::found_by_runtime_probing_only_ = 0; + + +ExternalReference ExternalReference::cpu_features() { + ASSERT(CpuFeatures::initialized_); + return ExternalReference(&CpuFeatures::supported_); +} // Get the CPU features enabled by the build. 
For cross compilation the @@ -57,7 +63,7 @@ unsigned CpuFeatures::found_by_runtime_probing_ = 0; static uint64_t CpuFeaturesImpliedByCompiler() { uint64_t answer = 0; #ifdef CAN_USE_FPU_INSTRUCTIONS - answer |= 1u << FPU; + answer |= static_cast<uint64_t>(1) << FPU; #endif // def CAN_USE_FPU_INSTRUCTIONS #ifdef __mips__ @@ -65,7 +71,7 @@ static uint64_t CpuFeaturesImpliedByCompiler() { // generation even when generating snapshots. This won't work for cross // compilation. #if(defined(__mips_hard_float) && __mips_hard_float != 0) - answer |= 1u << FPU; + answer |= static_cast<uint64_t>(1) << FPU; #endif // defined(__mips_hard_float) && __mips_hard_float != 0 #endif // def __mips__ @@ -73,6 +79,33 @@ static uint64_t CpuFeaturesImpliedByCompiler() { } +const char* DoubleRegister::AllocationIndexToString(int index) { + if (CpuFeatures::IsSupported(FPU)) { + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); + const char* const names[] = { + "f0", + "f2", + "f4", + "f6", + "f8", + "f10", + "f12", + "f14", + "f16", + "f18", + "f20", + "f22", + "f24", + "f26" + }; + return names[index]; + } else { + ASSERT(index == 0); + return "sfpd0"; + } +} + + void CpuFeatures::Probe() { unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() | CpuFeaturesImpliedByCompiler()); @@ -96,15 +129,15 @@ void CpuFeatures::Probe() { #if !defined(__mips__) // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled. if (FLAG_enable_fpu) { - supported_ |= 1u << FPU; + supported_ |= static_cast<uint64_t>(1) << FPU; } #else // Probe for additional features not already known to be available. if (OS::MipsCpuHasFeature(FPU)) { // This implementation also sets the FPU flags if // runtime detection of FPU returns true. - supported_ |= 1u << FPU; - found_by_runtime_probing_ |= 1u << FPU; + supported_ |= static_cast<uint64_t>(1) << FPU; + found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << FPU; } #endif } @@ -221,7 +254,7 @@ Operand::Operand(Handle<Object> handle) { } else { // No relocation needed. imm32_ = reinterpret_cast<intptr_t>(obj); - rmode_ = RelocInfo::NONE; + rmode_ = RelocInfo::NONE32; } } @@ -267,45 +300,11 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask; const Instr kLwSwOffsetMask = kImm16Mask; -// Spare buffer. -static const int kMinimalBufferSize = 4 * KB; - - -Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) - : AssemblerBase(arg_isolate), +Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) + : AssemblerBase(isolate, buffer, buffer_size), recorded_ast_id_(TypeFeedbackId::None()), - positions_recorder_(this), - emit_debug_code_(FLAG_debug_code) { - if (buffer == NULL) { - // Do our own buffer management. - if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - - if (isolate()->assembler_spare_buffer() != NULL) { - buffer = isolate()->assembler_spare_buffer(); - isolate()->set_assembler_spare_buffer(NULL); - } - } - if (buffer == NULL) { - buffer_ = NewArray<byte>(buffer_size); - } else { - buffer_ = static_cast<byte*>(buffer); - } - buffer_size_ = buffer_size; - own_buffer_ = true; - - } else { - // Use externally provided buffer instead. - ASSERT(buffer_size > 0); - buffer_ = static_cast<byte*>(buffer); - buffer_size_ = buffer_size; - own_buffer_ = false; - } - - // Set up buffer pointers. 
- ASSERT(buffer_ != NULL); - pc_ = buffer_; - reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); + positions_recorder_(this) { + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); last_trampoline_pool_end_ = 0; no_trampoline_pool_before_ = 0; @@ -324,18 +323,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) } -Assembler::~Assembler() { - if (own_buffer_) { - if (isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } - } -} - - void Assembler::GetCode(CodeDesc* desc) { ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap. // Set up code descriptor. @@ -602,7 +589,7 @@ bool Assembler::IsNop(Instr instr, unsigned int type) { int32_t Assembler::GetBranchOffset(Instr instr) { ASSERT(IsBranch(instr)); - return ((int16_t)(instr & kImm16Mask)) << 2; + return (static_cast<int16_t>(instr & kImm16Mask)) << 2; } @@ -735,7 +722,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) { Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize); Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize); ASSERT(IsOri(instr_ori)); - uint32_t imm = (uint32_t)buffer_ + target_pos; + uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; ASSERT((imm & 3) == 0); instr_lui &= ~kImm16Mask; @@ -746,7 +733,7 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) { instr_at_put(pos + 1 * Assembler::kInstrSize, instr_ori | (imm & kImm16Mask)); } else { - uint32_t imm28 = (uint32_t)buffer_ + target_pos; + uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos; imm28 &= kImm28Mask; ASSERT((imm28 & 3) == 0); @@ -851,7 +838,7 @@ bool Assembler::is_near(Label* L) { // space. There is no guarantee that the relocated location can be similarly // encoded. 
bool Assembler::MustUseReg(RelocInfo::Mode rmode) { - return rmode != RelocInfo::NONE; + return !RelocInfo::IsNone(rmode); } void Assembler::GenInstrRegister(Opcode opcode, @@ -887,7 +874,7 @@ void Assembler::GenInstrRegister(Opcode opcode, FPURegister fd, SecondaryField func) { ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid()); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -895,13 +882,27 @@ void Assembler::GenInstrRegister(Opcode opcode, void Assembler::GenInstrRegister(Opcode opcode, + FPURegister fr, + FPURegister ft, + FPURegister fs, + FPURegister fd, + SecondaryField func) { + ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid()); + ASSERT(IsEnabled(FPU)); + Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift) + | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; + emit(instr); +} + + +void Assembler::GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, FPURegister fs, FPURegister fd, SecondaryField func) { ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid()); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func; emit(instr); @@ -914,7 +915,7 @@ void Assembler::GenInstrRegister(Opcode opcode, FPUControlRegister fs, SecondaryField func) { ASSERT(fs.is_valid() && rt.is_valid()); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func; emit(instr); @@ -949,7 +950,7 @@ void Assembler::GenInstrImmediate(Opcode opcode, FPURegister ft, int32_t j) { ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j))); - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift) | (j & kImm16Mask); emit(instr); @@ -998,7 +999,7 @@ uint32_t Assembler::jump_address(Label* L) { } } - uint32_t imm = (uint32_t)buffer_ + target_pos; + uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos; ASSERT((imm & 3) == 0); return imm; @@ -1133,7 +1134,8 @@ void Assembler::j(int32_t target) { #if DEBUG // Get pc of delay slot. uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); - bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; + bool in_range = (ipc ^ static_cast<uint32_t>(target) >> + (kImm26Bits + kImmFieldShift)) == 0; ASSERT(in_range && ((target & 3) == 0)); #endif GenInstrJump(J, target >> 2); @@ -1154,7 +1156,8 @@ void Assembler::jal(int32_t target) { #ifdef DEBUG // Get pc of delay slot. uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); - bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; + bool in_range = (ipc ^ static_cast<uint32_t>(target) >> + (kImm26Bits + kImmFieldShift)) == 0; ASSERT(in_range && ((target & 3) == 0)); #endif positions_recorder()->WriteRecordedPositions(); @@ -1173,8 +1176,8 @@ void Assembler::jalr(Register rs, Register rd) { void Assembler::j_or_jr(int32_t target, Register rs) { // Get pc of delay slot. 
uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); - bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; - + bool in_range = (ipc ^ static_cast<uint32_t>(target) >> + (kImm26Bits + kImmFieldShift)) == 0; if (in_range) { j(target); } else { @@ -1186,8 +1189,8 @@ void Assembler::j_or_jr(int32_t target, Register rs) { void Assembler::jal_or_jalr(int32_t target, Register rs) { // Get pc of delay slot. uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize); - bool in_range = ((uint32_t)(ipc^target) >> (kImm26Bits+kImmFieldShift)) == 0; - + bool in_range = (ipc ^ static_cast<uint32_t>(target) >> + (kImm26Bits+kImmFieldShift)) == 0; if (in_range) { jal(target); } else { @@ -1697,6 +1700,12 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) { } +void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs, + FPURegister ft) { + GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D); +} + + void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) { GenInstrRegister(COP1, D, ft, fs, fd, DIV_D); } @@ -1863,7 +1872,7 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) { // Conditions. void Assembler::c(FPUCondition cond, SecondaryField fmt, FPURegister fs, FPURegister ft, uint16_t cc) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(is_uint3(cc)); ASSERT((fmt & ~(31 << kRsShift)) == 0); Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift @@ -1874,7 +1883,7 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt, void Assembler::fcmp(FPURegister src1, const double src2, FPUCondition cond) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(src2 == 0.0); mtc1(zero_reg, f14); cvt_d_w(f14, f14); @@ -1883,7 +1892,7 @@ void Assembler::fcmp(FPURegister src1, const double src2, void Assembler::bc1f(int16_t offset, uint16_t cc) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(is_uint3(cc)); Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask); emit(instr); @@ -1891,7 +1900,7 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) { void Assembler::bc1t(int16_t offset, uint16_t cc) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); ASSERT(is_uint3(cc)); Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask); emit(instr); @@ -1946,7 +1955,7 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) { return 2; // Number of instructions patched. } else { uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2; - if ((int32_t)imm28 == kEndOfJumpChain) { + if (static_cast<int32_t>(imm28) == kEndOfJumpChain) { return 0; // Number of instructions patched. } imm28 += pc_delta; @@ -2036,7 +2045,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { || RelocInfo::IsPosition(rmode)); // These modes do not need an entry in the constant pool. } - if (rinfo.rmode() != RelocInfo::NONE) { + if (!RelocInfo::IsNone(rinfo.rmode())) { // Don't record external references unless the heap will be serialized. 
if (rmode == RelocInfo::EXTERNAL_REFERENCE) { #ifdef DEBUG @@ -2196,9 +2205,10 @@ void Assembler::set_target_address_at(Address pc, Address target) { Instr instr3 = instr_at(pc + 2 * kInstrSize); uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize); - bool in_range = - ((uint32_t)(ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0; - uint32_t target_field = (uint32_t)(itarget & kJumpAddrMask) >> kImmFieldShift; + bool in_range = (ipc ^ static_cast<uint32_t>(itarget) >> + (kImm26Bits + kImmFieldShift)) == 0; + uint32_t target_field = + static_cast<uint32_t>(itarget & kJumpAddrMask) >>kImmFieldShift; bool patched_jump = false; #ifndef ALLOW_JAL_IN_BOUNDARY_REGION diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h index 59c45c927a..e6c9e76c78 100644 --- a/deps/v8/src/mips/assembler-mips.h +++ b/deps/v8/src/mips/assembler-mips.h @@ -72,20 +72,23 @@ namespace internal { // Core register. struct Register { static const int kNumRegisters = v8::internal::kNumRegisters; - static const int kNumAllocatableRegisters = 14; // v0 through t7. + static const int kMaxNumAllocatableRegisters = 14; // v0 through t7. static const int kSizeInBytes = 4; + static const int kGPRsPerNonFPUDouble = 2; + + inline static int NumAllocatableRegisters(); static int ToAllocationIndex(Register reg) { return reg.code() - 2; // zero_reg and 'at' are skipped. } static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); return from_code(index + 2); // zero_reg and 'at' are skipped. } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "v0", "v1", @@ -186,7 +189,7 @@ Register ToRegister(int num); // Coprocessor register. struct FPURegister { - static const int kNumRegisters = v8::internal::kNumFPURegisters; + static const int kMaxNumRegisters = v8::internal::kNumFPURegisters; // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to @@ -197,44 +200,25 @@ struct FPURegister { // f28: 0.0 // f30: scratch register. 
static const int kNumReservedRegisters = 2; - static const int kNumAllocatableRegisters = kNumRegisters / 2 - + static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 - kNumReservedRegisters; - + inline static int NumRegisters(); + inline static int NumAllocatableRegisters(); inline static int ToAllocationIndex(FPURegister reg); + static const char* AllocationIndexToString(int index); static FPURegister FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); return from_code(index * 2); } - static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); - const char* const names[] = { - "f0", - "f2", - "f4", - "f6", - "f8", - "f10", - "f12", - "f14", - "f16", - "f18", - "f20", - "f22", - "f24", - "f26" - }; - return names[index]; - } - static FPURegister from_code(int code) { FPURegister r = { code }; return r; } - bool is_valid() const { return 0 <= code_ && code_ < kNumFPURegisters ; } + bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters ; } bool is(FPURegister creg) const { return code_ == creg.code_; } FPURegister low() const { // Find low reg of a Double-reg pair, which is the reg itself. @@ -316,6 +300,9 @@ const FPURegister f29 = { 29 }; const FPURegister f30 = { 30 }; const FPURegister f31 = { 31 }; +const Register sfpd_lo = { kRegister_t6_Code }; +const Register sfpd_hi = { kRegister_t7_Code }; + // Register aliases. // cp is assumed to be a callee saved register. // Defined using #define instead of "static const Register&" because Clang @@ -361,7 +348,7 @@ class Operand BASE_EMBEDDED { public: // Immediate. INLINE(explicit Operand(int32_t immediate, - RelocInfo::Mode rmode = RelocInfo::NONE)); + RelocInfo::Mode rmode = RelocInfo::NONE32)); INLINE(explicit Operand(const ExternalReference& f)); INLINE(explicit Operand(const char* s)); INLINE(explicit Operand(Object** opp)); @@ -406,7 +393,7 @@ class MemOperand : public Operand { // CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. +// Supported features must be enabled by a CpuFeatureScope before use. class CpuFeatures : public AllStatic { public: // Detect features of the target CPU. Set safe defaults if the serializer @@ -420,89 +407,25 @@ class CpuFeatures : public AllStatic { return (supported_ & (1u << f)) != 0; } - -#ifdef DEBUG - // Check whether a feature is currently enabled. - static bool IsEnabled(CpuFeature f) { + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features()); - return (enabled & (1u << f)) != 0; + return (found_by_runtime_probing_only_ & + (static_cast<uint64_t>(1) << f)) != 0; } -#endif - - // Enable a specified feature within a scope. 
- class Scope BASE_EMBEDDED { -#ifdef DEBUG - - public: - explicit Scope(CpuFeature f) { - unsigned mask = 1u << f; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features()); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - unsigned old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; - class TryForceFeatureScope BASE_EMBEDDED { - public: - explicit TryForceFeatureScope(CpuFeature f) - : old_supported_(CpuFeatures::supported_) { - if (CanForce()) { - CpuFeatures::supported_ |= (1u << f); - } - } - - ~TryForceFeatureScope() { - if (CanForce()) { - CpuFeatures::supported_ = old_supported_; - } - } - - private: - static bool CanForce() { - // It's only safe to temporarily force support of CPU features - // when there's only a single isolate, which is guaranteed when - // the serializer is enabled. - return Serializer::enabled(); - } - - const unsigned old_supported_; - }; + static bool IsSafeForSnapshot(CpuFeature f) { + return (IsSupported(f) && + (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); + } private: #ifdef DEBUG static bool initialized_; #endif static unsigned supported_; - static unsigned found_by_runtime_probing_; + static unsigned found_by_runtime_probing_only_; + friend class ExternalReference; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -523,13 +446,7 @@ class Assembler : public AssemblerBase { // is too small, a fatal error occurs. No deallocation of the buffer is done // upon destruction of the assembler. Assembler(Isolate* isolate, void* buffer, int buffer_size); - ~Assembler(); - - // Overrides the default provided by FLAG_debug_code. - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - - // Dummy for cross platform compatibility. - void set_predictable_code_size(bool value) { } + virtual ~Assembler() { } // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. GetCode() is idempotent; it returns the same result if no other @@ -669,7 +586,9 @@ class Assembler : public AssemblerBase { PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE, // Helper values. LAST_CODE_MARKER, - FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED + FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED, + // Code aging + CODE_AGE_MARKER_NOP = 6 }; // Type == 0 is the default non-marking nop. 
For mips this is a @@ -822,6 +741,7 @@ class Assembler : public AssemblerBase { void add_d(FPURegister fd, FPURegister fs, FPURegister ft); void sub_d(FPURegister fd, FPURegister fs, FPURegister ft); void mul_d(FPURegister fd, FPURegister fs, FPURegister ft); + void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft); void div_d(FPURegister fd, FPURegister fs, FPURegister ft); void abs_d(FPURegister fd, FPURegister fs); void mov_d(FPURegister fd, FPURegister fs); @@ -947,8 +867,6 @@ class Assembler : public AssemblerBase { void db(uint8_t data); void dd(uint32_t data); - int32_t pc_offset() const { return pc_ - buffer_; } - PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Postpone the generation of the trampoline pool for the specified number of @@ -1033,8 +951,6 @@ class Assembler : public AssemblerBase { // the relocation info. TypeFeedbackId recorded_ast_id_; - bool emit_debug_code() const { return emit_debug_code_; } - int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; } // Decode branch instruction at pos and return branch target pos. @@ -1093,13 +1009,6 @@ class Assembler : public AssemblerBase { } private: - // Code buffer: - // The buffer into which code and relocation info are generated. - byte* buffer_; - int buffer_size_; - // True if the assembler owns the buffer, false if buffer is external. - bool own_buffer_; - // Buffer size and constant pool distance are checked together at regular // intervals of kBufferCheckInterval emitted bytes. static const int kBufferCheckInterval = 1*KB/2; @@ -1110,7 +1019,6 @@ class Assembler : public AssemblerBase { // not have to check for overflow. The same is true for writes of large // relocation info entries. static const int kGap = 32; - byte* pc_; // The program counter - moves forward. // Repeated checking whether the trampoline pool should be emitted is rather @@ -1175,6 +1083,13 @@ class Assembler : public AssemblerBase { SecondaryField func = NULLSF); void GenInstrRegister(Opcode opcode, + FPURegister fr, + FPURegister ft, + FPURegister fs, + FPURegister fd, + SecondaryField func = NULLSF); + + void GenInstrRegister(Opcode opcode, SecondaryField fmt, Register rt, FPURegister fs, @@ -1285,7 +1200,6 @@ class Assembler : public AssemblerBase { friend class BlockTrampolinePoolScope; PositionsRecorder positions_recorder_; - bool emit_debug_code_; friend class PositionsRecorder; friend class EnsureSpace; }; diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc index 0342e6505d..54efd94913 100644 --- a/deps/v8/src/mips/builtins-mips.cc +++ b/deps/v8/src/mips/builtins-mips.cc @@ -128,12 +128,8 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, if (initial_capacity > 0) { size += FixedArray::SizeFor(initial_capacity); } - __ AllocateInNewSpace(size, - result, - scratch2, - scratch3, - gc_required, - TAG_OBJECT); + __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT); + // Allocated the JSArray. Now initialize the fields except for the elements // array. 
// result: JSObject @@ -555,34 +551,64 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : number of arguments // -- a1 : constructor function + // -- a2 : type info cell // -- ra : return address // -- sp[...]: constructor arguments // ----------------------------------- - Label generic_constructor; if (FLAG_debug_code) { // The array construct code is only set for the builtin and internal // Array functions which always have a map. // Initial map for the builtin Array function should be a map. - __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); - __ And(t0, a2, Operand(kSmiTagMask)); + __ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset)); + __ And(t0, a3, Operand(kSmiTagMask)); __ Assert(ne, "Unexpected initial map for Array function (3)", t0, Operand(zero_reg)); - __ GetObjectType(a2, a3, t0); + __ GetObjectType(a3, a3, t0); __ Assert(eq, "Unexpected initial map for Array function (4)", t0, Operand(MAP_TYPE)); + + if (FLAG_optimize_constructed_arrays) { + // We should either have undefined in a2 or a valid jsglobalpropertycell + Label okay_here; + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), masm->isolate()); + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ Branch(&okay_here, eq, a2, Operand(undefined_sentinel)); + __ lw(a3, FieldMemOperand(a2, 0)); + __ Assert(eq, "Expected property cell in register a3", + a3, Operand(global_property_cell_map)); + __ bind(&okay_here); + } } - // Run the native code for the Array function called as a constructor. - ArrayNativeCode(masm, &generic_constructor); + if (FLAG_optimize_constructed_arrays) { + Label not_zero_case, not_one_case; + __ Branch(¬_zero_case, ne, a0, Operand(zero_reg)); + ArrayNoArgumentConstructorStub no_argument_stub; + __ TailCallStub(&no_argument_stub); - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. - __ bind(&generic_constructor); + __ bind(¬_zero_case); + __ Branch(¬_one_case, gt, a0, Operand(1)); + ArraySingleArgumentConstructorStub single_argument_stub; + __ TailCallStub(&single_argument_stub); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + __ bind(¬_one_case); + ArrayNArgumentsConstructorStub n_argument_stub; + __ TailCallStub(&n_argument_stub); + } else { + Label generic_constructor; + // Run the native code for the Array function called as a constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + } } @@ -635,12 +661,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // ----------------------------------- Label gc_required; - __ AllocateInNewSpace(JSValue::kSize, - v0, // Result. - a3, // Scratch. - t0, // Scratch. - &gc_required, - TAG_OBJECT); + __ Allocate(JSValue::kSize, + v0, // Result. + a3, // Scratch. + t0, // Scratch. + &gc_required, + TAG_OBJECT); // Initialising the String Object. 
Register map = a3; @@ -698,7 +724,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Load the empty string into a2, remove the receiver from the // stack, and jump back to the case where the argument is a string. __ bind(&no_arguments); - __ LoadRoot(argument, Heap::kEmptyStringRootIndex); + __ LoadRoot(argument, Heap::kempty_stringRootIndex); __ Drop(1); __ Branch(&argument_is_string); @@ -728,6 +754,35 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) { } +void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve the function. + __ push(a1); + // Push call kind information. + __ push(t1); + + // Push the function on the stack as the argument to the runtime function. + __ push(a1); + __ CallRuntime(Runtime::kInstallRecompiledCode, 1); + // Calculate the entry point. + __ Addu(t9, v0, Operand(Code::kHeaderSize - kHeapObjectTag)); + + // Restore call kind information. + __ pop(t1); + // Restore saved function. + __ pop(a1); + + // Tear down temporary frame. + } + + // Do a tail-call of the compiled function. + __ Jump(t9); +} + + void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -1072,9 +1127,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, // If the type of the result (stored in its map) is less than // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. - __ GetObjectType(v0, a3, a3); + __ GetObjectType(v0, a1, a3); __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE)); + // Symbols are "objects". + __ lbu(a3, FieldMemOperand(a1, Map::kInstanceTypeOffset)); + __ Branch(&exit, eq, a3, Operand(SYMBOL_TYPE)); + // Throw away the result of the constructor invocation and use the // on-stack receiver as the result. __ bind(&use_receiver); @@ -1171,6 +1230,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Invoke the code and pass argc as a0. __ mov(a0, a3); if (is_construct) { + // No type feedback cell is available + Handle<Object> undefined_sentinel( + masm->isolate()->heap()->undefined_value(), masm->isolate()); + __ li(a2, Operand(undefined_sentinel)); CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ CallStub(&stub); } else { @@ -1255,6 +1318,66 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { } +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { + // For now, we are relying on the fact that make_code_young doesn't do any + // garbage collection which allows us to save/restore the registers without + // worrying about which of them contain pointers. We also don't build an + // internal frame to make the code faster, since we shouldn't have to do stack + // crawls in MakeCodeYoung. This seems a bit fragile. 
+ + __ mov(a0, ra); + // Adjust a0 to point to the head of the PlatformCodeAge sequence + __ Subu(a0, a0, + Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize)); + // Restore the original return address of the function + __ mov(ra, at); + + // The following registers must be saved and restored when calling through to + // the runtime: + // a0 - contains return address (beginning of patch sequence) + // a1 - function object + RegList saved_regs = + (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit(); + FrameScope scope(masm, StackFrame::MANUAL); + __ MultiPush(saved_regs); + __ PrepareCallCFunction(1, 0, a1); + __ CallCFunction( + ExternalReference::get_make_code_young_function(masm->isolate()), 1); + __ MultiPop(saved_regs); + __ Jump(a0); +} + +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} \ +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR + + +void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve registers across notification, this is important for compiled + // stubs that tail call the runtime on deopts passing their parameters in + // registers. + __ MultiPush(kJSCallerSaved | kCalleeSaved); + // Pass the function and deoptimization type to the runtime system. + __ CallRuntime(Runtime::kNotifyStubFailure, 0); + __ MultiPop(kJSCallerSaved | kCalleeSaved); + } + + __ Addu(sp, sp, Operand(kPointerSize)); // Ignore state + __ Jump(ra); // Jump to miss handler +} + + static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { { @@ -1315,12 +1438,6 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) { void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) { - CpuFeatures::TryForceFeatureScope scope(VFP3); - if (!CpuFeatures::IsSupported(FPU)) { - __ Abort("Unreachable code: Cannot optimize without FPU support."); - return; - } - // Lookup the function in the JavaScript frame and push it as an // argument to the on-stack replacement function. __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); @@ -1371,7 +1488,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // a0: actual number of arguments // a1: function Label shift_arguments; - __ li(t0, Operand(0, RelocInfo::NONE)); // Indicate regular JS_FUNCTION. + __ li(t0, Operand(0, RelocInfo::NONE32)); // Indicate regular JS_FUNCTION. { Label convert_to_object, use_global_receiver, patch_receiver; // Change context eagerly in case we need the global receiver. __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); @@ -1425,7 +1542,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { __ sll(at, a0, kPointerSizeLog2); __ addu(at, sp, at); __ lw(a1, MemOperand(at)); - __ li(t0, Operand(0, RelocInfo::NONE)); + __ li(t0, Operand(0, RelocInfo::NONE32)); __ Branch(&patch_receiver); // Use the global receiver object from the called function as the @@ -1448,11 +1565,11 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) { // 3b. Check for function proxy. __ bind(&slow); - __ li(t0, Operand(1, RelocInfo::NONE)); // Indicate function proxy. + __ li(t0, Operand(1, RelocInfo::NONE32)); // Indicate function proxy. 
__ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE)); __ bind(&non_function); - __ li(t0, Operand(2, RelocInfo::NONE)); // Indicate non-function. + __ li(t0, Operand(2, RelocInfo::NONE32)); // Indicate non-function. // 3c. Patch the first argument when calling a non-function. The // CALL_NON_FUNCTION builtin expects the non-function callee as @@ -1683,7 +1800,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) { __ bind(&call_proxy); __ push(a1); // Add function proxy as last argument. __ Addu(a0, a0, Operand(1)); - __ li(a2, Operand(0, RelocInfo::NONE)); + __ li(a2, Operand(0, RelocInfo::NONE32)); __ SetCallKind(t1, CALL_AS_METHOD); __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY); __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc index ca31826454..f5908d37bd 100644 --- a/deps/v8/src/mips/code-stubs-mips.cc +++ b/deps/v8/src/mips/code-stubs-mips.cc @@ -33,17 +33,90 @@ #include "code-stubs.h" #include "codegen.h" #include "regexp-macro-assembler.h" +#include "stub-cache.h" namespace v8 { namespace internal { +void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a3, a2, a1, a0 }; + descriptor->register_param_count_ = 4; + descriptor->register_params_ = registers; + descriptor->stack_parameter_count_ = NULL; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; +} + + +void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a1, a0 }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); +} + + +void TransitionElementsKindStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { a0, a1 }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + Address entry = + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; + descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry); +} + + +static void InitializeArrayConstructorDescriptor(Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + // register state + // a1 -- constructor function + // a2 -- type info cell with elements kind + // a0 -- number of arguments to the constructor function + static Register registers[] = { a1, a2 }; + descriptor->register_param_count_ = 2; + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &a0; + descriptor->register_params_ = registers; + descriptor->extra_expression_stack_count_ = 1; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ArrayConstructor_StubFailure); +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* 
descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + #define __ ACCESS_MASM(masm) static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cc, - bool never_nan_nan); + Condition cc); static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, Register rhs, @@ -95,12 +168,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { __ pop(a3); // Attempt to allocate new JSFunction in new space. - __ AllocateInNewSpace(JSFunction::kSize, - v0, - a1, - a2, - &gc, - TAG_OBJECT); + __ Allocate(JSFunction::kSize, v0, a1, a2, &gc, TAG_OBJECT); __ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3); @@ -227,12 +295,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { int length = slots_ + Context::MIN_CONTEXT_SLOTS; // Attempt to allocate the context in new space. - __ AllocateInNewSpace(FixedArray::SizeFor(length), - v0, - a1, - a2, - &gc, - TAG_OBJECT); + __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT); // Load the function from the stack. __ lw(a3, MemOperand(sp, 0)); @@ -276,8 +339,7 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace(FixedArray::SizeFor(length), - v0, a1, a2, &gc, TAG_OBJECT); + __ Allocate(FixedArray::SizeFor(length), v0, a1, a2, &gc, TAG_OBJECT); // Load the function from the stack. __ lw(a3, MemOperand(sp, 0)); @@ -333,6 +395,7 @@ static void GenerateFastCloneShallowArrayCommon( MacroAssembler* masm, int length, FastCloneShallowArrayStub::Mode mode, + AllocationSiteMode allocation_site_mode, Label* fail) { // Registers on entry: // a3: boilerplate literal array. @@ -345,16 +408,24 @@ static void GenerateFastCloneShallowArrayCommon( ? FixedDoubleArray::SizeFor(length) : FixedArray::SizeFor(length); } - int size = JSArray::kSize + elements_size; + + int size = JSArray::kSize; + int allocation_info_start = size; + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + size += AllocationSiteInfo::kSize; + } + size += elements_size; // Allocate both the JS array and the elements array in one big // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, - v0, - a1, - a2, - fail, - TAG_OBJECT); + __ Allocate(size, v0, a1, a2, fail, TAG_OBJECT); + + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()-> + allocation_site_info_map()))); + __ sw(a2, FieldMemOperand(v0, allocation_info_start)); + __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize)); + } // Copy the JS array part. for (int i = 0; i < JSArray::kSize; i += kPointerSize) { @@ -368,7 +439,11 @@ static void GenerateFastCloneShallowArrayCommon( // Get hold of the elements array of the boilerplate and setup the // elements pointer in the resulting object. __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); - __ Addu(a2, v0, Operand(JSArray::kSize)); + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize)); + } else { + __ Addu(a2, v0, Operand(JSArray::kSize)); + } __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset)); // Copy the elements array. 
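The hunk above reworks GenerateFastCloneShallowArrayCommon so that, when allocation sites are tracked, an AllocationSiteInfo slot is laid out between the cloned JSArray header and its elements inside a single allocation, and the elements pointer is bumped past that slot. A minimal standalone sketch of the offset arithmetic follows; it is illustrative only, and the byte sizes are assumed placeholders rather than the real JSArray::kSize, AllocationSiteInfo::kSize, or FixedArray::SizeFor values.

// Sketch, not V8 code: models the size/offset bookkeeping used when an
// AllocationSiteInfo record sits between a cloned JSArray header and its
// elements. kJSArrayHeaderBytes, kSiteInfoBytes and kElementsBytes are
// hypothetical stand-ins for the real constants.
#include <cstdio>

int main() {
  const int kJSArrayHeaderBytes = 16;   // assumed JSArray::kSize
  const int kSiteInfoBytes = 8;         // assumed AllocationSiteInfo::kSize
  const int kElementsBytes = 64;        // assumed FixedArray::SizeFor(length)
  const bool track_allocation_site = true;

  // One contiguous allocation covers header (+ optional site info) + elements,
  // mirroring the single __ Allocate(size, ...) call in the stub.
  int size = kJSArrayHeaderBytes;
  const int allocation_info_start = size;   // site info directly after header
  int total = size;
  if (track_allocation_site) total += kSiteInfoBytes;
  total += kElementsBytes;

  // The elements array starts after the site info when it is present, which
  // is why the stub adds JSArray::kSize + AllocationSiteInfo::kSize before
  // storing the elements pointer.
  const int elements_offset = track_allocation_site
      ? kJSArrayHeaderBytes + kSiteInfoBytes
      : kJSArrayHeaderBytes;

  std::printf("allocate %d bytes, site info at +%d, elements at +%d\n",
              total, allocation_info_start, elements_offset);
  return 0;
}

The point the stub relies on is that one contiguous allocation avoids a second new-space limit check, at the cost of the extra offset bookkeeping sketched above.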
@@ -403,16 +478,18 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex); __ Branch(&check_fast_elements, ne, v0, Operand(t1)); - GenerateFastCloneShallowArrayCommon(masm, 0, - COPY_ON_WRITE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS, + allocation_site_mode_, + &slow_case); // Return and remove the on-stack parameters. __ DropAndRet(3); __ bind(&check_fast_elements); __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex); __ Branch(&double_elements, ne, v0, Operand(t1)); - GenerateFastCloneShallowArrayCommon(masm, length_, - CLONE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS, + allocation_site_mode_, + &slow_case); // Return and remove the on-stack parameters. __ DropAndRet(3); @@ -443,7 +520,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ pop(a3); } - GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, mode, + allocation_site_mode_, + &slow_case); // Return and remove the on-stack parameters. __ DropAndRet(3); @@ -453,55 +532,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { } -void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [sp]: object literal flags. - // [sp + kPointerSize]: constant properties. - // [sp + (2 * kPointerSize)]: literal index. - // [sp + (3 * kPointerSize)]: literals array. - - // Load boilerplate object into a3 and check if we need to create a - // boilerplate. - Label slow_case; - __ lw(a3, MemOperand(sp, 3 * kPointerSize)); - __ lw(a0, MemOperand(sp, 2 * kPointerSize)); - __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); - __ Addu(a3, t0, a3); - __ lw(a3, MemOperand(a3)); - __ LoadRoot(t0, Heap::kUndefinedValueRootIndex); - __ Branch(&slow_case, eq, a3, Operand(t0)); - - // Check that the boilerplate contains only fast properties and we can - // statically determine the instance size. - int size = JSObject::kHeaderSize + length_ * kPointerSize; - __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset)); - __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset)); - __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2)); - - // Allocate the JS object and copy header together with all in-object - // properties from the boilerplate. - __ AllocateInNewSpace(size, v0, a1, a2, &slow_case, TAG_OBJECT); - for (int i = 0; i < size; i += kPointerSize) { - __ lw(a1, FieldMemOperand(a3, i)); - __ sw(a1, FieldMemOperand(v0, i)); - } - - // Return and remove the on-stack parameters. - __ DropAndRet(4); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1); -} - - // Takes a Smi and converts to an IEEE 64 bit floating point value in two // registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and // 52 fraction bits (20 in the first word, 32 in the second). Zeros is a // scratch register. Destroys the source register. No GC occurs during this // stub so you don't have to set up the frame. 
-class ConvertToDoubleStub : public CodeStub { +class ConvertToDoubleStub : public PlatformCodeStub { public: ConvertToDoubleStub(Register result_reg_1, Register result_reg_2, @@ -600,7 +636,7 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ sra(scratch1, a0, kSmiTagSize); __ mtc1(scratch1, f14); __ cvt_d_w(f14, f14); @@ -617,34 +653,16 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm, __ mov(scratch1, a0); ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2); __ push(ra); - __ Call(stub1.GetCode()); + __ Call(stub1.GetCode(masm->isolate())); // Write Smi from a1 to a1 and a0 in double format. __ mov(scratch1, a1); ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2); - __ Call(stub2.GetCode()); + __ Call(stub2.GetCode(masm->isolate())); __ pop(ra); } } -void FloatingPointHelper::LoadOperands( - MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* slow) { - - // Load right operand (a0) to f12 or a2/a3. - LoadNumber(masm, destination, - a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow); - - // Load left operand (a1) to f14 or a0/a1. - LoadNumber(masm, destination, - a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow); -} - - void FloatingPointHelper::LoadNumber(MacroAssembler* masm, Destination destination, Register object, @@ -669,7 +687,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a heap number. if (CpuFeatures::IsSupported(FPU) && destination == kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load the double from tagged HeapNumber to double register. // ARM uses a workaround here because of the unaligned HeapNumber @@ -688,7 +706,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, // Handle loading a double from a smi. __ bind(&is_smi); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Convert smi to double using FPU instructions. __ mtc1(scratch1, dst); __ cvt_d_w(dst, dst); @@ -702,7 +720,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm, __ mov(scratch1, object); ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); __ push(ra); - __ Call(stub.GetCode()); + __ Call(stub.GetCode(masm->isolate())); __ pop(ra); } @@ -753,79 +771,80 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm, Register int_scratch, Destination destination, FPURegister double_dst, - Register dst1, - Register dst2, + Register dst_mantissa, + Register dst_exponent, Register scratch2, FPURegister single_scratch) { ASSERT(!int_scratch.is(scratch2)); - ASSERT(!int_scratch.is(dst1)); - ASSERT(!int_scratch.is(dst2)); + ASSERT(!int_scratch.is(dst_mantissa)); + ASSERT(!int_scratch.is(dst_exponent)); Label done; if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(int_scratch, single_scratch); __ cvt_d_w(double_dst, single_scratch); if (destination == kCoreRegisters) { - __ Move(dst1, dst2, double_dst); + __ Move(dst_mantissa, dst_exponent, double_dst); } } else { Label fewer_than_20_useful_bits; // Expected output: - // | dst2 | dst1 | + // | dst_exponent | dst_mantissa | // | s | exp | mantissa | // Check for zero. 
- __ mov(dst2, int_scratch); - __ mov(dst1, int_scratch); + __ mov(dst_exponent, int_scratch); + __ mov(dst_mantissa, int_scratch); __ Branch(&done, eq, int_scratch, Operand(zero_reg)); // Preload the sign of the value. - __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask)); + __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask)); // Get the absolute value of the object (as an unsigned integer). Label skip_sub; - __ Branch(&skip_sub, ge, dst2, Operand(zero_reg)); + __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg)); __ Subu(int_scratch, zero_reg, int_scratch); __ bind(&skip_sub); // Get mantissa[51:20]. // Get the position of the first set bit. - __ Clz(dst1, int_scratch); + __ Clz(dst_mantissa, int_scratch); __ li(scratch2, 31); - __ Subu(dst1, scratch2, dst1); + __ Subu(dst_mantissa, scratch2, dst_mantissa); // Set the exponent. - __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias)); - __ Ins(dst2, scratch2, + __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); + __ Ins(dst_exponent, scratch2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); // Clear the first non null bit. __ li(scratch2, Operand(1)); - __ sllv(scratch2, scratch2, dst1); + __ sllv(scratch2, scratch2, dst_mantissa); __ li(at, -1); __ Xor(scratch2, scratch2, at); __ And(int_scratch, int_scratch, scratch2); // Get the number of bits to set in the lower part of the mantissa. - __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord)); + __ Subu(scratch2, dst_mantissa, + Operand(HeapNumber::kMantissaBitsInTopWord)); __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg)); // Set the higher 20 bits of the mantissa. __ srlv(at, int_scratch, scratch2); - __ or_(dst2, dst2, at); + __ or_(dst_exponent, dst_exponent, at); __ li(at, 32); __ subu(scratch2, at, scratch2); - __ sllv(dst1, int_scratch, scratch2); + __ sllv(dst_mantissa, int_scratch, scratch2); __ Branch(&done); __ bind(&fewer_than_20_useful_bits); __ li(at, HeapNumber::kMantissaBitsInTopWord); - __ subu(scratch2, at, dst1); + __ subu(scratch2, at, dst_mantissa); __ sllv(scratch2, int_scratch, scratch2); - __ Or(dst2, dst2, scratch2); - // Set dst1 to 0. - __ mov(dst1, zero_reg); + __ Or(dst_exponent, dst_exponent, scratch2); + // Set dst_mantissa to 0. + __ mov(dst_mantissa, zero_reg); } __ bind(&done); } @@ -835,8 +854,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, Register object, Destination destination, DoubleRegister double_dst, - Register dst1, - Register dst2, + DoubleRegister double_scratch, + Register dst_mantissa, + Register dst_exponent, Register heap_number_map, Register scratch1, Register scratch2, @@ -852,8 +872,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ JumpIfNotSmi(object, &obj_is_not_smi); __ SmiUntag(scratch1, object); - ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2, - scratch2, single_scratch); + ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa, + dst_exponent, scratch2, single_scratch); __ Branch(&done); __ bind(&obj_is_not_smi); @@ -864,15 +884,16 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, // Load the number. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load the double value. 
__ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset)); Register except_flag = scratch2; __ EmitFPUTruncate(kRoundToZero, - single_scratch, - double_dst, scratch1, + double_dst, + at, + double_scratch, except_flag, kCheckForInexactConversion); @@ -880,27 +901,51 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); if (destination == kCoreRegisters) { - __ Move(dst1, dst2, double_dst); + __ Move(dst_mantissa, dst_exponent, double_dst); } } else { ASSERT(!scratch1.is(object) && !scratch2.is(object)); // Load the double value in the destination registers. - __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); - __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); + bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); + if (save_registers) { + // Save both output registers, because the other one probably holds + // an important value too. + __ Push(dst_exponent, dst_mantissa); + } + __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset)); // Check for 0 and -0. - __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask)); - __ Or(scratch1, scratch1, Operand(dst2)); - __ Branch(&done, eq, scratch1, Operand(zero_reg)); + Label zero; + __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask)); + __ Or(scratch1, scratch1, Operand(dst_mantissa)); + __ Branch(&zero, eq, scratch1, Operand(zero_reg)); // Check that the value can be exactly represented by a 32-bit integer. // Jump to not_int32 if that's not the case. - DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32); + Label restore_input_and_miss; + DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, + &restore_input_and_miss); - // dst1 and dst2 were trashed. Reload the double value. - __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset)); - __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset)); + // dst_* were trashed. Reload the double value. + if (save_registers) { + __ Pop(dst_exponent, dst_mantissa); + } + __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); + __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset)); + __ Branch(&done); + + __ bind(&restore_input_and_miss); + if (save_registers) { + __ Pop(dst_exponent, dst_mantissa); + } + __ Branch(not_int32); + + __ bind(&zero); + if (save_registers) { + __ Drop(2); + } } __ bind(&done); @@ -914,7 +959,8 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, Register scratch1, Register scratch2, Register scratch3, - DoubleRegister double_scratch, + DoubleRegister double_scratch0, + DoubleRegister double_scratch1, Label* not_int32) { ASSERT(!dst.is(object)); ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object)); @@ -922,36 +968,34 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, !scratch1.is(scratch3) && !scratch2.is(scratch3)); - Label done; + Label done, maybe_undefined; __ UntagAndJumpIfSmi(dst, object, &done); __ AssertRootValue(heap_number_map, Heap::kHeapNumberMapRootIndex, "HeapNumberMap register clobbered."); - __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); + + __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); // Object is a heap number. // Convert the floating point value to a 32-bit integer. 
if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load the double value. - __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); + __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset)); - FPURegister single_scratch = double_scratch.low(); Register except_flag = scratch2; __ EmitFPUTruncate(kRoundToZero, - single_scratch, - double_scratch, + dst, + double_scratch0, scratch1, + double_scratch1, except_flag, kCheckForInexactConversion); // Jump to not_int32 if the operation did not succeed. __ Branch(not_int32, ne, except_flag, Operand(zero_reg)); - // Get the result in the destination register. - __ mfc1(dst, single_scratch); - } else { // Load the double value in the destination registers. __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset)); @@ -983,20 +1027,28 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, __ Subu(dst, zero_reg, dst); __ bind(&skip_sub); } + __ Branch(&done); + + __ bind(&maybe_undefined); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(not_int32, ne, object, Operand(at)); + // |undefined| is truncated to 0. + __ li(dst, Operand(Smi::FromInt(0))); + // Fall through. __ bind(&done); } void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, - Register src1, - Register src2, + Register src_exponent, + Register src_mantissa, Register dst, Register scratch, Label* not_int32) { // Get exponent alone in scratch. __ Ext(scratch, - src1, + src_exponent, HeapNumber::kExponentShift, HeapNumber::kExponentBits); @@ -1016,11 +1068,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Another way to put it is that if (exponent - signbit) > 30 then the // number cannot be represented as an int32. Register tmp = dst; - __ srl(at, src1, 31); + __ srl(at, src_exponent, 31); __ subu(tmp, scratch, at); __ Branch(not_int32, gt, tmp, Operand(30)); // - Bits [21:0] in the mantissa are not null. - __ And(tmp, src2, 0x3fffff); + __ And(tmp, src_mantissa, 0x3fffff); __ Branch(not_int32, ne, tmp, Operand(zero_reg)); // Otherwise the exponent needs to be big enough to shift left all the @@ -1031,20 +1083,20 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm, // Get the 32 higher bits of the mantissa in dst. __ Ext(dst, - src2, + src_mantissa, HeapNumber::kMantissaBitsInTopWord, 32 - HeapNumber::kMantissaBitsInTopWord); - __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord); + __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord); __ or_(dst, dst, at); // Create the mask and test the lower bits (of the higher bits). __ li(at, 32); __ subu(scratch, at, scratch); - __ li(src2, 1); - __ sllv(src1, src2, scratch); - __ Subu(src1, src1, Operand(1)); - __ And(src1, dst, src1); - __ Branch(not_int32, ne, src1, Operand(zero_reg)); + __ li(src_mantissa, 1); + __ sllv(src_exponent, src_mantissa, scratch); + __ Subu(src_exponent, src_exponent, Operand(1)); + __ And(src_exponent, dst, src_exponent); + __ Branch(not_int32, ne, src_exponent, Operand(zero_reg)); } @@ -1067,7 +1119,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( __ push(ra); __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments. 
if (!IsMipsSoftFloatABI) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // We are not using MIPS FPU instructions, and parameters for the runtime // function call are prepaired in a0-a3 registers, but function we are // calling is compiled with hard-float flag and expecting hard float ABI @@ -1083,7 +1135,7 @@ void FloatingPointHelper::CallCCodeForDoubleOperation( } // Store answer in the overwritable heap number. if (!IsMipsSoftFloatABI) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Double returned in register f0. __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); } else { @@ -1119,11 +1171,12 @@ bool WriteInt32ToHeapNumberStub::IsPregenerated() { } -void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() { +void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3); WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0); - stub1.GetCode()->set_is_pregenerated(true); - stub2.GetCode()->set_is_pregenerated(true); + stub1.GetCode(isolate)->set_is_pregenerated(true); + stub2.GetCode(isolate)->set_is_pregenerated(true); } @@ -1183,48 +1236,43 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { // for "identity and not NaN". static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, - Condition cc, - bool never_nan_nan) { + Condition cc) { Label not_identical; Label heap_number, return_equal; Register exp_mask_reg = t5; __ Branch(¬_identical, ne, a0, Operand(a1)); - // The two objects are identical. If we know that one of them isn't NaN then - // we now know they test equal. - if (cc != eq || !never_nan_nan) { - __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); - - // Test for NaN. Sadly, we can't just compare to factory->nan_value(), - // so we do the second best thing - test it ourselves. - // They are both equal and they are not both Smis so both of them are not - // Smis. If it's not a heap number, then return equal. - if (cc == less || cc == greater) { - __ GetObjectType(a0, t4, t4); - __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); - } else { - __ GetObjectType(a0, t4, t4); - __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE)); - // Comparing JS objects with <=, >= is complicated. - if (cc != eq) { - __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); - // Normally here we fall through to return_equal, but undefined is - // special: (undefined == undefined) == true, but - // (undefined <= undefined) == false! See ECMAScript 11.8.5. - if (cc == less_equal || cc == greater_equal) { - __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); - __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); - __ Branch(&return_equal, ne, a0, Operand(t2)); - if (cc == le) { - // undefined <= undefined should fail. - __ li(v0, Operand(GREATER)); - } else { - // undefined >= undefined should fail. - __ li(v0, Operand(LESS)); - } - __ Ret(); + __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask)); + + // Test for NaN. Sadly, we can't just compare to factory->nan_value(), + // so we do the second best thing - test it ourselves. + // They are both equal and they are not both Smis so both of them are not + // Smis. If it's not a heap number, then return equal. 
+ if (cc == less || cc == greater) { + __ GetObjectType(a0, t4, t4); + __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); + } else { + __ GetObjectType(a0, t4, t4); + __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE)); + // Comparing JS objects with <=, >= is complicated. + if (cc != eq) { + __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE)); + // Normally here we fall through to return_equal, but undefined is + // special: (undefined == undefined) == true, but + // (undefined <= undefined) == false! See ECMAScript 11.8.5. + if (cc == less_equal || cc == greater_equal) { + __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE)); + __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); + __ Branch(&return_equal, ne, a0, Operand(t2)); + if (cc == le) { + // undefined <= undefined should fail. + __ li(v0, Operand(GREATER)); + } else { + // undefined >= undefined should fail. + __ li(v0, Operand(LESS)); } + __ Ret(); } } } @@ -1240,46 +1288,44 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, } __ Ret(); - if (cc != eq || !never_nan_nan) { - // For less and greater we don't have to check for NaN since the result of - // x < x is false regardless. For the others here is some code to check - // for NaN. - if (cc != lt && cc != gt) { - __ bind(&heap_number); - // It is a heap number, so return non-equal if it's NaN and equal if it's - // not NaN. - - // The representation of NaN values has all exponent bits (52..62) set, - // and not all mantissa bits (0..51) clear. - // Read top bits of double representation (second word of value). - __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); - // Test that exponent bits are all set. - __ And(t3, t2, Operand(exp_mask_reg)); - // If all bits not set (ne cond), then not a NaN, objects are equal. - __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg)); - - // Shift out flag and all exponent bits, retaining only mantissa. - __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord); - // Or with all low-bits of mantissa. - __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); - __ Or(v0, t3, Operand(t2)); - // For equal we already have the right value in v0: Return zero (equal) - // if all bits in mantissa are zero (it's an Infinity) and non-zero if - // not (it's a NaN). For <= and >= we need to load v0 with the failing - // value if it's a NaN. - if (cc != eq) { - // All-zero means Infinity means equal. - __ Ret(eq, v0, Operand(zero_reg)); - if (cc == le) { - __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. - } else { - __ li(v0, Operand(LESS)); // NaN >= NaN should fail. - } + // For less and greater we don't have to check for NaN since the result of + // x < x is false regardless. For the others here is some code to check + // for NaN. + if (cc != lt && cc != gt) { + __ bind(&heap_number); + // It is a heap number, so return non-equal if it's NaN and equal if it's + // not NaN. + + // The representation of NaN values has all exponent bits (52..62) set, + // and not all mantissa bits (0..51) clear. + // Read top bits of double representation (second word of value). + __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset)); + // Test that exponent bits are all set. + __ And(t3, t2, Operand(exp_mask_reg)); + // If all bits not set (ne cond), then not a NaN, objects are equal. + __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg)); + + // Shift out flag and all exponent bits, retaining only mantissa. 
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord); + // Or with all low-bits of mantissa. + __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset)); + __ Or(v0, t3, Operand(t2)); + // For equal we already have the right value in v0: Return zero (equal) + // if all bits in mantissa are zero (it's an Infinity) and non-zero if + // not (it's a NaN). For <= and >= we need to load v0 with the failing + // value if it's a NaN. + if (cc != eq) { + // All-zero means Infinity means equal. + __ Ret(eq, v0, Operand(zero_reg)); + if (cc == le) { + __ li(v0, Operand(GREATER)); // NaN <= NaN should fail. + } else { + __ li(v0, Operand(LESS)); // NaN >= NaN should fail. } - __ Ret(); } - // No fall through here. + __ Ret(); } + // No fall through here. __ bind(¬_identical); } @@ -1313,7 +1359,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Rhs is a smi, lhs is a number. // Convert smi rhs to double. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ sra(at, rhs, kSmiTagSize); __ mtc1(at, f14); __ cvt_d_w(f14, f14); @@ -1327,7 +1373,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, __ mov(t6, rhs); ConvertToDoubleStub stub1(a1, a0, t6, t5); __ push(ra); - __ Call(stub1.GetCode()); + __ Call(stub1.GetCode(masm->isolate())); __ pop(ra); } @@ -1352,7 +1398,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, // Lhs is a smi, rhs is a number. // Convert smi lhs to double. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ sra(at, lhs, kSmiTagSize); __ mtc1(at, f12); __ cvt_d_w(f12, f12); @@ -1362,7 +1408,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, __ mov(t6, lhs); ConvertToDoubleStub stub2(a3, a2, t6, t5); __ push(ra); - __ Call(stub2.GetCode()); + __ Call(stub2.GetCode(masm->isolate())); __ pop(ra); // Load rhs to a double in a1, a0. if (rhs.is(a0)) { @@ -1380,7 +1426,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm, void EmitNanCheck(MacroAssembler* masm, Condition cc) { bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Lhs and rhs are already loaded to f12 and f14 register pairs. __ Move(t0, t1, f14); __ Move(t2, t3, f12); @@ -1447,7 +1493,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { // Exception: 0 and -0. bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Lhs and rhs are already loaded to f12 and f14 register pairs. __ Move(t0, t1, f14); __ Move(t2, t3, f12); @@ -1503,7 +1549,7 @@ static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { __ pop(ra); // Because this function returns int, result is in v0. __ Ret(); } else { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label equal, less_than; __ BranchF(&equal, NULL, eq, f12, f14); __ BranchF(&less_than, NULL, lt, f12, f14); @@ -1553,12 +1599,13 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, // Check for oddballs: true, false, null, undefined. __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE)); - // Now that we have the types we might as well check for symbol-symbol. - // Ensure that no non-strings have the symbol bit set. 
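// Portable restatement (not V8 code) of the NaN test used on the heap-number
// path above: a double is NaN exactly when all eleven exponent bits are set
// and at least one mantissa bit is set; an all-ones exponent with a zero
// mantissa is +/-Infinity, which compares equal to itself.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

static bool IsNaNByBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  uint64_t exponent = (bits >> 52) & 0x7FF;                 // Bits 62..52.
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);     // Bits 51..0.
  return exponent == 0x7FF && mantissa != 0;
}

int main() {
  std::printf("%d %d %d\n",
              IsNaNByBits(std::nan("")),   // 1
              IsNaNByBits(HUGE_VAL),       // 0: Infinity
              IsNaNByBits(1.5));           // 0
}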
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); - STATIC_ASSERT(kSymbolTag != 0); + // Now that we have the types we might as well check for + // internalized-internalized. + // Ensure that no non-strings have the internalized bit set. + STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask); + STATIC_ASSERT(kInternalizedTag != 0); __ And(t2, a2, Operand(a3)); - __ And(t0, t2, Operand(kIsSymbolMask)); + __ And(t0, t2, Operand(kIsInternalizedMask)); __ Branch(&return_not_equal, ne, t0, Operand(zero_reg)); } @@ -1578,7 +1625,7 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, // Both are heap numbers. Load them up then jump to the code we have // for that. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset)); __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset)); } else { @@ -1596,30 +1643,30 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, } -// Fast negative check for symbol-to-symbol equality. -static void EmitCheckForSymbolsOrObjects(MacroAssembler* masm, - Register lhs, - Register rhs, - Label* possible_strings, - Label* not_both_strings) { +// Fast negative check for internalized-to-internalized equality. +static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, + Register lhs, + Register rhs, + Label* possible_strings, + Label* not_both_strings) { ASSERT((lhs.is(a0) && rhs.is(a1)) || (lhs.is(a1) && rhs.is(a0))); // a2 is object type of lhs. - // Ensure that no non-strings have the symbol bit set. + // Ensure that no non-strings have the internalized bit set. Label object_test; - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ And(at, a2, Operand(kIsNotStringMask)); __ Branch(&object_test, ne, at, Operand(zero_reg)); - __ And(at, a2, Operand(kIsSymbolMask)); + __ And(at, a2, Operand(kIsInternalizedMask)); __ Branch(possible_strings, eq, at, Operand(zero_reg)); __ GetObjectType(rhs, a3, a3); __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE)); - __ And(at, a3, Operand(kIsSymbolMask)); + __ And(at, a3, Operand(kIsInternalizedMask)); __ Branch(possible_strings, eq, at, Operand(zero_reg)); - // Both are symbols. We already checked they weren't the same pointer - // so they are not equal. + // Both are internalized strings. We already checked they weren't the same + // pointer so they are not equal. __ Ret(USE_DELAY_SLOT); __ li(v0, Operand(1)); // Non-zero indicates not equal. @@ -1673,7 +1720,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm, if (!object_is_smi) { __ JumpIfSmi(object, &is_smi); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, @@ -1752,43 +1799,61 @@ void NumberToStringStub::Generate(MacroAssembler* masm) { } -// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared. -// On exit, v0 is 0, positive, or negative (smi) to indicate the result -// of the comparison. -void CompareStub::Generate(MacroAssembler* masm) { - Label slow; // Call builtin. 
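// Illustrative sketch (mask values invented for the example, not V8's real
// instance-type layout): ANDing the two instance-type bytes first lets a
// single bit test answer "are both operands internalized strings?", which is
// the fast negative case above -- two distinct internalized strings can never
// be equal, so the comparison can return "not equal" immediately.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kIsNotStringMaskSketch    = 0x80;
constexpr uint32_t kIsInternalizedMaskSketch = 0x10;

static bool BothInternalizedStrings(uint32_t lhs_type, uint32_t rhs_type) {
  uint32_t combined = lhs_type & rhs_type;             // A bit survives only if set in both.
  return (combined & kIsNotStringMaskSketch) == 0 &&   // Both are strings...
         (combined & kIsInternalizedMaskSketch) != 0;  // ...and both are internalized.
}

int main() {
  uint32_t internalized = kIsInternalizedMaskSketch;   // A string with the bit set.
  uint32_t plain = 0;                                  // A string without it.
  std::printf("%d %d\n",
              BothInternalizedStrings(internalized, internalized),  // 1
              BothInternalizedStrings(internalized, plain));        // 0
}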
- Label not_smis, both_loaded_as_doubles; +static void ICCompareStub_CheckInputType(MacroAssembler* masm, + Register input, + Register scratch, + CompareIC::State expected, + Label* fail) { + Label ok; + if (expected == CompareIC::SMI) { + __ JumpIfNotSmi(input, fail); + } else if (expected == CompareIC::NUMBER) { + __ JumpIfSmi(input, &ok); + __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, + DONT_DO_SMI_CHECK); + } + // We could be strict about internalized/string here, but as long as + // hydrogen doesn't care, the stub doesn't have to care either. + __ bind(&ok); +} - if (include_smi_compare_) { - Label not_two_smis, smi_done; - __ Or(a2, a1, a0); - __ JumpIfNotSmi(a2, ¬_two_smis); - __ sra(a1, a1, 1); - __ sra(a0, a0, 1); - __ Ret(USE_DELAY_SLOT); - __ subu(v0, a1, a0); - __ bind(¬_two_smis); - } else if (FLAG_debug_code) { - __ Or(a2, a1, a0); - __ And(a2, a2, kSmiTagMask); - __ Assert(ne, "CompareStub: unexpected smi operands.", - a2, Operand(zero_reg)); - } +// On entry a1 and a2 are the values to be compared. +// On exit a0 is 0, positive or negative to indicate the result of +// the comparison. +void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { + Register lhs = a1; + Register rhs = a0; + Condition cc = GetCondition(); + Label miss; + ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss); + ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss); + + Label slow; // Call builtin. + Label not_smis, both_loaded_as_doubles; + + Label not_two_smis, smi_done; + __ Or(a2, a1, a0); + __ JumpIfNotSmi(a2, ¬_two_smis); + __ sra(a1, a1, 1); + __ sra(a0, a0, 1); + __ Ret(USE_DELAY_SLOT); + __ subu(v0, a1, a0); + __ bind(¬_two_smis); // NOTICE! This code is only reached after a smi-fast-case check, so // it is certain that at least one operand isn't a smi. // Handle the case where the objects are identical. Either returns the answer // or goes to slow. Only falls through if the objects were not identical. - EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); + EmitIdenticalObjectComparison(masm, &slow, cc); // If either is a Smi (we know that not both are), then they can only // be strictly equal if the other is a HeapNumber. STATIC_ASSERT(kSmiTag == 0); ASSERT_EQ(0, Smi::FromInt(0)); - __ And(t2, lhs_, Operand(rhs_)); + __ And(t2, lhs, Operand(rhs)); __ JumpIfNotSmi(t2, ¬_smis, t0); // One operand is a smi. EmitSmiNonsmiComparison generates code that can: // 1) Return the answer. @@ -1798,8 +1863,8 @@ void CompareStub::Generate(MacroAssembler* masm) { // In cases 3 and 4 we have found out we were dealing with a number-number // comparison and the numbers have been loaded into f12 and f14 as doubles, // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU. - EmitSmiNonsmiComparison(masm, lhs_, rhs_, - &both_loaded_as_doubles, &slow, strict_); + EmitSmiNonsmiComparison(masm, lhs, rhs, + &both_loaded_as_doubles, &slow, strict()); __ bind(&both_loaded_as_doubles); // f12, f14 are the double representations of the left hand side @@ -1808,7 +1873,7 @@ void CompareStub::Generate(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label nan; __ li(t0, Operand(LESS)); __ li(t1, Operand(GREATER)); @@ -1835,7 +1900,7 @@ void CompareStub::Generate(MacroAssembler* masm) { __ bind(&nan); // NaN comparisons always fail. // Load whatever we need in v0 to make the comparison fail. 
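// Portable sketch of the smi-smi fast path at the top of GenerateGeneric:
// smis are stored shifted left by one bit, so both operands are untagged with
// an arithmetic right shift and the comparison result is simply their
// difference (negative: less, zero: equal, positive: greater). The difference
// of two 31-bit values cannot overflow 32 bits. Assumes the usual arithmetic
// >> on negative values, as the MIPS sra instruction provides.

#include <cstdint>
#include <cstdio>

static int32_t SmiTag(int32_t value) {            // Valid for values in the 31-bit smi range.
  return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
}

static int32_t SmiCompare(int32_t lhs_tagged, int32_t rhs_tagged) {
  return (lhs_tagged >> 1) - (rhs_tagged >> 1);   // Untag, then subtract.
}

int main() {
  std::printf("%d %d %d\n",
              SmiCompare(SmiTag(3), SmiTag(7)),    // negative: 3 < 7
              SmiCompare(SmiTag(-5), SmiTag(-5)),  // 0: equal
              SmiCompare(SmiTag(9), SmiTag(2)));   // positive: 9 > 2
}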
- if (cc_ == lt || cc_ == le) { + if (cc == lt || cc == le) { __ li(v0, Operand(GREATER)); } else { __ li(v0, Operand(LESS)); @@ -1844,61 +1909,64 @@ void CompareStub::Generate(MacroAssembler* masm) { } else { // Checks for NaN in the doubles we have loaded. Can return the answer or // fall through if neither is a NaN. Also binds rhs_not_nan. - EmitNanCheck(masm, cc_); + EmitNanCheck(masm, cc); // Compares two doubles that are not NaNs. Returns the answer. // Never falls through. - EmitTwoNonNanDoubleComparison(masm, cc_); + EmitTwoNonNanDoubleComparison(masm, cc); } __ bind(¬_smis); // At this point we know we are dealing with two different objects, // and neither of them is a Smi. The objects are in lhs_ and rhs_. - if (strict_) { + if (strict()) { // This returns non-equal for some object types, or falls through if it // was not lucky. - EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_); + EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); } - Label check_for_symbols; + Label check_for_internalized_strings; Label flat_string_check; // Check for heap-number-heap-number comparison. Can jump to slow case, // or load both doubles and jump to the code that handles - // that case. If the inputs are not doubles then jumps to check_for_symbols. + // that case. If the inputs are not doubles then jumps to + // check_for_internalized_strings. // In this case a2 will contain the type of lhs_. EmitCheckForTwoHeapNumbers(masm, - lhs_, - rhs_, + lhs, + rhs, &both_loaded_as_doubles, - &check_for_symbols, + &check_for_internalized_strings, &flat_string_check); - __ bind(&check_for_symbols); - if (cc_ == eq && !strict_) { - // Returns an answer for two symbols or two detectable objects. + __ bind(&check_for_internalized_strings); + if (cc == eq && !strict()) { + // Returns an answer for two internalized strings or two + // detectable objects. // Otherwise jumps to string case or not both strings case. // Assumes that a2 is the type of lhs_ on entry. - EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow); + EmitCheckForInternalizedStringsOrObjects( + masm, lhs, rhs, &flat_string_check, &slow); } // Check for both being sequential ASCII strings, and inline if that is the // case. __ bind(&flat_string_check); - __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow); + __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow); __ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3); - if (cc_ == eq) { + if (cc == eq) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, - lhs_, - rhs_, + lhs, + rhs, a2, a3, t0); } else { StringCompareStub::GenerateCompareFlatAsciiStrings(masm, - lhs_, - rhs_, + lhs, + rhs, a2, a3, t0, @@ -1909,18 +1977,18 @@ void CompareStub::Generate(MacroAssembler* masm) { __ bind(&slow); // Prepare for call to builtin. Push object pointers, a0 (lhs) first, // a1 (rhs) second. - __ Push(lhs_, rhs_); + __ Push(lhs, rhs); // Figure out which native to call and setup the arguments. Builtins::JavaScript native; - if (cc_ == eq) { - native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + if (cc == eq) { + native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { native = Builtins::COMPARE; int ncr; // NaN compare result. - if (cc_ == lt || cc_ == le) { + if (cc == lt || cc == le) { ncr = GREATER; } else { - ASSERT(cc_ == gt || cc_ == ge); // Remaining cases. + ASSERT(cc == gt || cc == ge); // Remaining cases. 
ncr = LESS; } __ li(a0, Operand(Smi::FromInt(ncr))); @@ -1930,6 +1998,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ InvokeBuiltin(native, JUMP_FUNCTION); + + __ bind(&miss); + GenerateMiss(masm); } @@ -1937,7 +2008,7 @@ void CompareStub::Generate(MacroAssembler* masm) { // it, too: zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { // This stub uses FPU instructions. - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label patch; const Register map = t5.is(tos_) ? t3 : t5; @@ -2052,7 +2123,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { // restore them. __ MultiPush(kJSCallerSaved | ra.bit()); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ MultiPushFPU(kCallerSavedFPU); } const int argument_count = 1; @@ -2066,7 +2137,7 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { ExternalReference::store_buffer_overflow_function(masm->isolate()), argument_count); if (save_doubles_ == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ MultiPopFPU(kCallerSavedFPU); } @@ -2098,8 +2169,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) { case UnaryOpIC::SMI: GenerateSmiStub(masm); break; - case UnaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); + case UnaryOpIC::NUMBER: + GenerateNumberStub(masm); break; case UnaryOpIC::GENERIC: GenerateGenericStub(masm); @@ -2179,13 +2250,13 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, // TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) { switch (op_) { case Token::SUB: - GenerateHeapNumberStubSub(masm); + GenerateNumberStubSub(masm); break; case Token::BIT_NOT: - GenerateHeapNumberStubBitNot(masm); + GenerateNumberStubBitNot(masm); break; default: UNREACHABLE(); @@ -2193,7 +2264,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) { Label non_smi, slow, call_builtin; GenerateSmiCodeSub(masm, &non_smi, &call_builtin); __ bind(&non_smi); @@ -2205,7 +2276,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubBitNot(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStubBitNot(MacroAssembler* masm) { Label non_smi, slow; GenerateSmiCodeBitNot(masm, &non_smi); __ bind(&non_smi); @@ -2299,7 +2370,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( if (CpuFeatures::IsSupported(FPU)) { // Convert the int32 in a1 to the heap number in v0. a2 is corrupted. - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(a1, f0); __ cvt_d_w(f0, f0); __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); @@ -2308,7 +2379,7 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot( // WriteInt32ToHeapNumberStub does not trigger GC, so we do not // have to set up a frame. 
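// Sketch of why the "failing value" trick used in this stub works. As the
// comment above says, the comparison paths reduce everything to a small
// integer (-1 less, 0 equal, 1 greater) that the caller turns into a boolean
// with a signed compare against zero. Loading GREATER therefore makes <, <=
// and == come out false, and loading LESS makes >, >= and == come out false,
// which is exactly what the undefined <= undefined and NaN cases need.

#include <cstdio>

enum Outcome { kLess = -1, kEqual = 0, kGreater = 1 };
enum Cond { kLt, kLe, kEq, kGe, kGt };

static bool OutcomeToBool(int outcome, Cond cc) {
  switch (cc) {
    case kLt: return outcome < 0;
    case kLe: return outcome <= 0;
    case kEq: return outcome == 0;
    case kGe: return outcome >= 0;
    case kGt: return outcome > 0;
  }
  return false;
}

int main() {
  std::printf("%d\n", OutcomeToBool(kGreater, kLe));  // 0: undefined <= undefined is false
  std::printf("%d\n", OutcomeToBool(kEqual, kEq));    // 1: undefined == undefined is true
  std::printf("%d\n", OutcomeToBool(kLess, kGe));     // 0: NaN >= NaN is false
}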
WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); } __ bind(&impossible); @@ -2370,20 +2441,23 @@ void UnaryOpStub::GenerateGenericCodeFallback( } +void BinaryOpStub::Initialize() { + platform_specific_bit_ = CpuFeatures::IsSupported(FPU); +} + + void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { Label get_result; __ Push(a1, a0); __ li(a2, Operand(Smi::FromInt(MinorKey()))); - __ li(a1, Operand(Smi::FromInt(op_))); - __ li(a0, Operand(Smi::FromInt(operands_type_))); - __ Push(a2, a1, a0); + __ push(a2); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 5, + 3, 1); } @@ -2394,59 +2468,8 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs( } -void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. - AllowStubCallsScope allow_stub_calls(masm, true); - switch (operands_type_) { - case BinaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case BinaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case BinaryOpIC::INT32: - GenerateInt32Stub(masm); - break; - case BinaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); - break; - case BinaryOpIC::ODDBALL: - GenerateOddballStub(masm); - break; - case BinaryOpIC::BOTH_STRING: - GenerateBothStringStub(masm); - break; - case BinaryOpIC::STRING: - GenerateStringStub(masm); - break; - case BinaryOpIC::GENERIC: - GenerateGeneric(masm); - break; - default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::PrintName(StringStream* stream) { - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - stream->Add("BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); -} - - - -void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { +void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm, + Token::Value op) { Register left = a1; Register right = a0; @@ -2457,7 +2480,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); Label not_smi_result; - switch (op_) { + switch (op) { case Token::ADD: __ AdduAndCheckForOverflow(v0, left, right, scratch1); __ RetOnNoOverflow(scratch1); @@ -2600,10 +2623,24 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) { } -void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, - bool smi_operands, - Label* not_numbers, - Label* gc_required) { +void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required, + OverwriteMode mode); + + +void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm, + BinaryOpIC::TypeInfo left_type, + BinaryOpIC::TypeInfo right_type, + bool smi_operands, + Label* not_numbers, + Label* gc_required, + Label* miss, + Token::Value op, + OverwriteMode mode) { Register left = a1; Register right = a0; Register scratch1 = t3; @@ -2615,11 +2652,17 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ AssertSmi(left); __ AssertSmi(right); } + if (left_type == BinaryOpIC::SMI) 
{ + __ JumpIfNotSmi(left, miss); + } + if (right_type == BinaryOpIC::SMI) { + __ JumpIfNotSmi(right, miss); + } Register heap_number_map = t2; __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - switch (op_) { + switch (op) { case Token::ADD: case Token::SUB: case Token::MUL: @@ -2629,25 +2672,42 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // depending on whether FPU is available or not. FloatingPointHelper::Destination destination = CpuFeatures::IsSupported(FPU) && - op_ != Token::MOD ? + op != Token::MOD ? FloatingPointHelper::kFPURegisters : FloatingPointHelper::kCoreRegisters; // Allocate new heap number for result. Register result = s0; - GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required); + BinaryOpStub_GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); // Load the operands. if (smi_operands) { FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); } else { - FloatingPointHelper::LoadOperands(masm, - destination, - heap_number_map, - scratch1, - scratch2, - not_numbers); + // Load right operand to f14 or a2/a3. + if (right_type == BinaryOpIC::INT32) { + FloatingPointHelper::LoadNumberAsInt32Double( + masm, right, destination, f14, f16, a2, a3, heap_number_map, + scratch1, scratch2, f2, miss); + } else { + Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers; + FloatingPointHelper::LoadNumber( + masm, destination, right, f14, a2, a3, heap_number_map, + scratch1, scratch2, fail); + } + // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it + // jumps to |miss|. + if (left_type == BinaryOpIC::INT32) { + FloatingPointHelper::LoadNumberAsInt32Double( + masm, left, destination, f12, f16, a0, a1, heap_number_map, + scratch1, scratch2, f2, miss); + } else { + Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers; + FloatingPointHelper::LoadNumber( + masm, destination, left, f12, a0, a1, heap_number_map, + scratch1, scratch2, fail); + } } // Calculate the result. @@ -2655,8 +2715,8 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Using FPU registers: // f12: Left value. // f14: Right value. - CpuFeatures::Scope scope(FPU); - switch (op_) { + CpuFeatureScope scope(masm, FPU); + switch (op) { case Token::ADD: __ add_d(f10, f12, f14); break; @@ -2682,7 +2742,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, } else { // Call the C function to handle the double operation. FloatingPointHelper::CallCCodeForDoubleOperation(masm, - op_, + op, result, scratch1); if (FLAG_debug_code) { @@ -2722,7 +2782,7 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, not_numbers); } Label result_not_a_smi; - switch (op_) { + switch (op) { case Token::BIT_OR: __ Or(a2, a3, Operand(a2)); break; @@ -2772,8 +2832,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } else { - GenerateHeapResultAllocation( - masm, result, heap_number_map, scratch1, scratch2, gc_required); + BinaryOpStub_GenerateHeapResultAllocation( + masm, result, heap_number_map, scratch1, scratch2, gc_required, + mode); } // a2: Answer as signed int32. @@ -2786,9 +2847,9 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, if (CpuFeatures::IsSupported(FPU)) { // Convert the int32 in a2 to the heap number in a0. As // mentioned above SHR needs to always produce a positive result. 
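// Small worked example of the comment just above: the result of JavaScript's
// >>> must be treated as an unsigned 32-bit value, so e.g. (-1 >>> 0) is
// 4294967295. That does not fit in a signed int32/smi, which is why the SHR
// path converts the result with the unsigned Cvt_d_uw and may need to return
// a heap number.

#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;
  uint32_t shr = static_cast<uint32_t>(x) >> 0;               // JS: x >>> 0
  std::printf("reinterpreted as signed: %d\n", static_cast<int32_t>(shr));   // -1 (wrong for JS)
  std::printf("kept unsigned:           %u\n", shr);                         // 4294967295
  std::printf("widened to double:       %.0f\n", static_cast<double>(shr));  // 4294967295
}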
- CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(a2, f0); - if (op_ == Token::SHR) { + if (op == Token::SHR) { __ Cvt_d_uw(f0, f0, f22); } else { __ cvt_d_w(f0, f0); @@ -2815,12 +2876,14 @@ void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm, // Generate the smi code. If the operation on smis are successful this return is // generated. If the result is not a smi and heap number allocation is not // requested the code falls through. If number allocation is requested but a -// heap number cannot be allocated the code jumps to the lable gc_required. -void BinaryOpStub::GenerateSmiCode( +// heap number cannot be allocated the code jumps to the label gc_required. +void BinaryOpStub_GenerateSmiCode( MacroAssembler* masm, Label* use_runtime, Label* gc_required, - SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { + Token::Value op, + BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, + OverwriteMode mode) { Label not_smis; Register left = a1; @@ -2833,12 +2896,14 @@ void BinaryOpStub::GenerateSmiCode( __ JumpIfNotSmi(scratch1, ¬_smis); // If the smi-smi operation results in a smi return is generated. - GenerateSmiSmiOperation(masm); + BinaryOpStub_GenerateSmiSmiOperation(masm, op); // If heap number results are possible generate the result in an allocated // heap number. - if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { - GenerateFPOperation(masm, true, use_runtime, gc_required); + if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) { + BinaryOpStub_GenerateFPOperation( + masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true, + use_runtime, gc_required, ¬_smis, op, mode); } __ bind(¬_smis); } @@ -2850,14 +2915,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { // Only allow smi results. - GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_); } else { // Allow heap number result and don't make a transition if a heap number // cannot be allocated. - GenerateSmiCode(masm, - &call_runtime, - &call_runtime, - ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, + mode_); } // Code falls through if the result is not returned as either a smi or heap @@ -2865,22 +2930,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { GenerateTypeTransition(masm); __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } -void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::STRING); - // Try to add arguments as strings, otherwise, transition to the generic - // BinaryOpIC type. - GenerateAddStrings(masm); - GenerateTypeTransition(masm); -} - - void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { Label call_runtime; - ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); + ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); ASSERT(op_ == Token::ADD); // If both arguments are strings, call the string add stub. // Otherwise, do a transition. 
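// Hedged sketch: the smi-smi ADD path further up relies on an
// add-and-check-for-overflow helper. MIPS has no overflow flag, so a standard
// branch-free check is used instead: signed addition overflowed exactly when
// both operands have the same sign and the result's sign differs. This shows
// the general technique; the macro assembler's exact instruction sequence may
// differ.

#include <cstdint>
#include <cstdio>

static bool AddWithOverflowCheck(int32_t lhs, int32_t rhs, int32_t* result) {
  // Do the addition on unsigned values so wrap-around is well defined.
  uint32_t sum = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
  *result = static_cast<int32_t>(sum);
  // Sign-bit test: ~(lhs ^ rhs) has the sign bit set iff the operands agree
  // in sign; (lhs ^ result) has it set iff the result's sign flipped.
  return (~(lhs ^ rhs) & (lhs ^ *result)) < 0;
}

int main() {
  int32_t r;
  std::printf("%d\n", AddWithOverflowCheck(0x40000000, 0x40000000, &r));  // 1: overflow
  bool overflowed = AddWithOverflowCheck(20, 22, &r);
  std::printf("%d %d\n", overflowed, r);                                  // 0 42
}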
@@ -2909,7 +2966,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::INT32); + ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); Register left = a1; Register right = a0; @@ -2932,7 +2989,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Label skip; __ Or(scratch1, left, right); __ JumpIfNotSmi(scratch1, &skip); - GenerateSmiSmiOperation(masm); + BinaryOpStub_GenerateSmiSmiOperation(masm, op_); // Fall through if the result is not a smi. __ bind(&skip); @@ -2942,6 +2999,15 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { case Token::MUL: case Token::DIV: case Token::MOD: { + // It could be that only SMIs have been seen at either the left + // or the right operand. For precise type feedback, patch the IC + // again if this changes. + if (left_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(left, &transition); + } + if (right_type_ == BinaryOpIC::SMI) { + __ JumpIfNotSmi(right, &transition); + } // Load both operands and check that they are 32-bit integer. // Jump to type transition if they are not. The registers a0 and a1 (right // and left) are preserved for the runtime call. @@ -2954,6 +3020,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { right, destination, f14, + f16, a2, a3, heap_number_map, @@ -2965,6 +3032,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { left, destination, f12, + f16, t0, t1, heap_number_map, @@ -2974,7 +3042,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { &transition); if (destination == FloatingPointHelper::kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label return_heap_number; switch (op_) { case Token::ADD: @@ -3001,9 +3069,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { Register except_flag = scratch2; __ EmitFPUTruncate(kRoundToZero, - single_scratch, - f10, scratch1, + f10, + at, + f16, except_flag); if (result_type_ <= BinaryOpIC::INT32) { @@ -3012,7 +3081,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } // Check if the result fits in a smi. - __ mfc1(scratch1, single_scratch); __ Addu(scratch2, scratch1, Operand(0x40000000)); // If not try to return a heap number. __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg)); @@ -3034,16 +3102,17 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); // Return a heap number, or fall through to type transition or runtime // call if we can't. - if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::HEAP_NUMBER + if (result_type_ >= ((op_ == Token::DIV) ? BinaryOpIC::NUMBER : BinaryOpIC::INT32)) { // We are using FPU registers so s0 is available. heap_number_result = s0; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); __ mov(v0, heap_number_result); __ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset)); __ Ret(); @@ -3061,12 +3130,13 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { // Allocate a heap number to store the result. 
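// Portable restatement of the "does this int32 fit in a smi?" test used above
// (add 0x40000000, then branch on the sign): v is inside the 31-bit smi range
// [-2^30, 2^30 - 1] exactly when v + 2^30 is still non-negative as a signed
// 32-bit value. The unsigned addition below makes the two's-complement
// wrap-around explicit.

#include <cstdint>
#include <cstdio>

static bool FitsInSmi(int32_t v) {
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u);
  return shifted >= 0;
}

int main() {
  std::printf("%d %d %d %d\n",
              FitsInSmi((1 << 30) - 1),   // 1: largest smi
              FitsInSmi(1 << 30),         // 0: needs a heap number
              FitsInSmi(-(1 << 30)),      // 1: smallest smi
              FitsInSmi(INT32_MIN));      // 0
}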
heap_number_result = s0; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &pop_and_call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &pop_and_call_runtime, + mode_); // Load the left value from the value saved on the stack. __ Pop(a1, a0); @@ -3105,6 +3175,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { scratch2, scratch3, f0, + f2, &transition); FloatingPointHelper::LoadNumberAsInt32(masm, right, @@ -3114,6 +3185,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { scratch2, scratch3, f0, + f2, &transition); // The ECMA-262 standard specifies that, for shift operations, only the @@ -3175,15 +3247,16 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { __ bind(&return_heap_number); heap_number_result = t1; - GenerateHeapResultAllocation(masm, - heap_number_result, - heap_number_map, - scratch1, - scratch2, - &call_runtime); + BinaryOpStub_GenerateHeapResultAllocation(masm, + heap_number_result, + heap_number_map, + scratch1, + scratch2, + &call_runtime, + mode_); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); if (op_ != Token::SHR) { // Convert the result to a floating point value. @@ -3224,6 +3297,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { } __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3257,25 +3331,37 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { } __ bind(&done); - GenerateHeapNumberStub(masm); + GenerateNumberStub(masm); } -void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { - Label call_runtime; - GenerateFPOperation(masm, false, &call_runtime, &call_runtime); +void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { + Label call_runtime, transition; + BinaryOpStub_GenerateFPOperation( + masm, left_type_, right_type_, false, + &transition, &call_runtime, &transition, op_, mode_); + + __ bind(&transition); + GenerateTypeTransition(masm); __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { - Label call_runtime, call_string_add_or_runtime; + Label call_runtime, call_string_add_or_runtime, transition; - GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_); - GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime); + BinaryOpStub_GenerateFPOperation( + masm, left_type_, right_type_, false, + &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_); + + __ bind(&transition); + GenerateTypeTransition(masm); __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { @@ -3283,6 +3369,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { } __ bind(&call_runtime); + GenerateRegisterArgsPush(masm); GenerateCallRuntime(masm); } @@ -3318,63 +3405,20 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { } -void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { - GenerateRegisterArgsPush(masm); - switch (op_) { - case Token::ADD: - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ 
InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::GenerateHeapResultAllocation( - MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required) { - +void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Register result, + Register heap_number_map, + Register scratch1, + Register scratch2, + Label* gc_required, + OverwriteMode mode) { // Code below will scratch result if allocation fails. To keep both arguments // intact for the runtime call result cannot be one of these. ASSERT(!result.is(a0) && !result.is(a1)); - if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { + if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) { Label skip_allocation, allocated; - Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0; + Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0; // If the overwritable operand is already an object, we skip the // allocation of a heap number. __ JumpIfNotSmi(overwritable_operand, &skip_allocation); @@ -3387,7 +3431,7 @@ void BinaryOpStub::GenerateHeapResultAllocation( __ mov(result, overwritable_operand); __ bind(&allocated); } else { - ASSERT(mode_ == NO_OVERWRITE); + ASSERT(mode == NO_OVERWRITE); __ AllocateHeapNumber( result, scratch1, scratch2, heap_number_map, gc_required); } @@ -3416,7 +3460,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { const bool tagged = (argument_type_ == TAGGED); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); if (tagged) { // Argument is a number and is on stack and in a0. @@ -3526,7 +3570,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 1); } else { ASSERT(CpuFeatures::IsSupported(FPU)); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label no_update; Label skip_cache; @@ -3654,7 +3698,7 @@ void InterruptStub::Generate(MacroAssembler* masm) { void MathPowStub::Generate(MacroAssembler* masm) { - CpuFeatures::Scope fpu_scope(FPU); + CpuFeatureScope fpu_scope(masm, FPU); const Register base = a1; const Register exponent = a2; const Register heapnumbermap = t1; @@ -3708,9 +3752,10 @@ void MathPowStub::Generate(MacroAssembler* masm) { Label int_exponent_convert; // Detect integer exponents stored as double. __ EmitFPUTruncate(kRoundToMinusInf, - single_scratch, - double_exponent, scratch, + double_exponent, + at, + double_scratch, scratch2, kCheckForInexactConversion); // scratch2 == 0 means there was no conversion error. 
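// Portable sketch of the integer-exponent detection just above: truncate the
// double toward minus infinity and accept it only if the conversion was exact
// and the result fits in an int32 (the stub checks the "inexact" flag from
// EmitFPUTruncate for the same purpose).

#include <cmath>
#include <cstdint>
#include <cstdio>

static bool IsIntegerExponent(double exponent, int32_t* out) {
  double floored = std::floor(exponent);                    // kRoundToMinusInf.
  if (floored != exponent) return false;                    // Conversion would be inexact.
  if (floored < -2147483648.0 || floored > 2147483647.0) return false;
  *out = static_cast<int32_t>(floored);
  return true;
}

int main() {
  int32_t n = 0;
  std::printf("%d\n", IsIntegerExponent(8.0, &n));    // 1, n == 8
  std::printf("%d\n", IsIntegerExponent(0.5, &n));    // 0: fractional exponent
  std::printf("%d\n", IsIntegerExponent(-3.0, &n));   // 1, n == -3
}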
@@ -3768,7 +3813,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ push(ra); { AllowExternalCallThatCantCauseGC scope(masm); - __ PrepareCallCFunction(0, 2, scratch); + __ PrepareCallCFunction(0, 2, scratch2); __ SetCallCDoubleArguments(double_base, double_exponent); __ CallCFunction( ExternalReference::power_double_double_function(masm->isolate()), @@ -3779,7 +3824,6 @@ void MathPowStub::Generate(MacroAssembler* masm) { __ jmp(&done); __ bind(&int_exponent_convert); - __ mfc1(scratch, single_scratch); } // Calculate power with integer exponent. @@ -3880,31 +3924,53 @@ bool CEntryStub::IsPregenerated() { } -void CodeStub::GenerateStubsAheadOfTime() { - CEntryStub::GenerateAheadOfTime(); - WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(); - StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); - RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); +void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { + CEntryStub::GenerateAheadOfTime(isolate); + WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); } -void CodeStub::GenerateFPStubs() { - CEntryStub save_doubles(1, kSaveFPRegs); - Handle<Code> code = save_doubles.GetCode(); - code->set_is_pregenerated(true); - StoreBufferOverflowStub stub(kSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); - code->GetIsolate()->set_fp_stubs_generated(true); +void CodeStub::GenerateFPStubs(Isolate* isolate) { + SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) + ? kSaveFPRegs + : kDontSaveFPRegs; + CEntryStub save_doubles(1, mode); + StoreBufferOverflowStub stub(mode); + // These stubs might already be in the snapshot, detect that and don't + // regenerate, which would lead to code stub initialization state being messed + // up. + Code* save_doubles_code; + if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { + save_doubles_code = *save_doubles.GetCode(isolate); + save_doubles_code->set_is_pregenerated(true); + + Code* store_buffer_overflow_code = *stub.GetCode(isolate); + store_buffer_overflow_code->set_is_pregenerated(true); + } + isolate->set_fp_stubs_generated(true); } -void CEntryStub::GenerateAheadOfTime() { +void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { CEntryStub stub(1, kDontSaveFPRegs); - Handle<Code> code = stub.GetCode(); + Handle<Code> code = stub.GetCode(isolate); code->set_is_pregenerated(true); } +static void JumpIfOOM(MacroAssembler* masm, + Register value, + Register scratch, + Label* oom_label) { + STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); + STATIC_ASSERT(kFailureTag == 3); + __ andi(scratch, value, 0xf); + __ Branch(oom_label, eq, scratch, Operand(0xf)); +} + + void CEntryStub::GenerateCore(MacroAssembler* masm, Label* throw_normal_exception, Label* throw_termination_exception, @@ -4011,14 +4077,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ Branch(&retry, eq, t0, Operand(zero_reg)); // Special handling of out of memory exceptions. - Failure* out_of_memory = Failure::OutOfMemoryException(); - __ Branch(USE_DELAY_SLOT, - throw_out_of_memory_exception, - eq, - v0, - Operand(reinterpret_cast<int32_t>(out_of_memory))); - // If we throw the OOM exception, the value of a3 doesn't matter. - // Any instruction can be in the delay slot that's not a jump. + JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. 
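// Hedged sketch of the bit test JumpIfOOM emits. The STATIC_ASSERTs above pin
// down just enough of the Failure encoding: failures carry kFailureTag (3) in
// their two low bits and OUT_OF_MEMORY_EXCEPTION is failure type 3, so an
// out-of-memory failure is a value whose four low bits are all ones. The
// field layout in MakeFailure below is illustrative beyond those two facts.

#include <cstdint>
#include <cstdio>

constexpr uint32_t kFailureTagSketch = 3;        // Low two bits of every failure value.
constexpr uint32_t kOutOfMemoryTypeSketch = 3;   // Failure type kept in the next two bits.

static uint32_t MakeFailure(uint32_t type, uint32_t payload) {
  return (payload << 4) | (type << 2) | kFailureTagSketch;
}

static bool IsOutOfMemory(uint32_t value) {
  return (value & 0xf) == 0xf;                   // The test JumpIfOOM performs.
}

int main() {
  std::printf("%d\n", IsOutOfMemory(MakeFailure(kOutOfMemoryTypeSketch, 0x1)));  // 1
  std::printf("%d\n", IsOutOfMemory(MakeFailure(/*some other type*/ 1, 0x1)));   // 0
}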
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex); @@ -4105,13 +4164,16 @@ void CEntryStub::Generate(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, isolate); - __ li(a0, Operand(false, RelocInfo::NONE)); + __ li(a0, Operand(false, RelocInfo::NONE32)); __ li(a2, Operand(external_caught)); __ sw(a0, MemOperand(a2)); // Set pending exception and v0 to out of memory exception. - Failure* out_of_memory = Failure::OutOfMemoryException(); + Label already_have_failure; + JumpIfOOM(masm, v0, t0, &already_have_failure); + Failure* out_of_memory = Failure::OutOfMemoryException(0x1); __ li(v0, Operand(reinterpret_cast<int32_t>(out_of_memory))); + __ bind(&already_have_failure); __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, isolate))); __ sw(v0, MemOperand(a2)); @@ -4143,7 +4205,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ MultiPush(kCalleeSaved | ra.bit()); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Save callee-saved FPU registers. __ MultiPushFPU(kCalleeSavedFPU); // Set up the reserved register for 0.0. @@ -4292,7 +4354,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { __ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Restore callee-saved fpu registers. __ MultiPopFPU(kCalleeSavedFPU); } @@ -4482,12 +4544,177 @@ void InstanceofStub::Generate(MacroAssembler* masm) { } +void ArrayLengthStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- ra : return address + // -- a0 : key + // -- a1 : receiver + // ----------------------------------- + __ Branch(&miss, ne, a0, + Operand(masm->isolate()->factory()->length_string())); + receiver = a1; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- a2 : name + // -- ra : return address + // -- a0 : receiver + // -- sp[0] : receiver + // ----------------------------------- + receiver = a0; + } + + StubCompiler::GenerateLoadArrayLength(masm, receiver, a3, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void FunctionPrototypeStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- ra : return address + // -- a0 : key + // -- a1 : receiver + // ----------------------------------- + __ Branch(&miss, ne, a0, + Operand(masm->isolate()->factory()->prototype_string())); + receiver = a1; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- a2 : name + // -- ra : return address + // -- a0 : receiver + // -- sp[0] : receiver + // ----------------------------------- + receiver = a0; + } + + StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StringLengthStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- ra : return address + // -- a0 : key + // -- a1 : receiver + // ----------------------------------- + __ Branch(&miss, ne, a0, + 
Operand(masm->isolate()->factory()->length_string())); + receiver = a1; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- a2 : name + // -- ra : return address + // -- a0 : receiver + // -- sp[0] : receiver + // ----------------------------------- + receiver = a0; + } + + StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss, + support_wrapper_); + + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StoreArrayLengthStub::Generate(MacroAssembler* masm) { + // This accepts as a receiver anything JSArray::SetElementsLength accepts + // (currently anything except for external arrays which means anything with + // elements of FixedArray type). Value must be a number, but only smis are + // accepted as the most common case. + Label miss; + + Register receiver; + Register value; + if (kind() == Code::KEYED_STORE_IC) { + // ----------- S t a t e ------------- + // -- ra : return address + // -- a0 : value + // -- a1 : key + // -- a2 : receiver + // ----------------------------------- + __ Branch(&miss, ne, a1, + Operand(masm->isolate()->factory()->length_string())); + receiver = a2; + value = a0; + } else { + ASSERT(kind() == Code::STORE_IC); + // ----------- S t a t e ------------- + // -- ra : return address + // -- a0 : value + // -- a1 : receiver + // -- a2 : key + // ----------------------------------- + receiver = a1; + value = a0; + } + Register scratch = a3; + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Check that the object is a JS array. + __ GetObjectType(receiver, scratch, scratch); + __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE)); + + // Check that elements are FixedArray. + // We rely on StoreIC_ArrayLength below to deal with all types of + // fast elements (including COW). + __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); + __ GetObjectType(scratch, scratch, scratch); + __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE)); + + // Check that the array has fast properties, otherwise the length + // property might have been redefined. + __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); + __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset)); + __ LoadRoot(at, Heap::kHashTableMapRootIndex); + __ Branch(&miss, eq, scratch, Operand(at)); + + // Check that value is a smi. + __ JumpIfNotSmi(value, &miss); + + // Prepare tail call to StoreIC_ArrayLength. + __ Push(receiver, value); + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); + __ TailCallExternalReference(ref, 2, 1); + + __ bind(&miss); + + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + Register InstanceofStub::left() { return a0; } Register InstanceofStub::right() { return a1; } +void LoadFieldStub::Generate(MacroAssembler* masm) { + StubCompiler::DoGenerateFastPropertyLoad(masm, v0, reg_, inobject_, index_); + __ Ret(); +} + + void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The displacement is the offset of the last parameter (if any) // relative to the frame pointer. @@ -4910,8 +5137,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); - Label runtime, invoke_regexp; - + Label runtime; // Allocation of registers for this function. 
These are in callee save // registers and will be preserved by the call to the native RegExp code, as // this code is called using the normal C calling convention. When calling @@ -4963,149 +5189,111 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check that the number of captures fit in the static offsets vector buffer. __ lw(a2, FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. This - // uses the asumption that smis are 2 * their untagged value. + // Check (number_of_captures + 1) * 2 <= offsets vector size + // Or number_of_captures * 2 <= offsets vector size - 2 + // Multiplying by 2 comes for free since a2 is smi-tagged. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); - __ Addu(a2, a2, Operand(2)); // a2 was a smi. - // Check that the static offsets vector buffer is large enough. + STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); __ Branch( - &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize)); - - // a2: Number of capture registers - // regexp_data: RegExp data (FixedArray) - // Check that the second argument is a string. - __ lw(subject, MemOperand(sp, kSubjectOffset)); - __ JumpIfSmi(subject, &runtime); - __ GetObjectType(subject, a0, a0); - __ And(a0, a0, Operand(kIsNotStringMask)); - STATIC_ASSERT(kStringTag == 0); - __ Branch(&runtime, ne, a0, Operand(zero_reg)); - - // Get the length of the string to r3. - __ lw(a3, FieldMemOperand(subject, String::kLengthOffset)); - - // a2: Number of capture registers - // a3: Length of subject string as a smi - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the third argument is a positive smi less than the subject - // string length. A negative value will be greater (unsigned comparison). - __ lw(a0, MemOperand(sp, kPreviousIndexOffset)); - __ JumpIfNotSmi(a0, &runtime); - __ Branch(&runtime, ls, a3, Operand(a0)); - - // a2: Number of capture registers - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check that the fourth object is a JSArray object. - __ lw(a0, MemOperand(sp, kLastMatchInfoOffset)); - __ JumpIfSmi(a0, &runtime); - __ GetObjectType(a0, a1, a1); - __ Branch(&runtime, ne, a1, Operand(JS_ARRAY_TYPE)); - // Check that the JSArray is in fast case. - __ lw(last_match_info_elements, - FieldMemOperand(a0, JSArray::kElementsOffset)); - __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); - __ Branch(&runtime, ne, a0, Operand( - isolate->factory()->fixed_array_map())); - // Check that the last match info has space for the capture registers and the - // additional information. - __ lw(a0, - FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); - __ Addu(a2, a2, Operand(RegExpImpl::kLastMatchOverhead)); - __ sra(at, a0, kSmiTagSize); // Untag length for comparison. - __ Branch(&runtime, gt, a2, Operand(at)); + &runtime, hi, a2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); // Reset offset for possibly sliced string. __ mov(t0, zero_reg); - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_string; + __ lw(subject, MemOperand(sp, kSubjectOffset)); + __ JumpIfSmi(subject, &runtime); + __ mov(a3, subject); // Make a copy of the original subject string. 
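// Worked example of the rewritten capture-count bounds check above. With c
// capture groups the RegExp needs (c + 1) * 2 slots in the static offsets
// vector. The capture count is read as a smi, which is already 2 * c, so
// comparing it against kVectorSize - 2 tests the same inequality with no
// extra arithmetic. The vector size below is illustrative, not V8's constant.

#include <cstdio>

constexpr int kStaticOffsetsVectorSizeSketch = 30;

static bool FitsInStaticOffsetsVector(int capture_count) {
  int smi_tagged_count = capture_count * 2;            // What the stub loads from the JSRegExp.
  return smi_tagged_count <= kStaticOffsetsVectorSizeSketch - 2;
}

int main() {
  std::printf("%d\n", FitsInStaticOffsetsVector(14));  // 1: (14 + 1) * 2 == 30 slots, fits
  std::printf("%d\n", FitsInStaticOffsetsVector(15));  // 0: would need 32 slots
}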
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); - // First check for flat string. None of the following string type tests will - // succeed if subject is not a string or a short external string. + // subject: subject string + // a3: subject string + // a0: subject string instance type + // regexp_data: RegExp data (FixedArray) + // Handle subject string according to its encoding and representation: + // (1) Sequential string? If yes, go to (5). + // (2) Anything but sequential or cons? If yes, go to (6). + // (3) Cons string. If the string is flat, replace subject with first string. + // Otherwise bailout. + // (4) Is subject external? If yes, go to (7). + // (5) Sequential string. Load regexp code according to encoding. + // (E) Carry on. + /// [...] + + // Deferred code at the end of the stub: + // (6) Not a long external string? If yes, go to (8). + // (7) External string. Make it, offset-wise, look like a sequential string. + // Go to (5). + // (8) Short external string or not a string? If yes, bail out to runtime. + // (9) Sliced string. Replace subject with parent. Go to (4). + + Label seq_string /* 5 */, external_string /* 7 */, + check_underlying /* 4 */, not_seq_nor_cons /* 6 */, + not_long_external /* 8 */; + + // (1) Sequential string? If yes, go to (5). __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask | kShortExternalStringMask)); STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); - __ Branch(&seq_string, eq, a1, Operand(zero_reg)); + __ Branch(&seq_string, eq, a1, Operand(zero_reg)); // Go to (5). - // subject: Subject string - // a0: instance type if Subject string - // regexp_data: RegExp data (FixedArray) - // a1: whether subject is a string and if yes, its string representation - // Check for flat cons string or sliced string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - // In the case of a sliced string its offset has to be taken into account. - Label cons_string, external_string, check_encoding; + // (2) Anything but sequential or cons? If yes, go to (6). STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); - __ Branch(&cons_string, lt, a1, Operand(kExternalStringTag)); - __ Branch(&external_string, eq, a1, Operand(kExternalStringTag)); + // Go to (6). + __ Branch(¬_seq_nor_cons, ge, a1, Operand(kExternalStringTag)); - // Catch non-string subject or short external string. - STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); - __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask)); - __ Branch(&runtime, ne, at, Operand(zero_reg)); - - // String is sliced. - __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset)); - __ sra(t0, t0, kSmiTagSize); - __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); - // t5: offset of sliced string, smi-tagged. - __ jmp(&check_encoding); - // String is a cons string, check whether it is flat. - __ bind(&cons_string); + // (3) Cons string. Check that it's flat. + // Replace subject with first string and reload instance type. 
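// Illustrative transcription (tag values invented for the example) of the
// numbered dispatch plan above: the subject string's instance-type bits pick
// one of the representations, and each case either reaches the sequential
// fast path, rewrites the subject (cons/sliced), fakes a sequential layout
// (external), or bails out to the runtime.

#include <cstdint>
#include <cstdio>

enum Shape { kSequential, kCons, kSliced, kExternal, kShortExternalOrNotString };

constexpr uint32_t kIsNotStringMaskS          = 0x80;
constexpr uint32_t kStringRepresentationMaskS = 0x03;
constexpr uint32_t kSeqStringTagS             = 0x00;
constexpr uint32_t kConsStringTagS            = 0x01;
constexpr uint32_t kExternalStringTagS        = 0x02;
constexpr uint32_t kSlicedStringTagS          = 0x03;
constexpr uint32_t kShortExternalStringMaskS  = 0x10;

static Shape Classify(uint32_t instance_type) {
  if (instance_type & kIsNotStringMaskS) return kShortExternalOrNotString;    // (8)
  switch (instance_type & kStringRepresentationMaskS) {
    case kSeqStringTagS:    return kSequential;                               // (1)/(5)
    case kConsStringTagS:   return kCons;                                     // (3)
    case kSlicedStringTagS: return kSliced;                                   // (9)
    default:
      return (instance_type & kShortExternalStringMaskS)
                 ? kShortExternalOrNotString                                  // (8)
                 : kExternal;                                                 // (7)
  }
}

int main() {
  std::printf("%d %d %d\n",
              Classify(kSeqStringTagS),                                      // 0: sequential
              Classify(kConsStringTagS),                                     // 1: cons
              Classify(kExternalStringTagS | kShortExternalStringMaskS));    // 4: bail out
}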
__ lw(a0, FieldMemOperand(subject, ConsString::kSecondOffset)); - __ LoadRoot(a1, Heap::kEmptyStringRootIndex); + __ LoadRoot(a1, Heap::kempty_stringRootIndex); __ Branch(&runtime, ne, a0, Operand(a1)); __ lw(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); - // Is first part of cons or parent of slice a flat string? - __ bind(&check_encoding); + + // (4) Is subject external? If yes, go to (7). + __ bind(&check_underlying); __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); STATIC_ASSERT(kSeqStringTag == 0); __ And(at, a0, Operand(kStringRepresentationMask)); - __ Branch(&external_string, ne, at, Operand(zero_reg)); + // The underlying external string is never a short external string. + STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + __ Branch(&external_string, ne, at, Operand(zero_reg)); // Go to (7). + // (5) Sequential string. Load regexp code according to encoding. __ bind(&seq_string); - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // a0: Instance type of subject string + // subject: sequential subject string (or look-alike, external string) + // a3: original subject string + // Load previous index and check range before a3 is overwritten. We have to + // use a3 instead of subject here because subject might have been only made + // to look like a sequential string when it actually is an external string. + __ lw(a1, MemOperand(sp, kPreviousIndexOffset)); + __ JumpIfNotSmi(a1, &runtime); + __ lw(a3, FieldMemOperand(a3, String::kLengthOffset)); + __ Branch(&runtime, ls, a3, Operand(a1)); + __ sra(a1, a1, kSmiTagSize); // Untag the Smi. + STATIC_ASSERT(kStringEncodingMask == 4); - STATIC_ASSERT(kAsciiStringTag == 4); + STATIC_ASSERT(kOneByteStringTag == 4); STATIC_ASSERT(kTwoByteStringTag == 0); - // Find the code object based on the assumptions above. __ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII. __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); __ sra(a3, a0, 2); // a3 is 1 for ASCII, 0 for UC16 (used below). __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); __ Movz(t9, t1, a0); // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset. + // (E) Carry on. String handling is done. + // t9: irregexp code // Check that the irregexp code has been generated for the actual string // encoding. If it has, the field contains a code object otherwise it contains // a smi (code flushing support). __ JumpIfSmi(t9, &runtime); - // a3: encoding of subject string (1 if ASCII, 0 if two_byte); - // t9: code - // subject: Subject string - // regexp_data: RegExp data (FixedArray) - // Load used arguments before starting to push arguments for call to native - // RegExp code to avoid handling changing stack height. - __ lw(a1, MemOperand(sp, kPreviousIndexOffset)); - __ sra(a1, a1, kSmiTagSize); // Untag the Smi. - // a1: previous index // a3: encoding of subject string (1 if ASCII, 0 if two_byte); // t9: code @@ -5200,9 +5388,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // subject: subject string (callee saved) // regexp_data: RegExp data (callee saved) // last_match_info_elements: Last match info elements (callee saved) - // Check the result. 
- Label success; __ Branch(&success, eq, v0, Operand(1)); // We expect exactly one result since we force the called regexp to behave @@ -5243,10 +5429,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ lw(a1, FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); // Calculate number of capture registers (number_of_captures + 1) * 2. + // Multiplying by 2 comes for free since r1 is smi-tagged. STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); __ Addu(a1, a1, Operand(2)); // a1 was a smi. + __ lw(a0, MemOperand(sp, kLastMatchInfoOffset)); + __ JumpIfSmi(a0, &runtime); + __ GetObjectType(a0, a2, a2); + __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE)); + // Check that the JSArray is in fast case. + __ lw(last_match_info_elements, + FieldMemOperand(a0, JSArray::kElementsOffset)); + __ lw(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kFixedArrayMapRootIndex); + __ Branch(&runtime, ne, a0, Operand(at)); + // Check that the last match info has space for the capture registers and the + // additional information. + __ lw(a0, + FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); + __ Addu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead)); + __ sra(at, a0, kSmiTagSize); + __ Branch(&runtime, gt, a2, Operand(at)); + // a1: number of capture registers // subject: subject string // Store the capture count. @@ -5260,10 +5465,11 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ mov(a2, subject); __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset, - a2, + subject, t3, kRAHasNotBeenSaved, kDontSaveFPRegs); + __ mov(subject, a2); __ sw(subject, FieldMemOperand(last_match_info_elements, RegExpImpl::kLastInputOffset)); @@ -5305,8 +5511,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ lw(v0, MemOperand(sp, kLastMatchInfoOffset)); __ DropAndRet(4); - // External string. Short external strings have already been ruled out. - // a0: scratch + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + + // Deferred code for string handling. + // (6) Not a long external string? If yes, go to (8). + __ bind(&not_seq_nor_cons); + // Go to (8). + __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag)); + + // (7) External string. Make it, offset-wise, look like a sequential string. __ bind(&external_string); __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset)); __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset)); @@ -5322,15 +5537,24 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ lw(subject, FieldMemOperand(subject, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ Subu(subject, subject, SeqTwoByteString::kHeaderSize - kHeapObjectTag); - __ jmp(&seq_string); + __ jmp(&seq_string); // Go to (5). - // Do the runtime call to execute the regexp. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + // (8) Short external string or not a string? If yes, bail out to runtime. + __ bind(&not_long_external); + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask)); + __ Branch(&runtime, ne, at, Operand(zero_reg)); + + // (9) Sliced string.
Replace subject with parent. Go to (4). + // Load offset into t0 and replace subject string with parent. + __ lw(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset)); + __ sra(t0, t0, kSmiTagSize); + __ lw(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); + __ jmp(&check_underlying); // Go to (4). #endif // V8_INTERPRETED_REGEXP } @@ -5422,12 +5646,13 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) { } -static void GenerateRecordCallTarget(MacroAssembler* masm) { +static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. // a1 : the function to call // a2 : cache cell for call target + ASSERT(!FLAG_optimize_constructed_arrays); Label done; ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), @@ -5464,6 +5689,78 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { } +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // a1 : the function to call + // a2 : cache cell for call target + ASSERT(FLAG_optimize_constructed_arrays); + Label initialize, done, miss, megamorphic, not_array_function; + + ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()), + masm->isolate()->heap()->undefined_value()); + ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()), + masm->isolate()->heap()->the_hole_value()); + + // Load the cache state into a3. + __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ Branch(&done, eq, a3, Operand(a1)); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ Branch(&done, eq, a3, Operand(at)); + + // Special handling of the Array() function, which caches not only the + // monomorphic Array function but the initial ElementsKind with special + // sentinels + Handle<Object> terminal_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), + LAST_FAST_ELEMENTS_KIND); + __ Branch(&miss, ne, a3, Operand(terminal_kind_sentinel)); + // Make sure the function is the Array() function + __ LoadArrayFunction(a3); + __ Branch(&megamorphic, ne, a1, Operand(a3)); + __ jmp(&done); + + __ bind(&miss); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ LoadRoot(at, Heap::kTheHoleValueRootIndex); + __ Branch(&initialize, eq, a3, Operand(at)); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. + __ bind(&megamorphic); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); + + // An uninitialized cache is patched with the function or sentinel to + // indicate the ElementsKind if function is the Array constructor. + __ bind(&initialize); + // Make sure the function is the Array() function + __ LoadArrayFunction(a3); + __ Branch(&not_array_function, ne, a1, Operand(a3)); + + // The target function is the Array constructor, install a sentinel value in + // the constructor's type info cell that will track the initial ElementsKind + // that should be used for the array when its constructed.
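The two GenerateRecordCallTarget variants above drive a small state machine over the call-target cell: uninitialized, monomorphic (a cached JSFunction, or an ElementsKind sentinel for the Array constructor), and megamorphic. A simplified standalone model of those transitions in plain C++ (made-up types and enum values, not V8's TypeFeedbackCells API):

#include <cassert>

// Simplified model of a call-target feedback cell (not V8's real layout).
enum CellState { kUninitialized, kMonomorphic, kArrayKindSentinel, kMegamorphic };

struct FeedbackCell {
  CellState state = kUninitialized;
  const void* target = nullptr;  // cached callee, compared by identity only
};

// Mirrors the stub's control flow: a hit or an already-megamorphic cell changes
// nothing; a miss on a monomorphic cell goes megamorphic; an uninitialized cell
// records the callee (with a special sentinel when the callee is Array()).
static void RecordCallTarget(FeedbackCell* cell, const void* fn, bool is_array_fn) {
  if (cell->state == kMegamorphic) return;
  if (cell->state == kMonomorphic || cell->state == kArrayKindSentinel) {
    if (cell->target == fn) return;   // monomorphic hit
    cell->state = kMegamorphic;       // monomorphic miss
    cell->target = nullptr;
    return;
  }
  cell->state = is_array_fn ? kArrayKindSentinel : kMonomorphic;
  cell->target = fn;
}

int main() {
  FeedbackCell cell;
  int f = 0, g = 0;
  RecordCallTarget(&cell, &f, false);
  assert(cell.state == kMonomorphic);
  RecordCallTarget(&cell, &f, false);  // same target: stays monomorphic
  assert(cell.state == kMonomorphic);
  RecordCallTarget(&cell, &g, false);  // different target: megamorphic
  assert(cell.state == kMegamorphic);
  return 0;
}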
+ Handle<Object> initial_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(masm->isolate(), + GetInitialFastElementsKind()); + __ li(a3, Operand(initial_kind_sentinel)); + __ sw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); + __ Branch(&done); + + __ bind(&not_array_function); + __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset)); + // No need for a write barrier here - cells are rescanned. + + __ bind(&done); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { // a1 : the function to call // a2 : cache cell for call target @@ -5496,7 +5793,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE)); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Fast-case: Invoke the function now. @@ -5534,8 +5835,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { // Check for function proxy. __ Branch(&non_function, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE)); __ push(a1); // Put proxy as additional argument. - __ li(a0, Operand(argc_ + 1, RelocInfo::NONE)); - __ li(a2, Operand(0, RelocInfo::NONE)); + __ li(a0, Operand(argc_ + 1, RelocInfo::NONE32)); + __ li(a2, Operand(0, RelocInfo::NONE32)); __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY); __ SetCallKind(t1, CALL_AS_METHOD); { @@ -5570,13 +5871,19 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE)); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Jump to the function-specific construct stub. - __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); - __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset)); - __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag)); + Register jmp_reg = FLAG_optimize_constructed_arrays ? a3 : a2; + __ lw(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); + __ lw(jmp_reg, FieldMemOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ Addu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); __ Jump(at); // a0: number of arguments @@ -5592,52 +5899,13 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); __ bind(&do_call); // Set expected number of arguments to zero (not changing r0). - __ li(a2, Operand(0, RelocInfo::NONE)); + __ li(a2, Operand(0, RelocInfo::NONE32)); __ SetCallKind(t1, CALL_AS_METHOD); __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), RelocInfo::CODE_TARGET); } -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. -void CompareStub::PrintName(StringStream* stream) { - ASSERT((lhs_.is(a0) && rhs_.is(a1)) || - (lhs_.is(a1) && rhs_.is(a0))); - const char* cc_name; - switch (cc_) { - case lt: cc_name = "LT"; break; - case gt: cc_name = "GT"; break; - case le: cc_name = "LE"; break; - case ge: cc_name = "GE"; break; - case eq: cc_name = "EQ"; break; - case ne: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - bool is_equality = cc_ == eq || cc_ == ne; - stream->Add("CompareStub_%s", cc_name); - stream->Add(lhs_.is(a0) ?
"_a0" : "_a1"); - stream->Add(rhs_.is(a0) ? "_a0" : "_a1"); - if (strict_ && is_equality) stream->Add("_STRICT"); - if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); - if (!include_number_compare_) stream->Add("_NO_NUMBER"); - if (!include_smi_compare_) stream->Add("_NO_SMI"); -} - - -int CompareStub::MinorKey() { - // Encode the two parameters in a unique 16 bit value. - ASSERT(static_cast<unsigned>(cc_) < (1 << 14)); - ASSERT((lhs_.is(a0) && rhs_.is(a1)) || - (lhs_.is(a1) && rhs_.is(a0))); - return ConditionField::encode(static_cast<unsigned>(cc_)) - | RegisterField::encode(lhs_.is(a0)) - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) - | IncludeSmiCompareField::encode(include_smi_compare_); -} - - // StringCharCodeAtGenerator. void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { Label flat_string; @@ -5748,11 +6016,11 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { STATIC_ASSERT(kSmiTag == 0); STATIC_ASSERT(kSmiShiftSize == 0); - ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); + ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1)); __ And(t0, code_, Operand(kSmiTagMask | - ((~String::kMaxAsciiCharCode) << kSmiTagSize))); + ((~String::kMaxOneByteCharCode) << kSmiTagSize))); __ Branch(&slow_case_, ne, t0, Operand(zero_reg)); __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); @@ -5785,23 +6053,6 @@ void StringCharFromCodeGenerator::GenerateSlow( } -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, - const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest, Register src, @@ -5947,7 +6198,7 @@ void StringHelper::GenerateCopyCharactersLong(MacroAssembler* masm, } -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, +void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -5960,7 +6211,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register scratch = scratch3; // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. + // different hash algorithm. Don't try to look for these in the string table. Label not_array_index; __ Subu(scratch, c1, Operand(static_cast<int>('0'))); __ Branch(¬_array_index, @@ -5995,43 +6246,43 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string. - // Load symbol table. - // Load address of first element of the symbol table. - Register symbol_table = c2; - __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); + // Load string table. + // Load address of first element of the string table. + Register string_table = c2; + __ LoadRoot(string_table, Heap::kStringTableRootIndex); Register undefined = scratch4; __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); - // Calculate capacity mask from the symbol table capacity. 
+ // Calculate capacity mask from the string table capacity. Register mask = scratch2; - __ lw(mask, FieldMemOperand(symbol_table, SymbolTable::kCapacityOffset)); + __ lw(mask, FieldMemOperand(string_table, StringTable::kCapacityOffset)); __ sra(mask, mask, 1); __ Addu(mask, mask, -1); - // Calculate untagged address of the first element of the symbol table. - Register first_symbol_table_element = symbol_table; - __ Addu(first_symbol_table_element, symbol_table, - Operand(SymbolTable::kElementsStartOffset - kHeapObjectTag)); + // Calculate untagged address of the first element of the string table. + Register first_string_table_element = string_table; + __ Addu(first_string_table_element, string_table, + Operand(StringTable::kElementsStartOffset - kHeapObjectTag)); // Registers. // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string // mask: capacity mask - // first_symbol_table_element: address of the first element of - // the symbol table + // first_string_table_element: address of the first element of + // the string table // undefined: the undefined object // scratch: - - // Perform a number of probes in the symbol table. + // Perform a number of probes in the string table. const int kProbes = 4; - Label found_in_symbol_table; + Label found_in_string_table; Label next_probe[kProbes]; Register candidate = scratch5; // Scratch register contains candidate. for (int i = 0; i < kProbes; i++) { - // Calculate entry in symbol table. + // Calculate entry in string table. if (i > 0) { - __ Addu(candidate, hash, Operand(SymbolTable::GetProbeOffset(i))); + __ Addu(candidate, hash, Operand(StringTable::GetProbeOffset(i))); } else { __ mov(candidate, hash); } @@ -6039,9 +6290,9 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, __ And(candidate, candidate, Operand(mask)); // Load the entry from the symble table. - STATIC_ASSERT(SymbolTable::kEntrySize == 1); + STATIC_ASSERT(StringTable::kEntrySize == 1); __ sll(scratch, candidate, kPointerSizeLog2); - __ Addu(scratch, scratch, first_symbol_table_element); + __ Addu(scratch, scratch, first_string_table_element); __ lw(candidate, MemOperand(scratch)); // If entry is undefined no string with this hash can be found. @@ -6053,7 +6304,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Must be the hole (deleted entry). if (FLAG_debug_code) { __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); - __ Assert(eq, "oddball in symbol table is not undefined or the hole", + __ Assert(eq, "oddball in string table is not undefined or the hole", scratch, Operand(candidate)); } __ jmp(&next_probe[i]); @@ -6071,8 +6322,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Check if the two characters match. // Assumes that word load is little endian. - __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize)); - __ Branch(&found_in_symbol_table, eq, chars, Operand(scratch)); + __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize)); + __ Branch(&found_in_string_table, eq, chars, Operand(scratch)); __ bind(&next_probe[i]); } @@ -6081,7 +6332,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Scratch register contains result when we fall through to here. 
Register result = candidate; - __ bind(&found_in_symbol_table); + __ bind(&found_in_string_table); __ mov(v0, result); } @@ -6182,6 +6433,9 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ Branch(&runtime, ne, t0, Operand(zero_reg)); + Label single_char; + __ Branch(&single_char, eq, a2, Operand(1)); + // Short-cut for the case of trivial substring. Label return_v0; // v0: original string @@ -6211,7 +6465,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ Branch(&sliced_string, ne, t0, Operand(zero_reg)); // Cons string. Check whether it is flat, then fetch first part. __ lw(t1, FieldMemOperand(v0, ConsString::kSecondOffset)); - __ LoadRoot(t0, Heap::kEmptyStringRootIndex); + __ LoadRoot(t0, Heap::kempty_stringRootIndex); __ Branch(&runtime, ne, t1, Operand(t0)); __ lw(t1, FieldMemOperand(v0, ConsString::kFirstOffset)); // Update instance type. @@ -6250,7 +6504,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // string's encoding is wrong because we always have to recheck encoding of // the newly created string's parent anyways due to externalized strings. Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ And(t0, a1, Operand(kStringEncodingMask)); __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg)); @@ -6288,12 +6542,12 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ bind(&sequential_string); // Locate first character of underlying subject string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); - __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); + __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ bind(&allocate_result); // Sequential acii string. Allocate the result. - STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); + STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); __ And(t0, a1, Operand(kStringEncodingMask)); __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg)); @@ -6304,13 +6558,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ Addu(t1, t1, a3); // Locate first character of result. - __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // v0: result string // a1: first character of result string // a2: result string length // t1: first character of substring to copy - STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); StringHelper::GenerateCopyCharactersLong( masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED); __ jmp(&return_v0); @@ -6342,6 +6596,18 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Just jump to runtime to create the sub string. 
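The renamed GenerateTwoCharacterStringTableProbe above performs only a fixed number of probes into the open-addressed string table and falls through to allocation when it gives up, so a miss here does not prove the string is absent. A toy standalone sketch of that bounded probing in plain C++ (the table layout and hash below are invented for illustration):

#include <cstdint>
#include <string>
#include <vector>

// Toy open-addressed table of two-character strings (not V8's StringTable).
struct ToyStringTable {
  std::vector<std::string> slots;  // "" means an empty slot
  explicit ToyStringTable(size_t capacity) : slots(capacity) {}
};

static uint32_t ToyHash(char c1, char c2) {
  return static_cast<uint32_t>(c1) * 31 + static_cast<uint32_t>(c2);
}

// Probe at most kProbes slots; like the stub, giving up here does NOT prove
// the string is missing, it only means the caller must take the slow path.
static const std::string* Probe(const ToyStringTable& table, char c1, char c2) {
  const int kProbes = 4;
  const uint32_t mask = static_cast<uint32_t>(table.slots.size()) - 1;
  uint32_t hash = ToyHash(c1, c2);
  for (int i = 0; i < kProbes; i++) {
    uint32_t index = (hash + i) & mask;     // simplified probe offset
    const std::string& candidate = table.slots[index];
    if (candidate.empty()) return nullptr;  // definitely not in the table
    if (candidate.size() == 2 && candidate[0] == c1 && candidate[1] == c2)
      return &candidate;
  }
  return nullptr;  // gave up; caller allocates a new two-character string
}

int main() {
  ToyStringTable table(16);  // capacity must be a power of two for the mask
  table.slots[ToyHash('o', 'k') & 15] = "ok";
  return Probe(table, 'o', 'k') != nullptr ? 0 : 1;
}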
__ bind(&runtime); __ TailCallRuntime(Runtime::kSubString, 3, 1); + + __ bind(&single_char); + // v0: original string + // a1: instance type + // a2: length + // a3: from index (untagged) + __ SmiTag(a3, a3); + StringCharAtGenerator generator( + v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ DropAndRet(3); + generator.SkipSlow(masm, &runtime); } @@ -6442,7 +6708,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // doesn't need an additional compare. __ SmiUntag(length); __ Addu(scratch1, length, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ Addu(left, left, Operand(scratch1)); __ Addu(right, right, Operand(scratch1)); __ Subu(length, zero_reg, length); @@ -6582,8 +6848,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Adding two lengths can't overflow. STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2); __ Addu(t2, a2, Operand(a3)); - // Use the symbol table when adding two one character strings, as it - // helps later optimizations to return a symbol here. + // Use the string table when adding two one character strings, as it + // helps later optimizations to return a string here. __ Branch(&longer_than_two, ne, t2, Operand(2)); // Check that both strings are non-external ASCII strings. @@ -6597,13 +6863,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { &call_runtime); // Get the two characters forming the sub string. - __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize)); - __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize)); + __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize)); + __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize)); - // Try to lookup two character string in symbol table. If it is not found + // Try to lookup two character string in string table. If it is not found // just allocate a new one. Label make_two_character_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( + StringHelper::GenerateTwoCharacterStringTableProbe( masm, a2, a3, t2, t3, t0, t1, t5, &make_two_character_string); __ IncrementCounter(counters->string_add_native(), 1, a2, a3); __ DropAndRet(2); @@ -6616,7 +6882,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { // in a little endian mode). __ li(t2, Operand(2)); __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime); - __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize)); + __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize)); __ IncrementCounter(counters->string_add_native(), 1, a2, a3); __ DropAndRet(2); @@ -6663,11 +6929,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ And(at, t0, Operand(kAsciiDataHintMask)); __ and_(at, at, t1); __ Branch(&ascii_data, ne, at, Operand(zero_reg)); - - __ xor_(t0, t0, t1); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag)); - __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag)); + __ Xor(t0, t0, Operand(t1)); + STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0); + __ And(t0, t0, Operand(kOneByteStringTag | kAsciiDataHintTag)); + __ Branch(&ascii_data, eq, t0, + Operand(kOneByteStringTag | kAsciiDataHintTag)); // Allocate a two byte cons string. 
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime); @@ -6700,11 +6966,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ And(t4, t0, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); Label skip_first_add; __ Branch(&skip_first_add, ne, t4, Operand(zero_reg)); __ Branch(USE_DELAY_SLOT, &first_prepared); - __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag); + __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag); __ bind(&skip_first_add); // External string: rule out short external string and load string resource. STATIC_ASSERT(kShortExternalStringTag != 0); @@ -6715,11 +6981,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { STATIC_ASSERT(kSeqStringTag == 0); __ And(t4, t1, Operand(kStringRepresentationMask)); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); Label skip_second_add; __ Branch(&skip_second_add, ne, t4, Operand(zero_reg)); __ Branch(USE_DELAY_SLOT, &second_prepared); - __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag); + __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag); __ bind(&skip_second_add); // External string: rule out short external string and load string resource. STATIC_ASSERT(kShortExternalStringTag != 0); @@ -6740,7 +7006,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg)); __ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime); - __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // v0: result string. // t3: first character of first string. // a1: first character of second string @@ -6828,7 +7094,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMIS); + ASSERT(state_ == CompareIC::SMI); Label miss; __ Or(a2, a1, a0); __ JumpIfNotSmi(a2, &miss); @@ -6849,30 +7115,53 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { } -void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBERS); +void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; Label miss; - __ And(a2, a1, Operand(a0)); - __ JumpIfSmi(a2, &generic_stub); - __ GetObjectType(a0, a2, a2); - __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE)); - __ GetObjectType(a1, a2, a2); - __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE)); + if (left_ == CompareIC::SMI) { + __ JumpIfNotSmi(a1, &miss); + } + if (right_ == CompareIC::SMI) { + __ JumpIfNotSmi(a0, &miss); + } // Inlining the double comparison and falling back to the general compare // stub if NaN is involved or FPU is unsupported. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); // Load left and right operand. 
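The loads that follow handle each operand of the rewritten GenerateNumbers being either a smi (untag, then convert to double) or a heap number (load the double payload directly). A rough standalone model of that per-operand conversion in plain C++, with a deliberately simplified tagging scheme rather than V8's object layout:

#include <cassert>
#include <cstdint>

// Simplified tagged value: either a small integer or a boxed double.
struct Tagged {
  bool is_smi;
  int32_t smi;
  double heap_number;
};

static Tagged MakeSmi(int32_t v) { return {true, v, 0.0}; }
static Tagged MakeHeapNumber(double v) { return {false, 0, v}; }

// Mirrors the stub's left/right handling: untag and convert a smi (the
// mtc1 + cvt_d_w pair in the hunk), or read the double payload.
static double ToDouble(const Tagged& value) {
  return value.is_smi ? static_cast<double>(value.smi) : value.heap_number;
}

// -1, 0, 1, mirroring the LESS/EQUAL/GREATER results the stub materializes.
static int CompareNumbers(const Tagged& left, const Tagged& right) {
  double l = ToDouble(left), r = ToDouble(right);
  if (l < r) return -1;
  if (l > r) return 1;
  return 0;  // NaNs would instead bail out to the generic stub
}

int main() {
  assert(CompareNumbers(MakeSmi(3), MakeHeapNumber(3.5)) == -1);
  assert(CompareNumbers(MakeHeapNumber(2.0), MakeSmi(2)) == 0);
  return 0;
}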
- __ Subu(a2, a1, Operand(kHeapObjectTag)); - __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); + Label done, left, left_smi, right_smi; + __ JumpIfSmi(a0, &right_smi); + __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, + DONT_DO_SMI_CHECK); __ Subu(a2, a0, Operand(kHeapObjectTag)); __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset)); + __ Branch(&left); + __ bind(&right_smi); + __ SmiUntag(a2, a0); // Can't clobber a0 yet. + FPURegister single_scratch = f6; + __ mtc1(a2, single_scratch); + __ cvt_d_w(f2, single_scratch); + + __ bind(&left); + __ JumpIfSmi(a1, &left_smi); + __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, + DONT_DO_SMI_CHECK); + __ Subu(a2, a1, Operand(kHeapObjectTag)); + __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset)); + __ Branch(&done); + __ bind(&left_smi); + __ SmiUntag(a2, a1); // Can't clobber a1 yet. + single_scratch = f8; + __ mtc1(a2, single_scratch); + __ cvt_d_w(f0, single_scratch); + + __ bind(&done); // Return a result of -1, 0, or 1, or use CompareStub for NaNs. Label fpu_eq, fpu_lt; @@ -6896,15 +7185,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } __ bind(&unordered); - - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); __ bind(&generic_stub); - __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); + ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + CompareIC::GENERIC); + __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { __ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ Branch(&miss, ne, a0, Operand(at)); + __ JumpIfSmi(a1, &unordered); __ GetObjectType(a1, a2, a2); __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE)); __ jmp(&unordered); @@ -6921,8 +7211,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } -void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOLS); +void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::INTERNALIZED_STRING); Label miss; // Registers containing left and right operands respectively. @@ -6934,14 +7224,14 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { // Check that both operands are heap objects. __ JumpIfEitherSmi(left, right, &miss); - // Check that both operands are symbols. + // Check that both operands are internalized strings. __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ And(tmp1, tmp1, Operand(tmp2)); - __ And(tmp1, tmp1, kIsSymbolMask); + __ And(tmp1, tmp1, kIsInternalizedMask); __ Branch(&miss, eq, tmp1, Operand(zero_reg)); // Make sure a0 is non-zero. At this point input operands are // guaranteed to be non-zero. @@ -6949,7 +7239,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { STATIC_ASSERT(EQUAL == 0); STATIC_ASSERT(kSmiTag == 0); __ mov(v0, right); - // Symbols are compared by identity. + // Internalized strings are compared by identity. 
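The identity comparison relied on just above works because internalized strings are deduplicated: equal contents map to a single canonical object, so the IC only needs a pointer compare. A toy standalone illustration in plain C++, where std::unordered_set stands in for the string table (this is not V8's internalization machinery):

#include <cassert>
#include <string>
#include <unordered_set>

// Toy "internalization": every distinct string value gets exactly one
// canonical object, so equality of internalized strings is pointer identity.
static const std::string* Internalize(std::unordered_set<std::string>& table,
                                      const std::string& s) {
  return &*table.insert(s).first;
}

int main() {
  std::unordered_set<std::string> table;
  const std::string* a = Internalize(table, "foo");
  const std::string* b = Internalize(table, "foo");
  const std::string* c = Internalize(table, "bar");
  assert(a == b);  // same contents -> same canonical object
  assert(a != c);
  return 0;
}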
__ Ret(ne, left, Operand(right)); __ li(v0, Operand(Smi::FromInt(EQUAL))); __ Ret(); @@ -6959,8 +7249,62 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { } +void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::UNIQUE_NAME); + ASSERT(GetCondition() == eq); + Label miss; + + // Registers containing left and right operands respectively. + Register left = a1; + Register right = a0; + Register tmp1 = a2; + Register tmp2 = a3; + + // Check that both operands are heap objects. + __ JumpIfEitherSmi(left, right, &miss); + + // Check that both operands are unique names. This leaves the instance + // types loaded in tmp1 and tmp2. + STATIC_ASSERT(kInternalizedTag != 0); + __ lw(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); + __ lw(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); + __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); + __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); + + Label succeed1; + __ And(at, tmp1, Operand(kIsInternalizedMask)); + __ Branch(&succeed1, ne, at, Operand(zero_reg)); + __ Branch(&miss, ne, tmp1, Operand(SYMBOL_TYPE)); + __ bind(&succeed1); + + Label succeed2; + __ And(at, tmp2, Operand(kIsInternalizedMask)); + __ Branch(&succeed2, ne, at, Operand(zero_reg)); + __ Branch(&miss, ne, tmp2, Operand(SYMBOL_TYPE)); + __ bind(&succeed2); + + // Use a0 as result + __ mov(v0, a0); + + // Unique names are compared by identity. + Label done; + __ Branch(&done, ne, left, Operand(right)); + // Make sure a0 is non-zero. At this point input operands are + // guaranteed to be non-zero. + ASSERT(right.is(a0)); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ li(v0, Operand(Smi::FromInt(EQUAL))); + __ bind(&done); + __ Ret(); + + __ bind(&miss); + GenerateMiss(masm); +} + + void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRINGS); + ASSERT(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -6999,13 +7343,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // Handle not identical strings. - // Check that both strings are symbols. If they are, we're done + // Check that both strings are internalized strings. If they are, we're done // because we already know they are not identical. if (equality) { ASSERT(GetCondition() == eq); - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ And(tmp3, tmp1, Operand(tmp2)); - __ And(tmp5, tmp3, Operand(kIsSymbolMask)); + __ And(tmp5, tmp3, Operand(kIsInternalizedMask)); Label is_symbol; __ Branch(&is_symbol, eq, tmp5, Operand(zero_reg)); // Make sure a0 is non-zero. At this point input operands are @@ -7045,7 +7389,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECTS); + ASSERT(state_ == CompareIC::OBJECT); Label miss; __ And(a2, a1, Operand(a0)); __ JumpIfSmi(a2, &miss); @@ -7149,10 +7493,9 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, // Push return address (accessible to GC through exit frame pc). // This spot for ra was reserved in EnterExitFrame. masm->sw(ra, MemOperand(sp, kCArgsSlotsSize)); - masm->li(ra, - Operand(reinterpret_cast<intptr_t>(GetCode().location()), - RelocInfo::CODE_TARGET), - CONSTANT_SIZE); + intptr_t loc = + reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location()); + masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE); // Call the function. 
masm->Jump(t9); // Make sure the stored 'ra' points to this position. @@ -7160,13 +7503,14 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm, } -void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register receiver, - Register properties, - Handle<String> name, - Register scratch0) { +void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register receiver, + Register properties, + Handle<Name> name, + Register scratch0) { + ASSERT(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -7180,10 +7524,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ lw(index, FieldMemOperand(properties, kCapacityOffset)); __ Subu(index, index, Operand(1)); __ And(index, index, Operand( - Smi::FromInt(name->Hash() + StringDictionary::GetProbeOffset(i)))); + Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ sll(at, index, 1); __ Addu(index, index, at); @@ -7204,19 +7548,20 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); // Stop if found the property. - __ Branch(miss, eq, entity_name, Operand(Handle<String>(name))); + __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name))); - Label the_hole; - __ Branch(&the_hole, eq, entity_name, Operand(tmp)); + Label good; + __ Branch(&good, eq, entity_name, Operand(tmp)); - // Check if the entry name is not a symbol. + // Check if the entry name is not a unique name. __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); __ lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); - __ And(scratch0, entity_name, Operand(kIsSymbolMask)); - __ Branch(miss, eq, scratch0, Operand(zero_reg)); + __ And(scratch0, entity_name, Operand(kIsInternalizedMask)); + __ Branch(&good, ne, scratch0, Operand(zero_reg)); + __ Branch(miss, ne, entity_name, Operand(SYMBOL_TYPE)); - __ bind(&the_hole); + __ bind(&good); // Restore the properties. __ lw(properties, @@ -7230,8 +7575,8 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ MultiPush(spill_mask); __ lw(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - __ li(a1, Operand(Handle<String>(name))); - StringDictionaryLookupStub stub(NEGATIVE_LOOKUP); + __ li(a1, Operand(Handle<Name>(name))); + NameDictionaryLookupStub stub(NEGATIVE_LOOKUP); __ CallStub(&stub); __ mov(at, v0); __ MultiPop(spill_mask); @@ -7241,23 +7586,23 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, } -// Probe the string dictionary in the |elements| register. Jump to the +// Probe the name dictionary in the |elements| register. Jump to the // |done| label if a property with the given name is found. Jump to // the |miss| label otherwise. // If lookup was successful |scratch2| will be equal to elements + 4 * index. 
-void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register scratch1, - Register scratch2) { +void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register scratch1, + Register scratch2) { ASSERT(!elements.is(scratch1)); ASSERT(!elements.is(scratch2)); ASSERT(!name.is(scratch1)); ASSERT(!name.is(scratch2)); - __ AssertString(name); + __ AssertName(name); // Compute the capacity mask. __ lw(scratch1, FieldMemOperand(elements, kCapacityOffset)); @@ -7269,21 +7614,21 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, // cover ~93% of loads from dictionaries. for (int i = 0; i < kInlinedProbes; i++) { // Compute the masked index: (hash + i + i * i) & mask. - __ lw(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); + __ lw(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); if (i > 0) { // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(StringDictionary::GetProbeOffset(i) < - 1 << (32 - String::kHashFieldOffset)); + ASSERT(NameDictionary::GetProbeOffset(i) < + 1 << (32 - Name::kHashFieldOffset)); __ Addu(scratch2, scratch2, Operand( - StringDictionary::GetProbeOffset(i) << String::kHashShift)); + NameDictionary::GetProbeOffset(i) << Name::kHashShift)); } - __ srl(scratch2, scratch2, String::kHashShift); + __ srl(scratch2, scratch2, Name::kHashShift); __ And(scratch2, scratch1, scratch2); // Scale the index by multiplying by the element size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); // scratch2 = scratch2 * 3. __ sll(at, scratch2, 1); @@ -7310,7 +7655,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ Move(a0, elements); __ Move(a1, name); } - StringDictionaryLookupStub stub(POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(POSITIVE_LOOKUP); __ CallStub(&stub); __ mov(scratch2, a2); __ mov(at, v0); @@ -7321,15 +7666,15 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, } -void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { +void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // This stub overrides SometimesSetsUpAFrame() to return false. That means // we cannot call anything that could cause a GC from this stub. // Registers: - // result: StringDictionary to probe + // result: NameDictionary to probe // a1: key - // : StringDictionary to probe. - // index_: will hold an index of entry if lookup is successful. - // might alias with result_. + // dictionary: NameDictionary to probe. + // index: will hold an index of entry if lookup is successful. + // might alias with result_. // Returns: // result_ is zero if lookup failed, non zero otherwise. @@ -7348,7 +7693,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { __ sra(mask, mask, kSmiTagSize); __ Subu(mask, mask, Operand(1)); - __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset)); + __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset)); __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); @@ -7359,18 +7704,18 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // Add the probe offset (i + i * i) left shifted to avoid right shifting // the hash in a separate instruction. 
The value hash + i + i * i is right // shifted in the following and instruction. - ASSERT(StringDictionary::GetProbeOffset(i) < - 1 << (32 - String::kHashFieldOffset)); + ASSERT(NameDictionary::GetProbeOffset(i) < + 1 << (32 - Name::kHashFieldOffset)); __ Addu(index, hash, Operand( - StringDictionary::GetProbeOffset(i) << String::kHashShift)); + NameDictionary::GetProbeOffset(i) << Name::kHashShift)); } else { __ mov(index, hash); } - __ srl(index, index, String::kHashShift); + __ srl(index, index, Name::kHashShift); __ And(index, mask, index); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); // index *= 3. __ mov(at, index); __ sll(index, index, 1); @@ -7389,12 +7734,15 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { __ Branch(&in_dictionary, eq, entry_key, Operand(key)); if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { - // Check if the entry name is not a symbol. + // Check if the entry name is not a unique name. + Label cont; __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); __ lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); - __ And(result, entry_key, Operand(kIsSymbolMask)); - __ Branch(&maybe_in_dictionary, eq, result, Operand(zero_reg)); + __ And(result, entry_key, Operand(kIsInternalizedMask)); + __ Branch(&cont, ne, result, Operand(zero_reg)); + __ Branch(&maybe_in_dictionary, ne, entry_key, Operand(SYMBOL_TYPE)); + __ bind(&cont); } } @@ -7427,7 +7775,6 @@ struct AheadOfTimeWriteBarrierStubList { static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = { // Used in RegExpExecStub. { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET }, - { REG(s2), REG(a2), REG(t3), EMIT_REMEMBERED_SET }, // Used in CompileArrayPushCall. // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore. // Also used in KeyedStoreIC::GenerateGeneric. @@ -7483,13 +7830,14 @@ bool StoreBufferOverflowStub::IsPregenerated() { } -void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode()->set_is_pregenerated(true); + stub1.GetCode(isolate)->set_is_pregenerated(true); } -void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; !entry->object.is(no_reg); entry++) { @@ -7498,7 +7846,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { entry->address, entry->action, kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); + stub.GetCode(isolate)->set_is_pregenerated(true); } } @@ -7600,12 +7948,7 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { ASSERT(!address.is(a0)); __ Move(address, regs_.address()); __ Move(a0, regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - __ Move(a1, address); - } else { - ASSERT(mode == INCREMENTAL); - __ lw(a1, MemOperand(address, 0)); - } + __ Move(a1, address); __ li(a2, Operand(ExternalReference::isolate_address())); AllowExternalCallThatCantCauseGC scope(masm); @@ -7767,7 +8110,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS. 
__ bind(&double_elements); __ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset)); - __ StoreNumberToDoubleElements(a0, a3, a1, + __ StoreNumberToDoubleElements(a0, a3, // Overwrites all regs after this. t1, t2, t3, t5, a2, &slow_elements); @@ -7776,6 +8119,21 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { } +void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { + ASSERT(!Serializer::enabled()); + bool save_fp_regs = CpuFeatures::IsSupported(FPU); + CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs); + __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + int parameter_count_offset = + StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; + __ lw(a1, MemOperand(fp, parameter_count_offset)); + masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); + __ sll(a1, a1, kPointerSizeLog2); + __ Addu(sp, sp, a1); + __ Ret(); +} + + void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (entry_hook_ != NULL) { ProfileEntryHookStub stub; diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h index e0954d837e..225accc518 100644 --- a/deps/v8/src/mips/code-stubs-mips.h +++ b/deps/v8/src/mips/code-stubs-mips.h @@ -37,7 +37,7 @@ namespace internal { // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. -class TranscendentalCacheStub: public CodeStub { +class TranscendentalCacheStub: public PlatformCodeStub { public: enum ArgumentType { TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits, @@ -59,7 +59,7 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { +class StoreBufferOverflowStub: public PlatformCodeStub { public: explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) : save_doubles_(save_fp) { } @@ -67,7 +67,7 @@ class StoreBufferOverflowStub: public CodeStub { void Generate(MacroAssembler* masm); virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } private: @@ -78,7 +78,7 @@ class StoreBufferOverflowStub: public CodeStub { }; -class UnaryOpStub: public CodeStub { +class UnaryOpStub: public PlatformCodeStub { public: UnaryOpStub(Token::Value op, UnaryOverwriteMode mode, @@ -120,9 +120,9 @@ class UnaryOpStub: public CodeStub { void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow); void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateHeapNumberStubSub(MacroAssembler* masm); - void GenerateHeapNumberStubBitNot(MacroAssembler* masm); + void GenerateNumberStub(MacroAssembler* masm); + void GenerateNumberStubSub(MacroAssembler* masm); + void GenerateNumberStubBitNot(MacroAssembler* masm); void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); @@ -143,108 +143,6 @@ class UnaryOpStub: public CodeStub { }; -class BinaryOpStub: public CodeStub { - public: - BinaryOpStub(Token::Value op, OverwriteMode mode) - : op_(op), - mode_(mode), - operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED) { - use_fpu_ = CpuFeatures::IsSupported(FPU); - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - BinaryOpStub( - int key, - BinaryOpIC::TypeInfo operands_type, - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) - : 
op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - use_fpu_(FPUBits::decode(key)), - operands_type_(operands_type), - result_type_(result_type) { } - - private: - enum SmiCodeGenerateHeapNumberResults { - ALLOW_HEAPNUMBER_RESULTS, - NO_HEAPNUMBER_RESULTS - }; - - Token::Value op_; - OverwriteMode mode_; - bool use_fpu_; - - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo operands_type_; - BinaryOpIC::TypeInfo result_type_; - - virtual void PrintName(StringStream* stream); - - // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM. - class ModeBits: public BitField<OverwriteMode, 0, 2> {}; - class OpBits: public BitField<Token::Value, 2, 7> {}; - class FPUBits: public BitField<bool, 9, 1> {}; - class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {}; - class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {}; - - Major MajorKey() { return BinaryOp; } - int MinorKey() { - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | FPUBits::encode(use_fpu_) - | OperandTypeInfoBits::encode(operands_type_) - | ResultTypeInfoBits::encode(result_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateGeneric(MacroAssembler* masm); - void GenerateSmiSmiOperation(MacroAssembler* masm); - void GenerateFPOperation(MacroAssembler* masm, - bool smi_operands, - Label* not_numbers, - Label* gc_required); - void GenerateSmiCode(MacroAssembler* masm, - Label* use_runtime, - Label* gc_required, - SmiCodeGenerateHeapNumberResults heapnumber_results); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateUninitializedStub(MacroAssembler* masm); - void GenerateSmiStub(MacroAssembler* masm); - void GenerateInt32Stub(MacroAssembler* masm); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateOddballStub(MacroAssembler* masm); - void GenerateStringStub(MacroAssembler* masm); - void GenerateBothStringStub(MacroAssembler* masm); - void GenerateGenericStub(MacroAssembler* masm); - void GenerateAddStrings(MacroAssembler* masm); - void GenerateCallRuntime(MacroAssembler* masm); - - void GenerateHeapResultAllocation(MacroAssembler* masm, - Register result, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* gc_required); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(operands_type_); - } - - virtual void FinishCode(Handle<Code> code) { - code->set_binary_op_type(operands_type_); - code->set_binary_op_result_type(result_type_); - } - - friend class CodeGenerator; -}; - - class StringHelper : public AllStatic { public: // Generate code for copying characters using a simple loop. This should only @@ -275,14 +173,14 @@ class StringHelper : public AllStatic { int flags); - // Probe the symbol table for a two character string. If the string is + // Probe the string table for a two character string. If the string is // not found by probing a jump to the label not_found is performed. This jump - // does not guarantee that the string is not in the symbol table. If the + // does not guarantee that the string is not in the string table. If the // string is found the code falls through with the string in register r0. // Contents of both c1 and c2 registers are modified. 
At the exit c1 is // guaranteed to contain halfword with low and high bytes equal to // initial contents of c1 and c2 respectively. - static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -322,7 +220,7 @@ enum StringAddFlags { }; -class StringAddStub: public CodeStub { +class StringAddStub: public PlatformCodeStub { public: explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} @@ -345,7 +243,7 @@ class StringAddStub: public CodeStub { }; -class SubStringStub: public CodeStub { +class SubStringStub: public PlatformCodeStub { public: SubStringStub() {} @@ -357,7 +255,7 @@ class SubStringStub: public CodeStub { }; -class StringCompareStub: public CodeStub { +class StringCompareStub: public PlatformCodeStub { public: StringCompareStub() { } @@ -398,7 +296,7 @@ class StringCompareStub: public CodeStub { // This stub can convert a signed int32 to a heap number (double). It does // not work for int32s that are in Smi range! No GC occurs during this stub // so you don't have to set up the frame. -class WriteInt32ToHeapNumberStub : public CodeStub { +class WriteInt32ToHeapNumberStub : public PlatformCodeStub { public: WriteInt32ToHeapNumberStub(Register the_int, Register the_heap_number, @@ -415,7 +313,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub { } bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); private: Register the_int_; @@ -442,7 +340,7 @@ class WriteInt32ToHeapNumberStub : public CodeStub { }; -class NumberToStringStub: public CodeStub { +class NumberToStringStub: public PlatformCodeStub { public: NumberToStringStub() { } @@ -468,7 +366,7 @@ class NumberToStringStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { +class RecordWriteStub: public PlatformCodeStub { public: RecordWriteStub(Register object, Register value, @@ -492,7 +390,7 @@ class RecordWriteStub: public CodeStub { }; virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } static void PatchBranchIntoNop(MacroAssembler* masm, int pos) { @@ -586,7 +484,7 @@ class RecordWriteStub: public CodeStub { void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) { masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); masm->MultiPushFPU(kCallerSavedFPU); } } @@ -594,7 +492,7 @@ class RecordWriteStub: public CodeStub { inline void RestoreCallerSaveRegisters(MacroAssembler*masm, SaveFPRegsMode mode) { if (mode == kSaveFPRegs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); masm->MultiPopFPU(kCallerSavedFPU); } masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit()); @@ -614,7 +512,7 @@ class RecordWriteStub: public CodeStub { Register GetRegThatIsNotOneOf(Register r1, Register r2, Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { Register candidate = Register::FromAllocationIndex(i); if (candidate.is(r1)) continue; if (candidate.is(r2)) continue; @@ -673,7 +571,7 @@ class RecordWriteStub: public CodeStub { // Enter C code from generated RegExp code in a way that allows // the C code to fix the 
return address in case of a GC. // Currently only needed on ARM and MIPS. -class RegExpCEntryStub: public CodeStub { +class RegExpCEntryStub: public PlatformCodeStub { public: RegExpCEntryStub() {} virtual ~RegExpCEntryStub() {} @@ -691,7 +589,7 @@ class RegExpCEntryStub: public CodeStub { // keep the code which called into native pinned in the memory. Currently the // simplest approach is to generate such stub early enough so it can never be // moved by GC -class DirectCEntryStub: public CodeStub { +class DirectCEntryStub: public PlatformCodeStub { public: DirectCEntryStub() {} void Generate(MacroAssembler* masm); @@ -724,20 +622,6 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2); - // Loads objects from a0 and a1 (right and left in binary operations) into - // floating point registers. Depending on the destination the values ends up - // either f14 and f12 or in a2/a3 and a0/a1 respectively. If the destination - // is floating point registers FPU must be supported. If core registers are - // requested when FPU is supported f12 and f14 will still be scratched. If - // either a0 or a1 is not a number (not smi and not heap number object) the - // not_number label is jumped to with a0 and a1 intact. - static void LoadOperands(MacroAssembler* masm, - FloatingPointHelper::Destination destination, - Register heap_number_map, - Register scratch1, - Register scratch2, - Label* not_number); - // Convert the smi or heap number in object to an int32 using the rules // for ToInt32 as described in ECMAScript 9.5.: the value is truncated // and brought into the range -2^31 .. +2^31 - 1. @@ -773,6 +657,7 @@ class FloatingPointHelper : public AllStatic { Register object, Destination destination, FPURegister double_dst, + FPURegister double_scratch, Register dst1, Register dst2, Register heap_number_map, @@ -794,7 +679,8 @@ class FloatingPointHelper : public AllStatic { Register scratch1, Register scratch2, Register scratch3, - FPURegister double_scratch, + FPURegister double_scratch0, + FPURegister double_scratch1, Label* not_int32); // Generate non FPU code to check if a double can be exactly represented by a @@ -834,7 +720,12 @@ class FloatingPointHelper : public AllStatic { Register heap_number_result, Register scratch); - private: + // Loads the objects from |object| into floating point registers. + // Depending on |destination| the value ends up either in |dst| or + // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU + // must be supported. If kCoreRegisters are requested and FPU is + // supported, |dst| will be scratched. If |object| is neither smi nor + // heap number, |not_number| is jumped to with |object| still intact. 
static void LoadNumber(MacroAssembler* masm, FloatingPointHelper::Destination destination, Register object, @@ -848,11 +739,11 @@ class FloatingPointHelper : public AllStatic { }; -class StringDictionaryLookupStub: public CodeStub { +class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { } + explicit NameDictionaryLookupStub(LookupMode mode) : mode_(mode) { } void Generate(MacroAssembler* masm); @@ -861,7 +752,7 @@ class StringDictionaryLookupStub: public CodeStub { Label* done, Register receiver, Register properties, - Handle<String> name, + Handle<Name> name, Register scratch0); static void GeneratePositiveLookup(MacroAssembler* masm, @@ -879,14 +770,14 @@ class StringDictionaryLookupStub: public CodeStub { static const int kTotalProbes = 20; static const int kCapacityOffset = - StringDictionary::kHeaderSize + - StringDictionary::kCapacityIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kCapacityIndex * kPointerSize; static const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryLookup; } + Major MajorKey() { return NameDictionaryLookup; } int MinorKey() { return LookupModeBits::encode(mode_); diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc index 44e0359e44..f5cb5e4892 100644 --- a/deps/v8/src/mips/codegen-mips.cc +++ b/deps/v8/src/mips/codegen-mips.cc @@ -31,11 +31,11 @@ #include "codegen.h" #include "macro-assembler.h" +#include "simulator-mips.h" namespace v8 { namespace internal { -#define __ ACCESS_MASM(masm) UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { switch (type) { @@ -49,6 +49,75 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { } +#define __ masm. + + +#if defined(USE_SIMULATOR) +byte* fast_exp_mips_machine_code = NULL; +double fast_exp_simulator(double x) { + return Simulator::current(Isolate::Current())->CallFP( + fast_exp_mips_machine_code, x, 0); +} +#endif + + +UnaryMathFunction CreateExpFunction() { + if (!CpuFeatures::IsSupported(FPU)) return &exp; + if (!FLAG_fast_math) return &exp; + size_t actual_size; + byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &exp; + ExternalReference::InitializeMathExpData(); + + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + + { + CpuFeatureScope use_fpu(&masm, FPU); + DoubleRegister input = f12; + DoubleRegister result = f0; + DoubleRegister double_scratch1 = f4; + DoubleRegister double_scratch2 = f6; + Register temp1 = t0; + Register temp2 = t1; + Register temp3 = t2; + + if (!IsMipsSoftFloatABI) { + // Input value is in f12 anyway, nothing to do. + } else { + __ Move(input, a0, a1); + } + __ Push(temp3, temp2, temp1); + MathExpGenerator::EmitMathExp( + &masm, input, result, double_scratch1, double_scratch2, + temp1, temp2, temp3); + __ Pop(temp3, temp2, temp1); + if (!IsMipsSoftFloatABI) { + // Result is already in f0, nothing to do. 
+ } else { + __ Move(a0, a1, result); + } + __ Ret(); + } + + CodeDesc desc; + masm.GetCode(&desc); + ASSERT(!RelocInfo::RequiresRelocation(desc)); + + CPU::FlushICache(buffer, actual_size); + OS::ProtectCode(buffer, actual_size); + +#if !defined(USE_SIMULATOR) + return FUNCTION_CAST<UnaryMathFunction>(buffer); +#else + fast_exp_mips_machine_code = buffer; + return &fast_exp_simulator; +#endif +} + + +#undef __ + + UnaryMathFunction CreateSqrtFunction() { return &sqrt; } @@ -72,8 +141,11 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { // ------------------------------------------------------------------------- // Code generators +#define __ ACCESS_MASM(masm) + void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm) { + MacroAssembler* masm, AllocationSiteMode mode, + Label* allocation_site_info_found) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -82,6 +154,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // -- a3 : target map, scratch for subsequent call // -- t0 : scratch (elements) // ----------------------------------- + if (mode == TRACK_ALLOCATION_SITE) { + ASSERT(allocation_site_info_found != NULL); + masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, + allocation_site_info_found); + } + // Set transitioned map. __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); __ RecordWriteField(a2, @@ -96,7 +174,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -110,6 +188,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( Register scratch = t6; + if (mode == TRACK_ALLOCATION_SITE) { + masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); @@ -176,7 +258,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( HeapObject::kMapOffset, a3, t5, - kRAHasBeenSaved, + kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK); @@ -196,7 +278,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // Normal smi, convert to double and store. if (fpu_supported) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(t5, f0); __ cvt_d_w(f0, f0); __ sdc1(f0, MemOperand(t3)); @@ -239,7 +321,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -250,6 +332,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // ----------------------------------- Label entry, loop, convert_hole, gc_required, only_change_map; + if (mode == TRACK_ALLOCATION_SITE) { + masm->TestJSArrayForAllocationSiteInfo(a2, t0, eq, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); @@ -389,7 +475,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, // the string. 
__ bind(&cons_string); __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset)); - __ LoadRoot(at, Heap::kEmptyStringRootIndex); + __ LoadRoot(at, Heap::kempty_stringRootIndex); __ Branch(call_runtime, ne, result, Operand(at)); // Get the first of the two strings and load its instance type. __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset)); @@ -408,7 +494,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ Branch(&external_string, ne, at, Operand(zero_reg)); // Prepare sequential strings - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ Addu(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag); @@ -446,6 +532,196 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ bind(&done); } + +void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, + String::Encoding encoding, + Register string, + Register index, + Register value) { + if (FLAG_debug_code) { + __ And(at, index, Operand(kSmiTagMask)); + __ Check(eq, "Non-smi index", at, Operand(zero_reg)); + __ And(at, value, Operand(kSmiTagMask)); + __ Check(eq, "Non-smi value", at, Operand(zero_reg)); + + __ lw(at, FieldMemOperand(string, String::kLengthOffset)); + __ Check(lt, "Index is too large", index, Operand(at)); + + __ Check(ge, "Index is negative", index, Operand(zero_reg)); + + __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset)); + __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); + + __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type)); + __ Check(eq, "Unexpected string type", at, Operand(zero_reg)); + } + + __ Addu(at, + string, + Operand(SeqString::kHeaderSize - kHeapObjectTag)); + __ SmiUntag(value); + STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); + if (encoding == String::ONE_BYTE_ENCODING) { + __ SmiUntag(index); + __ Addu(at, at, index); + __ sb(value, MemOperand(at)); + } else { + // No need to untag a smi for two-byte addressing. 
+ __ Addu(at, at, index); + __ sh(value, MemOperand(at)); + } +} + + +static MemOperand ExpConstant(int index, Register base) { + return MemOperand(base, index * kDoubleSize); +} + + +void MathExpGenerator::EmitMathExp(MacroAssembler* masm, + DoubleRegister input, + DoubleRegister result, + DoubleRegister double_scratch1, + DoubleRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3) { + ASSERT(!input.is(result)); + ASSERT(!input.is(double_scratch1)); + ASSERT(!input.is(double_scratch2)); + ASSERT(!result.is(double_scratch1)); + ASSERT(!result.is(double_scratch2)); + ASSERT(!double_scratch1.is(double_scratch2)); + ASSERT(!temp1.is(temp2)); + ASSERT(!temp1.is(temp3)); + ASSERT(!temp2.is(temp3)); + ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + + Label done; + + __ li(temp3, Operand(ExternalReference::math_exp_constants(0))); + + __ ldc1(double_scratch1, ExpConstant(0, temp3)); + __ Move(result, kDoubleRegZero); + __ BranchF(&done, NULL, ge, double_scratch1, input); + __ ldc1(double_scratch2, ExpConstant(1, temp3)); + __ ldc1(result, ExpConstant(2, temp3)); + __ BranchF(&done, NULL, ge, input, double_scratch2); + __ ldc1(double_scratch1, ExpConstant(3, temp3)); + __ ldc1(result, ExpConstant(4, temp3)); + __ mul_d(double_scratch1, double_scratch1, input); + __ add_d(double_scratch1, double_scratch1, result); + __ Move(temp2, temp1, double_scratch1); + __ sub_d(double_scratch1, double_scratch1, result); + __ ldc1(result, ExpConstant(6, temp3)); + __ ldc1(double_scratch2, ExpConstant(5, temp3)); + __ mul_d(double_scratch1, double_scratch1, double_scratch2); + __ sub_d(double_scratch1, double_scratch1, input); + __ sub_d(result, result, double_scratch1); + __ mul_d(input, double_scratch1, double_scratch1); + __ mul_d(result, result, input); + __ srl(temp1, temp2, 11); + __ ldc1(double_scratch2, ExpConstant(7, temp3)); + __ mul_d(result, result, double_scratch2); + __ sub_d(result, result, double_scratch1); + __ ldc1(double_scratch2, ExpConstant(8, temp3)); + __ add_d(result, result, double_scratch2); + __ li(at, 0x7ff); + __ And(temp2, temp2, at); + __ Addu(temp1, temp1, Operand(0x3ff)); + __ sll(temp1, temp1, 20); + + // Must not call ExpConstant() after overwriting temp3! 
+ __ li(temp3, Operand(ExternalReference::math_exp_log_table())); + __ sll(at, temp2, 3); + __ addu(at, at, temp3); + __ lw(at, MemOperand(at)); + __ Addu(temp3, temp3, Operand(kPointerSize)); + __ sll(temp2, temp2, 3); + __ addu(temp2, temp2, temp3); + __ lw(temp2, MemOperand(temp2)); + __ Or(temp1, temp1, temp2); + __ Move(input, at, temp1); + __ mul_d(result, result, input); + __ bind(&done); +} + + +// nop(CODE_AGE_MARKER_NOP) +static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180; + +static byte* GetNoCodeAgeSequence(uint32_t* length) { + // The sequence of instructions that is patched out for aging code is the + // following boilerplate stack-building prologue that is found in FUNCTIONS + static bool initialized = false; + static uint32_t sequence[kNoCodeAgeSequenceLength]; + byte* byte_sequence = reinterpret_cast<byte*>(sequence); + *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize; + if (!initialized) { + CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength); + patcher.masm()->Push(ra, fp, cp, a1); + patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex); + patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize)); + initialized = true; + } + return byte_sequence; +} + + +bool Code::IsYoungSequence(byte* sequence) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + bool result = !memcmp(sequence, young_sequence, young_length); + ASSERT(result || + Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction); + return result; +} + + +void Code::GetCodeAgeAndParity(byte* sequence, Age* age, + MarkingParity* parity) { + if (IsYoungSequence(sequence)) { + *age = kNoAge; + *parity = NO_MARKING_PARITY; + } else { + Address target_address = Memory::Address_at( + sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1)); + Code* stub = GetCodeFromTargetAddress(target_address); + GetCodeAgeAndParity(stub, age, parity); + } +} + + +void Code::PatchPlatformCodeAge(byte* sequence, + Code::Age age, + MarkingParity parity) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + if (age == kNoAge) { + memcpy(sequence, young_sequence, young_length); + CPU::FlushICache(sequence, young_length); + } else { + Code* stub = GetCodeAgeStub(age, parity); + CodePatcher patcher(sequence, young_length / Assembler::kInstrSize); + // Mark this code sequence for FindPlatformCodeAgeSequence() + patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP); + // Save the function's original return address + // (it will be clobbered by Call(t9)) + patcher.masm()->mov(at, ra); + // Load the stub address to t9 and call it + patcher.masm()->li(t9, + Operand(reinterpret_cast<uint32_t>(stub->instruction_start()))); + patcher.masm()->Call(t9); + // Record the stub address in the empty space for GetCodeAgeAndParity() + patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start())); + } +} + + #undef __ } } // namespace v8::internal diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h index e704c4f56c..d429443a88 100644 --- a/deps/v8/src/mips/codegen-mips.h +++ b/deps/v8/src/mips/codegen-mips.h @@ -46,6 +46,10 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; class CodeGenerator: public AstVisitor { public: + CodeGenerator() { + InitializeAstVisitor(); + } + static bool MakeCode(CompilationInfo* info); // Printing of AST, etc. as requested by flags. 
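The EmitMathExp sequence above appears to combine a table lookup (the 0x7ff mask and shift-by-11 pair, with a 0x3ff bias shifted into the exponent field) with a polynomial correction. The standalone sketch below is not that table-driven routine; it only illustrates the underlying "split off a power of two, approximate the remainder, then rebuild the exponent bits" idea, uses a short Taylor polynomial in place of the table, and ignores overflow, underflow and NaN handling.

#include <cmath>
#include <cstdint>
#include <cstring>

// Minimal sketch, not V8's implementation: exp(x) = 2^k * exp(r) with
// k = round(x / ln 2) and |r| <= ln(2)/2. The remainder is approximated by a
// short Taylor polynomial and 2^k is assembled by writing the biased exponent
// directly into an IEEE-754 double.
double FastExpSketch(double x) {
  const double kLog2e = 1.4426950408889634;  // 1 / ln(2)
  const double kLn2 = 0.6931471805599453;
  int k = static_cast<int>(std::lround(x * kLog2e));
  double r = x - k * kLn2;
  double poly =
      1.0 + r * (1.0 + r * (0.5 + r * (1.0 / 6.0 + r * (1.0 / 24.0))));
  uint64_t bits = static_cast<uint64_t>(k + 1023) << 52;  // 2^k as raw bits
  double scale;
  std::memcpy(&scale, &bits, sizeof scale);
  return poly * scale;
}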
@@ -70,6 +74,8 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); + private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; @@ -90,6 +96,22 @@ class StringCharLoadGenerator : public AllStatic { DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator); }; + +class MathExpGenerator : public AllStatic { + public: + static void EmitMathExp(MacroAssembler* masm, + DoubleRegister input, + DoubleRegister result, + DoubleRegister double_scratch1, + DoubleRegister double_scratch2, + Register temp1, + Register temp2, + Register temp3); + + private: + DISALLOW_COPY_AND_ASSIGN(MathExpGenerator); +}; + } } // namespace v8::internal #endif // V8_MIPS_CODEGEN_MIPS_H_ diff --git a/deps/v8/src/mips/constants-mips.cc b/deps/v8/src/mips/constants-mips.cc index 7d654f6d62..ddfa891326 100644 --- a/deps/v8/src/mips/constants-mips.cc +++ b/deps/v8/src/mips/constants-mips.cc @@ -302,6 +302,8 @@ Instruction::Type Instruction::InstructionType() const { return kRegisterType; }; break; + case COP1X: + return kRegisterType; // 16 bits Immediate type instructions. e.g.: addi dest, src, imm16. case REGIMM: case BEQ: diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h index 3d585717cb..139e7db033 100644 --- a/deps/v8/src/mips/constants-mips.h +++ b/deps/v8/src/mips/constants-mips.h @@ -99,7 +99,7 @@ const int kInvalidFPURegister = -1; // FPU (coprocessor 1) control registers. Currently only FCSR is implemented. const int kFCSRRegister = 31; const int kInvalidFPUControlRegister = -1; -const uint32_t kFPUInvalidResult = (uint32_t) (1 << 31) - 1; +const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1; // FCSR constants. const uint32_t kFCSRInexactFlagBit = 2; @@ -216,6 +216,8 @@ const int kImm28Bits = 28; // and are therefore shifted by 2. const int kImmFieldShift = 2; +const int kFrBits = 5; +const int kFrShift = 21; const int kFsShift = 11; const int kFsBits = 5; const int kFtShift = 16; @@ -295,7 +297,9 @@ enum Opcode { LDC1 = ((6 << 3) + 5) << kOpcodeShift, SWC1 = ((7 << 3) + 1) << kOpcodeShift, - SDC1 = ((7 << 3) + 5) << kOpcodeShift + SDC1 = ((7 << 3) + 5) << kOpcodeShift, + + COP1X = ((1 << 4) + 3) << kOpcodeShift }; enum SecondaryField { @@ -416,6 +420,8 @@ enum SecondaryField { CVT_S_L = ((4 << 3) + 0), CVT_D_L = ((4 << 3) + 1), // COP1 Encoding of Function Field When rs=PS. + // COP1X Encoding of Function Field. + MADD_D = ((4 << 3) + 1), NULLSF = 0 }; @@ -423,7 +429,9 @@ enum SecondaryField { // ----- Emulated conditions. // On MIPS we use this enum to abstract from conditional branch instructions. -// the 'U' prefix is used to specify unsigned comparisons. +// The 'U' prefix is used to specify unsigned comparisons. +// Opposite conditions must be paired as odd/even numbers +// because 'NegateCondition' function flips LSB to negate condition. enum Condition { // Any value < 0 is considered no_condition. kNoCondition = -1, @@ -444,8 +452,10 @@ enum Condition { greater_equal = 13, less_equal = 14, greater = 15, + ueq = 16, // Unordered or Equal. + nue = 17, // Not (Unordered or Equal). - cc_always = 16, + cc_always = 18, // Aliases. carry = Uless, @@ -677,6 +687,10 @@ class Instruction { return Bits(kFtShift + kFtBits - 1, kFtShift); } + inline int FrValue() const { + return Bits(kFrShift + kFrBits -1, kFrShift); + } + // Float Compare condition code instruction bits.
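The new comment on the Condition enum above states the invariant that drives the ueq/nue additions and the move of cc_always from 16 to 18: opposite conditions must occupy an even/odd pair so that negation is a single low-bit flip. A minimal illustration of that invariant, with made-up values rather than V8's actual enum:

#include <cassert>

// Hypothetical condition codes (not V8's): each even value is immediately
// followed by its opposite, so negation only needs to flip the low bit.
enum Cond { kEq = 0, kNe = 1, kUnorderedOrEq = 16, kNotUnorderedOrEq = 17 };

inline Cond Negate(Cond cc) { return static_cast<Cond>(cc ^ 1); }

int main() {
  assert(Negate(kEq) == kNe);
  assert(Negate(kUnorderedOrEq) == kNotUnorderedOrEq);
  return 0;
}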
inline int FCccValue() const { return Bits(kFCccShift + kFCccBits - 1, kFCccShift); diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc index 9fd815bb42..7158e4f551 100644 --- a/deps/v8/src/mips/deoptimizer-mips.cc +++ b/deps/v8/src/mips/deoptimizer-mips.cc @@ -42,11 +42,14 @@ int Deoptimizer::patch_size() { } -void Deoptimizer::DeoptimizeFunction(JSFunction* function) { - HandleScope scope; +void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList( + JSFunction* function) { + Isolate* isolate = function->GetIsolate(); + HandleScope scope(isolate); AssertNoAllocation no_allocation; - if (!function->IsOptimized()) return; + ASSERT(function->IsOptimized()); + ASSERT(function->FunctionsInFunctionListShareSameCode()); // The optimized code is going to be patched, so we cannot use it // any more. Play safe and reset the whole cache. @@ -70,14 +73,14 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { for (int i = 0; i < deopt_data->DeoptCount(); i++) { if (deopt_data->Pc(i)->value() == -1) continue; Address call_address = code_start_address + deopt_data->Pc(i)->value(); - Address deopt_entry = GetDeoptimizationEntry(i, LAZY); + Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry, - RelocInfo::NONE); + RelocInfo::NONE32); int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0); ASSERT(call_size_in_bytes <= patch_size()); CodePatcher patcher(call_address, call_size_in_words); - patcher.masm()->Call(deopt_entry, RelocInfo::NONE); + patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); ASSERT(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); ASSERT(call_address + patch_size() <= code->instruction_end()); @@ -87,8 +90,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { #endif } - Isolate* isolate = code->GetIsolate(); - // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); DeoptimizerData* data = isolate->deoptimizer_data(); @@ -120,7 +121,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, Code* check_code, Code* replacement_code) { const int kInstrSize = Assembler::kInstrSize; - // This structure comes from FullCodeGenerator::EmitStackCheck. + // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping. // The call of the stack guard check has the following form: // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts) // beq at, zero_reg, ok @@ -170,11 +171,7 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code, // Restore the sltu instruction so beq can be taken again. CodePatcher patcher(pc_after - 6 * kInstrSize, 1); - if (FLAG_count_based_interrupts) { - patcher.masm()->slt(at, a3, zero_reg); - } else { - patcher.masm()->sltu(at, sp, t0); - } + patcher.masm()->slt(at, a3, zero_reg); // Replace the on-stack replacement address in the load-immediate (lui/ori // pair) with the entry address of the normal stack-check code. 
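The stack-check patching described above rewrites the target of a load-immediate (lui/ori) pair. A hypothetical sketch of that mechanism, not taken from V8: on MIPS a 32-bit constant is built from two 16-bit immediates, so redirecting the load means rewriting the immediate field of both instructions and then flushing the instruction cache.

#include <cstdint>

// Hypothetical helper (not V8 code): retarget a lui/ori pair that materializes
// a 32-bit address. Both MIPS instructions keep their immediate in bits 15..0.
void PatchLuiOriTarget(uint32_t* lui_instr, uint32_t* ori_instr,
                       uint32_t new_target) {
  const uint32_t kImmMask = 0x0000FFFFu;
  *lui_instr = (*lui_instr & ~kImmMask) | (new_target >> 16);       // high half
  *ori_instr = (*ori_instr & ~kImmMask) | (new_target & kImmMask);  // low half
  // A real patcher would also flush the instruction cache for both words here.
}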
@@ -209,7 +206,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) { void Deoptimizer::DoComputeOsrOutputFrame() { DeoptimizationInputData* data = DeoptimizationInputData::cast( - optimized_code_->deoptimization_data()); + compiled_code_->deoptimization_data()); unsigned ast_id = data->OsrAstId()->value(); int bailout_id = LookupBailoutId(data, BailoutId(ast_id)); @@ -243,7 +240,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned input_frame_size = input_->GetFrameSize(); ASSERT(fixed_size + height_in_bytes == input_frame_size); - unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize; + unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize; unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value(); unsigned outgoing_size = outgoing_height * kPointerSize; unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size; @@ -335,7 +332,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned pc_offset = data->OsrPcOffset()->value(); uint32_t pc = reinterpret_cast<uint32_t>( - optimized_code_->entry() + pc_offset); + compiled_code_->entry() + pc_offset); output_[0]->SetPc(pc); } Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR); @@ -352,342 +349,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() { } -void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, - int frame_index) { - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); - - // Arguments adaptor can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - uint32_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. 
- output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // A marker value is used in place of the context. - output_offset -= kPointerSize; - intptr_t context = reinterpret_cast<intptr_t>( - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - output_frame->SetFrameSlot(output_offset, context); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n", - top_address + output_offset, output_offset, context); - } - - // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. - output_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - ASSERT(0 == output_offset); - - Builtins* builtins = isolate_->builtins(); - Code* adaptor_trampoline = - builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); - uint32_t pc = reinterpret_cast<uint32_t>( - adaptor_trampoline->instruction_start() + - isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); - output_frame->SetPc(pc); -} - - -void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, - int frame_index) { - Builtins* builtins = isolate_->builtins(); - Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric); - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating construct stub => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = 8 * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::CONSTRUCT); - - // Construct stub can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - uint32_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. 
- output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n", - top_address + output_offset, output_offset, value); - } - - // The output frame reflects a JSConstructStubGeneric frame. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(construct_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. - output_offset -= kPointerSize; - value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - // Constructor function being invoked by the stub. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; constructor function\n", - top_address + output_offset, output_offset, value); - } - - // The newly allocated object was passed as receiver in the artificial - // constructor stub environment created by HEnvironment::CopyForInlining(). - output_offset -= kPointerSize; - value = output_frame->GetFrameSlot(output_frame_size - kPointerSize); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n", - top_address + output_offset, output_offset, value); - } - - ASSERT(0 == output_offset); - - uint32_t pc = reinterpret_cast<uint32_t>( - construct_stub->instruction_start() + - isolate_->heap()->construct_stub_deopt_pc_offset()->value()); - output_frame->SetPc(pc); -} - - -void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, - int frame_index, - bool is_setter_stub_frame) { - JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next())); - // The receiver (and the implicit return value, if any) are expected in - // registers by the LoadIC/StoreIC, so they don't belong to the output stack - // frame. This means that we have to use a height of 0. 
- unsigned height = 0; - unsigned height_in_bytes = height * kPointerSize; - const char* kind = is_setter_stub_frame ? "setter" : "getter"; - if (FLAG_trace_deopt) { - PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes); - } - - // We need 5 stack entries from StackFrame::INTERNAL (ra, fp, cp, frame type, - // code object, see MacroAssembler::EnterFrame). For a setter stub frame we - // need one additional entry for the implicit return value, see - // StoreStubCompiler::CompileStoreViaSetter. - unsigned fixed_frame_entries = 5 + (is_setter_stub_frame ? 1 : 0); - unsigned fixed_frame_size = fixed_frame_entries * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, accessor); - output_frame->SetFrameType(StackFrame::INTERNAL); - - // A frame for an accessor stub can not be the topmost or bottommost one. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous frame's top and - // this frame's size. - uint32_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - unsigned output_offset = output_frame_size; - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's pc\n", - top_address + output_offset, output_offset, value); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; function (%s sentinel)\n", - top_address + output_offset, output_offset, value, kind); - } - - // Get Code object from accessor stub. - output_offset -= kPointerSize; - Builtins::Name name = is_setter_stub_frame ? - Builtins::kStoreIC_Setter_ForDeopt : - Builtins::kLoadIC_Getter_ForDeopt; - Code* accessor_stub = isolate_->builtins()->builtin(name); - value = reinterpret_cast<intptr_t>(accessor_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Skip receiver. 
- Translation::Opcode opcode = - static_cast<Translation::Opcode>(iterator->Next()); - iterator->Skip(Translation::NumberOfOperandsFor(opcode)); - - if (is_setter_stub_frame) { - // The implicit return value was part of the artificial setter stub - // environment. - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - ASSERT(0 == output_offset); - - Smi* offset = is_setter_stub_frame ? - isolate_->heap()->setter_stub_deopt_pc_offset() : - isolate_->heap()->getter_stub_deopt_pc_offset(); - intptr_t pc = reinterpret_cast<intptr_t>( - accessor_stub->instruction_start() + offset->value()); - output_frame->SetPc(pc); -} - - // This code is very similar to ia32/arm code, but relies on register names // (fp, sp) and how the frame is laid out. void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, @@ -705,7 +366,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, } unsigned height = iterator->Next(); unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" translating "); function->PrintName(); PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes); @@ -769,7 +430,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, value = output_[frame_index - 1]->GetPc(); } output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n", top_address + output_offset, output_offset, value); } @@ -792,7 +453,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, if (is_topmost) { output_frame->SetRegister(fp.code(), fp_value); } - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n", fp_value, output_offset, value); } @@ -810,7 +471,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, output_frame->SetFrameSlot(output_offset, value); output_frame->SetContext(value); if (is_topmost) output_frame->SetRegister(cp.code(), value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context\n", top_address + output_offset, output_offset, value); } @@ -823,7 +484,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, // input frame. 
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n", top_address + output_offset, output_offset, value); } @@ -871,7 +532,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { input_->SetDoubleRegister(i, 0.0); } @@ -882,6 +543,29 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } +void Deoptimizer::SetPlatformCompiledStubRegisters( + FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { + ApiFunction function(descriptor->deoptimization_handler_); + ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); + intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); + int params = descriptor->register_param_count_; + if (descriptor->stack_parameter_count_ != NULL) { + params++; + } + output_frame->SetRegister(s0.code(), params); + output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize); + output_frame->SetRegister(s2.code(), handler); +} + + +void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { + for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) { + double double_value = input_->GetDoubleRegister(i); + output_frame->SetDoubleRegister(i, double_value); + } +} + + #define __ masm()-> @@ -892,7 +576,6 @@ void Deoptimizer::EntryGenerator::Generate() { Isolate* isolate = masm()->isolate(); - CpuFeatures::Scope scope(FPU); // Unlike on ARM we don't save all the registers, just the useful ones. // For the rest, there are gaps on the stack, so the offsets remain the same. const int kNumberOfRegisters = Register::kNumRegisters; @@ -901,14 +584,19 @@ void Deoptimizer::EntryGenerator::Generate() { RegList saved_regs = restored_regs | sp.bit() | ra.bit(); const int kDoubleRegsSize = - kDoubleSize * FPURegister::kNumAllocatableRegisters; - - // Save all FPU registers before messing with them. - __ Subu(sp, sp, Operand(kDoubleRegsSize)); - for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) { - FPURegister fpu_reg = FPURegister::FromAllocationIndex(i); - int offset = i * kDoubleSize; - __ sdc1(fpu_reg, MemOperand(sp, offset)); + kDoubleSize * FPURegister::kMaxNumAllocatableRegisters; + + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + // Save all FPU registers before messing with them. + __ Subu(sp, sp, Operand(kDoubleRegsSize)); + for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { + FPURegister fpu_reg = FPURegister::FromAllocationIndex(i); + int offset = i * kDoubleSize; + __ sdc1(fpu_reg, MemOperand(sp, offset)); + } + } else { + __ Subu(sp, sp, Operand(kDoubleRegsSize)); } // Push saved_regs (needed to populate FrameDescription::registers_). 
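In the entry-generator hunk above, the non-FPU path still subtracts kDoubleRegsSize even though no doubles are stored, and the copy loop further down addresses the saved doubles with a fixed src_offset formula, so the saved-register layout must not depend on FPU availability. A small sketch of that offset arithmetic under the values assumed here (32-bit pointers, 32 general-purpose register slots), not V8's own declarations:

#include <cstddef>

// Assumed layout constants for the sketch (not taken from V8's headers).
constexpr std::size_t kPointerSize = 4;   // MIPS32
constexpr std::size_t kDoubleSize = 8;
constexpr int kNumberOfRegisters = 32;    // general-purpose slots pushed last

// Stack offset of saved double register |i| once the general-purpose registers
// sit on top of the reserved double block; valid whether or not the doubles
// were actually written, because the block is always reserved.
constexpr std::size_t SavedDoubleOffset(int i) {
  return static_cast<std::size_t>(i) * kDoubleSize +
         kNumberOfRegisters * kPointerSize;
}

static_assert(SavedDoubleOffset(0) == 128, "first double sits above 32 slots");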
@@ -980,14 +668,17 @@ void Deoptimizer::EntryGenerator::Generate() { } } - // Copy FPU registers to - // double_registers_[DoubleRegister::kNumAllocatableRegisters] int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) { - int dst_offset = i * kDoubleSize + double_regs_offset; - int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; - __ ldc1(f0, MemOperand(sp, src_offset)); - __ sdc1(f0, MemOperand(a1, dst_offset)); + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + // Copy FPU registers to + // double_registers_[DoubleRegister::kNumAllocatableRegisters] + for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) { + int dst_offset = i * kDoubleSize + double_regs_offset; + int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; + __ ldc1(f0, MemOperand(sp, src_offset)); + __ sdc1(f0, MemOperand(a1, dst_offset)); + } } // Remove the bailout id, eventually return address, and the saved registers @@ -1008,11 +699,14 @@ void Deoptimizer::EntryGenerator::Generate() { // frame description. __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset())); Label pop_loop; + Label pop_loop_header; + __ Branch(&pop_loop_header); __ bind(&pop_loop); __ pop(t0); __ sw(t0, MemOperand(a3, 0)); - __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp)); - __ addiu(a3, a3, sizeof(uint32_t)); // In delay slot. + __ addiu(a3, a3, sizeof(uint32_t)); + __ bind(&pop_loop_header); + __ Branch(&pop_loop, ne, a2, Operand(sp)); // Compute the output frame in the deoptimizer. __ push(a0); // Preserve deoptimizer object across call. @@ -1027,27 +721,42 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(a0); // Restore deoptimizer object (class Deoptimizer). // Replace the current (input) frame with the output frames. - Label outer_push_loop, inner_push_loop; - // Outer loop state: a0 = current "FrameDescription** output_", + Label outer_push_loop, inner_push_loop, + outer_loop_header, inner_loop_header; + // Outer loop state: t0 = current "FrameDescription** output_", // a1 = one past the last FrameDescription**. __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset())); - __ lw(a0, MemOperand(a0, Deoptimizer::output_offset())); // a0 is output_. + __ lw(t0, MemOperand(a0, Deoptimizer::output_offset())); // t0 is output_. __ sll(a1, a1, kPointerSizeLog2); // Count to offset. - __ addu(a1, a0, a1); // a1 = one past the last FrameDescription**. + __ addu(a1, t0, a1); // a1 = one past the last FrameDescription**. + __ jmp(&outer_loop_header); __ bind(&outer_push_loop); // Inner loop state: a2 = current FrameDescription*, a3 = loop index. 
- __ lw(a2, MemOperand(a0, 0)); // output_[ix] + __ lw(a2, MemOperand(t0, 0)); // output_[ix] __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); __ bind(&inner_push_loop); __ Subu(a3, a3, Operand(sizeof(uint32_t))); __ Addu(t2, a2, Operand(a3)); __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset())); __ push(t3); + __ bind(&inner_loop_header); __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg)); - __ Addu(a0, a0, Operand(kPointerSize)); - __ Branch(&outer_push_loop, lt, a0, Operand(a1)); + __ Addu(t0, t0, Operand(kPointerSize)); + __ bind(&outer_loop_header); + __ Branch(&outer_push_loop, lt, t0, Operand(a1)); + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + + __ lw(a1, MemOperand(a0, Deoptimizer::input_offset())); + for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) { + const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i); + int src_offset = i * kDoubleSize + double_regs_offset; + __ ldc1(fpu_reg, MemOperand(a1, src_offset)); + } + } // Push state, pc, and continuation from the last output frame. if (type() != OSR) { diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc index 1d40c2c820..0eca71f2b8 100644 --- a/deps/v8/src/mips/disasm-mips.cc +++ b/deps/v8/src/mips/disasm-mips.cc @@ -350,6 +350,10 @@ int Decoder::FormatFPURegister(Instruction* instr, const char* format) { int reg = instr->FdValue(); PrintFPURegister(reg); return 2; + } else if (format[1] == 'r') { // 'fr: fr register. + int reg = instr->FrValue(); + PrintFPURegister(reg); + return 2; } UNREACHABLE(); return -1; @@ -618,6 +622,15 @@ void Decoder::DecodeTypeRegister(Instruction* instr) { UNREACHABLE(); } break; + case COP1X: + switch (instr->FunctionFieldRaw()) { + case MADD_D: + Format(instr, "madd.d 'fd, 'fr, 'fs, 'ft"); + break; + default: + UNREACHABLE(); + }; + break; case SPECIAL: switch (instr->FunctionFieldRaw()) { case JR: diff --git a/deps/v8/src/mips/frames-mips.cc b/deps/v8/src/mips/frames-mips.cc index faaa0e0f48..79505ae9cb 100644 --- a/deps/v8/src/mips/frames-mips.cc +++ b/deps/v8/src/mips/frames-mips.cc @@ -30,8 +30,13 @@ #if defined(V8_TARGET_ARCH_MIPS) +#include "assembler.h" +#include "assembler-mips.h" +#include "assembler-mips-inl.h" #include "frames-inl.h" #include "mips/assembler-mips-inl.h" +#include "macro-assembler.h" +#include "macro-assembler-mips.h" namespace v8 { namespace internal { @@ -42,6 +47,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) { } +Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; } +Register StubFailureTrampolineFrame::context_register() { return cp; } + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_MIPS diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h index 2ed358a913..1568ce66ec 100644 --- a/deps/v8/src/mips/frames-mips.h +++ b/deps/v8/src/mips/frames-mips.h @@ -193,30 +193,6 @@ class ExitFrameConstants : public AllStatic { }; -class StandardFrameConstants : public AllStatic { - public: - // Fixed part of the frame consists of return address, caller fp, - // context and function. 
- static const int kFixedFrameSize = 4 * kPointerSize; - static const int kExpressionsOffset = -3 * kPointerSize; - static const int kMarkerOffset = -2 * kPointerSize; - static const int kContextOffset = -1 * kPointerSize; - static const int kCallerFPOffset = 0 * kPointerSize; - static const int kCallerPCOffset = +1 * kPointerSize; - static const int kCallerSPOffset = +2 * kPointerSize; - - // Size of the MIPS 4 32-bit argument slots. - // This is just an alias with a shorter name. Use it from now on. - static const int kRArgsSlotsSize = 4 * kPointerSize; - static const int kRegularArgsSlotsSize = kRArgsSlotsSize; - - // JS argument slots size. - static const int kJSArgsSlotsSize = 0 * kPointerSize; - // Assembly builtins argument slots size. - static const int kBArgsSlotsSize = 0 * kPointerSize; -}; - - class JavaScriptFrameConstants : public AllStatic { public: // FP-relative. @@ -232,14 +208,30 @@ class JavaScriptFrameConstants : public AllStatic { class ArgumentsAdaptorFrameConstants : public AllStatic { public: + // FP-relative. static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + static const int kFrameSize = StandardFrameConstants::kFixedFrameSize + kPointerSize; }; +class ConstructFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kImplicitReceiverOffset = -6 * kPointerSize; + static const int kConstructorOffset = -5 * kPointerSize; + static const int kLengthOffset = -4 * kPointerSize; + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize; +}; + + class InternalFrameConstants : public AllStatic { public: + // FP-relative. static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; }; diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc index 3e89fb43b4..bacec10f07 100644 --- a/deps/v8/src/mips/full-codegen-mips.cc +++ b/deps/v8/src/mips/full-codegen-mips.cc @@ -139,7 +139,7 @@ void FullCodeGenerator::Generate() { handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell( - Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget))); + Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); Comment cmnt(masm_, "[ function compiled by full code generator"); @@ -147,7 +147,7 @@ void FullCodeGenerator::Generate() { #ifdef DEBUG if (strlen(FLAG_stop_at) > 0 && - info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { + info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { __ stop("stop-at"); } #endif @@ -172,12 +172,13 @@ void FullCodeGenerator::Generate() { int locals_count = info->scope()->num_stack_slots(); + info->set_prologue_offset(masm_->pc_offset()); + // The following three instructions must remain together and unmodified for + // code aging to work properly. __ Push(ra, fp, cp, a1); - if (locals_count > 0) { - // Load undefined value here, so the value is ready for the loop - // below. - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - } + // Load undefined value here, so the value is ready for the loop + // below. + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); // Adjust fp to point to caller's fp. 
__ Addu(fp, sp, Operand(2 * kPointerSize)); @@ -345,45 +346,34 @@ void FullCodeGenerator::EmitProfilingCounterReset() { } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, - Label* back_edge_target) { +void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target) { // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need // to make sure it is constant. Branch may emit a skip-or-jump sequence // instead of the normal Branch. It seems that the "skip" part of that // sequence is about as long as this Branch would be so it is safe to ignore // that. Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); - Comment cmnt(masm_, "[ Stack check"); + Comment cmnt(masm_, "[ Back edge bookkeeping"); Label ok; - if (FLAG_count_based_interrupts) { - int weight = 1; - if (FLAG_weighted_back_edges) { - ASSERT(back_edge_target->is_bound()); - int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); - weight = Min(kMaxBackEdgeWeight, - Max(1, distance / kBackEdgeDistanceUnit)); - } - EmitProfilingCounterDecrement(weight); - __ slt(at, a3, zero_reg); - __ beq(at, zero_reg, &ok); - // CallStub will emit a li t9 first, so it is safe to use the delay slot. - InterruptStub stub; - __ CallStub(&stub); - } else { - __ LoadRoot(t0, Heap::kStackLimitRootIndex); - __ sltu(at, sp, t0); - __ beq(at, zero_reg, &ok); - // CallStub will emit a li t9 first, so it is safe to use the delay slot. - StackCheckStub stub; - __ CallStub(&stub); + int weight = 1; + if (FLAG_weighted_back_edges) { + ASSERT(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kBackEdgeDistanceUnit)); } + EmitProfilingCounterDecrement(weight); + __ slt(at, a3, zero_reg); + __ beq(at, zero_reg, &ok); + // CallStub will emit a li t9 first, so it is safe to use the delay slot. + InterruptStub stub; + __ CallStub(&stub); // Record a mapping of this PC offset to the OSR id. This is used to find // the AST id from the unoptimized code in order to use it as a key into // the deoptimization input data found in the optimized code. 
- RecordStackCheck(stmt->OsrEntryId()); - if (FLAG_count_based_interrupts) { - EmitProfilingCounterReset(); - } + RecordBackEdge(stmt->OsrEntryId()); + EmitProfilingCounterReset(); __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); @@ -685,7 +675,7 @@ void FullCodeGenerator::DoTest(Expression* condition, Label* fall_through) { if (CpuFeatures::IsSupported(FPU)) { ToBooleanStub stub(result_register()); - __ CallStub(&stub); + __ CallStub(&stub, condition->test_id()); __ mov(at, zero_reg); } else { // Call the runtime to find the boolean value of the source and then @@ -929,34 +919,33 @@ void FullCodeGenerator::VisitFunctionDeclaration( void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { - VariableProxy* proxy = declaration->proxy(); - Variable* variable = proxy->var(); - Handle<JSModule> instance = declaration->module()->interface()->Instance(); - ASSERT(!instance.is_null()); + Variable* variable = declaration->proxy()->var(); + ASSERT(variable->location() == Variable::CONTEXT); + ASSERT(variable->interface()->IsFrozen()); - switch (variable->location()) { - case Variable::UNALLOCATED: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - globals_->Add(variable->name(), zone()); - globals_->Add(instance, zone()); - Visit(declaration->module()); - break; - } + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); - case Variable::CONTEXT: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - EmitDebugCheckDeclarationContext(variable); - __ li(a1, Operand(instance)); - __ sw(a1, ContextOperand(cp, variable->index())); - Visit(declaration->module()); - break; - } + // Load instance object. + __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope())); + __ lw(a1, ContextOperand(a1, variable->interface()->Index())); + __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX)); - case Variable::PARAMETER: - case Variable::LOCAL: - case Variable::LOOKUP: - UNREACHABLE(); - } + // Assign it. + __ sw(a1, ContextOperand(cp, variable->index())); + // We know that we have written a module, which is not a smi. + __ RecordWriteContextSlot(cp, + Context::SlotOffset(variable->index()), + a1, + a3, + kRAHasBeenSaved, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); + + // Traverse into body. + Visit(declaration->module()); } @@ -999,6 +988,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { } +void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { + // Call the runtime to declare the modules. + __ Push(descriptions); + __ CallRuntime(Runtime::kDeclareModules, 1); + // Return value is ignored. +} + + void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Comment cmnt(masm_, "[ SwitchStatement"); Breakable nested_statement(this, stmt); @@ -1049,7 +1046,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Record position before stub call for type feedback. 
SetSourcePosition(clause->position()); - Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); @@ -1174,7 +1171,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell( Handle<Object>( - Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker))); + Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker), + isolate())); RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell); __ LoadHeapObject(a1, cell); __ li(a2, Operand(Smi::FromInt(TypeFeedbackCells::kForInSlowCaseMarker))); @@ -1251,7 +1249,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ Addu(a0, a0, Operand(Smi::FromInt(1))); __ push(a0); - EmitStackCheck(stmt, &loop); + EmitBackEdgeBookkeeping(stmt, &loop); __ Branch(&loop); // Remove the pointers stored on the stack. @@ -1399,9 +1397,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ lw(v0, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == CONST || - local->mode() == CONST_HARMONY || - local->mode() == LET) { + if (local->mode() == LET || + local->mode() == CONST || + local->mode() == CONST_HARMONY) { __ LoadRoot(at, Heap::kTheHoleValueRootIndex); __ subu(at, v0, at); // Sub as compare: at == 0 on eq. if (local->mode() == CONST) { @@ -1555,7 +1553,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -1589,7 +1587,7 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) { void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Comment cmnt(masm_, "[ ObjectLiteral"); Handle<FixedArray> constant_properties = expr->constant_properties(); - __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); + __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); __ li(a2, Operand(Smi::FromInt(expr->literal_index()))); __ li(a1, Operand(constant_properties)); @@ -1600,12 +1598,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; __ li(a0, Operand(Smi::FromInt(flags))); - __ Push(a3, a2, a1, a0); int properties_count = constant_properties->length() / 2; if (expr->depth() > 1) { + __ Push(a3, a2, a1, a0); __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (flags != ObjectLiteral::kFastElements || + } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ Push(a3, a2, a1, a0); __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); } else { FastCloneShallowObjectStub stub(properties_count); @@ -1639,7 +1638,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value())); // Fall through. 
case ObjectLiteral::Property::COMPUTED: - if (key->handle()->IsSymbol()) { + if (key->handle()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); __ mov(a0, result_register()); @@ -1655,8 +1654,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { } break; } - // Fall through. - case ObjectLiteral::Property::PROTOTYPE: // Duplicate receiver on stack. __ lw(a0, MemOperand(sp)); __ push(a0); @@ -1670,6 +1667,17 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { __ Drop(3); } break; + case ObjectLiteral::Property::PROTOTYPE: + // Duplicate receiver on stack. + __ lw(a0, MemOperand(sp)); + __ push(a0); + VisitForStackValue(value); + if (property->emit_store()) { + __ CallRuntime(Runtime::kSetPrototype, 2); + } else { + __ Drop(2); + } + break; case ObjectLiteral::Property::GETTER: accessor_table.lookup(key)->second->getter = value; break; @@ -1733,7 +1741,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { if (has_fast_elements && constant_elements_values->map() == isolate()->heap()->fixed_cow_array_map()) { FastCloneShallowArrayStub stub( - FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length); + FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, + DONT_TRACK_ALLOCATION_SITE, + length); __ CallStub(&stub); __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1, a1, a2); @@ -1744,10 +1754,17 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else { ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); - FastCloneShallowArrayStub::Mode mode = has_fast_elements - ? FastCloneShallowArrayStub::CLONE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); + FastCloneShallowArrayStub::Mode mode = + FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; + AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites + ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE; + + if (has_fast_elements) { + mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; + allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; + } + + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); __ CallStub(&stub); } @@ -1958,7 +1975,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ bind(&stub_call); BinaryOpStub stub(op, mode); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done); @@ -2042,7 +2059,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, __ pop(a1); BinaryOpStub stub(op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(v0); @@ -2050,7 +2067,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, void FullCodeGenerator::EmitAssignment(Expression* expr) { - // Invalid left-hand sides are rewritten to have a 'throw + // Invalid left-hand sides are rewritten by the parser to have a 'throw // ReferenceError' on the left-hand side. 
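The VisitArrayLiteral change above threads an AllocationSiteMode into FastCloneShallowArrayStub: allocation sites are tracked only on the generic clone path, and only when --track-allocation-sites is enabled. The selection logic for the non-copy-on-write case can be sketched as follows (the enums are stand-ins for the real V8 types):

    // Stand-ins for the V8 enums involved.
    enum CloneMode { CLONE_ELEMENTS, CLONE_ANY_ELEMENTS };
    enum AllocationSiteMode { DONT_TRACK_ALLOCATION_SITE, TRACK_ALLOCATION_SITE };

    struct StubChoice {
      CloneMode mode;
      AllocationSiteMode site_mode;
    };

    // Mirrors the hunk above: fast-elements literals keep the cheap clone and
    // skip tracking; everything else tracks a site iff the flag is on.
    StubChoice ChooseShallowArrayClone(bool has_fast_elements,
                                       bool track_allocation_sites) {
      StubChoice choice = { CLONE_ANY_ELEMENTS,
                            track_allocation_sites ? TRACK_ALLOCATION_SITE
                                                   : DONT_TRACK_ALLOCATION_SITE };
      if (has_fast_elements) {
        choice.mode = CLONE_ELEMENTS;
        choice.site_mode = DONT_TRACK_ALLOCATION_SITE;
      }
      return choice;
    }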
if (!expr->IsValidLeftHandSide()) { VisitForEffect(expr); @@ -2356,7 +2373,7 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr, CallFunctionFlags flags) { CallFunctionStub stub(arg_count, flags); __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); - __ CallStub(&stub); + __ CallStub(&stub, expr->CallFeedbackId()); RecordJSReturnSite(expr); // Restore context register. __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); @@ -2402,7 +2419,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { VariableProxy* proxy = callee->AsVariableProxy(); Property* property = callee->AsProperty(); - if (proxy != NULL && proxy->var()->is_possibly_eval()) { + if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) { // In a call to eval, we first call %ResolvePossiblyDirectEval to // resolve the function we need to call and the receiver of the // call. Then we call the resolved function using the given @@ -2550,7 +2567,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { __ li(a2, Operand(cell)); CallConstructStub stub(RECORD_CALL_TARGET); - __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(v0); } @@ -2703,7 +2720,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ LoadRoot(t0, Heap::kHashTableMapRootIndex); __ Branch(if_false, eq, a2, Operand(t0)); - // Look for valueOf symbol in the descriptor array, and indicate false if + // Look for valueOf name in the descriptor array, and indicate false if // found. Since we omit an enumeration index check, if it is added via a // transition that shares its descriptor array, this is a false positive. Label entry, loop, done; @@ -2728,10 +2745,10 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ Addu(a2, a2, t1); // Loop through all the keys in the descriptor array. If one of these is the - // symbol valueOf the result is false. - // The use of t2 to store the valueOf symbol asumes that it is not otherwise + // string "valueOf" the result is false. + // The use of t2 to store the valueOf string assumes that it is not otherwise // used in the loop below. - __ LoadRoot(t2, Heap::kvalue_of_symbolRootIndex); + __ li(t2, Operand(FACTORY->value_of_string())); __ jmp(&entry); __ bind(&loop); __ lw(a3, MemOperand(t0, 0)); @@ -2763,6 +2780,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( } +void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(v0, if_false); + __ GetObjectType(v0, a1, a2); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(eq, a2, Operand(SYMBOL_TYPE), if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT(args->length() == 1); @@ -2962,12 +3001,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { // Functions have class 'Function'. 
__ bind(&function); - __ LoadRoot(v0, Heap::kfunction_class_symbolRootIndex); + __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex); __ jmp(&done); // Objects with a non-function constructor have class 'Object'. __ bind(&non_function_constructor); - __ LoadRoot(v0, Heap::kObject_symbolRootIndex); + __ LoadRoot(v0, Heap::kObject_stringRootIndex); __ jmp(&done); // Non-JS objects have class null. @@ -3031,7 +3070,7 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) { __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset)); __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm(), FPU); // 0x41300000 is the top half of 1.0 x 2^20 as a double. __ li(a1, Operand(0x41300000)); // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU. @@ -3149,6 +3188,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } +void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(a2); + __ pop(a1); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::ONE_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2); + context()->Plug(v0); +} + + +void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(a2); + __ pop(a1); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::TWO_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2); + context()->Plug(v0); +} + + void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList<Expression*>* args = expr->arguments(); @@ -3305,7 +3376,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { __ bind(&index_out_of_range); // When the index is out of range, the spec requires us to return // the empty string. 
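The new EmitOneByteSeqStringSetChar/EmitTwoByteSeqStringSetChar helpers added above back the %_OneByteSeqStringSetChar and %_TwoByteSeqStringSetChar intrinsics: they store a character code straight into a sequential string's backing store, leaving bounds and type checks to the caller. An illustrative C++ model of that behaviour (the struct is a stand-in, not the real SeqString layout):

    #include <cstdint>
    #include <vector>

    enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

    // Illustrative model of a sequential string's payload; the real objects
    // are heap-allocated SeqOneByteString / SeqTwoByteString instances.
    struct SeqString {
      Encoding encoding;
      std::vector<uint8_t> one_byte;
      std::vector<uint16_t> two_byte;
    };

    // Store a character code directly into the backing store, with no bounds
    // or type checks (callers guarantee both).
    void SeqStringSetChar(SeqString* str, int index, uint16_t code) {
      if (str->encoding == ONE_BYTE_ENCODING) {
        str->one_byte[index] = static_cast<uint8_t>(code);
      } else {
        str->two_byte[index] = code;
      }
    }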
- __ LoadRoot(result, Heap::kEmptyStringRootIndex); + __ LoadRoot(result, Heap::kempty_stringRootIndex); __ jmp(&done); __ bind(&need_conversion); @@ -3620,7 +3691,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); __ SmiUntag(array_length); __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg)); - __ LoadRoot(v0, Heap::kEmptyStringRootIndex); + __ LoadRoot(v0, Heap::kempty_stringRootIndex); __ Branch(&done); __ bind(&non_trivial_array); @@ -3656,7 +3727,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); - __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset)); + __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3); __ BranchOnOverflow(&bailout, scratch3); __ Branch(&loop, lt, element, Operand(elements_end)); @@ -3683,7 +3754,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Add (separator length times array_length) - separator length to the // string_length to get the length of the result string. array_length is not // smi but the other values are, so the result is a smi. - __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ Subu(string_length, string_length, Operand(scratch1)); __ Mult(array_length, scratch1); // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are @@ -3723,10 +3794,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { array_length = no_reg; __ Addu(result_pos, result, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); // Check the length of the separator. - __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset)); + __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); __ li(at, Operand(Smi::FromInt(1))); __ Branch(&one_char_separator, eq, scratch1, Operand(at)); __ Branch(&long_separator, gt, scratch1, Operand(at)); @@ -3743,7 +3814,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ Addu(element, element, kPointerSize); __ lw(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag); + __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); __ CopyBytes(string, result_pos, string_length, scratch1); // End while (element < elements_end). __ Branch(&empty_separator_loop, lt, element, Operand(elements_end)); @@ -3753,7 +3824,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // One-character separator case. __ bind(&one_char_separator); // Replace separator with its ASCII character value. - __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize)); + __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); // Jump into the loop after the code that copies the separator, so the first // element is not preceded by a separator. 
__ jmp(&one_char_separator_loop_entry); @@ -3775,7 +3846,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ Addu(element, element, kPointerSize); __ lw(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag); + __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); __ CopyBytes(string, result_pos, string_length, scratch1); // End while (element < elements_end). __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end)); @@ -3796,7 +3867,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiUntag(string_length); __ Addu(string, separator, - Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); + Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); __ CopyBytes(string, result_pos, string_length, scratch1); __ bind(&long_separator); @@ -3804,7 +3875,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ Addu(element, element, kPointerSize); __ lw(string_length, FieldMemOperand(string, String::kLengthOffset)); __ SmiUntag(string_length); - __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag); + __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); __ CopyBytes(string, result_pos, string_length, scratch1); // End while (element < elements_end). __ Branch(&long_separator_loop, lt, element, Operand(elements_end)); @@ -4004,7 +4075,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, VisitForAccumulatorValue(expr->expression()); SetSourcePosition(expr->position()); __ mov(a0, result_register()); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->UnaryOperationFeedbackId()); context()->Plug(v0); } @@ -4100,9 +4171,8 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { JumpPatchSite patch_site(masm_); int count_value = expr->op() == Token::INC ? 1 : -1; - __ li(a1, Operand(Smi::FromInt(count_value))); - if (ShouldInlineSmiCase(expr->op())) { + __ li(a1, Operand(Smi::FromInt(count_value))); __ AdduAndCheckForOverflow(v0, a0, a1, t0); __ BranchOnOverflow(&stub_call, t0); // Do stub on overflow. @@ -4111,12 +4181,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { patch_site.EmitJumpIfSmi(v0, &done); __ bind(&stub_call); } + __ mov(a1, a0); + __ li(a0, Operand(Smi::FromInt(count_value))); // Record position before stub call. SetSourcePosition(expr->position()); BinaryOpStub stub(Token::ADD, NO_OVERWRITE); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId()); + CallIC(stub.GetCode(isolate()), + RelocInfo::CODE_TARGET, + expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4232,12 +4306,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_symbol())) { + if (check->Equals(isolate()->heap()->number_string())) { __ JumpIfSmi(v0, if_true); __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); Split(eq, v0, Operand(at), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_symbol())) { + } else if (check->Equals(isolate()->heap()->string_string())) { __ JumpIfSmi(v0, if_false); // Check for undetectable objects => false. 
__ GetObjectType(v0, v0, a1); @@ -4246,16 +4320,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_symbol())) { + } else if (check->Equals(isolate()->heap()->boolean_string())) { __ LoadRoot(at, Heap::kTrueValueRootIndex); __ Branch(if_true, eq, v0, Operand(at)); __ LoadRoot(at, Heap::kFalseValueRootIndex); Split(eq, v0, Operand(at), if_true, if_false, fall_through); } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_symbol())) { + check->Equals(isolate()->heap()->null_string())) { __ LoadRoot(at, Heap::kNullValueRootIndex); Split(eq, v0, Operand(at), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_symbol())) { + } else if (check->Equals(isolate()->heap()->undefined_string())) { __ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ Branch(if_true, eq, v0, Operand(at)); __ JumpIfSmi(v0, if_false); @@ -4264,19 +4338,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset)); __ And(a1, a1, Operand(1 << Map::kIsUndetectable)); Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_symbol())) { + } else if (check->Equals(isolate()->heap()->function_string())) { __ JumpIfSmi(v0, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ GetObjectType(v0, v0, a1); __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE)); Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE), if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_symbol())) { + } else if (check->Equals(isolate()->heap()->object_string())) { __ JumpIfSmi(v0, if_false); if (!FLAG_harmony_typeof) { __ LoadRoot(at, Heap::kNullValueRootIndex); __ Branch(if_true, eq, v0, Operand(at)); } + if (FLAG_harmony_symbols) { + __ GetObjectType(v0, v0, a1); + __ Branch(if_true, eq, a1, Operand(SYMBOL_TYPE)); + } // Check for JS objects => true. __ GetObjectType(v0, v0, a1); __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); @@ -4333,29 +4411,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); - Condition cc = eq; - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - cc = eq; - break; - case Token::LT: - cc = lt; - break; - case Token::GT: - cc = gt; - break; - case Token::LTE: - cc = le; - break; - case Token::GTE: - cc = ge; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } + Condition cc = CompareIC::ComputeCondition(op); __ mov(a0, result_register()); __ pop(a1); @@ -4370,7 +4426,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { } // Record position and call the compare IC. 
SetSourcePosition(expr->position()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); patch_site.EmitPatchInfo(); PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc index cf706815e3..e434fdb774 100644 --- a/deps/v8/src/mips/ic-mips.cc +++ b/deps/v8/src/mips/ic-mips.cc @@ -61,12 +61,12 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // Generated code falls through if the receiver is a regular non-global // JS object with slow properties and no interceptors. -static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register elements, - Register scratch0, - Register scratch1, - Label* miss) { +static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, + Register receiver, + Register elements, + Register scratch0, + Register scratch1, + Label* miss) { // Register usage: // receiver: holds the receiver on entry and is unchanged. // elements: holds the property dictionary on fall through. @@ -129,19 +129,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); // If probing finds an entry check that the value is a normal // property. __ bind(&done); // scratch2 == elements + 4 * index. - const int kElementsStartOffset = StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); __ And(at, @@ -182,19 +182,19 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label done; // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss, - &done, - elements, - name, - scratch1, - scratch2); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss, + &done, + elements, + name, + scratch1, + scratch2); // If probing finds an entry in the dictionary check that the value // is a normal property that is not read only. __ bind(&done); // scratch2 == elements + 4 * index. 
- const int kElementsStartOffset = StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kTypeAndReadOnlyMask = (PropertyDetails::TypeField::kMask | @@ -215,53 +215,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -void LoadIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a2 : name - // -- ra : return address - // -- a0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadArrayLength(masm, a0, a3, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) { - // ----------- S t a t e ------------- - // -- a2 : name - // -- lr : return address - // -- a0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadStringLength(masm, a0, a1, a3, &miss, - support_wrappers); - // Cache miss: Jump to runtime. - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a2 : name - // -- lr : return address - // -- a0 : receiver - // -- sp[0] : receiver - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadFunctionPrototype(masm, a0, a1, a3, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - // Checks the receiver for special cases (value type, slow case bits). // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, @@ -352,30 +305,35 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, } -// Checks whether a key is an array index string or a symbol string. -// Falls through if a key is a symbol. -static void GenerateKeyStringCheck(MacroAssembler* masm, - Register key, - Register map, - Register hash, - Label* index_string, - Label* not_symbol) { +// Checks whether a key is an array index string or a unique name. +// Falls through if a key is a unique name. +static void GenerateKeyNameCheck(MacroAssembler* masm, + Register key, + Register map, + Register hash, + Label* index_string, + Label* not_unique) { // The key is not a smi. - // Is it a string? + Label unique; + // Is it a name? __ GetObjectType(key, map, hash); - __ Branch(not_symbol, ge, hash, Operand(FIRST_NONSTRING_TYPE)); + __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE)); + STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); + __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE)); // Is the string an array index, with cached numeric value? - __ lw(hash, FieldMemOperand(key, String::kHashFieldOffset)); - __ And(at, hash, Operand(String::kContainsCachedArrayIndexMask)); + __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset)); + __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask)); __ Branch(index_string, eq, at, Operand(zero_reg)); - // Is the string a symbol? + // Is the string internalized? 
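With the symbol-to-internalized-string renaming in this file, GenerateKeyNameCheck (shown above) accepts any unique name as a fast-path key: a Symbol, an internalized string, or a string whose hash field caches an array index. The predicate the assembly implements can be written out as follows, with illustrative stand-ins for the instance-type and hash-field constants:

    #include <cstdint>

    // Illustrative stand-ins; the real constants come from objects.h.
    static const uint32_t LAST_UNIQUE_NAME_TYPE = 128;    // == FIRST_NONSTRING_TYPE
    static const uint32_t kIsInternalizedMask = 1u << 6;  // instance-type bit
    static const uint32_t kContainsCachedArrayIndexMask = 1u << 31;  // hash-field bits

    enum KeyKind { ARRAY_INDEX, UNIQUE_NAME, NOT_UNIQUE };

    // Mirrors the branch structure above: > LAST_UNIQUE_NAME_TYPE bails out,
    // == is a Symbol, strings with a cached array index take the index path,
    // and the rest must be internalized to stay on the fast path.
    KeyKind ClassifyKey(uint32_t instance_type, uint32_t hash_field) {
      if (instance_type > LAST_UNIQUE_NAME_TYPE) return NOT_UNIQUE;
      if (instance_type == LAST_UNIQUE_NAME_TYPE) return UNIQUE_NAME;
      if ((hash_field & kContainsCachedArrayIndexMask) == 0) return ARRAY_INDEX;
      if ((instance_type & kIsInternalizedMask) == 0) return NOT_UNIQUE;
      return UNIQUE_NAME;
    }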
// map: key map __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kSymbolTag != 0); - __ And(at, hash, Operand(kIsSymbolMask)); - __ Branch(not_symbol, eq, at, Operand(zero_reg)); + STATIC_ASSERT(kInternalizedTag != 0); + __ And(at, hash, Operand(kIsInternalizedMask)); + __ Branch(not_unique, eq, at, Operand(zero_reg)); + + __ bind(&unique); } @@ -473,7 +431,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) { // Get the receiver of the function from the stack into a1. __ lw(a1, MemOperand(sp, argc * kPointerSize)); - GenerateStringDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss); + GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss); // a0: elements // Search the dictionary - put result in register a1. @@ -576,11 +534,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ lw(a1, MemOperand(sp, argc * kPointerSize)); Label do_call, slow_call, slow_load, slow_reload_receiver; - Label check_number_dictionary, check_string, lookup_monomorphic_cache; - Label index_smi, index_string; + Label check_number_dictionary, check_name, lookup_monomorphic_cache; + Label index_smi, index_name; // Check that the key is a smi. - __ JumpIfNotSmi(a2, &check_string); + __ JumpIfNotSmi(a2, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. @@ -627,10 +585,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ mov(a1, v0); __ jmp(&do_call); - __ bind(&check_string); - GenerateKeyStringCheck(masm, a2, a0, a3, &index_string, &slow_call); + __ bind(&check_name); + GenerateKeyNameCheck(masm, a2, a0, a3, &index_name, &slow_call); - // The key is known to be a symbol. + // The key is known to be a unique name. // If the receiver is a regular JS object with slow properties then do // a quick inline probe of the receiver's dictionary. // Otherwise do the monomorphic cache probe. @@ -657,14 +615,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ bind(&slow_call); // This branch is taken if: // - the receiver requires boxing or access check, - // - the key is neither smi nor symbol, + // - the key is neither smi nor a unique name, // - the value loaded is not a function, // - there is hope that the runtime will create a monomorphic call stub, // that will get fetched next time. __ IncrementCounter(counters->keyed_call_generic_slow(), 1, a0, a3); GenerateMiss(masm, argc); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(a3, a2); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); @@ -677,10 +635,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // -- ra : return address // ----------------------------------- - // Check if the name is a string. + // Check if the name is really a name. Label miss; __ JumpIfSmi(a2, &miss); - __ IsObjectJSStringType(a2, a0, &miss); + __ IsObjectNameType(a2, a0, &miss); CallICBase::GenerateNormal(masm, argc); __ bind(&miss); @@ -700,7 +658,9 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // ----------------------------------- // Probe the stub cache. 
- Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC); + Code::Flags flags = Code::ComputeFlags( + Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::NORMAL, Code::LOAD_IC); Isolate::Current()->stub_cache()->GenerateProbe( masm, flags, a0, a2, a3, t0, t1, t2); @@ -718,7 +678,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { // ----------------------------------- Label miss; - GenerateStringDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss); + GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss); // a1: elements GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0); @@ -858,7 +818,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ Ret(USE_DELAY_SLOT); __ mov(v0, a2); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -893,7 +853,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ Ret(USE_DELAY_SLOT); __ mov(v0, a0); // (In delay slot) return the value stored in v0. __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -926,7 +886,7 @@ void KeyedCallIC::GenerateNonStrictArguments(MacroAssembler* masm, Object* KeyedLoadIC_Miss(Arguments args); -void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ---------- S t a t e -------------- // -- ra : return address // -- a0 : key @@ -939,7 +899,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { __ Push(a1, a0); // Perform tail call to the entry. - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), isolate) : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); @@ -966,7 +926,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // -- a0 : key // -- a1 : receiver // ----------------------------------- - Label slow, check_string, index_smi, index_string, property_array_property; + Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; Register key = a0; @@ -975,7 +935,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { Isolate* isolate = masm->isolate(); // Check that the key is a smi. - __ JumpIfNotSmi(key, &check_string); + __ JumpIfNotSmi(key, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. @@ -1014,8 +974,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { a3); GenerateRuntimeGetProperty(masm); - __ bind(&check_string); - GenerateKeyStringCheck(masm, key, a2, a3, &index_string, &slow); + __ bind(&check_name); + GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow); GenerateKeyedLoadReceiverCheck( masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow); @@ -1029,16 +989,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ Branch(&probe_dictionary, eq, t0, Operand(at)); // Load the map of the receiver, compute the keyed lookup cache hash - // based on 32 bits of the map pointer and the string hash. + // based on 32 bits of the map pointer and the name hash. 
__ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset)); __ sra(a3, a2, KeyedLookupCache::kMapHashShift); - __ lw(t0, FieldMemOperand(a0, String::kHashFieldOffset)); - __ sra(at, t0, String::kHashShift); + __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset)); + __ sra(at, t0, Name::kHashShift); __ xor_(a3, a3, at); int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; __ And(a3, a3, Operand(mask)); - // Load the key (consisting of map and symbol) from the cache and + // Load the key (consisting of map and unique name) from the cache and // check for match. Label load_in_object_property; static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; @@ -1131,7 +1091,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { a3); __ Ret(); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(a3, key); // Now jump to the place where smi keys are handled. __ Branch(&index_smi); @@ -1166,7 +1126,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { char_at_generator.GenerateSlow(masm, call_helper); __ bind(&miss); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -1268,7 +1228,6 @@ static void KeyedStoreGenerateGenericHelper( __ bind(&fast_double_without_map_check); __ StoreNumberToDoubleElements(value, key, - receiver, elements, // Overwritten. a3, // Scratch regs... t0, @@ -1296,7 +1255,9 @@ static void KeyedStoreGenerateGenericHelper( t0, slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); @@ -1308,7 +1269,9 @@ static void KeyedStoreGenerateGenericHelper( t0, slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); + mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, + slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -1322,7 +1285,8 @@ static void KeyedStoreGenerateGenericHelper( t0, slow); ASSERT(receiver_map.is(a3)); // Transition code expects map in a3 - ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow); + mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -1453,11 +1417,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } -void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ---------- S t a t e -------------- // -- a0 : value // -- a1 : key @@ -1468,7 +1432,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { // Push receiver, key and value for runtime call. __ Push(a2, a1, a0); - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? 
ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric), masm->isolate()) : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); @@ -1506,7 +1470,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in v0. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail); __ Ret(USE_DELAY_SLOT); __ mov(v0, a2); __ bind(&fail); @@ -1527,7 +1493,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject( // Must return the modified receiver in v0. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail); __ Ret(USE_DELAY_SLOT); __ mov(v0, a2); __ bind(&fail); @@ -1574,62 +1542,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { } -void StoreIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - // - // This accepts as a receiver anything JSArray::SetElementsLength accepts - // (currently anything except for external arrays which means anything with - // elements of FixedArray type). Value must be a number, but only smis are - // accepted as the most common case. - - Label miss; - - Register receiver = a1; - Register value = a0; - Register scratch = a3; - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); - - // Check that the object is a JS array. - __ GetObjectType(receiver, scratch, scratch); - __ Branch(&miss, ne, scratch, Operand(JS_ARRAY_TYPE)); - - // Check that elements are FixedArray. - // We rely on StoreIC_ArrayLength below to deal with all types of - // fast elements (including COW). - __ lw(scratch, FieldMemOperand(receiver, JSArray::kElementsOffset)); - __ GetObjectType(scratch, scratch, scratch); - __ Branch(&miss, ne, scratch, Operand(FIXED_ARRAY_TYPE)); - - // Check that the array has fast properties, otherwise the length - // property might have been redefined. - __ lw(scratch, FieldMemOperand(receiver, JSArray::kPropertiesOffset)); - __ lw(scratch, FieldMemOperand(scratch, FixedArray::kMapOffset)); - __ LoadRoot(at, Heap::kHashTableMapRootIndex); - __ Branch(&miss, eq, scratch, Operand(at)); - - // Check that value is a smi. - __ JumpIfNotSmi(value, &miss); - - // Prepare tail call to StoreIC_ArrayLength. 
- __ Push(receiver, value); - - ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_ArrayLength), - masm->isolate()); - __ TailCallExternalReference(ref, 2, 1); - - __ bind(&miss); - - GenerateMiss(masm); -} - - void StoreIC::GenerateNormal(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- a0 : value @@ -1639,7 +1551,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { // ----------------------------------- Label miss; - GenerateStringDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss); + GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss); GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1); Counters* counters = masm->isolate()->counters(); @@ -1695,36 +1607,16 @@ Condition CompareIC::ComputeCondition(Token::Value op) { } -void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { - HandleScope scope; - Handle<Code> rewritten; - State previous_state = GetState(); - State state = TargetState(previous_state, false, x, y); - if (state == GENERIC) { - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0); - rewritten = stub.GetCode(); - } else { - ICCompareStub stub(op_, state); - if (state == KNOWN_OBJECTS) { - stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map())); - } - rewritten = stub.GetCode(); - } - set_target(*rewritten); - -#ifdef DEBUG - if (FLAG_trace_ic) { - PrintF("[CompareIC (%s->%s)#%s]\n", - GetStateName(previous_state), - GetStateName(state), - Token::Name(op_)); - } -#endif +bool CompareIC::HasInlinedSmiCode(Address address) { + // The address of the instruction following the call. + Address andi_instruction_address = + address + Assembler::kCallTargetAddressOffset; - // Activate inlined smi code. - if (previous_state == UNINITIALIZED) { - PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); - } + // If the instruction following the call is not a andi at, rx, #yyy, nothing + // was inlined. + Instr instr = Assembler::instr_at(andi_instruction_address); + return Assembler::IsAndImmediate(instr) && + Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()); } @@ -1736,7 +1628,7 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // was inlined. Instr instr = Assembler::instr_at(andi_instruction_address); if (!(Assembler::IsAndImmediate(instr) && - Assembler::GetRt(instr) == (uint32_t)zero_reg.code())) { + Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) { return; } diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc index 21fd2ce481..cd489346a6 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.cc +++ b/deps/v8/src/mips/lithium-codegen-mips.cc @@ -65,9 +65,7 @@ bool LCodeGen::GenerateCode() { HPhase phase("Z_Code generation", chunk()); ASSERT(is_unused()); status_ = GENERATING; - CpuFeatures::Scope scope(FPU); - - CodeStub::GenerateFPStubs(); + CpuFeatureScope scope(masm(), FPU); // Open a frame scope to indicate that there is a frame on the stack. 
The // NONE indicates that the scope shouldn't actually generate code to set up @@ -77,6 +75,7 @@ bool LCodeGen::GenerateCode() { return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && + GenerateDeoptJumpTable() && GenerateSafepointTable(); } @@ -85,7 +84,14 @@ void LCodeGen::FinishCode(Handle<Code> code) { ASSERT(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); + if (FLAG_weak_embedded_maps_in_optimized_code) { + RegisterDependentCodeForEmbeddedMaps(code); + } PopulateDeoptimizationData(code); + for (int i = 0 ; i < prototype_maps_.length(); i++) { + prototype_maps_.at(i)->AddDependentCode( + DependentCode::kPrototypeCheckGroup, code); + } } @@ -116,55 +122,93 @@ void LCodeGen::Comment(const char* format, ...) { bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); - ProfileEntryHookStub::MaybeCallEntryHook(masm_); + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); #ifdef DEBUG - if (strlen(FLAG_stop_at) > 0 && - info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { - __ stop("stop_at"); - } + if (strlen(FLAG_stop_at) > 0 && + info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ stop("stop_at"); + } #endif - // a1: Callee's JS function. - // cp: Callee's context. - // fp: Caller's frame pointer. - // lr: Caller's pc. - - // Strict mode functions and builtins need to replace the receiver - // with undefined when called as functions (without an explicit - // receiver object). r5 is zero for method calls and non-zero for - // function calls. - if (!info_->is_classic_mode() || info_->is_native()) { - Label ok; - __ Branch(&ok, eq, t1, Operand(zero_reg)); - - int receiver_offset = scope()->num_parameters() * kPointerSize; - __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); - __ sw(a2, MemOperand(sp, receiver_offset)); - __ bind(&ok); + // a1: Callee's JS function. + // cp: Callee's context. + // fp: Caller's frame pointer. + // lr: Caller's pc. + + // Strict mode functions and builtins need to replace the receiver + // with undefined when called as functions (without an explicit + // receiver object). r5 is zero for method calls and non-zero for + // function calls. + if (!info_->is_classic_mode() || info_->is_native()) { + Label ok; + __ Branch(&ok, eq, t1, Operand(zero_reg)); + + int receiver_offset = scope()->num_parameters() * kPointerSize; + __ LoadRoot(a2, Heap::kUndefinedValueRootIndex); + __ sw(a2, MemOperand(sp, receiver_offset)); + __ bind(&ok); + } } - __ Push(ra, fp, cp, a1); - __ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP. + info()->set_prologue_offset(masm_->pc_offset()); + if (NeedsEagerFrame()) { + if (info()->IsStub()) { + __ Push(ra, fp, cp); + __ Push(Smi::FromInt(StackFrame::STUB)); + // Adjust FP to point to saved FP. + __ Addu(fp, sp, Operand(2 * kPointerSize)); + } else { + // The following three instructions must remain together and unmodified + // for code aging to work properly. + __ Push(ra, fp, cp, a1); + // Add unused load of ip to ensure prologue sequence is identical for + // full-codegen and lithium-codegen. + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + // Adj. FP to point to saved FP. + __ Addu(fp, sp, Operand(2 * kPointerSize)); + } + frame_is_built_ = true; + } // Reserve space for the stack slots needed by the code. 
int slots = GetStackSlotCount(); if (slots > 0) { if (FLAG_debug_code) { - __ li(a0, Operand(slots)); - __ li(a2, Operand(kSlotsZapValue)); + __ Subu(sp, sp, Operand(slots * kPointerSize)); + __ push(a0); + __ push(a1); + __ Addu(a0, sp, Operand(slots * kPointerSize)); + __ li(a1, Operand(kSlotsZapValue)); Label loop; __ bind(&loop); - __ push(a2); - __ Subu(a0, a0, 1); - __ Branch(&loop, ne, a0, Operand(zero_reg)); + __ Subu(a0, a0, Operand(kPointerSize)); + __ sw(a1, MemOperand(a0, 2 * kPointerSize)); + __ Branch(&loop, ne, a0, Operand(sp)); + __ pop(a1); + __ pop(a0); } else { __ Subu(sp, sp, Operand(slots * kPointerSize)); } } + if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + Comment(";;; Save clobbered callee double registers"); + int count = 0; + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + while (!save_iterator.Done()) { + __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(sp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } + } + // Possibly allocate a local context. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); // Argument to NewContext is the function, which is in a1. @@ -200,7 +244,7 @@ bool LCodeGen::GeneratePrologue() { } // Trace the call. - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { __ CallRuntime(Runtime::kTraceEnter, 0); } EnsureSpaceForLazyDeopt(); @@ -221,7 +265,30 @@ bool LCodeGen::GenerateBody() { } if (emit_instructions) { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + if (FLAG_code_comments) { + HValue* hydrogen = instr->hydrogen_value(); + if (hydrogen != NULL) { + if (hydrogen->IsChange()) { + HValue* changed_value = HChange::cast(hydrogen)->value(); + int use_id = 0; + const char* use_mnemo = "dead"; + if (hydrogen->UseCount() >= 1) { + HValue* use_value = hydrogen->uses().value(); + use_id = use_value->id(); + use_mnemo = use_value->Mnemonic(); + } + Comment(";;; @%d: %s. <of #%d %s for #%d %s>", + current_instruction_, instr->Mnemonic(), + changed_value->id(), changed_value->Mnemonic(), + use_id, use_mnemo); + } else { + Comment(";;; @%d: %s. 
<#%d>", current_instruction_, + instr->Mnemonic(), hydrogen->id()); + } + } else { + Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + } + } instr->CompileToNative(this); } } @@ -235,10 +302,31 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred build frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(!frame_is_built_); + ASSERT(info()->IsStub()); + frame_is_built_ = true; + __ MultiPush(cp.bit() | fp.bit() | ra.bit()); + __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ Addu(fp, sp, Operand(2 * kPointerSize)); + } Comment(";;; Deferred code @%d: %s.", code->instruction_index(), code->instr()->Mnemonic()); code->Generate(); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred destroy frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(frame_is_built_); + __ pop(at); + __ MultiPop(cp.bit() | fp.bit() | ra.bit()); + frame_is_built_ = false; + } __ jmp(code->exit()); } } @@ -250,10 +338,81 @@ bool LCodeGen::GenerateDeferredCode() { bool LCodeGen::GenerateDeoptJumpTable() { - // TODO(plind): not clear that this will have advantage for MIPS. - // Skipping it for now. Raised issue #100 for this. - Abort("Unimplemented: GenerateDeoptJumpTable"); - return false; + // Check that the jump table is accessible from everywhere in the function + // code, i.e. that offsets to the table can be encoded in the 16bit signed + // immediate of a branch instruction. + // To simplify we consider the code size from the first instruction to the + // end of the jump table. + if (!is_int16((masm()->pc_offset() / Assembler::kInstrSize) + + deopt_jump_table_.length() * 12)) { + Abort("Generated code is too large"); + } + + Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); + __ RecordComment("[ Deoptimization jump table"); + Label table_start; + __ bind(&table_start); + Label needs_frame_not_call; + Label needs_frame_is_call; + for (int i = 0; i < deopt_jump_table_.length(); i++) { + __ bind(&deopt_jump_table_[i].label); + Address entry = deopt_jump_table_[i].address; + bool is_lazy_deopt = deopt_jump_table_[i].is_lazy_deopt; + Deoptimizer::BailoutType type = + is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + Comment(";;; jump table entry %d.", i); + } else { + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + } + __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry))); + if (deopt_jump_table_[i].needs_frame) { + if (is_lazy_deopt) { + if (needs_frame_is_call.is_bound()) { + __ Branch(&needs_frame_is_call); + } else { + __ bind(&needs_frame_is_call); + __ MultiPush(cp.bit() | fp.bit() | ra.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. 
+ ASSERT(info()->IsStub()); + __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ Addu(fp, sp, Operand(2 * kPointerSize)); + __ Call(t9); + } + } else { + if (needs_frame_not_call.is_bound()) { + __ Branch(&needs_frame_not_call); + } else { + __ bind(&needs_frame_not_call); + __ MultiPush(cp.bit() | fp.bit() | ra.bit()); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); + __ push(scratch0()); + __ Addu(fp, sp, Operand(2 * kPointerSize)); + __ Jump(t9); + } + } + } else { + if (is_lazy_deopt) { + __ Call(t9); + } else { + __ Jump(t9); + } + } + } + __ RecordComment("]"); + + // The deoptimization jump table is the last part of the instruction + // sequence. Mark the generated code as done unless we bailed out. + if (!is_aborted()) status_ = DONE; + return !is_aborted(); } @@ -362,8 +521,6 @@ bool LCodeGen::IsInteger32(LConstantOperand* op) const { int LCodeGen::ToInteger32(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); - ASSERT(constant->HasInteger32Value()); return constant->Integer32Value(); } @@ -404,37 +561,20 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const { ASSERT(!op->IsRegister()); ASSERT(!op->IsDoubleRegister()); ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); - int index = op->index(); - if (index >= 0) { - // Local or spill slot. Skip the frame pointer, function, and - // context in the fixed part of the frame. - return MemOperand(fp, -(index + 3) * kPointerSize); - } else { - // Incoming parameter. Skip the return address. - return MemOperand(fp, -(index - 1) * kPointerSize); - } + return MemOperand(fp, StackSlotOffset(op->index())); } MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { ASSERT(op->IsDoubleStackSlot()); - int index = op->index(); - if (index >= 0) { - // Local or spill slot. Skip the frame pointer, function, context, - // and the first word of the double in the fixed part of the frame. - return MemOperand(fp, -(index + 3) * kPointerSize + kPointerSize); - } else { - // Incoming parameter. Skip the return address and the first word of - // the double. - return MemOperand(fp, -(index - 1) * kPointerSize + kPointerSize); - } + return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize); } void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation, - int* arguments_index, - int* arguments_count) { + int* pushed_arguments_index, + int* pushed_arguments_count) { if (environment == NULL) return; // The translation includes one command per value in the environment. @@ -446,14 +586,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, // arguments index points to the first element of a sequence of tagged // values on the stack that represent the arguments. This needs to be // kept in sync with the LArgumentsElements implementation. 
- *arguments_index = -environment->parameter_count(); - *arguments_count = environment->parameter_count(); + *pushed_arguments_index = -environment->parameter_count(); + *pushed_arguments_count = environment->parameter_count(); WriteTranslation(environment->outer(), translation, - arguments_index, - arguments_count); - int closure_id = *info()->closure() != *environment->closure() + pushed_arguments_index, + pushed_arguments_count); + bool has_closure_id = !info()->closure().is_null() && + *info()->closure() != *environment->closure(); + int closure_id = has_closure_id ? DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -474,19 +616,29 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, ASSERT(height == 0); translation->BeginSetterStubFrame(closure_id); break; + case STUB: + translation->BeginCompiledStubFrame(); + break; case ARGUMENTS_ADAPTOR: translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); break; } // Inlined frames which push their arguments cause the index to be - // bumped and a new stack area to be used for materialization. - if (environment->entry() != NULL && - environment->entry()->arguments_pushed()) { - *arguments_index = *arguments_index < 0 - ? GetStackSlotCount() - : *arguments_index + *arguments_count; - *arguments_count = environment->entry()->arguments_count() + 1; + // bumped and another stack area to be used for materialization, + // otherwise actual argument values are unknown for inlined frames. + bool arguments_known = true; + int arguments_index = *pushed_arguments_index; + int arguments_count = *pushed_arguments_count; + if (environment->entry() != NULL) { + arguments_known = environment->entry()->arguments_pushed(); + arguments_index = arguments_index < 0 + ? GetStackSlotCount() : arguments_index + arguments_count; + arguments_count = environment->entry()->arguments_count() + 1; + if (environment->entry()->arguments_pushed()) { + *pushed_arguments_index = arguments_index; + *pushed_arguments_count = arguments_count; + } } for (int i = 0; i < translation_size; ++i) { @@ -501,8 +653,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_registers()[value->index()], environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } else if ( value->IsDoubleRegister() && environment->spilled_double_registers()[value->index()] != NULL) { @@ -512,8 +665,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_double_registers()[value->index()], false, false, - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -521,8 +675,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, value, environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -531,13 +686,15 @@ void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count) { if (op == NULL) { // TODO(twuerthinger): Introduce marker operands to indicate that this value // is not present and must be reconstructed from the deoptimizer. Currently // this is only used for the arguments object. 
- translation->StoreArgumentsObject(arguments_index, arguments_count); + translation->StoreArgumentsObject( + arguments_known, arguments_index, arguments_count); } else if (op->IsStackSlot()) { if (is_tagged) { translation->StoreStackSlot(op->index()); @@ -660,16 +817,19 @@ void LCodeGen::DeoptimizeIf(Condition cc, RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); ASSERT(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); + ASSERT(info()->IsOptimizing() || info()->IsStub()); + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? Deoptimizer::LAZY + : Deoptimizer::EAGER; + Address entry = + Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { Abort("bailout was not prepared"); return; } ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS. - - if (FLAG_deopt_every_n_times == 1 && - info_->shared_info()->opt_count() == id) { + if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) { __ Jump(entry, RelocInfo::RUNTIME_ENTRY); return; } @@ -683,9 +843,51 @@ void LCodeGen::DeoptimizeIf(Condition cc, __ bind(&skip); } - // TODO(plind): The Arm port is a little different here, due to their - // DeOpt jump table, which is not used for Mips yet. - __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); + ASSERT(info()->IsStub() || frame_is_built_); + bool needs_lazy_deopt = info()->IsStub(); + if (cc == al && frame_is_built_) { + if (needs_lazy_deopt) { + __ Call(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); + } else { + __ Jump(entry, RelocInfo::RUNTIME_ENTRY, cc, src1, src2); + } + } else { + // We often have several deopts to the same entry, reuse the last + // jump entry if this is the case. + if (deopt_jump_table_.is_empty() || + (deopt_jump_table_.last().address != entry) || + (deopt_jump_table_.last().is_lazy_deopt != needs_lazy_deopt) || + (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { + JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); + deopt_jump_table_.Add(table_entry, zone()); + } + __ Branch(&deopt_jump_table_.last().label, cc, src1, src2); + } +} + + +void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { + ZoneList<Handle<Map> > maps(1, zone()); + int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { + RelocInfo::Mode mode = it.rinfo()->rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT && + it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); + if (map->CanTransition()) { + maps.Add(map, zone()); + } + } + } +#ifdef VERIFY_HEAP + // This disables verification of weak embedded maps after full GC. + // AddDependentCode can cause a GC, which would observe the state where + // this code is not yet in the depended code lists of the embedded maps. 
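The DeoptimizeIf() hunk above introduces a per-function deopt jump table and reuses the last entry when consecutive deopts share the same target and requirements. A stand-alone sketch of that reuse rule, with Entry and the std::vector standing in for JumpTableEntry and deopt_jump_table_:

    #include <cstdint>
    #include <vector>

    struct Entry {
      uintptr_t address;   // deoptimization entry to jump or call to
      bool needs_frame;    // build a stub frame before entering
      bool is_lazy_deopt;  // call (lazy) rather than jump (eager)
    };

    // Append a new entry only if the last one cannot be shared; the emitted
    // branch always targets the label of the table's last entry.
    static void AddDeoptJump(std::vector<Entry>* table, uintptr_t address,
                             bool needs_frame, bool is_lazy_deopt) {
      if (table->empty() ||
          table->back().address != address ||
          table->back().is_lazy_deopt != is_lazy_deopt ||
          table->back().needs_frame != needs_frame) {
        table->push_back(Entry{address, needs_frame, is_lazy_deopt});
      }
    }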
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; +#endif + for (int i = 0; i < maps.length(); i++) { + maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); + } } @@ -695,7 +897,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<DeoptimizationInputData> data = factory()->NewDeoptimizationInputData(length, TENURED); - Handle<ByteArray> translations = translations_.CreateByteArray(); + Handle<ByteArray> translations = + translations_.CreateByteArray(isolate()->factory()); data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); @@ -863,39 +1066,39 @@ void LCodeGen::DoCallStub(LCallStub* instr) { switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpConstructResult: { RegExpConstructResultStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::RegExpExec: { RegExpExecStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { SubStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::NumberToString: { NumberToStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringAdd: { StringAddStub stub(NO_STRING_ADD_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { StringCompareStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::TranscendentalCache: { __ lw(a0, MemOperand(sp, 0)); TranscendentalCacheStub stub(instr->transcendental_type(), TranscendentalCacheStub::TAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } default: @@ -947,6 +1150,14 @@ void LCodeGen::DoModI(LModI* instr) { DeoptimizeIf(eq, instr->environment(), right, Operand(zero_reg)); } + // Check for (kMinInt % -1). + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + Label left_not_min_int; + __ Branch(&left_not_min_int, ne, left, Operand(kMinInt)); + DeoptimizeIf(eq, instr->environment(), right, Operand(-1)); + __ bind(&left_not_min_int); + } + __ Branch(USE_DELAY_SLOT, &done, ge, left, Operand(zero_reg)); __ mfhi(result); @@ -980,7 +1191,7 @@ void LCodeGen::DoDivI(LDivI* instr) { __ bind(&left_not_zero); } - // Check for (-kMinInt / -1). + // Check for (kMinInt / -1). if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { Label left_not_min_int; __ Branch(&left_not_min_int, ne, left, Operand(kMinInt)); @@ -994,6 +1205,18 @@ void LCodeGen::DoDivI(LDivI* instr) { } +void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { + DoubleRegister addend = ToDoubleRegister(instr->addend()); + DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); + DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); + + // This is computed in-place. 
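A few hunks above, DoModI() gains a guard for (kMinInt % -1) under the kCanOverflow flag. For reference, the same corner case in portable C++ (kMinInt corresponds to INT32_MIN) is undefined behaviour, so a host-side equivalent has to bail out exactly where the generated code deoptimizes; a hedged sketch:

    #include <cstdint>

    // Returns false where DoModI would deoptimize: division by zero, or the
    // single overflowing combination INT32_MIN % -1.
    static bool SafeMod(int32_t left, int32_t right, int32_t* result) {
      if (right == 0) return false;
      if (left == INT32_MIN && right == -1) return false;
      *result = left % right;
      return true;
    }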
+ ASSERT(addend.is(ToDoubleRegister(instr->result()))); + + __ madd_d(addend, addend, multiplier, multiplicand); +} + + void LCodeGen::DoMulI(LMulI* instr) { Register scratch = scratch0(); Register result = ToRegister(instr->result()); @@ -1046,12 +1269,12 @@ void LCodeGen::DoMulI(LMulI* instr) { __ sll(result, left, shift); } else if (IsPowerOf2(constant_abs - 1)) { int32_t shift = WhichPowerOf2(constant_abs - 1); - __ sll(result, left, shift); - __ Addu(result, result, left); + __ sll(scratch, left, shift); + __ Addu(result, scratch, left); } else if (IsPowerOf2(constant_abs + 1)) { int32_t shift = WhichPowerOf2(constant_abs + 1); - __ sll(result, left, shift); - __ Subu(result, result, left); + __ sll(scratch, left, shift); + __ Subu(result, scratch, left); } // Correct the sign of the result is the constant is negative. @@ -1140,6 +1363,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) { // No need to mask the right operand on MIPS, it is built into the variable // shift instructions. switch (instr->op()) { + case Token::ROR: + __ Ror(result, left, Operand(ToRegister(right_op))); + break; case Token::SAR: __ srav(result, left, ToRegister(right_op)); break; @@ -1161,6 +1387,13 @@ void LCodeGen::DoShiftI(LShiftI* instr) { int value = ToInteger32(LConstantOperand::cast(right_op)); uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); switch (instr->op()) { + case Token::ROR: + if (shift_count != 0) { + __ Ror(result, left, Operand(shift_count)); + } else { + __ Move(result, left); + } + break; case Token::SAR: if (shift_count != 0) { __ sra(result, left, shift_count); @@ -1242,6 +1475,7 @@ void LCodeGen::DoConstantI(LConstantI* instr) { void LCodeGen::DoConstantD(LConstantD* instr) { ASSERT(instr->result()->IsDoubleRegister()); DoubleRegister result = ToDoubleRegister(instr->result()); + CpuFeatureScope scope(masm(), FPU); double v = instr->value(); __ Move(result, v); } @@ -1350,6 +1584,15 @@ void LCodeGen::DoDateField(LDateField* instr) { } +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + SeqStringSetCharGenerator::Generate(masm(), + instr->encoding(), + ToRegister(instr->string()), + ToRegister(instr->index()), + ToRegister(instr->value())); +} + + void LCodeGen::DoBitNotI(LBitNotI* instr) { Register input = ToRegister(instr->value()); Register result = ToRegister(instr->result()); @@ -1430,6 +1673,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { __ bind(&done); } else { ASSERT(instr->hydrogen()->representation().IsDouble()); + CpuFeatureScope scope(masm(), FPU); FPURegister left_reg = ToDoubleRegister(left); FPURegister right_reg = ToDoubleRegister(right); FPURegister result_reg = ToDoubleRegister(instr->result()); @@ -1470,6 +1714,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { void LCodeGen::DoArithmeticD(LArithmeticD* instr) { + CpuFeatureScope scope(masm(), FPU); DoubleRegister left = ToDoubleRegister(instr->left()); DoubleRegister right = ToDoubleRegister(instr->right()); DoubleRegister result = ToDoubleRegister(instr->result()); @@ -1516,7 +1761,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->result()).is(v0)); BinaryOpStub stub(instr->op(), NO_OVERWRITE); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); // Other arch use a nop here, to signal that there is no inlined // patchable code. Mips does not need the nop, since our marker // instruction (andi zero_reg) will never be used in normal code. 
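The DoMulI() hunk above fixes the power-of-two strength reduction so the shifted value lands in scratch first: if result and left are the same register, the old sequence clobbered left before the add or subtract could read it. A sketch of the identity being used for constants of the form plus or minus (2^k plus or minus 1); the unsigned cast only avoids signed-overflow UB in portable C++:

    #include <cstdint>

    // left * (2^k + 1)  ==  (left << k) + left
    // left * (2^k - 1)  ==  (left << k) - left
    static int32_t MulByShiftedConstant(int32_t left, int k, bool plus_one,
                                        bool negate) {
      int32_t scratch =
          static_cast<int32_t>(static_cast<uint32_t>(left) << k);
      int32_t result = plus_one ? scratch + left : scratch - left;
      return negate ? -result : result;
    }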
@@ -1579,9 +1824,10 @@ void LCodeGen::DoBranch(LBranch* instr) { Register reg = ToRegister(instr->value()); EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg)); } else if (r.IsDouble()) { + CpuFeatureScope scope(masm(), FPU); DoubleRegister reg = ToDoubleRegister(instr->value()); // Test the double value. Zero and NaN are false. - EmitBranchF(true_block, false_block, ne, reg, kDoubleRegZero); + EmitBranchF(true_block, false_block, nue, reg, kDoubleRegZero); } else { ASSERT(r.IsTagged()); Register reg = ToRegister(instr->value()); @@ -1656,6 +1902,7 @@ void LCodeGen::DoBranch(LBranch* instr) { } if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { + CpuFeatureScope scope(masm(), FPU); // heap number -> false iff +0, -0, or NaN. DoubleRegister dbl_scratch = double_scratch0(); Label not_heap_number; @@ -1735,6 +1982,7 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { EmitGoto(next_block); } else { if (instr->is_double()) { + CpuFeatureScope scope(masm(), FPU); // Compare left and right as doubles and load the // resulting flags into the normal status register. FPURegister left_reg = ToDoubleRegister(left); @@ -1951,7 +2199,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); Condition condition = ComputeCompareCondition(op); @@ -2040,7 +2288,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ JumpIfSmi(input, is_false); - if (class_name->IsEqualTo(CStrVector("Function"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { // Assuming the following assertions, we can use the same compares to test // for both being a function type and being in the object type range. STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); @@ -2069,7 +2317,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, // Objects with a non-function constructor have class 'Object'. __ GetObjectType(temp, temp2, temp2); - if (class_name->IsEqualTo(CStrVector("Object"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE)); } else { __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE)); @@ -2080,12 +2328,12 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); __ lw(temp, FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is a symbol because it's a literal. - // The name in the constructor is a symbol because of the way the context is - // booted. This routine isn't expected to work for random API-created + // The class name we are testing against is internalized since it's a literal. + // The name in the constructor is internalized because of the way the context + // is booted. This routine isn't expected to work for random API-created // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are symbols it is sufficient to use an identity - // comparison. + // syntax. Since both sides are internalized it is sufficient to use an + // identity comparison. // End with the address of this class_name instance in temp register. // On MIPS, the caller must do the comparison with Handle<String>class_name. 
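In the DoBranch() hunk above a double-valued branch treats zero and NaN as false, which is presumably why the comparison against kDoubleRegZero now uses the unordered-aware nue condition instead of ne: with a plain not-equal, a NaN would have fallen into the true block. The equivalent predicate in plain C++:

    #include <cmath>

    // ToBoolean for a double: false exactly for +0, -0 and NaN.
    static bool DoubleIsTruthy(double value) {
      return value != 0.0 && !std::isnan(value);  // +0 and -0 compare == 0.0
    }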
@@ -2129,7 +2377,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { ASSERT(result.is(v0)); InstanceofStub stub(InstanceofStub::kArgsInRegisters); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ Branch(&true_label, eq, result, Operand(zero_reg)); __ li(result, Operand(factory()->false_value())); @@ -2249,7 +2497,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, __ li(temp, Operand(delta * kPointerSize), CONSTANT_SIZE); __ StoreToSafepointRegisterSlot(temp, temp); } - CallCodeGeneric(stub.GetCode(), + CallCodeGeneric(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); @@ -2261,10 +2509,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, } +void LCodeGen::DoInstanceSize(LInstanceSize* instr) { + Register object = ToRegister(instr->object()); + Register result = ToRegister(instr->result()); + __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset)); + __ lbu(result, FieldMemOperand(result, Map::kInstanceSizeOffset)); +} + + void LCodeGen::DoCmpT(LCmpT* instr) { Token::Value op = instr->op(); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); // On MIPS there is no need for a "no inlined smi code" marker (nop). @@ -2272,26 +2528,53 @@ void LCodeGen::DoCmpT(LCmpT* instr) { // A minor optimization that relies on LoadRoot always emitting one // instruction. Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); - Label done; + Label done, check; __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg)); + __ bind(&check); __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); + ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check)); __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); - ASSERT_EQ(3, masm()->InstructionsGeneratedSince(&done)); __ bind(&done); } void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { // Push the return value on the stack as the parameter. // Runtime::TraceExit returns its parameter in v0. 
__ push(v0); __ CallRuntime(Runtime::kTraceExit, 1); } - int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize; - __ mov(sp, fp); - __ Pop(ra, fp); - __ Addu(sp, sp, Operand(sp_delta)); + if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + ASSERT(NeedsEagerFrame()); + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + int count = 0; + while (!save_iterator.Done()) { + __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(sp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } + } + if (NeedsEagerFrame()) { + __ mov(sp, fp); + __ Pop(ra, fp); + + if (instr->has_constant_parameter_count()) { + int parameter_count = ToInteger32(instr->constant_parameter_count()); + int32_t sp_delta = (parameter_count + 1) * kPointerSize; + if (sp_delta != 0) { + __ Addu(sp, sp, Operand(sp_delta)); + } + } else { + Register reg = ToRegister(instr->parameter_count()); + __ Addu(reg, reg, Operand(1)); + __ sll(at, reg, kPointerSizeLog2); + __ Addu(sp, sp, at); + } + } __ Jump(ra); } @@ -2622,7 +2905,183 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { } -void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { + Register external_pointer = ToRegister(instr->elements()); + Register key = no_reg; + ElementsKind elements_kind = instr->elements_kind(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(elements_kind); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? (element_size_shift - kSmiTagSize) : element_size_shift; + int additional_offset = instr->additional_index() << element_size_shift; + + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || + elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + FPURegister result = ToDoubleRegister(instr->result()); + if (key_is_constant) { + __ Addu(scratch0(), external_pointer, constant_key << element_size_shift); + } else { + __ sll(scratch0(), key, shift_size); + __ Addu(scratch0(), scratch0(), external_pointer); + } + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + __ lwc1(result, MemOperand(scratch0(), additional_offset)); + __ cvt_d_s(result, result); + } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS + __ ldc1(result, MemOperand(scratch0(), additional_offset)); + } + } else { + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + Register value = external_pointer; + __ lw(value, MemOperand(scratch0(), additional_offset)); + __ And(sfpd_lo, value, Operand(kBinary32MantissaMask)); + + __ srl(scratch0(), value, kBinary32MantissaBits); + __ And(scratch0(), scratch0(), + Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); + + Label exponent_rebiased; + __ Xor(at, scratch0(), Operand(0x00)); + __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg)); + + __ Xor(at, scratch0(), Operand(0xff)); + Label skip; + __ Branch(&skip, ne, at, Operand(zero_reg)); + __ li(scratch0(), Operand(0x7ff)); + __ bind(&skip); + __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg)); + + // Rebias exponent. 
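The surrounding soft-float path (no FPU) widens the binary32 value it just loaded into a binary64 by hand: copy the sign, rebias the exponent from 127 to 1023 unless it is all zeros or all ones, and shift the 23-bit mantissa up by 29 bits into the sfpd_hi:sfpd_lo word pair. A portable sketch of the same bit manipulation, not part of the patch; denormals are only approximated, as in the generated code:

    #include <cstdint>

    static uint64_t Binary32ToBinary64Bits(uint32_t f) {
      uint64_t sign = static_cast<uint64_t>(f >> 31) << 63;
      uint32_t exp32 = (f >> 23) & 0xff;
      uint64_t mantissa = static_cast<uint64_t>(f & 0x007fffffu) << 29;
      uint64_t exp64;
      if (exp32 == 0) {
        exp64 = 0;                   // zero (denormals only approximated)
      } else if (exp32 == 0xff) {
        exp64 = 0x7ff;               // infinity / NaN
      } else {
        exp64 = exp32 - 127 + 1023;  // rebias binary32 -> binary64 exponent
      }
      return sign | (exp64 << 52) | mantissa;
    }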
+ __ Addu(scratch0(), + scratch0(), + Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); + + __ bind(&exponent_rebiased); + __ And(sfpd_hi, value, Operand(kBinary32SignMask)); + __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord); + __ Or(sfpd_hi, sfpd_hi, at); + + // Shift mantissa. + static const int kMantissaShiftForHiWord = + kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; + + static const int kMantissaShiftForLoWord = + kBitsPerInt - kMantissaShiftForHiWord; + + __ srl(at, sfpd_lo, kMantissaShiftForHiWord); + __ Or(sfpd_hi, sfpd_hi, at); + __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord); + + } else { + __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset)); + __ lw(sfpd_hi, MemOperand(scratch0(), + additional_offset + kPointerSize)); + } + } + } else { + Register result = ToRegister(instr->result()); + MemOperand mem_operand = PrepareKeyedOperand( + key, external_pointer, key_is_constant, constant_key, + element_size_shift, shift_size, + instr->additional_index(), additional_offset); + switch (elements_kind) { + case EXTERNAL_BYTE_ELEMENTS: + __ lb(result, mem_operand); + break; + case EXTERNAL_PIXEL_ELEMENTS: + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + __ lbu(result, mem_operand); + break; + case EXTERNAL_SHORT_ELEMENTS: + __ lh(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + __ lhu(result, mem_operand); + break; + case EXTERNAL_INT_ELEMENTS: + __ lw(result, mem_operand); + break; + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + __ lw(result, mem_operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + DeoptimizeIf(Ugreater_equal, instr->environment(), + result, Operand(0x80000000)); + } + break; + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case DICTIONARY_ELEMENTS: + case NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; + } + } +} + + +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { + Register elements = ToRegister(instr->elements()); + bool key_is_constant = instr->key()->IsConstantOperand(); + Register key = no_reg; + DoubleRegister result = ToDoubleRegister(instr->result()); + Register scratch = scratch0(); + + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? 
(element_size_shift - kSmiTagSize) : element_size_shift; + int constant_key = 0; + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + + int base_offset = (FixedDoubleArray::kHeaderSize - kHeapObjectTag) + + ((constant_key + instr->additional_index()) << element_size_shift); + if (!key_is_constant) { + __ sll(scratch, key, shift_size); + __ Addu(elements, elements, scratch); + } + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + __ Addu(elements, elements, Operand(base_offset)); + __ ldc1(result, MemOperand(elements)); + if (instr->hydrogen()->RequiresHoleCheck()) { + __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); + DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); + } + } else { + __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize)); + __ lw(sfpd_lo, MemOperand(elements, base_offset)); + if (instr->hydrogen()->RequiresHoleCheck()) { + ASSERT(kPointerSize == sizeof(kHoleNanLower32)); + DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32)); + } + } +} + + +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { Register elements = ToRegister(instr->elements()); Register result = ToRegister(instr->result()); Register scratch = scratch0(); @@ -2635,8 +3094,8 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { instr->additional_index()); store_base = elements; } else { - Register key = EmitLoadRegister(instr->key(), scratch); - // Even though the HLoadKeyedFastElement instruction forces the input + Register key = EmitLoadRegister(instr->key(), scratch0()); + // Even though the HLoadKeyed instruction forces the input // representation for the key to be an integer, the input gets replaced // during bound check elimination with the index argument to the bounds // check, which can be tagged, so that case must be handled here, too. @@ -2664,46 +3123,14 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { } -void LCodeGen::DoLoadKeyedFastDoubleElement( - LLoadKeyedFastDoubleElement* instr) { - Register elements = ToRegister(instr->elements()); - bool key_is_constant = instr->key()->IsConstantOperand(); - Register key = no_reg; - DoubleRegister result = ToDoubleRegister(instr->result()); - Register scratch = scratch0(); - - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? 
(element_size_shift - kSmiTagSize) : element_size_shift; - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - - if (key_is_constant) { - __ Addu(elements, elements, - Operand(((constant_key + instr->additional_index()) << - element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag)); +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_external()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); } else { - __ sll(scratch, key, shift_size); - __ Addu(elements, elements, Operand(scratch)); - __ Addu(elements, elements, - Operand((FixedDoubleArray::kHeaderSize - kHeapObjectTag) + - (instr->additional_index() << element_size_shift))); - } - - if (instr->hydrogen()->RequiresHoleCheck()) { - __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32))); - DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32)); + DoLoadKeyedFixedArray(instr); } - - __ ldc1(result, MemOperand(elements)); } @@ -2751,89 +3178,6 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, } -void LCodeGen::DoLoadKeyedSpecializedArrayElement( - LLoadKeyedSpecializedArrayElement* instr) { - Register external_pointer = ToRegister(instr->external_pointer()); - Register key = no_reg; - ElementsKind elements_kind = instr->elements_kind(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(elements_kind); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - int additional_offset = instr->additional_index() << element_size_shift; - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || - elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - FPURegister result = ToDoubleRegister(instr->result()); - if (key_is_constant) { - __ Addu(scratch0(), external_pointer, constant_key << element_size_shift); - } else { - __ sll(scratch0(), key, shift_size); - __ Addu(scratch0(), scratch0(), external_pointer); - } - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - __ lwc1(result, MemOperand(scratch0(), additional_offset)); - __ cvt_d_s(result, result); - } else { // i.e. 
elements_kind == EXTERNAL_DOUBLE_ELEMENTS - __ ldc1(result, MemOperand(scratch0(), additional_offset)); - } - } else { - Register result = ToRegister(instr->result()); - MemOperand mem_operand = PrepareKeyedOperand( - key, external_pointer, key_is_constant, constant_key, - element_size_shift, shift_size, - instr->additional_index(), additional_offset); - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ lb(result, mem_operand); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ lbu(result, mem_operand); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ lh(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ lhu(result, mem_operand); - break; - case EXTERNAL_INT_ELEMENTS: - __ lw(result, mem_operand); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ lw(result, mem_operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - DeoptimizeIf(Ugreater_equal, instr->environment(), - result, Operand(0x80000000)); - } - break; - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(a1)); ASSERT(ToRegister(instr->key()).is(a0)); @@ -3006,8 +3350,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) { void LCodeGen::DoContext(LContext* instr) { + // If there is a non-return use, the context must be moved to a register. Register result = ToRegister(instr->result()); - __ mov(result, cp); + for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) { + if (!it.value()->IsReturn()) { + __ mov(result, cp); + return; + } + } } @@ -3167,8 +3517,6 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { Label done; __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); __ mov(result, input); - ASSERT_EQ(2, masm()->InstructionsGeneratedSince(&done)); - __ subu(result, zero_reg, input); // Overflow if result is still negative, i.e. 0x80000000. DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg)); __ bind(&done); @@ -3176,6 +3524,7 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), FPU); // Class for deferred case. class DeferredMathAbsTaggedHeapNumber: public LDeferredCode { public: @@ -3212,24 +3561,22 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) { void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); - FPURegister single_scratch = double_scratch0().low(); Register scratch1 = scratch0(); Register except_flag = ToRegister(instr->temp()); __ EmitFPUTruncate(kRoundToMinusInf, - single_scratch, + result, input, scratch1, + double_scratch0(), except_flag); // Deopt if the operation did not succeed. DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); - // Load the result. - __ mfc1(result, single_scratch); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Test for -0. 
Label done; @@ -3243,8 +3590,10 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); Register result = ToRegister(instr->result()); + DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); Register scratch = scratch0(); Label done, check_sign_on_zero; @@ -3296,17 +3645,15 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { } Register except_flag = scratch; - __ EmitFPUTruncate(kRoundToMinusInf, - double_scratch0().low(), - double_scratch0(), result, + double_scratch0(), + at, + double_scratch1, except_flag); DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); - __ mfc1(result, double_scratch0().low()); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Test for -0. __ Branch(&done, ne, result, Operand(zero_reg)); @@ -3320,6 +3667,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); __ sqrt_d(result, input); @@ -3327,6 +3675,7 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { + CpuFeatureScope scope(masm(), FPU); DoubleRegister input = ToDoubleRegister(instr->value()); DoubleRegister result = ToDoubleRegister(instr->result()); DoubleRegister temp = ToDoubleRegister(instr->temp()); @@ -3351,6 +3700,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { void LCodeGen::DoPower(LPower* instr) { + CpuFeatureScope scope(masm(), FPU); Representation exponent_type = instr->hydrogen()->right()->representation(); // Having marked this as a call, we can use any registers. // Just make sure that the input/output registers are the expected ones. 
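DoMathFloor() above now truncates straight into the destination register with kRoundToMinusInf and deoptimizes via except_flag when the conversion is inexact or out of range, with a separate bailout for an input of -0 when kBailoutOnMinusZero is set. A sketch of that contract rather than the instruction sequence:

    #include <cmath>
    #include <cstdint>

    // Returns false where the generated code would deoptimize.
    static bool FloorToInt32(double input, bool bailout_on_minus_zero,
                             int32_t* result) {
      if (std::isnan(input)) return false;
      double floored = std::floor(input);
      if (floored < INT32_MIN || floored > INT32_MAX) return false;
      *result = static_cast<int32_t>(floored);
      if (bailout_on_minus_zero && *result == 0 && std::signbit(input)) {
        return false;  // -0 is not representable as an int32
      }
      return true;
    }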
@@ -3381,6 +3731,7 @@ void LCodeGen::DoPower(LPower* instr) { void LCodeGen::DoRandom(LRandom* instr) { + CpuFeatureScope scope(masm(), FPU); class DeferredDoRandom: public LDeferredCode { public: DeferredDoRandom(LCodeGen* codegen, LRandom* instr) @@ -3456,11 +3807,26 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { } +void LCodeGen::DoMathExp(LMathExp* instr) { + CpuFeatureScope scope(masm(), FPU); + DoubleRegister input = ToDoubleRegister(instr->value()); + DoubleRegister result = ToDoubleRegister(instr->result()); + DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); + DoubleRegister double_scratch2 = double_scratch0(); + Register temp1 = ToRegister(instr->temp1()); + Register temp2 = ToRegister(instr->temp2()); + + MathExpGenerator::EmitMathExp( + masm(), input, result, double_scratch1, double_scratch2, + temp1, temp2, scratch0()); +} + + void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(f4)); TranscendentalCacheStub stub(TranscendentalCache::LOG, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3468,7 +3834,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(f4)); TranscendentalCacheStub stub(TranscendentalCache::TAN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3476,7 +3842,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(f4)); TranscendentalCacheStub stub(TranscendentalCache::COS, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3484,7 +3850,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(f4)); TranscendentalCacheStub stub(TranscendentalCache::SIN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3576,7 +3942,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { int arity = instr->arity(); CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); } @@ -3608,9 +3974,29 @@ void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->constructor()).is(a1)); ASSERT(ToRegister(instr->result()).is(v0)); + __ li(a0, Operand(instr->arity())); + if (FLAG_optimize_constructed_arrays) { + // No cell in a2 for construct type feedback in optimized code + Handle<Object> undefined_value(isolate()->heap()->undefined_value(), + isolate()); + __ li(a2, Operand(undefined_value)); + } CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); +} + + +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + ASSERT(ToRegister(instr->constructor()).is(a1)); + ASSERT(ToRegister(instr->result()).is(v0)); + ASSERT(FLAG_optimize_constructed_arrays); + __ li(a0, Operand(instr->arity())); - CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + __ li(a2, Operand(instr->hydrogen()->property_cell())); + Handle<Code> array_construct_code = + 
isolate()->builtins()->ArrayConstructCode(); + + CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); } @@ -3619,6 +4005,13 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) { } +void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { + Register result = ToRegister(instr->result()); + Register base = ToRegister(instr->base_object()); + __ Addu(result, base, Operand(instr->offset())); +} + + void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register object = ToRegister(instr->object()); Register value = ToRegister(instr->value()); @@ -3693,29 +4086,9 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { } -void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand) { - if (value->representation().IsTagged() && !value->type().IsSmi()) { - if (operand->IsRegister()) { - __ And(at, ToRegister(operand), Operand(kSmiTagMask)); - DeoptimizeIf(ne, environment, at, Operand(zero_reg)); - } else { - __ li(at, ToOperand(operand)); - __ And(at, at, Operand(kSmiTagMask)); - DeoptimizeIf(ne, environment, at, Operand(zero_reg)); - } - } -} - - void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->length(), - instr->length()); - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->index(), - instr->index()); + if (instr->hydrogen()->skip_check()) return; + if (instr->index()->IsConstantOperand()) { int constant_index = ToInteger32(LConstantOperand::cast(instr->index())); @@ -3737,108 +4110,9 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { } -void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { - Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->object()); - Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; - Register scratch = scratch0(); - Register store_base = scratch; - int offset = 0; - - // Do the store. - if (instr->key()->IsConstantOperand()) { - ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); - LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); - offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + - instr->additional_index()); - store_base = elements; - } else { - // Even though the HLoadKeyedFastElement instruction forces the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsTagged()) { - __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize); - __ addu(scratch, elements, scratch); - } else { - __ sll(scratch, key, kPointerSizeLog2); - __ addu(scratch, elements, scratch); - } - offset = FixedArray::OffsetOfElementAt(instr->additional_index()); - } - __ sw(value, FieldMemOperand(store_base, offset)); - - if (instr->hydrogen()->NeedsWriteBarrier()) { - HType type = instr->hydrogen()->value()->type(); - SmiCheck check_needed = - type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; - // Compute address of modified element and store it into key register. 
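DoBoundsCheck() above is now skipped entirely when Hydrogen has proved the access safe (skip_check); otherwise it compares the index, constant or register, against the length and deoptimizes on failure. The condition it enforces, as a one-line sketch in which the unsigned compare also rejects negative indices:

    #include <cstdint>

    static bool IndexInBounds(int32_t index, int32_t length) {
      return static_cast<uint32_t>(index) < static_cast<uint32_t>(length);
    }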
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag)); - __ RecordWrite(elements, - key, - value, - kRAHasBeenSaved, - kSaveFPRegs, - EMIT_REMEMBERED_SET, - check_needed); - } -} - - -void LCodeGen::DoStoreKeyedFastDoubleElement( - LStoreKeyedFastDoubleElement* instr) { - DoubleRegister value = ToDoubleRegister(instr->value()); - Register elements = ToRegister(instr->elements()); - Register key = no_reg; - Register scratch = scratch0(); - bool key_is_constant = instr->key()->IsConstantOperand(); - int constant_key = 0; - Label not_nan; - - // Calculate the effective address of the slot in the array to store the - // double value. - if (key_is_constant) { - constant_key = ToInteger32(LConstantOperand::cast(instr->key())); - if (constant_key & 0xF0000000) { - Abort("array index constant value too big."); - } - } else { - key = ToRegister(instr->key()); - } - int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); - int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) - ? (element_size_shift - kSmiTagSize) : element_size_shift; - if (key_is_constant) { - __ Addu(scratch, elements, Operand((constant_key << element_size_shift) + - FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - } else { - __ sll(scratch, key, shift_size); - __ Addu(scratch, elements, Operand(scratch)); - __ Addu(scratch, scratch, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); - } - - if (instr->NeedsCanonicalization()) { - Label is_nan; - // Check for NaN. All NaNs must be canonicalized. - __ BranchF(NULL, &is_nan, eq, value, value); - __ Branch(¬_nan); - - // Only load canonical NaN if the comparison above set the overflow. - __ bind(&is_nan); - __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); - } - - __ bind(¬_nan); - __ sdc1(value, MemOperand(scratch, instr->additional_index() << - element_size_shift)); -} - - -void LCodeGen::DoStoreKeyedSpecializedArrayElement( - LStoreKeyedSpecializedArrayElement* instr) { - - Register external_pointer = ToRegister(instr->external_pointer()); +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { + CpuFeatureScope scope(masm(), FPU); + Register external_pointer = ToRegister(instr->elements()); Register key = no_reg; ElementsKind elements_kind = instr->elements_kind(); bool key_is_constant = instr->key()->IsConstantOperand(); @@ -3909,6 +4183,118 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( } } + +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + CpuFeatureScope scope(masm(), FPU); + DoubleRegister value = ToDoubleRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = no_reg; + Register scratch = scratch0(); + bool key_is_constant = instr->key()->IsConstantOperand(); + int constant_key = 0; + Label not_nan; + + // Calculate the effective address of the slot in the array to store the + // double value. + if (key_is_constant) { + constant_key = ToInteger32(LConstantOperand::cast(instr->key())); + if (constant_key & 0xF0000000) { + Abort("array index constant value too big."); + } + } else { + key = ToRegister(instr->key()); + } + int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); + int shift_size = (instr->hydrogen()->key()->representation().IsTagged()) + ? 
(element_size_shift - kSmiTagSize) : element_size_shift; + if (key_is_constant) { + __ Addu(scratch, elements, Operand((constant_key << element_size_shift) + + FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + } else { + __ sll(scratch, key, shift_size); + __ Addu(scratch, elements, Operand(scratch)); + __ Addu(scratch, scratch, + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + } + + if (instr->NeedsCanonicalization()) { + Label is_nan; + // Check for NaN. All NaNs must be canonicalized. + __ BranchF(NULL, &is_nan, eq, value, value); + __ Branch(¬_nan); + + // Only load canonical NaN if the comparison above set the overflow. + __ bind(&is_nan); + __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + } + + __ bind(¬_nan); + __ sdc1(value, MemOperand(scratch, instr->additional_index() << + element_size_shift)); +} + + +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { + Register value = ToRegister(instr->value()); + Register elements = ToRegister(instr->elements()); + Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) + : no_reg; + Register scratch = scratch0(); + Register store_base = scratch; + int offset = 0; + + // Do the store. + if (instr->key()->IsConstantOperand()) { + ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); + LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); + offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + + instr->additional_index()); + store_base = elements; + } else { + // Even though the HLoadKeyed instruction forces the input + // representation for the key to be an integer, the input gets replaced + // during bound check elimination with the index argument to the bounds + // check, which can be tagged, so that case must be handled here, too. + if (instr->hydrogen()->key()->representation().IsTagged()) { + __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize); + __ addu(scratch, elements, scratch); + } else { + __ sll(scratch, key, kPointerSizeLog2); + __ addu(scratch, elements, scratch); + } + offset = FixedArray::OffsetOfElementAt(instr->additional_index()); + } + __ sw(value, FieldMemOperand(store_base, offset)); + + if (instr->hydrogen()->NeedsWriteBarrier()) { + HType type = instr->hydrogen()->value()->type(); + SmiCheck check_needed = + type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; + // Compute address of modified element and store it into key register. 
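The two double-array paths in this patch cooperate around one reserved NaN bit pattern: DoStoreKeyedFixedDoubleArray above canonicalizes every NaN before storing it, so the load-side hole check (the kHoleNanUpper32 comparison in DoLoadKeyedFixedDoubleArray earlier) only ever sees that pattern for a genuine hole. A stand-alone sketch; hole_upper32 and canonical_nan stand in for V8's kHoleNanUpper32 and FixedDoubleArray::canonical_not_the_hole_nan_as_double():

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Store side: any NaN is replaced by the canonical non-hole NaN.
    static double CanonicalizeForStore(double value, double canonical_nan) {
      return std::isnan(value) ? canonical_nan : value;
    }

    // Load side: the hole must be detected on raw bits, since every numeric
    // comparison involving a NaN is false.
    static bool IsTheHole(double value, uint32_t hole_upper32) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == hole_upper32;
    }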
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag)); + __ RecordWrite(elements, + key, + value, + kRAHasBeenSaved, + kSaveFPRegs, + EMIT_REMEMBERED_SET, + check_needed); + } +} + + +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + // By cases: external, fast double + if (instr->is_external()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); + } +} + + void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(a2)); ASSERT(ToRegister(instr->key()).is(a1)); @@ -3923,31 +4309,39 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { Register object_reg = ToRegister(instr->object()); - Register new_map_reg = ToRegister(instr->new_map_temp()); Register scratch = scratch0(); Handle<Map> from_map = instr->original_map(); Handle<Map> to_map = instr->transitioned_map(); - ElementsKind from_kind = from_map->elements_kind(); - ElementsKind to_kind = to_map->elements_kind(); - - __ mov(ToRegister(instr->result()), object_reg); + ElementsKind from_kind = instr->from_kind(); + ElementsKind to_kind = instr->to_kind(); Label not_applicable; __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); __ Branch(¬_applicable, ne, scratch, Operand(from_map)); - __ li(new_map_reg, Operand(to_map)); if (IsSimpleMapChangeTransition(from_kind, to_kind)) { + Register new_map_reg = ToRegister(instr->new_map_temp()); + __ li(new_map_reg, Operand(to_map)); __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); // Write barrier. __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, scratch, kRAHasBeenSaved, kDontSaveFPRegs); + } else if (FLAG_compiled_transitions) { + PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + __ mov(a0, object_reg); + __ li(a1, Operand(to_map)); + TransitionElementsKindStub stub(from_kind, to_kind); + __ CallStub(&stub); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); } else if (IsFastSmiElementsKind(from_kind) && IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(a2)); + Register new_map_reg = ToRegister(instr->new_map_temp()); ASSERT(new_map_reg.is(a3)); + __ li(new_map_reg, Operand(to_map)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); @@ -3955,7 +4349,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(a2)); + Register new_map_reg = ToRegister(instr->new_map_temp()); ASSERT(new_map_reg.is(a3)); + __ li(new_map_reg, Operand(to_map)); __ mov(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(), RelocInfo::CODE_TARGET, instr); @@ -3966,11 +4362,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { } +void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { + Register object = ToRegister(instr->object()); + Register temp = ToRegister(instr->temp()); + Label fail; + __ TestJSArrayForAllocationSiteInfo(object, temp, ne, &fail); + DeoptimizeIf(al, instr->environment()); + __ bind(&fail); +} + + void LCodeGen::DoStringAdd(LStringAdd* 
instr) { __ push(ToRegister(instr->left())); __ push(ToRegister(instr->right())); StringAddStub stub(NO_STRING_CHECK_IN_STUB); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -4047,7 +4453,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { ASSERT(!char_code.is(result)); __ Branch(deferred->entry(), hi, - char_code, Operand(String::kMaxAsciiCharCode)); + char_code, Operand(String::kMaxOneByteCharCode)); __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); __ sll(scratch, char_code, kPointerSizeLog2); __ Addu(result, result, scratch); @@ -4083,6 +4489,7 @@ void LCodeGen::DoStringLength(LStringLength* instr) { void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { + CpuFeatureScope scope(masm(), FPU); LOperand* input = instr->value(); ASSERT(input->IsRegister() || input->IsStackSlot()); LOperand* output = instr->result(); @@ -4100,6 +4507,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { + CpuFeatureScope scope(masm(), FPU); LOperand* input = instr->value(); LOperand* output = instr->result(); @@ -4161,13 +4569,51 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { } +// Convert unsigned integer with specified number of leading zeroes in binary +// representation to IEEE 754 double. +// Integer to convert is passed in register hiword. +// Resulting double is returned in registers hiword:loword. +// This functions does not work correctly for 0. +static void GenerateUInt2Double(MacroAssembler* masm, + Register hiword, + Register loword, + Register scratch, + int leading_zeroes) { + const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; + const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; + + const int mantissa_shift_for_hi_word = + meaningful_bits - HeapNumber::kMantissaBitsInTopWord; + const int mantissa_shift_for_lo_word = + kBitsPerInt - mantissa_shift_for_hi_word; + masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); + if (mantissa_shift_for_hi_word > 0) { + masm->sll(loword, hiword, mantissa_shift_for_lo_word); + masm->srl(hiword, hiword, mantissa_shift_for_hi_word); + masm->Or(hiword, scratch, hiword); + } else { + masm->mov(loword, zero_reg); + masm->sll(hiword, hiword, mantissa_shift_for_hi_word); + masm->Or(hiword, scratch, hiword); + } + + // If least significant bit of biased exponent was not 1 it was corrupted + // by most significant bit of mantissa so we should fix that. + if (!(biased_exponent & 1)) { + masm->li(scratch, 1 << HeapNumber::kExponentShift); + masm->nor(scratch, scratch, scratch); + masm->and_(hiword, hiword, scratch); + } +} + + void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, LOperand* value, IntegerSignedness signedness) { Label slow; Register src = ToRegister(value); Register dst = ToRegister(instr->result()); - FPURegister dbl_scratch = double_scratch0(); + DoubleRegister dbl_scratch = double_scratch0(); // Preserve the value of all registers. 
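GenerateUInt2Double() above builds the IEEE-754 bits of a uint32 directly in a register pair, given how many leading zero bits the caller knows the value has. A portable check of the one-leading-zero case used just below (values in [2^30, 2^31)): meaningful_bits is 30, the biased exponent is 1023 + 30, and the mantissa is the value shifted left by 52 - 30 with the implicit bit masked off:

    #include <cstdint>
    #include <cstring>

    // Precondition: bit 30 is the highest set bit (exactly one leading zero).
    static double UInt2DoubleOneLeadingZero(uint32_t value) {
      const int meaningful_bits = 30;
      const uint64_t biased_exponent = 1023 + meaningful_bits;
      uint64_t mantissa =
          (static_cast<uint64_t>(value) << (52 - meaningful_bits)) &
          ((uint64_t{1} << 52) - 1);  // drop the implicit leading 1
      uint64_t bits = (biased_exponent << 52) | mantissa;
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;  // equals static_cast<double>(value)
    }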
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); @@ -4181,16 +4627,40 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ SmiUntag(src, dst); __ Xor(src, src, Operand(0x80000000)); } - __ mtc1(src, dbl_scratch); - __ cvt_d_w(dbl_scratch, dbl_scratch); + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + __ mtc1(src, dbl_scratch); + __ cvt_d_w(dbl_scratch, dbl_scratch); + } else { + FloatingPointHelper::Destination dest = + FloatingPointHelper::kCoreRegisters; + FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0, + sfpd_lo, sfpd_hi, + scratch0(), f2); + } } else { - __ mtc1(src, dbl_scratch); - __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22); + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + __ mtc1(src, dbl_scratch); + __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22); + } else { + Label no_leading_zero, done; + __ And(at, src, Operand(0x80000000)); + __ Branch(&no_leading_zero, ne, at, Operand(zero_reg)); + + // Integer has one leading zeros. + GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 1); + __ Branch(&done); + + __ bind(&no_leading_zero); + GenerateUInt2Double(masm(), sfpd_hi, sfpd_lo, t0, 0); + __ Branch(&done); + } } if (FLAG_inline_new) { - __ LoadRoot(t2, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(t1, a3, t0, t2, &slow); + __ LoadRoot(scratch0(), Heap::kHeapNumberMapRootIndex); + __ AllocateHeapNumber(t1, a3, t0, scratch0(), &slow, DONT_TAG_RESULT); __ Move(dst, t1); __ Branch(&done); } @@ -4204,11 +4674,19 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr, __ StoreToSafepointRegisterSlot(zero_reg, dst); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); __ Move(dst, v0); + __ Subu(dst, dst, kHeapObjectTag); // Done. Put the value in dbl_scratch into the value of the allocated heap // number. 
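Several hunks here switch from FieldMemOperand to MemOperand because AllocateHeapNumber is now called with DONT_TAG_RESULT: the allocator returns the raw address, the stores go through MemOperand, and kHeapObjectTag is added once at the end (or subtracted from a tagged runtime result, as in the Subu above). A sketch of the equivalence being relied on; the value 1 matches V8's kHeapObjectTag:

    #include <cstdint>

    const intptr_t kHeapObjectTagSketch = 1;

    // FieldMemOperand(obj, offset) on a tagged pointer ...
    static intptr_t TaggedFieldAddress(intptr_t tagged, int offset) {
      return tagged + offset - kHeapObjectTagSketch;
    }

    // ... addresses the same byte as MemOperand(obj, offset) on the raw
    // (untagged) pointer handed back under DONT_TAG_RESULT.
    static intptr_t UntaggedFieldAddress(intptr_t untagged, int offset) {
      return untagged + offset;
    }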
__ bind(&done); - __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset)); + } else { + __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset)); + __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset)); + } + __ Addu(dst, dst, kHeapObjectTag); __ StoreToSafepointRegisterSlot(dst, dst); } @@ -4230,15 +4708,72 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Register temp1 = ToRegister(instr->temp()); Register temp2 = ToRegister(instr->temp2()); + bool convert_hole = false; + HValue* change_input = instr->hydrogen()->value(); + if (change_input->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(change_input); + convert_hole = load->UsesMustHandleHole(); + } + + Label no_special_nan_handling; + Label done; + if (convert_hole) { + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + DoubleRegister input_reg = ToDoubleRegister(instr->value()); + __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg); + __ Move(reg, scratch0(), input_reg); + Label canonicalize; + __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32)); + __ li(reg, factory()->the_hole_value()); + __ Branch(&done); + __ bind(&canonicalize); + __ Move(input_reg, + FixedDoubleArray::canonical_not_the_hole_nan_as_double()); + } else { + Label not_hole; + __ Branch(¬_hole, ne, sfpd_hi, Operand(kHoleNanUpper32)); + __ li(reg, factory()->the_hole_value()); + __ Branch(&done); + __ bind(¬_hole); + __ And(scratch, sfpd_hi, Operand(0x7ff00000)); + __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000)); + Label special_nan_handling; + __ And(at, sfpd_hi, Operand(0x000FFFFF)); + __ Branch(&special_nan_handling, ne, at, Operand(zero_reg)); + __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg)); + __ bind(&special_nan_handling); + double canonical_nan = + FixedDoubleArray::canonical_not_the_hole_nan_as_double(); + uint64_t casted_nan = BitCast<uint64_t>(canonical_nan); + __ li(sfpd_lo, + Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF))); + __ li(sfpd_hi, + Operand(static_cast<uint32_t>(casted_nan >> 32))); + } + } + + __ bind(&no_special_nan_handling); DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); + // We want the untagged address first for performance + __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), + DONT_TAG_RESULT); } else { __ Branch(deferred->entry()); } __ bind(deferred->exit()); - __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); + if (CpuFeatures::IsSupported(FPU)) { + CpuFeatureScope scope(masm(), FPU); + __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset)); + } else { + __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset)); + __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize)); + } + // Now that we have finished with the object's real address tag it + __ Addu(reg, reg, kHeapObjectTag); + __ bind(&done); } @@ -4251,6 +4786,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr); + __ Subu(v0, v0, kHeapObjectTag); __ StoreToSafepointRegisterSlot(v0, reg); } @@ -4281,42 +4817,57 @@ void 
LCodeGen::EmitNumberUntagD(Register input_reg, DoubleRegister result_reg, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, - LEnvironment* env) { + LEnvironment* env, + NumberUntagDMode mode) { Register scratch = scratch0(); + CpuFeatureScope scope(masm(), FPU); Label load_smi, heap_number, done; - // Smi check. - __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + // Smi check. + __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); - // Heap number map check. - __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); - __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); - if (deoptimize_on_undefined) { - DeoptimizeIf(ne, env, scratch, Operand(at)); - } else { - Label heap_number; - __ Branch(&heap_number, eq, scratch, Operand(at)); + // Heap number map check. + __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + if (deoptimize_on_undefined) { + DeoptimizeIf(ne, env, scratch, Operand(at)); + } else { + Label heap_number; + __ Branch(&heap_number, eq, scratch, Operand(at)); - __ LoadRoot(at, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(ne, env, input_reg, Operand(at)); + __ LoadRoot(at, Heap::kUndefinedValueRootIndex); + DeoptimizeIf(ne, env, input_reg, Operand(at)); - // Convert undefined to NaN. - __ LoadRoot(at, Heap::kNanValueRootIndex); - __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset)); - __ Branch(&done); + // Convert undefined to NaN. + __ LoadRoot(at, Heap::kNanValueRootIndex); + __ ldc1(result_reg, FieldMemOperand(at, HeapNumber::kValueOffset)); + __ Branch(&done); - __ bind(&heap_number); - } - // Heap number to double register conversion. - __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - __ mfc1(at, result_reg.low()); - __ Branch(&done, ne, at, Operand(zero_reg)); - __ mfc1(scratch, result_reg.high()); - DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask)); + __ bind(&heap_number); + } + // Heap number to double register conversion. + __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { + __ mfc1(at, result_reg.low()); + __ Branch(&done, ne, at, Operand(zero_reg)); + __ mfc1(scratch, result_reg.high()); + DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask)); + } + __ Branch(&done); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { + __ SmiUntag(scratch, input_reg); + DeoptimizeIf(Ugreater_equal, env, scratch, Operand(zero_reg)); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { + __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); + __ Move(result_reg, + FixedDoubleArray::hole_nan_as_double()); + __ Branch(&done); + } else { + __ SmiUntag(scratch, input_reg); + ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } - __ Branch(&done); // Smi to double register conversion __ bind(&load_smi); @@ -4332,7 +4883,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->temp()); DoubleRegister double_scratch = double_scratch0(); - FPURegister single_scratch = double_scratch.low(); + DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3()); ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2)); ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1)); @@ -4347,8 +4898,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { // of the if. 
if (instr->truncating()) { + CpuFeatureScope scope(masm(), FPU); Register scratch3 = ToRegister(instr->temp2()); - DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3()); + FPURegister single_scratch = double_scratch.low(); ASSERT(!scratch3.is(input_reg) && !scratch3.is(scratch1) && !scratch3.is(scratch2)); @@ -4383,18 +4935,16 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { Register except_flag = scratch2; __ EmitFPUTruncate(kRoundToZero, - single_scratch, + input_reg, double_scratch, scratch1, + double_scratch2, except_flag, kCheckForInexactConversion); // Deopt if the operation did not succeed. DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); - // Load the result. - __ mfc1(input_reg, single_scratch); - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { __ Branch(&done, ne, input_reg, Operand(zero_reg)); @@ -4444,10 +4994,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { Register input_reg = ToRegister(input); DoubleRegister result_reg = ToDoubleRegister(result); + NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; + HValue* value = instr->hydrogen()->value(); + if (value->type().IsSmi()) { + if (value->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(value); + if (load->UsesMustHandleHole()) { + if (load->hole_mode() == ALLOW_RETURN_HOLE) { + mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; + } else { + mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; + } + } else { + mode = NUMBER_CANDIDATE_IS_SMI; + } + } + } + EmitNumberUntagD(input_reg, result_reg, instr->hydrogen()->deoptimize_on_undefined(), instr->hydrogen()->deoptimize_on_minus_zero(), - instr->environment()); + instr->environment(), + mode); } @@ -4456,10 +5024,10 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { Register scratch1 = scratch0(); Register scratch2 = ToRegister(instr->temp()); DoubleRegister double_input = ToDoubleRegister(instr->value()); - FPURegister single_scratch = double_scratch0().low(); if (instr->truncating()) { Register scratch3 = ToRegister(instr->temp2()); + FPURegister single_scratch = double_scratch0().low(); __ EmitECMATruncate(result_reg, double_input, single_scratch, @@ -4470,17 +5038,15 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { Register except_flag = scratch2; __ EmitFPUTruncate(kRoundToMinusInf, - single_scratch, + result_reg, double_input, scratch1, + double_scratch0(), except_flag, kCheckForInexactConversion); // Deopt if the operation did not succeed (except_flag != 0). DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg)); - - // Load the result. 
- __ mfc1(result_reg, single_scratch); } } @@ -4556,37 +5122,38 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) { } -void LCodeGen::DoCheckMapCommon(Register reg, - Register scratch, +void LCodeGen::DoCheckMapCommon(Register map_reg, Handle<Map> map, CompareMapMode mode, LEnvironment* env) { Label success; - __ CompareMapAndBranch(reg, scratch, map, &success, eq, &success, mode); + __ CompareMapAndBranch(map_reg, map, &success, eq, &success, mode); DeoptimizeIf(al, env); __ bind(&success); } void LCodeGen::DoCheckMaps(LCheckMaps* instr) { - Register scratch = scratch0(); + Register map_reg = scratch0(); LOperand* input = instr->value(); ASSERT(input->IsRegister()); Register reg = ToRegister(input); Label success; SmallMapList* map_set = instr->hydrogen()->map_set(); + __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); for (int i = 0; i < map_set->length() - 1; i++) { Handle<Map> map = map_set->at(i); __ CompareMapAndBranch( - reg, scratch, map, &success, eq, &success, REQUIRE_EXACT_MAP); + map_reg, map, &success, eq, &success, REQUIRE_EXACT_MAP); } Handle<Map> map = map_set->last(); - DoCheckMapCommon(reg, scratch, map, REQUIRE_EXACT_MAP, instr->environment()); + DoCheckMapCommon(map_reg, map, REQUIRE_EXACT_MAP, instr->environment()); __ bind(&success); } void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { + CpuFeatureScope vfp_scope(masm(), FPU); DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); @@ -4595,6 +5162,7 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { + CpuFeatureScope vfp_scope(masm(), FPU); Register unclamped_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); __ ClampUint8(result_reg, unclamped_reg); @@ -4602,6 +5170,7 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) { void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { + CpuFeatureScope vfp_scope(masm(), FPU); Register scratch = scratch0(); Register input_reg = ToRegister(instr->unclamped()); Register result_reg = ToRegister(instr->result()); @@ -4637,30 +5206,31 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { - Register temp1 = ToRegister(instr->temp()); - Register temp2 = ToRegister(instr->temp2()); + ASSERT(instr->temp()->Equals(instr->result())); + Register prototype_reg = ToRegister(instr->temp()); + Register map_reg = ToRegister(instr->temp2()); - Handle<JSObject> holder = instr->holder(); - Handle<JSObject> current_prototype = instr->prototype(); + ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); + ZoneList<Handle<Map> >* maps = instr->maps(); - // Load prototype object. - __ LoadHeapObject(temp1, current_prototype); + ASSERT(prototypes->length() == maps->length()); - // Check prototype maps up to the holder. - while (!current_prototype.is_identical_to(holder)) { - DoCheckMapCommon(temp1, temp2, - Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); - current_prototype = - Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); - // Load next prototype object. 
- __ LoadHeapObject(temp1, current_prototype); + if (instr->hydrogen()->CanOmitPrototypeChecks()) { + for (int i = 0; i < maps->length(); i++) { + prototype_maps_.Add(maps->at(i), info()->zone()); + } + __ LoadHeapObject(prototype_reg, + prototypes->at(prototypes->length() - 1)); + } else { + for (int i = 0; i < prototypes->length(); i++) { + __ LoadHeapObject(prototype_reg, prototypes->at(i)); + __ lw(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset)); + DoCheckMapCommon(map_reg, + maps->at(i), + ALLOW_ELEMENT_TRANSITION_MAPS, + instr->environment()); + } } - - // Check the holder map. - DoCheckMapCommon(temp1, temp2, - Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); } @@ -4692,12 +5262,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { // the constructor's prototype changes, but instance size and property // counts remain unchanged (if slack tracking finished). ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); - __ AllocateInNewSpace(instance_size, - result, - scratch, - scratch2, - deferred->entry(), - TAG_OBJECT); + __ Allocate(instance_size, result, scratch, scratch2, deferred->entry(), + TAG_OBJECT); __ bind(deferred->exit()); if (FLAG_debug_code) { @@ -4747,10 +5313,72 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { } +void LCodeGen::DoAllocate(LAllocate* instr) { + class DeferredAllocate: public LDeferredCode { + public: + DeferredAllocate(LCodeGen* codegen, LAllocate* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LAllocate* instr_; + }; + + DeferredAllocate* deferred = + new(zone()) DeferredAllocate(this, instr); + + Register result = ToRegister(instr->result()); + Register scratch = ToRegister(instr->temp1()); + Register scratch2 = ToRegister(instr->temp2()); + + // Allocate memory for the object. + AllocationFlags flags = TAG_OBJECT; + if (instr->hydrogen()->MustAllocateDoubleAligned()) { + flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); + } + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); + } + __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); + } else { + Register size = ToRegister(instr->size()); + __ AllocateInNewSpace(size, + result, + scratch, + scratch2, + deferred->entry(), + flags); + } + + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredAllocate(LAllocate* instr) { + Register size = ToRegister(instr->size()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. 
+ __ mov(result, zero_reg); + + PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); + __ SmiTag(size, size); + __ push(size); + CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); + __ StoreToSafepointRegisterSlot(v0, result); +} + + void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { Handle<FixedArray> literals(instr->environment()->closure()->literals()); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); + AllocationSiteMode allocation_site_mode = + instr->hydrogen()->allocation_site_mode(); // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has @@ -4784,8 +5412,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { ASSERT(instr->hydrogen()->depth() == 1); FastCloneShallowArrayStub::Mode mode = FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else if (instr->hydrogen()->depth() > 1) { CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { @@ -4793,10 +5421,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { } else { FastCloneShallowArrayStub::Mode mode = boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS - ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS + : FastCloneShallowArrayStub::CLONE_ELEMENTS; + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -4804,10 +5432,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset) { + int* offset, + AllocationSiteMode mode) { ASSERT(!source.is(a2)); ASSERT(!result.is(a2)); + bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE && + object->map()->CanTrackAllocationSite(); + // Only elements backing stores for non-COW arrays need to be copied. Handle<FixedArrayBase> elements(object->elements()); bool has_elements = elements->length() > 0 && @@ -4817,8 +5449,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // this object and its backing store. int object_offset = *offset; int object_size = object->map()->instance_size(); - int elements_offset = *offset + object_size; int elements_size = has_elements ? elements->Size() : 0; + int elements_offset = *offset + object_size; + if (create_allocation_site_info) { + elements_offset += AllocationSiteInfo::kSize; + *offset += AllocationSiteInfo::kSize; + } + *offset += object_size + elements_size; // Copy object header. @@ -4837,13 +5474,15 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // Copy in-object properties. 
for (int i = 0; i < inobject_properties; i++) { int total_offset = object_offset + object->GetInObjectPropertyOffset(i); - Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); + Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i), + isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ Addu(a2, result, Operand(*offset)); __ sw(a2, FieldMemOperand(result, total_offset)); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(a2, Handle<HeapObject>::cast(value)); __ sw(a2, FieldMemOperand(result, total_offset)); @@ -4853,6 +5492,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, } } + // Build Allocation Site Info if desired + if (create_allocation_site_info) { + __ li(a2, Operand(Handle<Map>(isolate()->heap()-> + allocation_site_info_map()))); + __ sw(a2, FieldMemOperand(result, object_size)); + __ sw(source, FieldMemOperand(result, object_size + kPointerSize)); + } if (has_elements) { // Copy elements backing store header. @@ -4883,13 +5529,14 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); for (int i = 0; i < elements_length; i++) { int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); - Handle<Object> value(fast_elements->get(i)); + Handle<Object> value(fast_elements->get(i), isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ Addu(a2, result, Operand(*offset)); __ sw(a2, FieldMemOperand(result, total_offset)); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(a2, Handle<HeapObject>::cast(value)); __ sw(a2, FieldMemOperand(result, total_offset)); @@ -4929,7 +5576,7 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { // Allocate all objects that are part of the literal in one big // allocation. This avoids multiple limit checks. Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -4940,7 +5587,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { __ bind(&allocated); int offset = 0; __ LoadHeapObject(a1, instr->hydrogen()->boilerplate()); - EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset); + EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset, + instr->hydrogen()->allocation_site_mode()); ASSERT_EQ(size, offset); } @@ -4952,25 +5600,26 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { instr->hydrogen()->constant_properties(); // Set up the parameters to the stub/runtime call. - __ LoadHeapObject(t0, literals); - __ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); - __ li(a2, Operand(constant_properties)); + __ LoadHeapObject(a3, literals); + __ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); + __ li(a1, Operand(constant_properties)); int flags = instr->hydrogen()->fast_elements() ? 
ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags; - __ li(a1, Operand(Smi::FromInt(flags))); - __ Push(t0, a3, a2, a1); + __ li(a0, Operand(Smi::FromInt(flags))); // Pick the right runtime function or stub to call. int properties_count = constant_properties->length() / 2; if (instr->hydrogen()->depth() > 1) { + __ Push(a3, a2, a1, a0); CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); } else if (flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ Push(a3, a2, a1, a0); CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); } else { FastCloneShallowObjectStub stub(properties_count); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -5010,7 +5659,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -5044,7 +5693,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { FastNewClosureStub stub(shared_info->language_mode()); __ li(a1, Operand(shared_info)); __ push(a1); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { __ li(a2, Operand(shared_info)); __ li(a1, Operand(pretenure @@ -5101,7 +5750,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, // register. Condition final_branch_condition = kNoCondition; Register scratch = scratch0(); - if (type_name->Equals(heap()->number_symbol())) { + if (type_name->Equals(heap()->number_string())) { __ JumpIfSmi(input, true_label); __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset)); __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); @@ -5109,7 +5758,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, cmp2 = Operand(at); final_branch_condition = eq; - } else if (type_name->Equals(heap()->string_symbol())) { + } else if (type_name->Equals(heap()->string_string())) { __ JumpIfSmi(input, false_label); __ GetObjectType(input, input, scratch); __ Branch(USE_DELAY_SLOT, false_label, @@ -5122,7 +5771,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, cmp2 = Operand(zero_reg); final_branch_condition = eq; - } else if (type_name->Equals(heap()->boolean_symbol())) { + } else if (type_name->Equals(heap()->boolean_string())) { __ LoadRoot(at, Heap::kTrueValueRootIndex); __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); __ LoadRoot(at, Heap::kFalseValueRootIndex); @@ -5130,13 +5779,13 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, cmp2 = Operand(input); final_branch_condition = eq; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) { + } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { __ LoadRoot(at, Heap::kNullValueRootIndex); cmp1 = at; cmp2 = Operand(input); final_branch_condition = eq; - } else if (type_name->Equals(heap()->undefined_symbol())) { + } else if (type_name->Equals(heap()->undefined_string())) { __ LoadRoot(at, Heap::kUndefinedValueRootIndex); __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); // The first instruction of JumpIfSmi is an And - it is safe in the delay @@ -5150,7 +5799,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, cmp2 = Operand(zero_reg); 
final_branch_condition = ne; - } else if (type_name->Equals(heap()->function_symbol())) { + } else if (type_name->Equals(heap()->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); __ GetObjectType(input, scratch, input); @@ -5159,16 +5808,26 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, cmp2 = Operand(JS_FUNCTION_PROXY_TYPE); final_branch_condition = eq; - } else if (type_name->Equals(heap()->object_symbol())) { + } else if (type_name->Equals(heap()->object_string())) { __ JumpIfSmi(input, false_label); if (!FLAG_harmony_typeof) { __ LoadRoot(at, Heap::kNullValueRootIndex); __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input)); } - // input is an object, it is safe to use GetObjectType in the delay slot. - __ GetObjectType(input, input, scratch); - __ Branch(USE_DELAY_SLOT, false_label, - lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + if (FLAG_harmony_symbols) { + // input is an object, it is safe to use GetObjectType in the delay slot. + __ GetObjectType(input, input, scratch); + __ Branch(USE_DELAY_SLOT, true_label, eq, scratch, Operand(SYMBOL_TYPE)); + // Still an object, so the InstanceType can be loaded. + __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset)); + __ Branch(USE_DELAY_SLOT, false_label, + lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + } else { + // input is an object, it is safe to use GetObjectType in the delay slot. + __ GetObjectType(input, input, scratch); + __ Branch(USE_DELAY_SLOT, false_label, + lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); + } // Still an object, so the InstanceType can be loaded. __ lbu(scratch, FieldMemOperand(input, Map::kInstanceTypeOffset)); __ Branch(USE_DELAY_SLOT, false_label, @@ -5222,6 +5881,7 @@ void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { void LCodeGen::EnsureSpaceForLazyDeopt() { + if (info()->IsStub()) return; // Ensure that we have enough space after the previous lazy-bailout // instruction for patching the code here. int current_pc = masm()->pc_offset(); @@ -5252,6 +5912,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) { } +void LCodeGen::DoDummyUse(LDummyUse* instr) { + // Nothing to see here, move on! 
+} + + void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { Register object = ToRegister(instr->object()); Register key = ToRegister(instr->key()); @@ -5311,7 +5976,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ LoadRoot(at, Heap::kStackLimitRootIndex); __ Branch(&done, hs, sp, Operand(at)); StackCheckStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); EnsureSpaceForLazyDeopt(); __ bind(&done); RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt); diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h index 38c5255a4b..1d2a65912a 100644 --- a/deps/v8/src/mips/lithium-codegen-mips.h +++ b/deps/v8/src/mips/lithium-codegen-mips.h @@ -54,6 +54,7 @@ class LCodeGen BASE_EMBEDDED { deoptimizations_(4, info->zone()), deopt_jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), + prototype_maps_(0, info->zone()), inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), @@ -61,6 +62,7 @@ class LCodeGen BASE_EMBEDDED { deferred_(8, info->zone()), osr_pc_offset_(-1), last_lazy_deopt_pc_(0), + frame_is_built_(false), safepoints_(info->zone()), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { @@ -76,6 +78,15 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + bool NeedsEagerFrame() const { + return GetStackSlotCount() > 0 || + info()->is_non_deferred_calling() || + !info()->IsStub(); + } + bool NeedsDeferredFrame() const { + return !NeedsEagerFrame() && info()->is_deferred_calling(); + } + // Support for converting LOperands to assembler types. // LOperand must be a register. Register ToRegister(LOperand* op) const; @@ -123,10 +134,11 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredAllocateObject(LAllocateObject* instr); + void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); - void DoCheckMapCommon(Register reg, Register scratch, Handle<Map> map, + void DoCheckMapCommon(Register map_reg, Handle<Map> map, CompareMapMode mode, LEnvironment* env); // Parallel move support. @@ -189,7 +201,6 @@ class LCodeGen BASE_EMBEDDED { Register temporary2); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - int GetParameterCount() const { return scope()->num_parameters(); } void Abort(const char* reason); void Comment(const char* format, ...); @@ -262,8 +273,10 @@ class LCodeGen BASE_EMBEDDED { LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count); + void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code); void PopulateDeoptimizationData(Handle<Code> code); int DefineDeoptimizationLiteral(Handle<Object> literal); @@ -316,11 +329,8 @@ class LCodeGen BASE_EMBEDDED { DoubleRegister result, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, - LEnvironment* env); - - void DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand); + LEnvironment* env, + NumberUntagDMode mode); // Emits optimized code for typeof x == "y". Modifies input register. 
// Returns the condition on which a final split to @@ -365,17 +375,28 @@ class LCodeGen BASE_EMBEDDED { void EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset); + int* offset, + AllocationSiteMode mode); struct JumpTableEntry { - explicit inline JumpTableEntry(Address entry) + inline JumpTableEntry(Address entry, bool frame, bool is_lazy) : label(), - address(entry) { } + address(entry), + needs_frame(frame), + is_lazy_deopt(is_lazy) { } Label label; Address address; + bool needs_frame; + bool is_lazy_deopt; }; void EnsureSpaceForLazyDeopt(); + void DoLoadKeyedExternalArray(LLoadKeyed* instr); + void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); + void DoLoadKeyedFixedArray(LLoadKeyed* instr); + void DoStoreKeyedExternalArray(LStoreKeyed* instr); + void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); + void DoStoreKeyedFixedArray(LStoreKeyed* instr); Zone* zone_; LPlatformChunk* const chunk_; @@ -388,6 +409,7 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LEnvironment*> deoptimizations_; ZoneList<JumpTableEntry> deopt_jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; + ZoneList<Handle<Map> > prototype_maps_; int inlined_function_count_; Scope* const scope_; Status status_; @@ -395,6 +417,7 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; int last_lazy_deopt_pc_; + bool frame_is_built_; // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. @@ -410,6 +433,7 @@ class LCodeGen BASE_EMBEDDED { PushSafepointRegistersScope(LCodeGen* codegen, Safepoint::Kind kind) : codegen_(codegen) { + ASSERT(codegen_->info()->is_calling()); ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); codegen_->expected_safepoint_kind_ = kind; diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc index 87efae5f4d..b415156730 100644 --- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc +++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc @@ -172,8 +172,10 @@ void LGapResolver::BreakCycle(int index) { } else if (source->IsStackSlot()) { __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source)); } else if (source->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), FPU); __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source)); } else if (source->IsDoubleStackSlot()) { + CpuFeatureScope scope(cgen_->masm(), FPU); __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source)); } else { UNREACHABLE(); @@ -193,9 +195,11 @@ void LGapResolver::RestoreValue() { } else if (saved_destination_->IsStackSlot()) { __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_)); } else if (saved_destination_->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), FPU); __ mov_d(cgen_->ToDoubleRegister(saved_destination_), kLithiumScratchDouble); } else if (saved_destination_->IsDoubleStackSlot()) { + CpuFeatureScope scope(cgen_->masm(), FPU); __ sdc1(kLithiumScratchDouble, cgen_->ToMemOperand(saved_destination_)); } else { @@ -232,6 +236,7 @@ void LGapResolver::EmitMove(int index) { MemOperand destination_operand = cgen_->ToMemOperand(destination); if (in_cycle_) { if (!destination_operand.OffsetIsInt16Encodable()) { + CpuFeatureScope scope(cgen_->masm(), FPU); // 'at' is overwritten while saving the value to the destination. // Therefore we can't use 'at'. It is OK if the read from the source // destroys 'at', since that happens before the value is read. 
@@ -271,6 +276,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleRegister()) { + CpuFeatureScope scope(cgen_->masm(), FPU); DoubleRegister source_register = cgen_->ToDoubleRegister(source); if (destination->IsDoubleRegister()) { __ mov_d(cgen_->ToDoubleRegister(destination), source_register); @@ -281,6 +287,7 @@ void LGapResolver::EmitMove(int index) { } } else if (source->IsDoubleStackSlot()) { + CpuFeatureScope scope(cgen_->masm(), FPU); MemOperand source_operand = cgen_->ToMemOperand(source); if (destination->IsDoubleRegister()) { __ ldc1(cgen_->ToDoubleRegister(destination), source_operand); diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc index 0b6dcaea51..8848032b64 100644 --- a/deps/v8/src/mips/lithium-mips.cc +++ b/deps/v8/src/mips/lithium-mips.cc @@ -42,10 +42,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) #undef DEFINE_COMPILE LOsrEntry::LOsrEntry() { - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { register_spills_[i] = NULL; } - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { double_register_spills_[i] = NULL; } } @@ -112,7 +112,11 @@ void LInstruction::PrintDataTo(StringStream* stream) { stream->Add("= "); for (int i = 0; i < InputCount(); i++) { if (i > 0) stream->Add(" "); - InputAt(i)->PrintTo(stream); + if (InputAt(i) == NULL) { + stream->Add("NULL"); + } else { + InputAt(i)->PrintTo(stream); + } } } @@ -177,6 +181,7 @@ const char* LArithmeticT::Mnemonic() const { case Token::BIT_AND: return "bit-and-t"; case Token::BIT_OR: return "bit-or-t"; case Token::BIT_XOR: return "bit-xor-t"; + case Token::ROR: return "ror-t"; case Token::SHL: return "sll-t"; case Token::SAR: return "sra-t"; case Token::SHR: return "srl-t"; @@ -285,6 +290,13 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { } +void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { + stream->Add(" = "); + base_object()->PrintTo(stream); + stream->Add(" + %d", offset()); +} + + void LCallConstantFunction::PrintDataTo(StringStream* stream) { stream->Add("#%d / ", arity()); } @@ -296,6 +308,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) { } +void LMathExp::PrintDataTo(StringStream* stream) { + value()->PrintTo(stream); +} + + void LLoadContextSlot::PrintDataTo(StringStream* stream) { context()->PrintTo(stream); stream->Add("[%d]", slot_index()); @@ -345,6 +362,17 @@ void LCallNew::PrintDataTo(StringStream* stream) { } +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ASSERT(hydrogen()->property_cell()->value()->IsSmi()); + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(hydrogen()->property_cell()->value())->value()); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); stream->Add(" length "); @@ -372,20 +400,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { } -void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); +void LLoadKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); - value()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d]", additional_index()); + } else { + 
stream->Add("]"); + } } -void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { +void LStoreKeyed::PrintDataTo(StringStream* stream) { elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d] <-", additional_index()); + } else { + stream->Add("] <- "); + } value()->PrintTo(stream); } @@ -599,6 +634,7 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { + info()->MarkAsNonDeferredCalling(); #ifdef DEBUG instr->VerifyCall(); #endif @@ -639,8 +675,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { LUnallocated* LChunkBuilder::TempRegister() { LUnallocated* operand = new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - operand->set_virtual_register(allocator_->GetVirtualRegister()); - if (!allocator_->AllocationOk()) Abort("Not enough virtual registers."); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort("Out of virtual registers while trying to allocate temp register."); + return NULL; + } + operand->set_virtual_register(vreg); return operand; } @@ -664,6 +704,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { } +LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { + return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); +} + + LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) { return AssignEnvironment(new(zone()) LDeoptimize); } @@ -702,15 +747,13 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op, right = UseRegisterAtStart(right_value); } + // Shift operations can only deoptimize if we do a logical shift + // by 0 and the result cannot be truncated to int32. bool does_deopt = false; - - if (FLAG_opt_safe_uint32_operations) { - does_deopt = !instr->CheckFlag(HInstruction::kUint32); - } else { - // Shift operations can only deoptimize if we do a logical shift - // by 0 and the result cannot be truncated to int32. - bool may_deopt = (op == Token::SHR && constant_value == 0); - if (may_deopt) { + if (op == Token::SHR && constant_value == 0) { + if (FLAG_opt_safe_uint32_operations) { + does_deopt = !instr->CheckFlag(HInstruction::kUint32); + } else { for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) { if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) { does_deopt = true; @@ -896,7 +939,7 @@ LInstruction* LChunkBuilder::DoGoto(HGoto* instr) { LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { HValue* value = instr->value(); if (value->EmitAtUses()) { - HBasicBlock* successor = HConstant::cast(value)->ToBoolean() + HBasicBlock* successor = HConstant::cast(value)->BooleanValue() ? 
instr->FirstSuccessor() : instr->SecondSuccessor(); return new(zone()) LGoto(successor->block_id()); @@ -950,6 +993,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( } +LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) { + LOperand* object = UseRegisterAtStart(instr->object()); + return DefineAsRegister(new(zone()) LInstanceSize(object)); +} + + LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { LOperand* receiver = UseRegisterAtStart(instr->receiver()); LOperand* function = UseRegisterAtStart(instr->function()); @@ -978,6 +1027,15 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { } +LInstruction* LChunkBuilder::DoInnerAllocatedObject( + HInnerAllocatedObject* inner_object) { + LOperand* base_object = UseRegisterAtStart(inner_object->base_object()); + LInnerAllocatedObject* result = + new(zone()) LInnerAllocatedObject(base_object); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { return instr->HasNoUses() ? NULL @@ -986,7 +1044,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { LInstruction* LChunkBuilder::DoContext(HContext* instr) { - return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext); + // If there is a non-return use, the context must be allocated in a register. + for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) { + if (!it.value()->IsReturn()) { + return DefineAsRegister(new(zone()) LContext); + } + } + + return NULL; } @@ -1034,6 +1099,15 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LOperand* input = UseFixedDouble(instr->value(), f4); LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL); return MarkAsCall(DefineFixedDouble(result, f4), instr); + } else if (op == kMathExp) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->value()->representation().IsDouble()); + LOperand* input = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll. + LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2); + return DefineAsRegister(result); } else if (op == kMathPowHalf) { // Input cannot be the same as the result. // See lithium-codegen-mips.cc::DoMathPowHalf. @@ -1043,7 +1117,9 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { return DefineFixedDouble(result, f4); } else { LOperand* input = UseRegisterAtStart(instr->value()); - LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL; + + LOperand* temp = (op == kMathRound) ? FixedTemp(f6) : + (op == kMathFloor) ? 
TempRegister() : NULL; LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp); switch (op) { case kMathAbs: @@ -1096,6 +1172,14 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { } +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + LOperand* constructor = UseFixed(instr->constructor(), a1); + argument_count_ -= instr->argument_count(); + LCallNewArray* result = new(zone()) LCallNewArray(constructor); + return MarkAsCall(DefineFixed(result, v0), instr); +} + + LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* function = UseFixed(instr->function(), a1); argument_count_ -= instr->argument_count(); @@ -1110,6 +1194,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { } +LInstruction* LChunkBuilder::DoRor(HRor* instr) { + return DoShift(Token::ROR, instr); +} + + LInstruction* LChunkBuilder::DoShr(HShr* instr) { return DoShift(Token::SHR, instr); } @@ -1244,8 +1333,22 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) { return DefineAsRegister(mul); } else if (instr->representation().IsDouble()) { + if (kArchVariant == kMips32r2) { + if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) { + HAdd* add = HAdd::cast(instr->uses().value()); + if (instr == add->left()) { + // This mul is the lhs of an add. The add and mul will be folded + // into a multiply-add. + return NULL; + } + if (instr == add->right() && !add->left()->IsMul()) { + // This mul is the rhs of an add, where the lhs is not another mul. + // The add and mul will be folded into a multiply-add. + return NULL; + } + } + } return DoArithmeticD(Token::MUL, instr); - } else { return DoArithmeticT(Token::MUL, instr); } @@ -1272,6 +1375,15 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) { } +LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) { + LOperand* multiplier_op = UseRegisterAtStart(mul->left()); + LOperand* multiplicand_op = UseRegisterAtStart(mul->right()); + LOperand* addend_op = UseRegisterAtStart(addend); + return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op, + multiplicand_op)); +} + + LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { if (instr->representation().IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); @@ -1285,6 +1397,15 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) { } return result; } else if (instr->representation().IsDouble()) { + if (kArchVariant == kMips32r2) { + if (instr->left()->IsMul()) + return DoMultiplyAdd(HMul::cast(instr->left()), instr->right()); + + if (instr->right()->IsMul()) { + ASSERT(!instr->left()->IsMul()); + return DoMultiplyAdd(HMul::cast(instr->right()), instr->left()); + } + } return DoArithmeticD(Token::ADD, instr); } else { ASSERT(instr->representation().IsTagged()); @@ -1350,7 +1471,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { LInstruction* LChunkBuilder::DoCompareIDAndBranch( HCompareIDAndBranch* instr) { - Representation r = instr->GetInputRepresentation(); + Representation r = instr->representation(); if (r.IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); @@ -1504,6 +1625,27 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { } +LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegister(instr->index()); + LOperand* value = UseRegister(instr->value()); + LSeqStringSetChar* result = + 
new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); + return DefineAsRegister(result); +} + + +LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) { + return NULL; +} + + +LInstruction* LChunkBuilder::DoInductionVariableAnnotation( + HInductionVariableAnnotation* instr) { + return NULL; +} + + LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { LOperand* value = UseRegisterOrConstantAtStart(instr->index()); LOperand* length = UseRegister(instr->length()); @@ -1511,6 +1653,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { } +LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( + HBoundsCheckBaseIndexInformation* instr) { + UNREACHABLE(); + return NULL; +} + + LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { // The control instruction marking the end of a block that completed // abruptly (e.g., threw an exception). There is nothing specific to do. @@ -1542,6 +1691,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation to = instr->to(); if (from.IsTagged()) { if (to.IsDouble()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LNumberUntagD* res = new(zone()) LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); @@ -1555,8 +1705,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { LOperand* temp1 = TempRegister(); LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister() : NULL; - LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22) - : NULL; + LOperand* temp3 = FixedTemp(f22); res = DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2, @@ -1567,6 +1716,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } } else if (from.IsDouble()) { if (to.IsTagged()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LOperand* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); @@ -1586,6 +1736,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { return AssignEnvironment(DefineAsRegister(res)); } } else if (from.IsInteger32()) { + info()->MarkAsDeferredCalling(); if (to.IsTagged()) { HValue* val = instr->value(); LOperand* value = UseRegisterAtStart(val); @@ -1628,10 +1779,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { - LOperand* temp1 = TempRegister(); + LUnallocated* temp1 = TempRegister(); LOperand* temp2 = TempRegister(); - LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); - return AssignEnvironment(result); + LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2); + return AssignEnvironment(Define(result, temp1)); } @@ -1641,6 +1792,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { } +LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckSmi(value)); +} + + LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { LOperand* value = UseRegisterAtStart(instr->value()); return AssignEnvironment(new(zone()) LCheckFunction(value)); @@ -1674,7 +1831,9 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - return new(zone()) LReturn(UseFixed(instr->value(), v0)); + LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); + return new(zone()) 
LReturn(UseFixed(instr->value(), v0), + parameter_count); } @@ -1800,53 +1959,49 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( } -LInstruction* LChunkBuilder::DoLoadKeyedFastElement( - HLoadKeyedFastElement* instr) { - ASSERT(instr->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* obj = UseRegisterAtStart(instr->object()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key); - if (instr->RequiresHoleCheck()) AssignEnvironment(result); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( - HLoadKeyedFastDoubleElement* instr) { - ASSERT(instr->representation().IsDouble()); +LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { ASSERT(instr->key()->representation().IsInteger32() || instr->key()->representation().IsTagged()); - LOperand* elements = UseTempRegister(instr->elements()); + ElementsKind elements_kind = instr->elements_kind(); LOperand* key = UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastDoubleElement* result = - new(zone()) LLoadKeyedFastDoubleElement(elements, key); - return AssignEnvironment(DefineAsRegister(result)); -} + LLoadKeyed* result = NULL; + if (!instr->is_external()) { + LOperand* obj = NULL; + if (instr->representation().IsDouble()) { + obj = UseTempRegister(instr->elements()); + } else { + ASSERT(instr->representation().IsTagged()); + obj = UseRegisterAtStart(instr->elements()); + } + result = new(zone()) LLoadKeyed(obj, key); + } else { + ASSERT( + (instr->representation().IsInteger32() && + (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && + (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || + (instr->representation().IsDouble() && + ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + // float->double conversion on non-VFP2 requires an extra scratch + // register. For convenience, just mark the elements register as "UseTemp" + // so that it can be used as a temp during the float->double conversion + // after it's no longer needed after the float load. + bool needs_temp = + !CpuFeatures::IsSupported(FPU) && + (elements_kind == EXTERNAL_FLOAT_ELEMENTS); + LOperand* external_pointer = needs_temp + ? UseTempRegister(instr->elements()) + : UseRegister(instr->elements()); + result = new(zone()) LLoadKeyed(external_pointer, key); + } -LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( - HLoadKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); - ASSERT( - (instr->representation().IsInteger32() && - (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (instr->representation().IsDouble() && - ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* external_pointer = UseRegister(instr->external_pointer()); - LOperand* key = UseRegisterOrConstant(instr->key()); - LLoadKeyedSpecializedArrayElement* result = - new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key); - LInstruction* load_instr = DefineAsRegister(result); + DefineAsRegister(result); // An unsigned int array load might overflow and cause a deopt, make sure it // has an environment. - return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ? 
- AssignEnvironment(load_instr) : load_instr; + bool can_deoptimize = instr->RequiresHoleCheck() || + (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS); + return can_deoptimize ? AssignEnvironment(result) : result; } @@ -1860,66 +2015,49 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { } -LInstruction* LChunkBuilder::DoStoreKeyedFastElement( - HStoreKeyedFastElement* instr) { - bool needs_write_barrier = instr->NeedsWriteBarrier(); - ASSERT(instr->value()->representation().IsTagged()); - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* obj = UseTempRegister(instr->object()); - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - LOperand* key = needs_write_barrier - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyedFastElement(obj, key, val); -} - - -LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( - HStoreKeyedFastDoubleElement* instr) { - ASSERT(instr->value()->representation().IsDouble()); - ASSERT(instr->elements()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* elements = UseRegisterAtStart(instr->elements()); - LOperand* val = UseTempRegister(instr->value()); - LOperand* key = UseRegisterOrConstantAtStart(instr->key()); +LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); - return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val); -} + if (!instr->is_external()) { + ASSERT(instr->elements()->representation().IsTagged()); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + LOperand* object = NULL; + LOperand* val = NULL; + LOperand* key = NULL; + + if (instr->value()->representation().IsDouble()) { + object = UseRegisterAtStart(instr->elements()); + key = UseRegisterOrConstantAtStart(instr->key()); + val = UseTempRegister(instr->value()); + } else { + ASSERT(instr->value()->representation().IsTagged()); + object = UseTempRegister(instr->elements()); + val = needs_write_barrier ? UseTempRegister(instr->value()) + : UseRegisterAtStart(instr->value()); + key = needs_write_barrier ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + } + return new(zone()) LStoreKeyed(object, key, val); + } -LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( - HStoreKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); ASSERT( (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->external_pointer()->representation().IsExternal()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* external_pointer = UseRegister(instr->external_pointer()); + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ASSERT(instr->elements()->representation().IsExternal()); bool val_is_temp_register = elements_kind == EXTERNAL_PIXEL_ELEMENTS || elements_kind == EXTERNAL_FLOAT_ELEMENTS; - LOperand* val = val_is_temp_register - ? 
UseTempRegister(instr->value()) + LOperand* val = val_is_temp_register ? UseTempRegister(instr->value()) : UseRegister(instr->value()); - LOperand* key = UseRegisterOrConstant(instr->key()); + LOperand* key = UseRegisterOrConstantAtStart(instr->key()); + LOperand* external_pointer = UseRegister(instr->elements()); - return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer, - key, - val); + return new(zone()) LStoreKeyed(external_pointer, key, val); } @@ -1938,14 +2076,16 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - ElementsKind from_kind = instr->original_map()->elements_kind(); - ElementsKind to_kind = instr->transitioned_map()->elements_kind(); - if (IsSimpleMapChangeTransition(from_kind, to_kind)) { - LOperand* object = UseRegister(instr->object()); + LOperand* object = UseRegister(instr->object()); + if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { LOperand* new_map_reg = TempRegister(); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, new_map_reg, NULL); return DefineSameAsFirst(result); + } else if (FLAG_compiled_transitions) { + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, NULL, NULL); + return AssignPointerMap(result); } else { LOperand* object = UseFixed(instr->object(), a0); LOperand* fixed_object_reg = FixedTemp(a2); @@ -1954,11 +2094,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( new(zone()) LTransitionElementsKind(object, new_map_reg, fixed_object_reg); - return MarkAsCall(DefineFixed(result, v0), instr); + return MarkAsCall(result, instr); } } +LInstruction* LChunkBuilder::DoTrapAllocationMemento( + HTrapAllocationMemento* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* temp = TempRegister(); + LTrapAllocationMemento* result = + new(zone()) LTrapAllocationMemento(object, temp); + return AssignEnvironment(result); +} + + LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); bool needs_write_barrier_for_map = !instr->transition().is_null() && @@ -2025,12 +2175,23 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { + info()->MarkAsDeferredCalling(); LAllocateObject* result = new(zone()) LAllocateObject(TempRegister(), TempRegister()); return AssignPointerMap(DefineAsRegister(result)); } +LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { + info()->MarkAsDeferredCalling(); + LOperand* size = UseTempRegister(instr->size()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LAllocate* result = new(zone()) LAllocate(size, temp1, temp2); + return AssignPointerMap(DefineAsRegister(result)); +} + + LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr); } @@ -2073,8 +2234,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(new(zone()) LParameter, spill_index); + LParameter* result = new(zone()) LParameter; + if (instr->kind() == HParameter::STACK_PARAMETER) { + int spill_index = chunk()->GetParameterStackSlot(instr->index()); + return DefineAsSpilled(result, spill_index); + } else { + 
ASSERT(info()->IsStub()); + CodeStubInterfaceDescriptor* descriptor = + info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + Register reg = descriptor->register_params_[instr->index()]; + return DefineFixed(result, reg); + } } @@ -2142,7 +2312,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { env->set_ast_id(instr->ast_id()); env->Drop(instr->pop_count()); - for (int i = 0; i < instr->values()->length(); ++i) { + for (int i = instr->values()->length() - 1; i >= 0; --i) { HValue* value = instr->values()->at(i); if (instr->HasAssignedIndexAt(i)) { env->Bind(instr->GetAssignedIndexAt(i), value); @@ -2186,8 +2356,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { instr->arguments_count(), instr->function(), undefined, - instr->call_kind(), - instr->inlining_kind()); + instr->inlining_kind(), + instr->undefined_receiver()); if (instr->arguments_var() != NULL) { inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h index 3a9aa7accf..80635c3896 100644 --- a/deps/v8/src/mips/lithium-mips.h +++ b/deps/v8/src/mips/lithium-mips.h @@ -50,6 +50,7 @@ class LCodeGen; V(AccessArgumentsAt) \ V(AddI) \ V(AllocateObject) \ + V(Allocate) \ V(ApplyArguments) \ V(ArgumentsElements) \ V(ArgumentsLength) \ @@ -67,6 +68,7 @@ class LCodeGen; V(CallKnownGlobal) \ V(CallNamed) \ V(CallNew) \ + V(CallNewArray) \ V(CallRuntime) \ V(CallStub) \ V(CheckFunction) \ @@ -93,6 +95,7 @@ class LCodeGen; V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ + V(DummyUse) \ V(ElementsKind) \ V(FastLiteral) \ V(FixedArrayBaseLength) \ @@ -106,6 +109,7 @@ class LCodeGen; V(In) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ + V(InstanceSize) \ V(InstructionGap) \ V(Integer32ToDouble) \ V(Uint32ToDouble) \ @@ -125,17 +129,17 @@ class LCodeGen; V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ - V(LoadKeyedFastDoubleElement) \ - V(LoadKeyedFastElement) \ + V(LoadKeyed) \ V(LoadKeyedGeneric) \ - V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ V(MapEnumLength) \ + V(MathExp) \ V(MathMinMax) \ V(ModI) \ V(MulI) \ + V(MultiplyAddD) \ V(NumberTagD) \ V(NumberTagI) \ V(NumberTagU) \ @@ -149,6 +153,7 @@ class LCodeGen; V(Random) \ V(RegExpLiteral) \ V(Return) \ + V(SeqStringSetChar) \ V(ShiftI) \ V(SmiTag) \ V(SmiUntag) \ @@ -156,10 +161,8 @@ class LCodeGen; V(StoreContextSlot) \ V(StoreGlobalCell) \ V(StoreGlobalGeneric) \ - V(StoreKeyedFastDoubleElement) \ - V(StoreKeyedFastElement) \ + V(StoreKeyed) \ V(StoreKeyedGeneric) \ - V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ V(StringAdd) \ @@ -173,6 +176,7 @@ class LCodeGen; V(Throw) \ V(ToFastProperties) \ V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ V(Typeof) \ V(TypeofIsAndBranch) \ V(UnaryMathOperation) \ @@ -184,7 +188,9 @@ class LCodeGen; V(LoadFieldByIndex) \ V(DateField) \ V(WrapReceiver) \ - V(Drop) + V(Drop) \ + V(InnerAllocatedObject) + #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ virtual Opcode opcode() const { return LInstruction::k##type; } \ @@ -254,6 +260,11 @@ class LInstruction: public ZoneObject { void MarkAsCall() { is_call_ = true; } // Interface to the register allocator and iterators. + bool ClobbersTemps() const { return is_call_; } + bool ClobbersRegisters() const { return is_call_; } + bool ClobbersDoubleRegisters() const { return is_call_; } + + // Interface to the register allocator and iterators. 
bool IsMarkedAsCall() const { return is_call_; } virtual bool HasResult() const = 0; @@ -397,6 +408,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> { }; +class LDummyUse: public LTemplateInstruction<1, 1, 0> { + public: + explicit LDummyUse(LOperand* value) { + inputs_[0] = value; + } + DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") +}; + + class LDeoptimize: public LTemplateInstruction<0, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") @@ -605,6 +625,24 @@ class LMulI: public LTemplateInstruction<1, 2, 1> { }; +// Instruction for computing multiplier * multiplicand + addend. +class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> { + public: + LMultiplyAddD(LOperand* addend, LOperand* multiplier, + LOperand* multiplicand) { + inputs_[0] = addend; + inputs_[1] = multiplier; + inputs_[2] = multiplicand; + } + + LOperand* addend() { return inputs_[0]; } + LOperand* multiplier() { return inputs_[1]; } + LOperand* multiplicand() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d") +}; + + class LCmpIDAndBranch: public LControlInstruction<2, 0> { public: LCmpIDAndBranch(LOperand* left, LOperand* right) { @@ -620,7 +658,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { Token::Value op() const { return hydrogen()->token(); } bool is_double() const { - return hydrogen()->GetInputRepresentation().IsDouble(); + return hydrogen()->representation().IsDouble(); } virtual void PrintDataTo(StringStream* stream); @@ -645,6 +683,30 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> { }; +class LMathExp: public LTemplateInstruction<1, 1, 3> { + public: + LMathExp(LOperand* value, + LOperand* double_temp, + LOperand* temp1, + LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + temps_[2] = double_temp; + ExternalReference::InitializeMathExpData(); + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + LOperand* double_temp() { return temps_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") + + virtual void PrintDataTo(StringStream* stream); +}; + + class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { @@ -901,6 +963,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { }; +class LInstanceSize: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInstanceSize(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") + DECLARE_HYDROGEN_ACCESSOR(InstanceSize) +}; + + class LBoundsCheck: public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -1122,6 +1197,30 @@ class LDateField: public LTemplateInstruction<1, 1, 1> { }; +class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> { + public: + LSeqStringSetChar(String::Encoding encoding, + LOperand* string, + LOperand* index, + LOperand* value) : encoding_(encoding) { + inputs_[0] = string; + inputs_[1] = index; + inputs_[2] = value; + } + + String::Encoding encoding() { return encoding_; } + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) + + private: + String::Encoding encoding_; +}; + + 
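Aside: the lithium classes above (LMultiplyAddD, LMathExp, LSeqStringSetChar, and the rest) all follow the same operand-slot pattern, where the template parameters fix the number of result, input and temp slots and each constructor fills the arrays it needs. The following is a minimal, self-contained sketch of that pattern only; Operand and the class names are placeholders, not the real V8 types.

    struct Operand {};  // stand-in for LOperand*

    template <int R, int I, int T>
    class TemplateInstructionSketch {
     protected:
      // Size-1 fallback avoids zero-length arrays when a count is 0.
      Operand* results_[R == 0 ? 1 : R];
      Operand* inputs_[I == 0 ? 1 : I];
      Operand* temps_[T == 0 ? 1 : T];
    };

    // Analogue of LSeqStringSetChar: one result, three inputs, no temps.
    class SeqStringSetCharSketch : public TemplateInstructionSketch<1, 3, 0> {
     public:
      SeqStringSetCharSketch(Operand* string, Operand* index, Operand* value) {
        inputs_[0] = string;
        inputs_[1] = index;
        inputs_[2] = value;
      }
      Operand* string() { return inputs_[0]; }
      Operand* index()  { return inputs_[1]; }
      Operand* value()  { return inputs_[2]; }
    };

Named accessors over raw array indices keep the code generator readable; the concrete instruction only documents which slot means what.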
class LThrow: public LTemplateInstruction<0, 1, 0> { public: explicit LThrow(LOperand* value) { @@ -1246,14 +1345,24 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> { }; -class LReturn: public LTemplateInstruction<0, 1, 0> { +class LReturn: public LTemplateInstruction<0, 2, 0> { public: - explicit LReturn(LOperand* value) { + explicit LReturn(LOperand* value, LOperand* parameter_count) { inputs_[0] = value; + inputs_[1] = parameter_count; } LOperand* value() { return inputs_[0]; } + bool has_constant_parameter_count() { + return parameter_count()->IsConstantOperand(); + } + LConstantOperand* constant_parameter_count() { + ASSERT(has_constant_parameter_count()); + return LConstantOperand::cast(parameter_count()); + } + LOperand* parameter_count() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -1337,59 +1446,26 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> { }; -class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { +class LLoadKeyed: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedFastElement(LOperand* elements, LOperand* key) { + LLoadKeyed(LOperand* elements, LOperand* key) { inputs_[0] = elements; inputs_[1] = key; } LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) { - inputs_[0] = elements; - inputs_[1] = key; + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); } - - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement, - "load-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement) - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key) { - inputs_[0] = external_pointer; - inputs_[1] = key; + bool is_external() const { + return hydrogen()->is_external(); } - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement, - "load-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement) + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - ElementsKind elements_kind() const { - return hydrogen()->elements_kind(); - } + virtual void PrintDataTo(StringStream* stream); uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1526,6 +1602,22 @@ class LDrop: public LTemplateInstruction<0, 0, 0> { }; +class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInnerAllocatedObject(LOperand* base_object) { + inputs_[0] = base_object; + } + + LOperand* base_object() { return inputs_[0]; } + int offset() { return hydrogen()->offset(); } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object") + DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject) +}; + + class LThisFunction: 
public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") @@ -1536,6 +1628,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> { class LContext: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Context, "context") + DECLARE_HYDROGEN_ACCESSOR(Context) }; @@ -1698,6 +1791,23 @@ class LCallNew: public LTemplateInstruction<1, 1, 0> { }; +class LCallNewArray: public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallNewArray(LOperand* constructor) { + inputs_[0] = constructor; + } + + LOperand* constructor() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream); + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + class LCallRuntime: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") @@ -1769,6 +1879,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> { LOperand* temp2() { return temps_[1]; } DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") + DECLARE_HYDROGEN_ACCESSOR(Change) }; @@ -1903,51 +2014,28 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { }; -class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) { + LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) { inputs_[0] = object; inputs_[1] = key; inputs_[2] = value; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) - - virtual void PrintDataTo(StringStream* stream); - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedFastDoubleElement(LOperand* elements, - LOperand* key, - LOperand* value) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = value; - } - + bool is_external() const { return hydrogen()->is_external(); } LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + ElementsKind elements_kind() const { + return hydrogen()->elements_kind(); + } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement, - "store-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) virtual void PrintDataTo(StringStream* stream); - - uint32_t additional_index() const { return hydrogen()->index_offset(); } - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } + uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1971,37 +2059,15 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); } }; -class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key, - LOperand* value) { - inputs_[0] = external_pointer; - inputs_[1] = key; - inputs_[2] = value; - } - - LOperand* 
external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement, - "store-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement) - - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { public: LTransitionElementsKind(LOperand* object, LOperand* new_map_temp, - LOperand* temp) { + LOperand* fixed_object_temp) { inputs_[0] = object; temps_[0] = new_map_temp; - temps_[1] = temp; + temps_[1] = fixed_object_temp; } LOperand* object() { return inputs_[0]; } @@ -2016,6 +2082,24 @@ class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { Handle<Map> original_map() { return hydrogen()->original_map(); } Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + ElementsKind from_kind() { return hydrogen()->from_kind(); } + ElementsKind to_kind() { return hydrogen()->to_kind(); } +}; + + +class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> { + public: + LTrapAllocationMemento(LOperand* object, + LOperand* temp) { + inputs_[0] = object; + temps_[0] = temp; + } + + LOperand* object() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, + "trap-allocation-memento") }; @@ -2115,7 +2199,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> { }; -class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { +class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> { public: LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) { temps_[0] = temp; @@ -2128,8 +2212,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> { DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps") DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps) - Handle<JSObject> prototype() const { return hydrogen()->prototype(); } - Handle<JSObject> holder() const { return hydrogen()->holder(); } + ZoneList<Handle<JSObject> >* prototypes() const { + return hydrogen()->prototypes(); + } + ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); } }; @@ -2197,7 +2283,7 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> { }; -class LAllocateObject: public LTemplateInstruction<1, 0, 2> { +class LAllocateObject: public LTemplateInstruction<1, 1, 2> { public: LAllocateObject(LOperand* temp, LOperand* temp2) { temps_[0] = temp; @@ -2212,6 +2298,23 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 2> { }; +class LAllocate: public LTemplateInstruction<1, 2, 2> { + public: + LAllocate(LOperand* size, LOperand* temp1, LOperand* temp2) { + inputs_[1] = size; + temps_[0] = temp1; + temps_[1] = temp2; + } + + LOperand* size() { return inputs_[1]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") + DECLARE_HYDROGEN_ACCESSOR(Allocate) +}; + + class LFastLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal") @@ -2336,8 +2439,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { // slot, i.e., that must also be restored to the spill slot on OSR entry. // NULL if the register has no assigned spill slot. Indexed by allocation // index. 
- LOperand* register_spills_[Register::kNumAllocatableRegisters]; - LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters]; + LOperand* register_spills_[Register::kMaxNumAllocatableRegisters]; + LOperand* double_register_spills_[ + DoubleRegister::kMaxNumAllocatableRegisters]; }; @@ -2459,6 +2563,8 @@ class LChunkBuilder BASE_EMBEDDED { HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO) #undef DECLARE_DO + LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend); + private: enum Status { UNUSED, diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc index 052387ab01..603f1be70e 100644 --- a/deps/v8/src/mips/macro-assembler-mips.cc +++ b/deps/v8/src/mips/macro-assembler-mips.cc @@ -118,8 +118,8 @@ void MacroAssembler::PopSafepointRegisters() { void MacroAssembler::PushSafepointRegistersAndDoubles() { PushSafepointRegisters(); - Subu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize)); - for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) { + Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); + for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { FPURegister reg = FPURegister::FromAllocationIndex(i); sdc1(reg, MemOperand(sp, i * kDoubleSize)); } @@ -127,11 +127,11 @@ void MacroAssembler::PushSafepointRegistersAndDoubles() { void MacroAssembler::PopSafepointRegistersAndDoubles() { - for (int i = 0; i < FPURegister::kNumAllocatableRegisters; i+=2) { + for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { FPURegister reg = FPURegister::FromAllocationIndex(i); ldc1(reg, MemOperand(sp, i * kDoubleSize)); } - Addu(sp, sp, Operand(FPURegister::kNumAllocatableRegisters * kDoubleSize)); + Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); PopSafepointRegisters(); } @@ -167,7 +167,7 @@ MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { UNIMPLEMENTED_MIPS(); // General purpose registers are pushed last on the stack. 
- int doubles_size = FPURegister::kNumAllocatableRegisters * kDoubleSize; + int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize; int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; return MemOperand(sp, doubles_size + register_offset); } @@ -853,7 +853,7 @@ void MacroAssembler::MultiPopReversed(RegList regs) { void MacroAssembler::MultiPushFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t num_to_push = NumberOfBitsSet(regs); int16_t stack_offset = num_to_push * kDoubleSize; @@ -868,7 +868,7 @@ void MacroAssembler::MultiPushFPU(RegList regs) { void MacroAssembler::MultiPushReversedFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t num_to_push = NumberOfBitsSet(regs); int16_t stack_offset = num_to_push * kDoubleSize; @@ -883,7 +883,7 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) { void MacroAssembler::MultiPopFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t stack_offset = 0; for (int16_t i = 0; i < kNumRegisters; i++) { @@ -897,7 +897,7 @@ void MacroAssembler::MultiPopFPU(RegList regs) { void MacroAssembler::MultiPopReversedFPU(RegList regs) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); int16_t stack_offset = 0; for (int16_t i = kNumRegisters - 1; i >= 0; i--) { @@ -1125,23 +1125,19 @@ void MacroAssembler::BranchF(Label* target, // have been handled by the caller. // Unsigned conditions are treated as their signed counterpart. switch (cc) { - case Uless: - case less: + case lt: c(OLT, D, cmp1, cmp2); bc1t(target); break; - case Ugreater: - case greater: + case gt: c(ULE, D, cmp1, cmp2); bc1f(target); break; - case Ugreater_equal: - case greater_equal: + case ge: c(ULT, D, cmp1, cmp2); bc1f(target); break; - case Uless_equal: - case less_equal: + case le: c(OLE, D, cmp1, cmp2); bc1t(target); break; @@ -1149,10 +1145,18 @@ void MacroAssembler::BranchF(Label* target, c(EQ, D, cmp1, cmp2); bc1t(target); break; + case ueq: + c(UEQ, D, cmp1, cmp2); + bc1t(target); + break; case ne: c(EQ, D, cmp1, cmp2); bc1f(target); break; + case nue: + c(UEQ, D, cmp1, cmp2); + bc1f(target); + break; default: CHECK(0); }; @@ -1165,7 +1169,7 @@ void MacroAssembler::BranchF(Label* target, void MacroAssembler::Move(FPURegister dst, double imm) { - ASSERT(CpuFeatures::IsEnabled(FPU)); + ASSERT(IsEnabled(FPU)); static const DoubleRepresentation minus_zero(-0.0); static const DoubleRepresentation zero(0.0); DoubleRepresentation value(imm); @@ -1345,7 +1349,7 @@ void MacroAssembler::ConvertToInt32(Register source, } bind(&right_exponent); if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); // MIPS FPU instructions implementing double precision to integer // conversion using round to zero. Since the FP value was qualified // above, the resulting integer should be a legal int32. 
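Aside: the reworked BranchF switch above lowers each comparison to one FPU compare predicate (OLT, ULE, ULT, OLE, EQ, UEQ) plus a branch on the condition bit being set (bc1t) or clear (bc1f), choosing unordered-aware predicates where a NaN operand must steer the branch the other way. The sketch below states the resulting branch-taken semantics in plain C++; the enum is illustrative, not the assembler's condition type.

    #include <cmath>

    enum FPCondition { kLt, kGt, kGe, kLe, kEq, kUeq, kNe, kNue };

    // Returns whether BranchF would take the branch for the given condition.
    bool BranchTakenSketch(FPCondition cc, double lhs, double rhs) {
      bool unordered = std::isnan(lhs) || std::isnan(rhs);
      switch (cc) {
        case kLt:  return lhs < rhs;                 // c(OLT) + bc1t
        case kGt:  return lhs > rhs;                 // c(ULE) + bc1f
        case kGe:  return lhs >= rhs;                // c(ULT) + bc1f
        case kLe:  return lhs <= rhs;                // c(OLE) + bc1t
        case kEq:  return lhs == rhs;                // c(EQ)  + bc1t
        case kUeq: return unordered || lhs == rhs;   // c(UEQ) + bc1t
        case kNe:  return unordered || lhs != rhs;   // c(EQ)  + bc1f
        case kNue: return !unordered && lhs != rhs;  // c(UEQ) + bc1f
      }
      return false;
    }

Note that kGt and kGe invert an unordered predicate rather than using OGT/OGE directly, which is why a NaN input falls through instead of branching.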
@@ -1395,49 +1399,68 @@ void MacroAssembler::ConvertToInt32(Register source, void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, - FPURegister result, + Register result, DoubleRegister double_input, - Register scratch1, + Register scratch, + DoubleRegister double_scratch, Register except_flag, CheckForInexactConversion check_inexact) { + ASSERT(!result.is(scratch)); + ASSERT(!double_input.is(double_scratch)); + ASSERT(!except_flag.is(scratch)); + ASSERT(CpuFeatures::IsSupported(FPU)); - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); + Label done; + + // Clear the except flag (0 = no exception) + mov(except_flag, zero_reg); + + // Test for values that can be exactly represented as a signed 32-bit integer. + cvt_w_d(double_scratch, double_input); + mfc1(result, double_scratch); + cvt_d_w(double_scratch, double_scratch); + BranchF(&done, NULL, eq, double_input, double_scratch); int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions. if (check_inexact == kDontCheckForInexactConversion) { - // Ingore inexact exceptions. + // Ignore inexact exceptions. except_mask &= ~kFCSRInexactFlagMask; } // Save FCSR. - cfc1(scratch1, FCSR); + cfc1(scratch, FCSR); // Disable FPU exceptions. ctc1(zero_reg, FCSR); // Do operation based on rounding mode. switch (rounding_mode) { case kRoundToNearest: - Round_w_d(result, double_input); + Round_w_d(double_scratch, double_input); break; case kRoundToZero: - Trunc_w_d(result, double_input); + Trunc_w_d(double_scratch, double_input); break; case kRoundToPlusInf: - Ceil_w_d(result, double_input); + Ceil_w_d(double_scratch, double_input); break; case kRoundToMinusInf: - Floor_w_d(result, double_input); + Floor_w_d(double_scratch, double_input); break; } // End of switch-statement. // Retrieve FCSR. cfc1(except_flag, FCSR); // Restore FCSR. - ctc1(scratch1, FCSR); + ctc1(scratch, FCSR); + // Move the converted value into the result register. + mfc1(result, double_scratch); // Check for fpu exceptions. And(except_flag, except_flag, Operand(except_mask)); + + bind(&done); } @@ -1529,7 +1552,7 @@ void MacroAssembler::EmitECMATruncate(Register result, Register scratch, Register scratch2, Register scratch3) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); ASSERT(!scratch2.is(result)); ASSERT(!scratch3.is(result)); ASSERT(!scratch3.is(scratch2)); @@ -2736,7 +2759,7 @@ void MacroAssembler::DebugBreak() { PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate())); CEntryStub ces(1); ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); + Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); } #endif // ENABLE_DEBUGGER_SUPPORT @@ -2889,12 +2912,12 @@ void MacroAssembler::ThrowUncatchable(Register value) { } -void MacroAssembler::AllocateInNewSpace(int object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::Allocate(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags) { if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -2922,20 +2945,21 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Check relative positions of allocation top and limit addresses. // ARM adds additional checks to make sure the ldm instruction can be // used. On MIPS we don't have ldm so we don't need additional checks either. 
- ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate()); + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); + intptr_t top = - reinterpret_cast<intptr_t>(new_space_allocation_top.address()); + reinterpret_cast<intptr_t>(allocation_top.address()); intptr_t limit = - reinterpret_cast<intptr_t>(new_space_allocation_limit.address()); + reinterpret_cast<intptr_t>(allocation_limit.address()); ASSERT((limit - top) == kPointerSize); // Set up allocation top address and object size registers. Register topaddr = scratch1; Register obj_size_reg = scratch2; - li(topaddr, Operand(new_space_allocation_top)); + li(topaddr, Operand(allocation_top)); li(obj_size_reg, Operand(object_size)); // This code stores a temporary value in t9. @@ -2974,6 +2998,7 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, Register scratch2, Label* gc_required, AllocationFlags flags) { + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -3109,9 +3134,9 @@ void MacroAssembler::AllocateAsciiString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string // while observing object alignment. - ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0); + ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); ASSERT(kCharSize == 1); - addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize); + addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); // Allocate ASCII string in new space. 
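Aside: the two instructions at the end of the AllocateAsciiString hunk above compute the allocation size as header bytes plus one byte per character, rounded up to the object alignment. A standalone sketch of that arithmetic follows; the constants are assumptions chosen only to make the example self-contained, not the real V8 values.

    #include <cstdint>

    const uint32_t kObjectAlignment = 8;                      // assumed
    const uint32_t kObjectAlignmentMask = kObjectAlignment - 1;
    const uint32_t kSeqOneByteStringHeaderSize = 16;          // assumed

    uint32_t OneByteStringAllocationSize(uint32_t length) {
      // addiu: length + header + (alignment - 1); And: clear the low bits.
      return (length + kSeqOneByteStringHeaderSize + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }

Adding the alignment mask before clearing the low bits is the usual round-up-to-power-of-two idiom, so a zero-length string still gets a correctly aligned header-only object.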
@@ -3136,12 +3161,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, Heap::kConsStringMapRootIndex, @@ -3155,12 +3176,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, Heap::kConsAsciiStringMapRootIndex, @@ -3174,12 +3191,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, @@ -3194,12 +3207,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result, Register scratch1, Register scratch2, Label* gc_required) { - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); InitializeNewString(result, length, @@ -3215,19 +3224,20 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1, Register scratch2, Register heap_number_map, - Label* need_gc) { + Label* need_gc, + TaggingMode tagging_mode) { // Allocate an object in the heap for the heap number and tag it as a heap // object. - AllocateInNewSpace(HeapNumber::kSize, - result, - scratch1, - scratch2, - need_gc, - TAG_OBJECT); + Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc, + tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); // Store heap number map in the allocated object. 
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); + if (tagging_mode == TAG_RESULT) { + sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); + } else { + sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); + } } @@ -3380,13 +3390,13 @@ void MacroAssembler::CheckFastSmiElements(Register map, void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, Register key_reg, - Register receiver_reg, Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail) { + Label* fail, + int elements_offset) { Label smi_value, maybe_nan, have_double_value, is_nan, done; Register mantissa_reg = scratch2; Register exponent_reg = scratch3; @@ -3412,8 +3422,10 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&have_double_value); sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize); Addu(scratch1, scratch1, elements_reg); - sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize)); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); + sw(mantissa_reg, FieldMemOperand( + scratch1, FixedDoubleArray::kHeaderSize - elements_offset)); + uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset + + sizeof(kHoleNanLower32); sw(exponent_reg, FieldMemOperand(scratch1, offset)); jmp(&done); @@ -3433,7 +3445,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, bind(&smi_value); Addu(scratch1, elements_reg, - Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag)); + Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - + elements_offset)); sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); Addu(scratch1, scratch1, scratch2); // scratch1 is now effective address of the double element @@ -3456,7 +3469,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, scratch4, f2); if (destination == FloatingPointHelper::kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); sdc1(f0, MemOperand(scratch1, 0)); } else { sw(mantissa_reg, MemOperand(scratch1, 0)); @@ -3549,7 +3562,7 @@ void MacroAssembler::CheckMap(Register obj, void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (IsMipsSoftFloatABI) { Move(dst, v0, v1); } else { @@ -3559,7 +3572,7 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) { void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (!IsMipsSoftFloatABI) { Move(f12, dreg); } else { @@ -3570,7 +3583,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) { void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (!IsMipsSoftFloatABI) { if (dreg2.is(f12)) { ASSERT(!dreg1.is(f14)); @@ -3589,7 +3602,7 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1, void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg, Register reg) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(this, FPU); if (!IsMipsSoftFloatABI) { Move(f12, dreg); Move(a2, reg); @@ -3834,6 +3847,15 @@ void MacroAssembler::IsObjectJSStringType(Register object, } +void MacroAssembler::IsObjectNameType(Register object, + Register scratch, + Label* fail) { + lw(scratch, 
FieldMemOperand(object, HeapObject::kMapOffset)); + lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); + Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE)); +} + + // --------------------------------------------------------------------------- // Support functions. @@ -3907,19 +3929,21 @@ void MacroAssembler::GetObjectType(Register object, // Runtime calls. void MacroAssembler::CallStub(CodeStub* stub, + TypeFeedbackId ast_id, Condition cond, Register r1, const Operand& r2, BranchDelaySlot bd) { ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. - Call(stub->GetCode(), RelocInfo::CODE_TARGET, TypeFeedbackId::None(), + Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id, cond, r1, r2, bd); } void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); - Jump(stub->GetCode(), RelocInfo::CODE_TARGET); + ASSERT(allow_stub_calls_ || + stub->CompilingCallsToThisStubIsGCSafe(isolate())); + Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); } @@ -3931,13 +3955,13 @@ static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, int stack_space) { ExternalReference next_address = - ExternalReference::handle_scope_next_address(); + ExternalReference::handle_scope_next_address(isolate()); const int kNextOffset = 0; const int kLimitOffset = AddressOffset( - ExternalReference::handle_scope_limit_address(), + ExternalReference::handle_scope_limit_address(isolate()), next_address); const int kLevelOffset = AddressOffset( - ExternalReference::handle_scope_level_address(), + ExternalReference::handle_scope_level_address(isolate()), next_address); // Allocate HandleScope in callee-save registers. @@ -3948,6 +3972,14 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, Addu(s2, s2, Operand(1)); sw(s2, MemOperand(s3, kLevelOffset)); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, a0); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PopSafepointRegisters(); + } + // The O32 ABI requires us to pass a pointer in a0 where the returned struct // (4 bytes) will be placed. This is also built into the Simulator. // Set up the pointer to the returned value (a0). It was allocated in @@ -3960,6 +3992,14 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, DirectCEntryStub stub; stub.GenerateCall(this, function); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0, a0); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PopSafepointRegisters(); + } + // As mentioned above, on MIPS a pointer is returned - we need to dereference // it to get the actual return value (which is also a pointer). 
lw(v0, MemOperand(v0)); @@ -4020,7 +4060,7 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function, bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; - return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate()); } @@ -4205,7 +4245,10 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { const Runtime::Function* function = Runtime::FunctionForId(id); PrepareCEntryArgs(function->nargs); PrepareCEntryFunction(ExternalReference(function, isolate())); - CEntryStub stub(1, kSaveFPRegs); + SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU) + ? kSaveFPRegs + : kDontSaveFPRegs; + CEntryStub stub(1, mode); CallStub(&stub); } @@ -4222,7 +4265,7 @@ void MacroAssembler::CallExternalReference(const ExternalReference& ext, PrepareCEntryFunction(ext); CEntryStub stub(1); - CallStub(&stub, al, zero_reg, Operand(zero_reg), bd); + CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd); } @@ -4251,7 +4294,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin, BranchDelaySlot bd) { PrepareCEntryFunction(builtin); CEntryStub stub(1); - Jump(stub.GetCode(), + Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, al, zero_reg, @@ -4509,6 +4552,19 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) { } +void MacroAssembler::LoadArrayFunction(Register function) { + // Load the global or builtins object from the current context. + lw(function, + MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + // Load the global context from the global or builtins object. + lw(function, + FieldMemOperand(function, GlobalObject::kGlobalContextOffset)); + // Load the array function from the native context. + lw(function, + MemOperand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); +} + + void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, Register map, Register scratch) { @@ -4584,16 +4640,17 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); if (save_doubles) { + CpuFeatureScope scope(this, FPU); // The stack must be allign to 0 modulo 8 for stores with sdc1. ASSERT(kDoubleSize == frame_alignment); if (frame_alignment > 0) { ASSERT(IsPowerOf2(frame_alignment)); And(sp, sp, Operand(-frame_alignment)); // Align stack. } - int space = FPURegister::kNumRegisters * kDoubleSize; + int space = FPURegister::kMaxNumRegisters * kDoubleSize; Subu(sp, sp, Operand(space)); // Remember: we only need to save every 2nd double FPU value. - for (int i = 0; i < FPURegister::kNumRegisters; i+=2) { + for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { FPURegister reg = FPURegister::from_code(i); sdc1(reg, MemOperand(sp, i * kDoubleSize)); } @@ -4621,9 +4678,10 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles, bool do_return) { // Optionally restore all double registers. if (save_doubles) { + CpuFeatureScope scope(this, FPU); // Remember: we only need to restore every 2nd double FPU value. 
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); - for (int i = 0; i < FPURegister::kNumRegisters; i+=2) { + for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { FPURegister reg = FPURegister::from_code(i); ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize)); } @@ -4830,6 +4888,20 @@ void MacroAssembler::AssertString(Register object) { } +void MacroAssembler::AssertName(Register object) { + if (emit_debug_code()) { + STATIC_ASSERT(kSmiTag == 0); + And(t0, object, Operand(kSmiTagMask)); + Check(ne, "Operand is a smi and not a name", t0, Operand(zero_reg)); + push(object); + lw(object, FieldMemOperand(object, HeapObject::kMapOffset)); + lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); + Check(le, "Operand is not a name", object, Operand(LAST_NAME_TYPE)); + pop(object); + } +} + + void MacroAssembler::AssertRootValue(Register src, Heap::RootListIndex root_value_index, const char* message) { @@ -5272,7 +5344,7 @@ void MacroAssembler::EnsureNotWhite( // For ASCII (char-size of 1) we shift the smi tag away to get the length. // For UC16 (char-size of 2) we just leave the smi tag in place, thereby // getting the length multiplied by 2. - ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); + ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); ASSERT(kSmiTag == 0 && kSmiTagSize == 1); lw(t9, FieldMemOperand(value, String::kLengthOffset)); And(t8, instance_type, Operand(kStringEncodingMask)); @@ -5397,6 +5469,29 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg, } +void MacroAssembler::TestJSArrayForAllocationSiteInfo( + Register receiver_reg, + Register scratch_reg, + Condition cond, + Label* allocation_info_present) { + Label no_info_available; + ExternalReference new_space_start = + ExternalReference::new_space_start(isolate()); + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + Addu(scratch_reg, receiver_reg, + Operand(JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag)); + Branch(&no_info_available, lt, scratch_reg, Operand(new_space_start)); + li(at, Operand(new_space_allocation_top)); + lw(at, MemOperand(at)); + Branch(&no_info_available, gt, scratch_reg, Operand(at)); + lw(scratch_reg, MemOperand(scratch_reg, -AllocationSiteInfo::kSize)); + Branch(allocation_info_present, cond, scratch_reg, + Operand(Handle<Map>(isolate()->heap()->allocation_site_info_map()))); + bind(&no_info_available); +} + + bool AreAliased(Register r1, Register r2, Register r3, Register r4) { if (r1.is(r2)) return true; if (r1.is(r3)) return true; diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h index b57e51486c..e4cf3bcb7c 100644 --- a/deps/v8/src/mips/macro-assembler-mips.h +++ b/deps/v8/src/mips/macro-assembler-mips.h @@ -65,6 +65,14 @@ enum AllocationFlags { SIZE_IN_WORDS = 1 << 2 }; +// Flags used for AllocateHeapNumber +enum TaggingMode { + // Tag the result. + TAG_RESULT, + // Don't tag + DONT_TAG_RESULT +}; + // Flags used for the ObjectToDoubleFPURegister function. enum ObjectToDoubleFlags { // No special flags. @@ -469,19 +477,20 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Allocation support. - // Allocate an object in new space. The object_size is specified - // either in bytes or in words if the allocation flag SIZE_IN_WORDS - // is passed. If the new space is exhausted control continues at the - // gc_required label. 
The allocated object is returned in result. If - // the flag tag_allocated_object is true the result is tagged as as - // a heap object. All registers are clobbered also when control - // continues at the gc_required label. - void AllocateInNewSpace(int object_size, - Register result, - Register scratch1, - Register scratch2, - Label* gc_required, - AllocationFlags flags); + // Allocate an object in new space or old pointer space. The object_size is + // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS + // is passed. If the space is exhausted control continues at the gc_required + // label. The allocated object is returned in result. If the flag + // tag_allocated_object is true the result is tagged as as a heap object. + // All registers are clobbered also when control continues at the gc_required + // label. + void Allocate(int object_size, + Register result, + Register scratch1, + Register scratch2, + Label* gc_required, + AllocationFlags flags); + void AllocateInNewSpace(Register object_size, Register result, Register scratch1, @@ -536,7 +545,8 @@ class MacroAssembler: public Assembler { Register scratch1, Register scratch2, Register heap_number_map, - Label* gc_required); + Label* gc_required, + TaggingMode tagging_mode = TAG_RESULT); void AllocateHeapNumberWithValue(Register result, FPURegister value, Register scratch1, @@ -620,6 +630,7 @@ class MacroAssembler: public Assembler { // Push a handle. void Push(Handle<Object> handle); + void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } // Push two registers. Pushes leftmost register first (to highest address). void Push(Register src1, Register src2) { @@ -752,14 +763,16 @@ class MacroAssembler: public Assembler { FPURegister double_scratch, Label *not_int32); - // Truncates a double using a specific rounding mode. + // Truncates a double using a specific rounding mode, and writes the value + // to the result register. // The except_flag will contain any exceptions caused by the instruction. - // If check_inexact is kDontCheckForInexactConversion, then the inexacat + // If check_inexact is kDontCheckForInexactConversion, then the inexact // exception is masked. void EmitFPUTruncate(FPURoundingMode rounding_mode, - FPURegister result, + Register result, DoubleRegister double_input, - Register scratch1, + Register scratch, + DoubleRegister double_scratch, Register except_flag, CheckForInexactConversion check_inexact = kDontCheckForInexactConversion); @@ -823,6 +836,7 @@ class MacroAssembler: public Assembler { bool can_have_holes); void LoadGlobalFunction(int index, Register function); + void LoadArrayFunction(Register function); // Load the initial map from the global function. The registers // function and map can be the same, function is then overwritten. @@ -887,6 +901,10 @@ class MacroAssembler: public Assembler { Register scratch, Label* fail); + void IsObjectNameType(Register object, + Register scratch, + Label* fail); + #ifdef ENABLE_DEBUGGER_SUPPORT // ------------------------------------------------------------------------- // Debugger Support. @@ -972,14 +990,14 @@ class MacroAssembler: public Assembler { // case scratch2, scratch3 and scratch4 are unmodified. void StoreNumberToDoubleElements(Register value_reg, Register key_reg, - Register receiver_reg, // All regs below here overwritten. 
Register elements_reg, Register scratch1, Register scratch2, Register scratch3, Register scratch4, - Label* fail); + Label* fail, + int elements_offset = 0); // Compare an object's map with the specified map and its transitioned // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to @@ -1129,6 +1147,7 @@ class MacroAssembler: public Assembler { // Call a code stub. void CallStub(CodeStub* stub, + TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = cc_always, Register r1 = zero_reg, const Operand& r2 = Operand(zero_reg), @@ -1343,6 +1362,9 @@ class MacroAssembler: public Assembler { // Abort execution if argument is not a string, enabled via --debug-code. void AssertString(Register object); + // Abort execution if argument is not a name, enabled via --debug-code. + void AssertName(Register object); + // Abort execution if argument is not the root value with the given index, // enabled via --debug-code. void AssertRootValue(Register src, @@ -1427,6 +1449,17 @@ class MacroAssembler: public Assembler { // in a0. Assumes that any other register can be used as a scratch. void CheckEnumCache(Register null_value, Label* call_runtime); + // AllocationSiteInfo support. Arrays may have an associated + // AllocationSiteInfo object that can be checked for in order to pretransition + // to another type. + // On entry, receiver_reg should point to the array object. + // scratch_reg gets clobbered. + // If allocation info is present, jump to allocation_info_present + void TestJSArrayForAllocationSiteInfo(Register receiver_reg, + Register scratch_reg, + Condition cond, + Label* allocation_info_present); + private: void CallCFunctionHelper(Register function, int num_reg_arguments, @@ -1501,9 +1534,9 @@ class MacroAssembler: public Assembler { // This handle will be patched with the code object on installation. Handle<Object> code_object_; - // Needs access to SafepointRegisterStackIndex for optimized frame + // Needs access to SafepointRegisterStackIndex for compiled frame // traversal. - friend class OptimizedFrame; + friend class StandardFrame; }; diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc index 672ba0eeee..036cbb13e4 100644 --- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc +++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc @@ -262,7 +262,7 @@ void RegExpMacroAssemblerMIPS::CheckCharacters(Vector<const uc16> str, if (mode_ == ASCII) { __ lbu(a1, MemOperand(a0, 0)); __ addiu(a0, a0, char_size()); - ASSERT(str[i] <= String::kMaxAsciiCharCode); + ASSERT(str[i] <= String::kMaxOneByteCharCode); BranchOrBacktrack(on_failure, ne, a1, Operand(str[i])); } else { __ lhu(a1, MemOperand(a0, 0)); @@ -341,7 +341,13 @@ void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase( __ Or(t0, t0, Operand(0x20)); // Also convert input character. __ Branch(&fail, ne, t0, Operand(a3)); __ Subu(a3, a3, Operand('a')); - __ Branch(&fail, hi, a3, Operand('z' - 'a')); // Is a3 a lowercase letter? + __ Branch(&loop_check, ls, a3, Operand('z' - 'a')); + // Latin-1: Check for values in range [224,254] but not 247. + __ Subu(a3, a3, Operand(224 - 'a')); + // Weren't Latin-1 letters. + __ Branch(&fail, hi, a3, Operand(254 - 224)); + // Check for 247. 
+ __ Branch(&fail, eq, a3, Operand(247 - 224)); __ bind(&loop_check); __ Branch(&loop, lt, a0, Operand(a1)); @@ -511,7 +517,7 @@ void RegExpMacroAssemblerMIPS::CheckBitInTable( Handle<ByteArray> table, Label* on_bit_set) { __ li(a0, Operand(table)); - if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) { + if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { __ And(a1, current_character(), Operand(kTableSize - 1)); __ Addu(a0, a0, a1); } else { @@ -531,25 +537,20 @@ bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type, case 's': // Match space-characters. if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. + // One byte space characters are '\t'..'\r', ' ' and \u00a0. Label success; __ Branch(&success, eq, current_character(), Operand(' ')); // Check range 0x09..0x0d. __ Subu(a0, current_character(), Operand('\t')); - BranchOrBacktrack(on_no_match, hi, a0, Operand('\r' - '\t')); + __ Branch(&success, ls, a0, Operand('\r' - '\t')); + // \u00a0 (NBSP). + BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t')); __ bind(&success); return true; } return false; case 'S': - // Match non-space characters. - if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. - BranchOrBacktrack(on_no_match, eq, current_character(), Operand(' ')); - __ Subu(a0, current_character(), Operand('\t')); - BranchOrBacktrack(on_no_match, ls, a0, Operand('\r' - '\t')); - return true; - } + // The emitted code for generic character classes is good enough. return false; case 'd': // Match ASCII digits ('0'..'9'). @@ -1155,7 +1156,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address, Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); // Current string. - bool is_ascii = subject->IsAsciiRepresentationUnderneath(); + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); ASSERT(re_code->instruction_start() <= *return_address); ASSERT(*return_address <= @@ -1186,7 +1187,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address, } // String might have changed. - if (subject_tmp->IsAsciiRepresentation() != is_ascii) { + if (subject_tmp->IsOneByteRepresentation() != is_ascii) { // If we changed between an ASCII and an UC16 string, the specialized // code cannot be used, and we need to restart regexp matching from // scratch (including, potentially, compiling a new version of the code). diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc index cf87f93602..be9f369d01 100644 --- a/deps/v8/src/mips/simulator-mips.cc +++ b/deps/v8/src/mips/simulator-mips.cc @@ -1016,6 +1016,13 @@ void Simulator::set_register(int reg, int32_t value) { } +void Simulator::set_dw_register(int reg, const int* dbl) { + ASSERT((reg >= 0) && (reg < kNumSimuRegisters)); + registers_[reg] = dbl[0]; + registers_[reg + 1] = dbl[1]; +} + + void Simulator::set_fpu_register(int fpureg, int32_t value) { ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters)); FPUregisters_[fpureg] = value; @@ -1045,6 +1052,19 @@ int32_t Simulator::get_register(int reg) const { } +double Simulator::get_double_from_register_pair(int reg) { + ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0)); + + double dm_val = 0.0; + // Read the bits from the unsigned integer register_[] array + // into the double precision floating point value and return it. 
+ char buffer[2 * sizeof(registers_[0])]; + memcpy(buffer, ®isters_[reg], 2 * sizeof(registers_[0])); + memcpy(&dm_val, buffer, 2 * sizeof(registers_[0])); + return(dm_val); +} + + int32_t Simulator::get_fpu_register(int fpureg) const { ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters)); return FPUregisters_[fpureg]; @@ -1525,7 +1545,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { FUNCTION_ADDR(target), arg1); } v8::Handle<v8::Value> result = target(arg1); - *(reinterpret_cast<int*>(arg0)) = (int32_t) *result; + *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result); set_register(v0, arg0); } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) { // See DirectCEntryStub::GenerateCall for explanation of register usage. @@ -1536,7 +1556,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) { FUNCTION_ADDR(target), arg1, arg2); } v8::Handle<v8::Value> result = target(arg1, arg2); - *(reinterpret_cast<int*>(arg0)) = (int32_t) *result; + *(reinterpret_cast<int*>(arg0)) = reinterpret_cast<int32_t>(*result); set_register(v0, arg0); } else { SimulatorRuntimeCall target = @@ -1740,6 +1760,8 @@ void Simulator::ConfigureTypeRegister(Instruction* instr, UNIMPLEMENTED_MIPS(); }; break; + case COP1X: + break; case SPECIAL: switch (instr->FunctionFieldRaw()) { case JR: @@ -1929,6 +1951,7 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { const uint32_t rt_u = static_cast<uint32_t>(rt); const int32_t rd_reg = instr->RdValue(); + const int32_t fr_reg = instr->FrValue(); const int32_t fs_reg = instr->FsValue(); const int32_t ft_reg = instr->FtValue(); const int32_t fd_reg = instr->FdValue(); @@ -2173,8 +2196,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { case CVT_D_L: // Mips32r2 instruction. // Watch the signs here, we want 2 32-bit vals // to make a sign-64. - i64 = (uint32_t) get_fpu_register(fs_reg); - i64 |= ((int64_t) get_fpu_register(fs_reg + 1) << 32); + i64 = static_cast<uint32_t>(get_fpu_register(fs_reg)); + i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32; set_fpu_register_double(fd_reg, static_cast<double>(i64)); break; case CVT_S_L: @@ -2190,6 +2213,19 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { UNREACHABLE(); }; break; + case COP1X: + switch (instr->FunctionFieldRaw()) { + case MADD_D: + double fr, ft, fs; + fr = get_fpu_register_double(fr_reg); + fs = get_fpu_register_double(fs_reg); + ft = get_fpu_register_double(ft_reg); + set_fpu_register_double(fd_reg, fs * ft + fr); + break; + default: + UNREACHABLE(); + }; + break; case SPECIAL: switch (instr->FunctionFieldRaw()) { case JR: { @@ -2219,10 +2255,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) { set_register(HI, static_cast<int32_t>(u64hilo >> 32)); break; case DIV: - // Divide by zero was not checked in the configuration step - div and - // divu do not raise exceptions. On division by 0, the result will - // be UNPREDICTABLE. - if (rt != 0) { + // Divide by zero and overflow was not checked in the configuration + // step - div and divu do not raise exceptions. On division by 0 and + // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE. + if (rt != 0 && !(rs == INT_MIN && rt == -1)) { set_register(LO, rs / rt); set_register(HI, rs % rt); } @@ -2718,34 +2754,7 @@ void Simulator::Execute() { } -int32_t Simulator::Call(byte* entry, int argument_count, ...) { - va_list parameters; - va_start(parameters, argument_count); - // Set up arguments. - - // First four arguments passed in registers. 
- ASSERT(argument_count >= 4); - set_register(a0, va_arg(parameters, int32_t)); - set_register(a1, va_arg(parameters, int32_t)); - set_register(a2, va_arg(parameters, int32_t)); - set_register(a3, va_arg(parameters, int32_t)); - - // Remaining arguments passed on stack. - int original_stack = get_register(sp); - // Compute position of stack on entry to generated code. - int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t) - - kCArgsSlotsSize); - if (OS::ActivationFrameAlignment() != 0) { - entry_stack &= -OS::ActivationFrameAlignment(); - } - // Store remaining arguments on stack, from low to high memory. - intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); - for (int i = 4; i < argument_count; i++) { - stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t); - } - va_end(parameters); - set_register(sp, entry_stack); - +void Simulator::CallInternal(byte* entry) { // Prepare to execute the code at entry. set_register(pc, reinterpret_cast<int32_t>(entry)); // Put down marker for end of simulation. The simulator will stop simulation @@ -2809,6 +2818,38 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) { set_register(gp, gp_val); set_register(sp, sp_val); set_register(fp, fp_val); +} + + +int32_t Simulator::Call(byte* entry, int argument_count, ...) { + va_list parameters; + va_start(parameters, argument_count); + // Set up arguments. + + // First four arguments passed in registers. + ASSERT(argument_count >= 4); + set_register(a0, va_arg(parameters, int32_t)); + set_register(a1, va_arg(parameters, int32_t)); + set_register(a2, va_arg(parameters, int32_t)); + set_register(a3, va_arg(parameters, int32_t)); + + // Remaining arguments passed on stack. + int original_stack = get_register(sp); + // Compute position of stack on entry to generated code. + int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t) + - kCArgsSlotsSize); + if (OS::ActivationFrameAlignment() != 0) { + entry_stack &= -OS::ActivationFrameAlignment(); + } + // Store remaining arguments on stack, from low to high memory. + intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack); + for (int i = 4; i < argument_count; i++) { + stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t); + } + va_end(parameters); + set_register(sp, entry_stack); + + CallInternal(entry); // Pop stack passed arguments. CHECK_EQ(entry_stack, get_register(sp)); @@ -2819,6 +2860,27 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) 
{ } +double Simulator::CallFP(byte* entry, double d0, double d1) { + if (!IsMipsSoftFloatABI) { + set_fpu_register_double(f12, d0); + set_fpu_register_double(f14, d1); + } else { + int buffer[2]; + ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0)); + memcpy(buffer, &d0, sizeof(d0)); + set_dw_register(a0, buffer); + memcpy(buffer, &d1, sizeof(d1)); + set_dw_register(a2, buffer); + } + CallInternal(entry); + if (!IsMipsSoftFloatABI) { + return get_fpu_register_double(f0); + } else { + return get_double_from_register_pair(v0); + } +} + + uintptr_t Simulator::PushAddress(uintptr_t address) { int new_sp = get_register(sp) - sizeof(uintptr_t); uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h index 776badc29b..67f595302b 100644 --- a/deps/v8/src/mips/simulator-mips.h +++ b/deps/v8/src/mips/simulator-mips.h @@ -184,7 +184,9 @@ class Simulator { // architecture specification and is off by a 8 from the currently executing // instruction. void set_register(int reg, int32_t value); + void set_dw_register(int dreg, const int* dbl); int32_t get_register(int reg) const; + double get_double_from_register_pair(int reg); // Same for FPURegisters. void set_fpu_register(int fpureg, int32_t value); void set_fpu_register_float(int fpureg, float value); @@ -214,6 +216,8 @@ class Simulator { // generated RegExp code with 7 parameters. This is a convenience function, // which sets up the simulator state and grabs the result on return. int32_t Call(byte* entry, int argument_count, ...); + // Alternative: call a 2-argument double function. + double CallFP(byte* entry, double d0, double d1); // Push an address onto the JS stack. uintptr_t PushAddress(uintptr_t address); @@ -353,6 +357,7 @@ class Simulator { void GetFpArgs(double* x, int32_t* y); void SetFpResult(const double& result); + void CallInternal(byte* entry); // Architecture state. // Registers. diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc index bd15775d4b..d5cf6de905 100644 --- a/deps/v8/src/mips/stub-cache-mips.cc +++ b/deps/v8/src/mips/stub-cache-mips.cc @@ -121,14 +121,14 @@ static void ProbeTable(Isolate* isolate, // the property. This function may return false negatives, so miss_label // must always call a backup property check that is complete. // This function is safe to call if the receiver has fast properties. -// Name must be a symbol and receiver must be a heap object. +// Name must be unique and receiver must be a heap object. 
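A side note on the simulator hunks above (set_dw_register, get_double_from_register_pair and the new CallFP): under the soft-float ABI a double travels in a pair of 32-bit integer registers, and the simulator moves the raw bits with memcpy rather than a cast. A host-side sketch of that round trip; the helper names are illustrative, not V8's:

  #include <cstdint>
  #include <cstring>

  // Split a double into the two 32-bit words that would be written into an
  // even/odd register pair (e.g. a0/a1), then reassemble it the way
  // get_double_from_register_pair does: byte-wise copies, no pointer punning.
  static void DoubleToWords(double value, int32_t words[2]) {
    std::memcpy(words, &value, sizeof(value));
  }

  static double WordsToDouble(const int32_t words[2]) {
    double value;
    std::memcpy(&value, words, 2 * sizeof(words[0]));
    return value;
  }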
static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, Label* miss_label, Register receiver, - Handle<String> name, + Handle<Name> name, Register scratch0, Register scratch1) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsUniqueName()); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); @@ -162,13 +162,13 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - StringDictionaryLookupStub::GenerateNegativeLookup(masm, - miss_label, - &done, - receiver, - properties, - name, - scratch1); + NameDictionaryLookupStub::GenerateNegativeLookup(masm, + miss_label, + &done, + receiver, + properties, + name, + scratch1); __ bind(&done); __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); } @@ -217,7 +217,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ JumpIfSmi(receiver, &miss); // Get the map of the receiver and compute the hash. - __ lw(scratch, FieldMemOperand(name, String::kHashFieldOffset)); + __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset)); __ Addu(scratch, scratch, at); uint32_t mask = kPrimaryTableSize - 1; @@ -307,26 +307,19 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( } -// Load a fast property out of a holder object (src). In-object properties -// are loaded directly otherwise the property is loaded from the properties -// fixed array. -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - Handle<JSObject> holder, - int index) { - // Adjust for the number of properties stored in the holder. - index -= holder->map()->inobject_properties(); - if (index < 0) { - // Get the property straight out of the holder. - int offset = holder->map()->instance_size() + (index * kPointerSize); - __ lw(dst, FieldMemOperand(src, offset)); - } else { +void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm, + Register dst, + Register src, + bool inobject, + int index) { + int offset = index * kPointerSize; + if (!inobject) { // Calculate the offset into the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; + offset = offset + FixedArray::kHeaderSize; __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); - __ lw(dst, FieldMemOperand(dst, offset)); + src = dst; } + __ lw(dst, FieldMemOperand(src, offset)); } @@ -424,12 +417,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Handle<JSObject> object, int index, Handle<Map> transition, - Handle<String> name, + Handle<Name> name, Register receiver_reg, Register name_reg, + Register value_reg, Register scratch1, Register scratch2, - Label* miss_label) { + Label* miss_label, + Label* miss_restore_name) { // a0 : value. 
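The DoGenerateFastPropertyLoad rewrite above moves the in-object/out-of-object decision to the caller, which now passes a plain flag and a word index. A hedged C++ sketch of the addressing it implements, using simplified stand-in types (the real code deals in tagged pointers and header offsets):

  // Illustrative only: a fast property lives either inside the object itself
  // at a fixed slot, or in the separately allocated properties array.
  struct FakeObject {
    void** properties;   // Stand-in for the out-of-object FixedArray.
    void* inobject[4];   // Stand-in for in-object property slots.
  };

  static void* LoadFastProperty(const FakeObject* obj, bool inobject, int index) {
    return inobject ? obj->inobject[index]     // Directly in the object.
                    : obj->properties[index];  // Via the properties array.
  }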
Label exit; @@ -466,17 +461,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, holder = JSObject::cast(holder->GetPrototype()); } while (holder->GetPrototype()->IsJSObject()); } - // We need an extra register, push - __ push(name_reg); - Label miss_pop, done_check; CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg, - scratch1, scratch2, name, &miss_pop); - __ jmp(&done_check); - __ bind(&miss_pop); - __ pop(name_reg); - __ jmp(miss_label); - __ bind(&done_check); - __ pop(name_reg); + scratch1, scratch2, name, miss_restore_name); } // Stub never generated for non-global objects that require access @@ -522,14 +508,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, if (index < 0) { // Set the property straight into the object. int offset = object->map()->instance_size() + (index * kPointerSize); - __ sw(a0, FieldMemOperand(receiver_reg, offset)); + __ sw(value_reg, FieldMemOperand(receiver_reg, offset)); // Skip updating write barrier if storing a smi. - __ JumpIfSmi(a0, &exit, scratch1); + __ JumpIfSmi(value_reg, &exit); // Update the write barrier for the array address. // Pass the now unused name_reg as a scratch register. - __ mov(name_reg, a0); + __ mov(name_reg, value_reg); __ RecordWriteField(receiver_reg, offset, name_reg, @@ -542,14 +528,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // Get the properties array. __ lw(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ sw(a0, FieldMemOperand(scratch1, offset)); + __ sw(value_reg, FieldMemOperand(scratch1, offset)); // Skip updating write barrier if storing a smi. - __ JumpIfSmi(a0, &exit); + __ JumpIfSmi(value_reg, &exit); // Update the write barrier for the array address. // Ok to clobber receiver_reg and name_reg, since we return. - __ mov(name_reg, a0); + __ mov(name_reg, value_reg); __ RecordWriteField(scratch1, offset, name_reg, @@ -559,18 +545,20 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, } // Return the value (register v0). + ASSERT(value_reg.is(a0)); __ bind(&exit); __ mov(v0, a0); __ Ret(); } -void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { - ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC); - Handle<Code> code = (kind == Code::LOAD_IC) - ? masm->isolate()->builtins()->LoadIC_Miss() - : masm->isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(code, RelocInfo::CODE_TARGET); +void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ li(this->name(), Operand(name)); + } } @@ -682,7 +670,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, // Pass the additional arguments. 
Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); - Handle<Object> call_data(api_call_info->data()); + Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { __ li(a0, api_call_info); __ lw(t2, FieldMemOperand(a0, CallHandlerInfo::kDataOffset)); @@ -751,7 +739,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { void Compile(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, LookupResult* lookup, Register receiver, Register scratch1, @@ -782,7 +770,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch3, Handle<JSObject> interceptor_holder, LookupResult* lookup, - Handle<String> name, + Handle<Name> name, const CallOptimization& optimization, Label* miss_label) { ASSERT(optimization.is_constant_call()); @@ -876,7 +864,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch1, Register scratch2, Register scratch3, - Handle<String> name, + Handle<Name> name, Handle<JSObject> interceptor_holder, Label* miss_label) { Register holder = @@ -937,7 +925,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { // property. static void GenerateCheckPropertyCell(MacroAssembler* masm, Handle<GlobalObject> global, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSGlobalPropertyCell> cell = @@ -956,7 +944,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, static void GenerateCheckPropertyCells(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSObject> current = object; @@ -984,7 +972,7 @@ static void StoreIntAsFloat(MacroAssembler* masm, Register scratch1, Register scratch2) { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ mtc1(ival, f0); __ cvt_s_w(f0, f0); __ sll(scratch1, wordoffset, 2); @@ -1049,43 +1037,8 @@ static void StoreIntAsFloat(MacroAssembler* masm, } -// Convert unsigned integer with specified number of leading zeroes in binary -// representation to IEEE 754 double. -// Integer to convert is passed in register hiword. -// Resulting double is returned in registers hiword:loword. -// This functions does not work correctly for 0. -static void GenerateUInt2Double(MacroAssembler* masm, - Register hiword, - Register loword, - Register scratch, - int leading_zeroes) { - const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; - const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; - - const int mantissa_shift_for_hi_word = - meaningful_bits - HeapNumber::kMantissaBitsInTopWord; - - const int mantissa_shift_for_lo_word = - kBitsPerInt - mantissa_shift_for_hi_word; - - __ li(scratch, biased_exponent << HeapNumber::kExponentShift); - if (mantissa_shift_for_hi_word > 0) { - __ sll(loword, hiword, mantissa_shift_for_lo_word); - __ srl(hiword, hiword, mantissa_shift_for_hi_word); - __ or_(hiword, scratch, hiword); - } else { - __ mov(loword, zero_reg); - __ sll(hiword, hiword, mantissa_shift_for_hi_word); - __ or_(hiword, scratch, hiword); - } - - // If least significant bit of biased exponent was not 1 it was corrupted - // by most significant bit of mantissa so we should fix that. 
- if (!(biased_exponent & 1)) { - __ li(scratch, 1 << HeapNumber::kExponentShift); - __ nor(scratch, scratch, scratch); - __ and_(hiword, hiword, scratch); - } +void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { + __ Jump(code, RelocInfo::CODE_TARGET); } @@ -1099,9 +1052,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, Register holder_reg, Register scratch1, Register scratch2, - Handle<String> name, + Handle<Name> name, int save_at_depth, - Label* miss) { + Label* miss, + PrototypeCheckType check) { + Handle<JSObject> first = object; // Make sure there's no overlap between holder and object registers. ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) @@ -1129,11 +1084,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, if (!current->HasFastProperties() && !current->IsJSGlobalObject() && !current->IsJSGlobalProxy()) { - if (!name->IsSymbol()) { - name = factory()->LookupSymbol(name); + if (!name->IsUniqueName()) { + ASSERT(name->IsString()); + name = factory()->InternalizeString(Handle<String>::cast(name)); } ASSERT(current->property_dictionary()->FindEntry(*name) == - StringDictionary::kNotFound); + NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, scratch2); @@ -1142,9 +1098,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, reg = holder_reg; // From now on the object will be in holder_reg. __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); } else { - Handle<Map> current_map(current->map()); - __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK, - ALLOW_ELEMENT_TRANSITION_MAPS); + Register map_reg = scratch1; + if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) { + Handle<Map> current_map(current->map()); + // CheckMap implicitly loads the map of |reg| into |map_reg|. + __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK, + ALLOW_ELEMENT_TRANSITION_MAPS); + } else { + __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); + } // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global // object. @@ -1156,7 +1118,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, if (heap()->InNewSpace(*prototype)) { // The prototype is in new space; we cannot store a reference to it // in the code. Load it from the map. - __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); + __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); } else { // The prototype is in old space; load it directly. __ li(reg, Operand(prototype)); @@ -1174,9 +1136,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, // Log the check depth. LOG(masm()->isolate(), IntEvent("check-maps-depth", depth + 1)); - // Check the holder map. - __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss, - DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { + // Check the holder map. + __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss, + DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + } // Perform security check for access to the global object. 
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); @@ -1194,128 +1158,128 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void StubCompiler::GenerateLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - int index, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the maps haven't changed. - Register reg = CheckPrototypes( - object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - GenerateFastPropertyLoad(masm(), v0, reg, holder, index); - __ Ret(); +void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success, + Label* miss) { + if (!miss->is_unused()) { + __ Branch(success); + __ bind(miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + } } -void StubCompiler::GenerateLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<JSFunction> value, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss, scratch1); - - // Check that the maps haven't changed. - CheckPrototypes(object, receiver, holder, - scratch1, scratch2, scratch3, name, miss); - - // Return the constant value. - __ LoadHeapObject(v0, value); - __ Ret(); -} +Register BaseLoadStubCompiler::CallbackHandlerFrontend( + Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success, + Handle<ExecutableAccessorInfo> callback) { + Label miss; + Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss); -void StubCompiler::GenerateDictionaryLoadCallback(Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - ASSERT(!receiver.is(scratch1)); - ASSERT(!receiver.is(scratch2)); - ASSERT(!receiver.is(scratch3)); - - // Load the properties dictionary. - Register dictionary = scratch1; - __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done; - StringDictionaryLookupStub::GeneratePositiveLookup(masm(), - miss, + if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { + ASSERT(!reg.is(scratch2())); + ASSERT(!reg.is(scratch3())); + ASSERT(!reg.is(scratch4())); + + // Load the properties dictionary. + Register dictionary = scratch4(); + __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); + + // Probe the dictionary. + Label probe_done; + NameDictionaryLookupStub::GeneratePositiveLookup(masm(), + &miss, &probe_done, dictionary, - name_reg, - scratch2, - scratch3); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch3 contains the - // pointer into the dictionary. Check that the value is the callback. - Register pointer = scratch3; - const int kElementsStartOffset = StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ lw(scratch2, FieldMemOperand(pointer, kValueOffset)); - __ Branch(miss, ne, scratch2, Operand(callback)); + this->name(), + scratch2(), + scratch3()); + __ bind(&probe_done); + + // If probing finds an entry in the dictionary, scratch3 contains the + // pointer into the dictionary. Check that the value is the callback. 
+ Register pointer = scratch3(); + const int kElementsStartOffset = NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kValueOffset = kElementsStartOffset + kPointerSize; + __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset)); + __ Branch(&miss, ne, scratch2(), Operand(callback)); + } + + HandlerFrontendFooter(success, &miss); + return reg; } -void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss, scratch1); +void BaseLoadStubCompiler::NonexistentHandlerFrontend( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Label* success, + Handle<GlobalObject> global) { + Label miss; - // Check that the maps haven't changed. - Register reg = CheckPrototypes(object, receiver, holder, scratch1, - scratch2, scratch3, name, miss); + Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss); - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - GenerateDictionaryLoadCallback( - reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss); + // If the last object in the prototype chain is a global object, + // check that the global property cell is empty. + if (!global.is_null()) { + GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss); + } + + if (!last->HasFastProperties()) { + __ lw(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset)); + __ lw(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset)); + __ Branch(&miss, ne, scratch2(), + Operand(isolate()->factory()->null_value())); } + HandlerFrontendFooter(success, &miss); +} + + +void BaseLoadStubCompiler::GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex index) { + GenerateFastPropertyLoad(masm(), v0, reg, holder, index); + __ Ret(); +} + + +void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) { + // Return the constant value. + __ LoadHeapObject(v0, value); + __ Ret(); +} + + +void BaseLoadStubCompiler::GenerateLoadCallback( + Register reg, + Handle<ExecutableAccessorInfo> callback) { // Build AccessorInfo::args_ list on the stack and push property name below // the exit frame to make GC aware of them and store pointers to them. 
- __ push(receiver); - __ mov(scratch2, sp); // scratch2 = AccessorInfo::args_ + __ push(receiver()); + __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_ if (heap()->InNewSpace(callback->data())) { - __ li(scratch3, callback); - __ lw(scratch3, FieldMemOperand(scratch3, AccessorInfo::kDataOffset)); + __ li(scratch3(), callback); + __ lw(scratch3(), FieldMemOperand(scratch3(), + ExecutableAccessorInfo::kDataOffset)); } else { - __ li(scratch3, Handle<Object>(callback->data())); + __ li(scratch3(), Handle<Object>(callback->data(), + callback->GetIsolate())); } __ Subu(sp, sp, 4 * kPointerSize); __ sw(reg, MemOperand(sp, 3 * kPointerSize)); - __ sw(scratch3, MemOperand(sp, 2 * kPointerSize)); - __ li(scratch3, Operand(ExternalReference::isolate_address())); - __ sw(scratch3, MemOperand(sp, 1 * kPointerSize)); - __ sw(name_reg, MemOperand(sp, 0 * kPointerSize)); + __ sw(scratch3(), MemOperand(sp, 2 * kPointerSize)); + __ li(scratch3(), Operand(ExternalReference::isolate_address())); + __ sw(scratch3(), MemOperand(sp, 1 * kPointerSize)); + __ sw(name(), MemOperand(sp, 0 * kPointerSize)); - __ mov(a2, scratch2); // Saved in case scratch2 == a1. - __ mov(a1, sp); // a1 (first argument - see note below) = Handle<String> + __ mov(a2, scratch2()); // Saved in case scratch2 == a1. + __ mov(a1, sp); // a1 (first argument - see note below) = Handle<Name> // NOTE: the O32 abi requires a0 to hold a special pointer when returning a // struct from the function (which is currently the case). This means we pass @@ -1343,22 +1307,15 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, } -void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<String> name, - Label* miss) { +void BaseLoadStubCompiler::GenerateLoadInterceptor( + Register holder_reg, + Handle<JSObject> object, + Handle<JSObject> interceptor_holder, + LookupResult* lookup, + Handle<Name> name) { ASSERT(interceptor_holder->HasNamedInterceptor()); ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added // later. @@ -1367,8 +1324,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, if (lookup->IsField()) { compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && - lookup->GetCallbackObject()->IsAccessorInfo()) { - AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); + lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { + ExecutableAccessorInfo* callback = + ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); compile_followup_inline = callback->getter() != NULL && callback->IsCompatibleReceiver(*object); } @@ -1378,17 +1336,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. 
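For the GenerateLoadCallback hunk above: the stub pushes the receiver, records sp as AccessorInfo::args_, then reserves four more words and fills them, from sp upwards, with the property name, the isolate address, the callback data and the holder register; a1 is then set to sp, so the name slot doubles as the Handle<Name> argument. A small layout sketch under those assumptions (the struct and field names are mine):

  // Stack block built by GenerateLoadCallback, lowest address first:
  //   sp + 0 * kPointerSize : property name   (also the Handle<Name> in a1)
  //   sp + 1 * kPointerSize : isolate address
  //   sp + 2 * kPointerSize : callback data
  //   sp + 3 * kPointerSize : holder (register `reg`)
  struct CallbackArgsSketch {
    void* name;
    void* isolate;
    void* data;
    void* holder;
  };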
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, - name, miss); - ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); + ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); - bool must_preserve_receiver_reg = !receiver.is(holder_reg) && + bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); // Save necessary data before invoking an interceptor. @@ -1396,86 +1351,40 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, { FrameScope frame_scope(masm(), StackFrame::INTERNAL); if (must_preserve_receiver_reg) { - __ Push(receiver, holder_reg, name_reg); + __ Push(receiver(), holder_reg, this->name()); } else { - __ Push(holder_reg, name_reg); + __ Push(holder_reg, this->name()); } // Invoke an interceptor. Note: map checks from receiver to // interceptor's holder has been compiled before (see a caller // of this method). CompileCallLoadPropertyWithInterceptor(masm(), - receiver, + receiver(), holder_reg, - name_reg, + this->name(), interceptor_holder); // Check if interceptor provided a value for property. If it's // the case, return immediately. Label interceptor_failed; - __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); - __ Branch(&interceptor_failed, eq, v0, Operand(scratch1)); + __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex); + __ Branch(&interceptor_failed, eq, v0, Operand(scratch1())); frame_scope.GenerateLeaveFrame(); __ Ret(); __ bind(&interceptor_failed); - __ pop(name_reg); + __ pop(this->name()); __ pop(holder_reg); if (must_preserve_receiver_reg) { - __ pop(receiver); + __ pop(receiver()); } // Leave the internal frame. } - // Check that the maps from interceptor's holder to lookup's holder - // haven't changed. And load lookup's holder into |holder| register. - if (must_perfrom_prototype_check) { - holder_reg = CheckPrototypes(interceptor_holder, - holder_reg, - Handle<JSObject>(lookup->holder()), - scratch1, - scratch2, - scratch3, - name, - miss); - } - - if (lookup->IsField()) { - // We found FIELD property in prototype chain of interceptor's holder. - // Retrieve a field from field's holder. - GenerateFastPropertyLoad(masm(), v0, holder_reg, - Handle<JSObject>(lookup->holder()), - lookup->GetFieldIndex()); - __ Ret(); - } else { - // We found CALLBACKS property in prototype chain of interceptor's - // holder. - ASSERT(lookup->type() == CALLBACKS); - Handle<AccessorInfo> callback( - AccessorInfo::cast(lookup->GetCallbackObject())); - ASSERT(callback->getter() != NULL); - - // Tail call to runtime. - // Important invariant in CALLBACKS case: the code above must be - // structured to never clobber |receiver| register. 
- __ li(scratch2, callback); - - __ Push(receiver, holder_reg); - __ lw(scratch3, - FieldMemOperand(scratch2, AccessorInfo::kDataOffset)); - __ li(scratch1, Operand(ExternalReference::isolate_address())); - __ Push(scratch3, scratch1, scratch2, name_reg); - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty), - masm()->isolate()); - __ TailCallExternalReference(ref, 6, 1); - } + GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. - Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, - name, miss); - PushInterceptorArguments(masm(), receiver, holder_reg, - name_reg, interceptor_holder); + PushInterceptorArguments(masm(), receiver(), holder_reg, + this->name(), interceptor_holder); ExternalReference ref = ExternalReference( IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), masm()->isolate()); @@ -1484,7 +1393,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, } -void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { +void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { __ Branch(miss, ne, a2, Operand(name)); } @@ -1493,7 +1402,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Label* miss) { ASSERT(holder->IsGlobalObject()); @@ -1549,8 +1458,8 @@ void CallStubCompiler::GenerateMissBranch() { Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object, Handle<JSObject> holder, - int index, - Handle<String> name) { + PropertyIndex index, + Handle<Name> name) { // ----------- S t a t e ------------- // -- a2 : name // -- ra : return address @@ -1623,7 +1532,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( } else { Label call_builtin; if (argc == 1) { // Otherwise fall through to call the builtin. - Label attempt_to_grow_elements; + Label attempt_to_grow_elements, with_write_barrier, check_double; Register elements = t2; Register end_elements = t1; @@ -1634,7 +1543,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ CheckMap(elements, v0, Heap::kFixedArrayMapRootIndex, - &call_builtin, + &check_double, DONT_DO_SMI_CHECK); // Get the array's length into v0 and calculate new length. @@ -1650,7 +1559,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0)); // Check if value is a smi. - Label with_write_barrier; __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); __ JumpIfNotSmi(t0, &with_write_barrier); @@ -1671,6 +1579,39 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ Drop(argc + 1); __ Ret(); + __ bind(&check_double); + + // Check that the elements are in fast mode and writable. + __ CheckMap(elements, + a0, + Heap::kFixedDoubleArrayMapRootIndex, + &call_builtin, + DONT_DO_SMI_CHECK); + + // Get the array's length into r0 and calculate new length. + __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + STATIC_ASSERT(kSmiTagSize == 1); + STATIC_ASSERT(kSmiTag == 0); + __ Addu(a0, a0, Operand(Smi::FromInt(argc))); + + // Get the elements' length. + __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); + + // Check if we could survive without allocation. 
+ __ Branch(&call_builtin, gt, a0, Operand(t0)); + + __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); + __ StoreNumberToDoubleElements( + t0, a0, elements, a3, t1, a2, t5, + &call_builtin, argc * kDoubleSize); + + // Save new length. + __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset)); + + // Check for a smi. + __ Drop(argc + 1); + __ Ret(); + __ bind(&with_write_barrier); __ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset)); @@ -1682,8 +1623,12 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(&not_fast_object); __ CheckFastSmiElements(a3, t3, &call_builtin); + + __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset)); + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); + __ Branch(&call_builtin, eq, t3, Operand(at)); // edx: receiver - // r3: map + // a3: map Label try_holey_map; __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, @@ -1692,7 +1637,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( &try_holey_map); __ mov(a2, receiver); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); __ jmp(&fast_object); __ bind(&try_holey_map); @@ -1703,7 +1650,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( &call_builtin); __ mov(a2, receiver); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); __ bind(&fast_object); } else { __ CheckFastObjectElements(a3, a3, &call_builtin); @@ -1928,8 +1877,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall( v0, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - v0, holder, a1, a3, t0, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + v0, holder, a1, a3, t0, name, &miss); Register receiver = a1; Register index = t1; @@ -2008,8 +1958,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall( v0, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - v0, holder, a1, a3, t0, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + v0, holder, a1, a3, t0, name, &miss); Register receiver = v0; Register index = t1; @@ -2039,7 +1990,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall( if (index_out_of_range.is_linked()) { __ bind(&index_out_of_range); - __ LoadRoot(v0, Heap::kEmptyStringRootIndex); + __ LoadRoot(v0, Heap::kempty_stringRootIndex); __ Drop(argc + 1); __ Ret(); } @@ -2146,7 +2097,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall( return Handle<Code>::null(); } - CpuFeatures::Scope scope_fpu(FPU); + CpuFeatureScope scope_fpu(masm(), FPU); const int argc = arguments().immediate(); // If the object is not a JSObject or we got an unexpected number of // arguments, bail out to the regular call.
@@ -2416,25 +2367,16 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( } -Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, - Handle<JSObject> holder, - Handle<JSFunction> function, - Handle<String> name, - CheckType check) { +void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Label* success) { // ----------- S t a t e ------------- // -- a2 : name // -- ra : return address // ----------------------------------- - if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, - Handle<JSGlobalPropertyCell>::null(), - function, name); - // A null handle means bail out to the regular compiler code below. - if (!code.is_null()) return code; - } - Label miss; - GenerateNameCheck(name, &miss); // Get the receiver from the stack. @@ -2467,77 +2409,93 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, break; case STRING_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - // Check that the object is a two-byte string or a symbol. - __ GetObjectType(a1, a3, a3); - __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE)); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::STRING_FUNCTION_INDEX, a0, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - a0, holder, a3, a1, t0, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + // Check that the object is a string. + __ GetObjectType(a1, a3, a3); + __ Branch(&miss, Ugreater_equal, a3, Operand(FIRST_NONSTRING_TYPE)); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::STRING_FUNCTION_INDEX, a0, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + a0, holder, a3, a1, t0, name, &miss); break; - case NUMBER_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a smi or a heap number. - __ JumpIfSmi(a1, &fast); - __ GetObjectType(a1, a0, a0); - __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE)); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - a0, holder, a3, a1, t0, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + case SYMBOL_CHECK: + // Check that the object is a symbol. + __ GetObjectType(a1, a1, a3); + __ Branch(&miss, ne, a3, Operand(SYMBOL_TYPE)); break; - case BOOLEAN_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a boolean. - __ LoadRoot(t0, Heap::kTrueValueRootIndex); - __ Branch(&fast, eq, a1, Operand(t0)); - __ LoadRoot(t0, Heap::kFalseValueRootIndex); - __ Branch(&miss, ne, a1, Operand(t0)); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. 
- GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - a0, holder, a3, a1, t0, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + case NUMBER_CHECK: { + Label fast; + // Check that the object is a smi or a heap number. + __ JumpIfSmi(a1, &fast); + __ GetObjectType(a1, a0, a0); + __ Branch(&miss, ne, a0, Operand(HEAP_NUMBER_TYPE)); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::NUMBER_FUNCTION_INDEX, a0, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + a0, holder, a3, a1, t0, name, &miss); + break; + } + case BOOLEAN_CHECK: { + Label fast; + // Check that the object is a boolean. + __ LoadRoot(t0, Heap::kTrueValueRootIndex); + __ Branch(&fast, eq, a1, Operand(t0)); + __ LoadRoot(t0, Heap::kFalseValueRootIndex); + __ Branch(&miss, ne, a1, Operand(t0)); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::BOOLEAN_FUNCTION_INDEX, a0, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + a0, holder, a3, a1, t0, name, &miss); break; } + } + + __ jmp(success); + + // Handle call cache miss. + __ bind(&miss); + GenerateMissBranch(); +} + + +void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; __ InvokeFunction( function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); +} - // Handle call cache miss. - __ bind(&miss); - GenerateMissBranch(); +Handle<Code> CallStubCompiler::CompileCallConstant( + Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Handle<JSFunction> function) { + if (HasCustomCallGenerator(function)) { + Handle<Code> code = CompileCustomCall(object, holder, + Handle<JSGlobalPropertyCell>::null(), + function, Handle<String>::cast(name)); + // A null handle means bail out to the regular compiler code below. + if (!code.is_null()) return code; + } + + Label success; + + CompileHandlerFrontend(object, holder, name, check, &success); + __ bind(&success); + CompileHandlerBackend(function); // Return the generated code. return GetCode(function); @@ -2546,7 +2504,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // -- a2 : name // -- ra : return address @@ -2589,14 +2547,15 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, Handle<JSFunction> function, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // -- a2 : name // -- ra : return address // ----------------------------------- if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, cell, function, name); + Handle<Code> code = CompileCustomCall( + object, holder, cell, function, Handle<String>::cast(name)); // A null handle means bail out to the regular compiler code below. 
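The rewritten CompileHandlerFrontend above dispatches on CheckType: strings, numbers and booleans each get a type check followed by a prototype-chain check rooted at the corresponding native-context function, while symbols only get the type check. A compact sketch of that mapping; the enum and helper here are illustrative stand-ins, with the real indices being Context::STRING_FUNCTION_INDEX, Context::NUMBER_FUNCTION_INDEX and Context::BOOLEAN_FUNCTION_INDEX as used in the hunk:

  // Which global function's prototype chain the handler frontend verifies
  // for each primitive receiver kind (sketch, not V8 declarations).
  enum SketchCheckType { STRING_CHECK, SYMBOL_CHECK, NUMBER_CHECK, BOOLEAN_CHECK };
  enum SketchPrototypeRoot { kStringFunction, kNumberFunction, kBooleanFunction, kNone };

  static SketchPrototypeRoot PrototypeRootFor(SketchCheckType check) {
    switch (check) {
      case STRING_CHECK:  return kStringFunction;
      case NUMBER_CHECK:  return kNumberFunction;
      case BOOLEAN_CHECK: return kBooleanFunction;
      case SYMBOL_CHECK:  return kNone;  // Type check only, no prototype walk.
    }
    return kNone;
  }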
if (!code.is_null()) return code; } @@ -2643,61 +2602,24 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( } -Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - Label miss; - - // Name register might be clobbered. - GenerateStoreField(masm(), - object, - index, - transition, - name, - a1, a2, a3, t0, - &miss); - __ bind(&miss); - __ li(a2, Operand(Handle<String>(name))); // Restore name. - Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? Code::FIELD - : Code::MAP_TRANSITION, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<String> name, - Handle<JSObject> receiver, + Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- + Handle<ExecutableAccessorInfo> callback) { Label miss; // Check that the maps haven't changed. - __ JumpIfSmi(a1, &miss, a3); - CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss); + __ JumpIfSmi(receiver(), &miss); + CheckPrototypes(object, receiver(), holder, + scratch1(), scratch2(), scratch3(), name, &miss); // Stub never generated for non-global objects that require access // checks. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); - __ push(a1); // Receiver. - __ li(a3, Operand(callback)); // Callback info. - __ Push(a3, a2, a0); + __ push(receiver()); // Receiver. + __ li(at, Operand(callback)); // Callback info. + __ Push(at, this->name(), value()); // Do tail-call to the runtime system. ExternalReference store_callback_property = @@ -2707,11 +2629,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::CALLBACKS, name); + return GetICCode(kind(), Code::CALLBACKS, name); } @@ -2761,62 +2682,28 @@ void StoreStubCompiler::GenerateStoreViaSetter( #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreViaSetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> setter) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(a1, &miss); - CheckPrototypes(receiver, a1, holder, a3, t0, t1, name, &miss); - - GenerateStoreViaSetter(masm(), setter); - - __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. 
- return GetCode(Code::CALLBACKS, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> receiver, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- + Handle<JSObject> object, + Handle<Name> name) { Label miss; // Check that the map of the object hasn't changed. - __ CheckMap(a1, a3, Handle<Map>(receiver->map()), &miss, + __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform global security token check if needed. - if (receiver->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(a1, a3, &miss); + if (object->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); } // Stub is never generated for non-global objects that require access // checks. - ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded()); + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - __ Push(a1, a2, a0); // Receiver, name, value. + __ Push(receiver(), this->name(), value()); - __ li(a0, Operand(Smi::FromInt(strict_mode_))); - __ push(a0); // Strict mode. + __ li(scratch1(), Operand(Smi::FromInt(strict_mode()))); + __ push(scratch1()); // strict mode // Do tail-call to the runtime system. ExternalReference store_ic_property = @@ -2826,133 +2713,113 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = masm()->isolate()->builtins()->Builtins::StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); + return GetICCode(kind(), Code::INTERCEPTOR, name); } Handle<Code> StoreStubCompiler::CompileStoreGlobal( Handle<GlobalObject> object, Handle<JSGlobalPropertyCell> cell, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- + Handle<Name> name) { Label miss; // Check that the map of the global has not changed. - __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset)); - __ Branch(&miss, ne, a3, Operand(Handle<Map>(object->map()))); + __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); + __ Branch(&miss, ne, scratch1(), Operand(Handle<Map>(object->map()))); // Check that the value in the cell is not the hole. If it is, this // cell could have been deleted and reintroducing the global needs // to update the property details in the property dictionary of the // global object. We bail out to the runtime system to do that. - __ li(t0, Operand(cell)); - __ LoadRoot(t1, Heap::kTheHoleValueRootIndex); - __ lw(t2, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset)); - __ Branch(&miss, eq, t1, Operand(t2)); + __ li(scratch1(), Operand(cell)); + __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex); + __ lw(scratch3(), + FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); + __ Branch(&miss, eq, scratch3(), Operand(scratch2())); // Store the value in the cell. - __ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset)); + __ sw(value(), + FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset)); __ mov(v0, a0); // Stored value must be returned in v0. // Cells are always rescanned, so no write barrier here. 
Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3); + __ IncrementCounter( + counters->named_store_global_inline(), 1, scratch1(), scratch2()); __ Ret(); // Handle store cache miss. __ bind(&miss); - __ IncrementCounter(counters->named_store_global_inline_miss(), 1, a1, a3); - Handle<Code> ic = masm()->isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + __ IncrementCounter( + counters->named_store_global_inline_miss(), 1, scratch1(), scratch2()); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, name); + return GetICCode(kind(), Code::NORMAL, name); } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> last) { - // ----------- S t a t e ------------- - // -- a0 : receiver - // -- ra : return address - // ----------------------------------- - Label miss; - - // Check that the receiver is not a smi. - __ JumpIfSmi(a0, &miss); - - // Check the maps of the full prototype chain. - CheckPrototypes(object, a0, last, a3, a1, t0, name, &miss); +Handle<Code> LoadStubCompiler::CompileLoadNonexistent( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Handle<GlobalObject> global) { + Label success; - // If the last object in the prototype chain is a global object, - // check that the global property cell is empty. - if (last->IsGlobalObject()) { - GenerateCheckPropertyCell( - masm(), Handle<GlobalObject>::cast(last), name, a1, &miss); - } + NonexistentHandlerFrontend(object, last, name, &success, global); + __ bind(&success); // Return undefined if maps of the full prototype chain is still the same. __ LoadRoot(v0, Heap::kUndefinedValueRootIndex); __ Ret(); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::NONEXISTENT, factory()->empty_string()); + return GetCode(kind(), Code::NONEXISTENT, name); } -Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - int index, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- a0 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - Label miss; +Register* LoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + static Register registers[] = { a0, a2, a3, a1, t0, t1 }; + return registers; +} - __ mov(v0, a0); - GenerateLoadField(object, holder, v0, a3, a1, t0, index, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); +Register* KeyedLoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + static Register registers[] = { a1, a0, a2, a3, t0, t1 }; + return registers; +} - // Return the generated code. - return GetCode(Code::FIELD, name); + +Register* StoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. 
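Backing up to the CompileStoreGlobal hunk above: the fast path only writes the property cell if it does not currently hold the hole, since a hole marks a deleted global whose property details must be rebuilt by the runtime. A stand-alone sketch of that guard with stand-in types (Cell, the_hole and the helper are illustrative, not V8 declarations):

  // Store to a global property cell, or report a miss if the cell was holed.
  struct Cell { void* value; };

  static bool TryStoreGlobalCell(Cell* cell, void* value, const void* the_hole) {
    if (cell->value == the_hole) return false;  // Miss: fall back to the runtime IC.
    cell->value = value;                        // Fast path: write the cell directly.
    return true;
  }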
+ static Register registers[] = { a1, a2, a0, a3, t0, t1 }; + return registers; } -Handle<Code> LoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- a0 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - Label miss; - GenerateLoadCallback(object, holder, a0, a2, a3, a1, t0, t1, callback, name, - &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); +Register* KeyedStoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. + static Register registers[] = { a2, a1, a0, a3, t0, t1 }; + return registers; +} - // Return the generated code. - return GetCode(Code::CALLBACKS, name); + +void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ Branch(miss, ne, name_reg, Operand(name)); +} + + +void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ Branch(miss, ne, name_reg, Operand(name)); } @@ -2993,91 +2860,18 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadViaGetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> getter) { - // ----------- S t a t e ------------- - // -- a0 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(a0, &miss); - CheckPrototypes(receiver, a0, holder, a3, t0, a1, name, &miss); - - GenerateLoadViaGetter(masm(), getter); - - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<JSFunction> value, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- a0 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - Label miss; - - GenerateLoadConstant(object, holder, a0, a3, a1, t0, value, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- a0 : receiver - // -- a2 : name - // -- ra : return address - // -- [sp] : receiver - // ----------------------------------- - Label miss; - - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - GenerateLoadInterceptor(object, holder, &lookup, a0, a2, a3, a1, t0, name, - &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. 
- return GetCode(Code::INTERCEPTOR, name); -} - - Handle<Code> LoadStubCompiler::CompileLoadGlobal( Handle<JSObject> object, - Handle<GlobalObject> holder, + Handle<GlobalObject> global, Handle<JSGlobalPropertyCell> cell, - Handle<String> name, + Handle<Name> name, bool is_dont_delete) { - // ----------- S t a t e ------------- - // -- a0 : receiver - // -- a2 : name - // -- ra : return address - // ----------------------------------- - Label miss; + Label success, miss; - // Check that the map of the global has not changed. - __ JumpIfSmi(a0, &miss); - CheckPrototypes(object, a0, holder, a3, t0, a1, name, &miss); + __ CheckMap( + receiver(), scratch1(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK); + HandlerFrontendHeader( + object, receiver(), Handle<JSObject>::cast(global), name, &miss); // Get the value from the cell. __ li(a3, Operand(cell)); @@ -3089,293 +2883,48 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal( __ Branch(&miss, eq, t0, Operand(at)); } - __ mov(v0, t0); + HandlerFrontendFooter(&success, &miss); + __ bind(&success); + Counters* counters = masm()->isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3); + __ mov(v0, t0); __ Ret(); - __ bind(&miss); - __ IncrementCounter(counters->named_load_global_stub_miss(), 1, a1, a3); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::NORMAL, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - int index) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ Branch(&miss, ne, a0, Operand(name)); - - GenerateLoadField(receiver, holder, a1, a2, a3, t0, index, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::FIELD, name); + return GetICCode(kind(), Code::NORMAL, name); } -Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ Branch(&miss, ne, a0, Operand(name)); - - GenerateLoadCallback(receiver, holder, a1, a0, a2, a3, t0, t1, callback, - name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> value) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ Branch(&miss, ne, a0, Operand(name)); - - GenerateLoadConstant(receiver, holder, a1, a2, a3, t0, value, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. 
- return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor( - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ Branch(&miss, ne, a0, Operand(name)); - - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - GenerateLoadInterceptor(receiver, holder, &lookup, a1, a0, a2, a3, t0, name, - &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::INTERCEPTOR, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss; - - // Check the key is the cached one. - __ Branch(&miss, ne, a0, Operand(name)); - - GenerateLoadArrayLength(masm(), a1, a2, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss; - - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_string_length(), 1, a2, a3); - - // Check the key is the cached one. - __ Branch(&miss, ne, a0, Operand(name)); - - GenerateLoadStringLength(masm(), a1, a2, a3, &miss, true); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_string_length(), 1, a2, a3); - - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- +Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC( + MapHandleList* receiver_maps, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3); - - // Check the name hasn't changed. - __ Branch(&miss, ne, a0, Operand(name)); - - GenerateLoadFunctionPrototype(masm(), a1, a2, a3, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_function_prototype(), 1, a2, a3); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - ElementsKind elements_kind = receiver_map->elements_kind(); - Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode(); - - __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. 
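Note: BaseLoadStubCompiler::CompilePolymorphicIC, whose body continues below, optionally checks the property name and then compares the receiver's map against each entry in receiver_maps, tail-jumping to the matching handler and falling back to the miss builtin otherwise. A sketch of a call site that ends up with such a polymorphic IC:

    function area(shape) { return shape.width * shape.height; }
    area({ width: 2, height: 3 });             // receivers with one map
    area({ width: 2, height: 3, depth: 4 });   // a second map: the load IC for
                                               // shape.width now dispatches on the
                                               // receiver's map to pick a handler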
- return GetCode(Code::NORMAL, factory()->empty_string()); -} - + if (check == PROPERTY) { + GenerateNameCheck(name, this->name(), &miss); + } -Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( - MapHandleList* receiver_maps, - CodeHandleList* handler_ics) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss; - __ JumpIfSmi(a1, &miss); + __ JumpIfSmi(receiver(), &miss); + Register map_reg = scratch1(); int receiver_count = receiver_maps->length(); - __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset)); + __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); for (int current = 0; current < receiver_count; ++current) { - __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, - eq, a2, Operand(receiver_maps->at(current))); + __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, + eq, map_reg, Operand(receiver_maps->at(current))); } __ bind(&miss); - Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // ----------------------------------- - - Label miss; - - Counters* counters = masm()->isolate()->counters(); - __ IncrementCounter(counters->keyed_store_field(), 1, a3, t0); - - // Check that the name has not changed. - __ Branch(&miss, ne, a1, Operand(name)); - - // a3 is used as scratch register. a1 and a2 keep their values if a jump to - // the miss label is generated. - GenerateStoreField(masm(), - object, - index, - transition, - name, - a2, a1, a3, t0, - &miss); - __ bind(&miss); - - __ DecrementCounter(counters->keyed_store_field(), 1, a3, t0); - Handle<Code> ic = masm()->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? Code::FIELD - : Code::MAP_TRANSITION, name); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // -- a3 : scratch - // ----------------------------------- - ElementsKind elements_kind = receiver_map->elements_kind(); - bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; - Handle<Code> stub = - KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode(); - - __ DispatchMap(a2, a3, receiver_map, stub, DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string()); + InlineCacheState state = + receiver_maps->length() > 1 ? 
POLYMORPHIC : MONOMORPHIC; + return GetICCode(kind(), type, name, state); } @@ -3383,37 +2932,30 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { - // ----------- S t a t e ------------- - // -- a0 : value - // -- a1 : key - // -- a2 : receiver - // -- ra : return address - // -- a3 : scratch - // ----------------------------------- Label miss; - __ JumpIfSmi(a2, &miss); + __ JumpIfSmi(receiver(), &miss); int receiver_count = receiver_maps->length(); - __ lw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); + __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); for (int i = 0; i < receiver_count; ++i) { if (transitioned_maps->at(i).is_null()) { __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, - a3, Operand(receiver_maps->at(i))); + scratch1(), Operand(receiver_maps->at(i))); } else { Label next_map; - __ Branch(&next_map, ne, a3, Operand(receiver_maps->at(i))); - __ li(a3, Operand(transitioned_maps->at(i))); + __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i))); + __ li(transition_map(), Operand(transitioned_maps->at(i))); __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); __ bind(&next_map); } } __ bind(&miss); - Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); + return GetICCode( + kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } @@ -3522,7 +3064,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ bind(&next); } else { // Set the property to the constant value. - Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i)); + Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i), + masm()->isolate()); __ li(a2, Operand(constant)); __ sw(a2, MemOperand(t5)); __ Addu(t5, t5, kPointerSize); @@ -3600,9 +3143,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- a0 : key // -- a1 : receiver // ----------------------------------- - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); // Miss case, call the runtime. __ bind(&miss_force_generic); @@ -3612,10 +3153,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- a0 : key // -- a1 : receiver // ----------------------------------- - - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric); } @@ -3654,9 +3192,10 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, Register scratch0, Register scratch1, FPURegister double_scratch0, + FPURegister double_scratch1, Label* fail) { if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); Label key_ok; // Check for smi or a smi inside a heap number. 
We convert the heap // number and check if the conversion is exact and fits into the smi @@ -3669,15 +3208,15 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, DONT_DO_SMI_CHECK); __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset)); __ EmitFPUTruncate(kRoundToZero, - double_scratch0, - double_scratch0, scratch0, + double_scratch0, + at, + double_scratch1, scratch1, kCheckForInexactConversion); __ Branch(fail, ne, scratch1, Operand(zero_reg)); - __ mfc1(scratch0, double_scratch0); __ SmiTagCheckOverflow(key, scratch0, scratch1); __ BranchOnOverflow(fail, scratch1); __ bind(&key_ok); @@ -3688,343 +3227,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, } -void KeyedLoadStubCompiler::GenerateLoadExternalArray( - MacroAssembler* masm, - ElementsKind elements_kind) { - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss_force_generic, slow, failed_allocation; - - Register key = a0; - Register receiver = a1; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic); - - __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset)); - // a3: elements array - - // Check that the index is in range. - __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset)); - __ sra(t2, key, kSmiTagSize); - // Unsigned comparison catches both negative and too-large values. - __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1)); - - __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset)); - // a3: base pointer of external storage - - // We are not untagging smi key and instead work with it - // as if it was premultiplied by 2. - STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); - - Register value = a2; - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ srl(t2, key, 1); - __ addu(t3, a3, t2); - __ lb(value, MemOperand(t3, 0)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ srl(t2, key, 1); - __ addu(t3, a3, t2); - __ lbu(value, MemOperand(t3, 0)); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ addu(t3, a3, key); - __ lh(value, MemOperand(t3, 0)); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ addu(t3, a3, key); - __ lhu(value, MemOperand(t3, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ sll(t2, key, 1); - __ addu(t3, a3, t2); - __ lw(value, MemOperand(t3, 0)); - break; - case EXTERNAL_FLOAT_ELEMENTS: - __ sll(t3, t2, 2); - __ addu(t3, a3, t3); - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - __ lwc1(f0, MemOperand(t3, 0)); - } else { - __ lw(value, MemOperand(t3, 0)); - } - break; - case EXTERNAL_DOUBLE_ELEMENTS: - __ sll(t2, key, 2); - __ addu(t3, a3, t2); - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - __ ldc1(f0, MemOperand(t3, 0)); - } else { - // t3: pointer to the beginning of the double we want to load. 
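Note: GenerateSmiKeyCheck above now takes a second FPU scratch register; it accepts either a smi key or a heap number whose value converts exactly back to a smi, using EmitFPUTruncate and rejecting inexact conversions. Roughly, at the JS level (a sketch; whether a given key actually reaches the stub boxed as a heap number depends on how it was produced):

    var a = [10, 20, 30];
    a[2];          // smi key: passes the check directly
    var k = 4 / 2;
    a[k];          // 2.0 may arrive boxed; it converts exactly, so the fast path is kept
    a[2.5];        // inexact conversion: branch to the miss/generic path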
- __ lw(a2, MemOperand(t3, 0)); - __ lw(a3, MemOperand(t3, Register::kSizeInBytes)); - } - break; - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - - // For integer array types: - // a2: value - // For float array type: - // f0: value (if FPU is supported) - // a2: value (if FPU is not supported) - // For double array type: - // f0: value (if FPU is supported) - // a2/a3: value (if FPU is not supported) - - if (elements_kind == EXTERNAL_INT_ELEMENTS) { - // For the Int and UnsignedInt array types, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. - Label box_int; - __ Subu(t3, value, Operand(0xC0000000)); // Non-smi value gives neg result. - __ Branch(&box_int, lt, t3, Operand(zero_reg)); - // Tag integer as smi and return it. - __ sll(v0, value, kSmiTagSize); - __ Ret(); - - __ bind(&box_int); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. - // The arm version uses a temporary here to save r0, but we don't need to - // (a0 is not modified). - __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(v0, a3, t0, t1, &slow); - - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - __ mtc1(value, f0); - __ cvt_d_w(f0, f0); - __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); - __ Ret(); - } else { - Register dst1 = t2; - Register dst2 = t3; - FloatingPointHelper::Destination dest = - FloatingPointHelper::kCoreRegisters; - FloatingPointHelper::ConvertIntToDouble(masm, - value, - dest, - f0, - dst1, - dst2, - t1, - f2); - __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); - __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset)); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { - // The test is different for unsigned int values. Since we need - // the value to be in the range of a positive smi, we can't - // handle either of the top two bits being set in the value. - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - Label pl_box_int; - __ And(t2, value, Operand(0xC0000000)); - __ Branch(&pl_box_int, ne, t2, Operand(zero_reg)); - - // It can fit in an Smi. - // Tag integer as smi and return it. - __ sll(v0, value, kSmiTagSize); - __ Ret(); - - __ bind(&pl_box_int); - // Allocate a HeapNumber for the result and perform int-to-double - // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all - // registers - also when jumping due to exhausted young space. - __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(v0, t2, t3, t6, &slow); - - // This is replaced by a macro: - // __ mtc1(value, f0); // LS 32-bits. - // __ mtc1(zero_reg, f1); // MS 32-bits are all zero. - // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit. - - __ Cvt_d_uw(f0, value, f22); - - __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); - - __ Ret(); - } else { - // Check whether unsigned integer fits into smi. - Label box_int_0, box_int_1, done; - __ And(t2, value, Operand(0x80000000)); - __ Branch(&box_int_0, ne, t2, Operand(zero_reg)); - __ And(t2, value, Operand(0x40000000)); - __ Branch(&box_int_1, ne, t2, Operand(zero_reg)); - - // Tag integer as smi and return it. 
- __ sll(v0, value, kSmiTagSize); - __ Ret(); - - Register hiword = value; // a2. - Register loword = a3; - - __ bind(&box_int_0); - // Integer does not have leading zeros. - GenerateUInt2Double(masm, hiword, loword, t0, 0); - __ Branch(&done); - - __ bind(&box_int_1); - // Integer has one leading zero. - GenerateUInt2Double(masm, hiword, loword, t0, 1); - - - __ bind(&done); - // Integer was converted to double in registers hiword:loword. - // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber - // clobbers all registers - also when jumping due to exhausted young - // space. - __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(t2, t3, t5, t6, &slow); - - __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset)); - __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset)); - - __ mov(v0, t2); - __ Ret(); - } - } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - // Allocate a HeapNumber for the result. Don't use a0 and a1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(v0, t3, t5, t6, &slow); - // The float (single) value is already in fpu reg f0 (if we use float). - __ cvt_d_s(f0, f0); - __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use a0 and a1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(v0, t3, t5, t6, &slow); - // FPU is not available, do manual single to double conversion. - - // a2: floating point value (binary32). - // v0: heap number for result - - // Extract mantissa to t4. - __ And(t4, value, Operand(kBinary32MantissaMask)); - - // Extract exponent to t5. - __ srl(t5, value, kBinary32MantissaBits); - __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); - - Label exponent_rebiased; - __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg)); - - __ li(t0, 0x7ff); - __ Xor(t1, t5, Operand(0xFF)); - __ Movz(t5, t0, t1); // Set t5 to 0x7ff only if t5 is equal to 0xff. - __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg)); - - // Rebias exponent. - __ Addu(t5, - t5, - Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); - - __ bind(&exponent_rebiased); - __ And(a2, value, Operand(kBinary32SignMask)); - value = no_reg; - __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord); - __ or_(a2, a2, t0); - - // Shift mantissa. - static const int kMantissaShiftForHiWord = - kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; - - static const int kMantissaShiftForLoWord = - kBitsPerInt - kMantissaShiftForHiWord; - - __ srl(t0, t4, kMantissaShiftForHiWord); - __ or_(a2, a2, t0); - __ sll(a0, t4, kMantissaShiftForLoWord); - - __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset)); - __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); - __ Ret(); - } - - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); - // Allocate a HeapNumber for the result. Don't use a0 and a1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. 
- __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(v0, t3, t5, t6, &slow); - // The double value is already in f0 - __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset)); - __ Ret(); - } else { - // Allocate a HeapNumber for the result. Don't use a0 and a1 as - // AllocateHeapNumber clobbers all registers - also when jumping due to - // exhausted young space. - __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(v0, t3, t5, t6, &slow); - - __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); - __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset)); - __ Ret(); - } - - } else { - // Tag integer as smi and return it. - __ sll(v0, value, kSmiTagSize); - __ Ret(); - } - - // Slow case, key and receiver still in a0 and a1. - __ bind(&slow); - __ IncrementCounter( - masm->isolate()->counters()->keyed_load_external_array_slow(), - 1, a2, a3); - - // ---------- S t a t e -------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - - __ Push(a1, a0); - - __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); - - __ bind(&miss_force_generic); - Handle<Code> stub = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(stub, RelocInfo::CODE_TARGET); -} - - void KeyedStoreStubCompiler::GenerateStoreExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -4047,7 +3249,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic); + GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic); __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset)); @@ -4126,10 +3328,10 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( } FloatingPointHelper::ConvertIntToDouble( masm, t1, destination, - f0, t2, t3, // These are: double_dst, dst1, dst2. + f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent. t0, f2); // These are: scratch2, single_scratch. if (destination == FloatingPointHelper::kFPURegisters) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ sdc1(f0, MemOperand(a3, 0)); } else { __ sw(t2, MemOperand(a3, 0)); @@ -4167,7 +3369,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // reproducible behavior, convert these to zero. if (CpuFeatures::IsSupported(FPU)) { - CpuFeatures::Scope scope(FPU); + CpuFeatureScope scope(masm, FPU); __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset)); @@ -4405,9 +3607,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // -- a0 : key // -- a1 : receiver // ----------------------------------- - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); // Miss case, call the runtime. 
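Note: the removed GenerateLoadExternalArray above (and the retained GenerateStoreExternalArray) handle keyed access to external-array element kinds such as EXTERNAL_UNSIGNED_INT_ELEMENTS and EXTERNAL_DOUBLE_ELEMENTS, boxing results in a HeapNumber when they do not fit in a smi. A small JS sketch of accesses that exercise those kinds:

    var buf  = new ArrayBuffer(16);
    var ints = new Uint32Array(buf);     // EXTERNAL_UNSIGNED_INT_ELEMENTS backing store
    var dbls = new Float64Array(buf);    // EXTERNAL_DOUBLE_ELEMENTS backing store
    ints[0] = 7;
    var a = ints[0];          // fits in a smi, returned tagged
    ints[1] = 0xFFFFFFFF;     // top bits set...
    var b = ints[1];          // ...so the load has to box the value in a HeapNumber
    var c = dbls[1] = 2.5;    // double elements go through the FPU or a HeapNumber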
__ bind(&miss_force_generic); @@ -4417,119 +3617,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // -- a0 : key // -- a1 : receiver // ----------------------------------- - - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss_force_generic; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic); - - // Get the elements array. - __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset)); - __ AssertFastElements(a2); - - // Check that the key is within bounds. - __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset)); - __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3)); - - // Load the result and make sure it's not the hole. - __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); - STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); - __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); - __ Addu(t0, t0, a3); - __ lw(t0, MemOperand(t0)); - __ LoadRoot(t1, Heap::kTheHoleValueRootIndex); - __ Branch(&miss_force_generic, eq, t0, Operand(t1)); - __ Ret(USE_DELAY_SLOT); - __ mov(v0, t0); - - __ bind(&miss_force_generic); - Handle<Code> stub = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(stub, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( - MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- ra : return address - // -- a0 : key - // -- a1 : receiver - // ----------------------------------- - Label miss_force_generic, slow_allocate_heapnumber; - - Register key_reg = a0; - Register receiver_reg = a1; - Register elements_reg = a2; - Register heap_number_reg = a2; - Register indexed_double_offset = a3; - Register scratch = t0; - Register scratch2 = t1; - Register scratch3 = t2; - Register heap_number_map = t3; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic); - - // Get the elements array. - __ lw(elements_reg, - FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - - // Check that the key is within bounds. - __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); - __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); - - // Load the upper word of the double in the fixed array and test for NaN. - __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); - __ Addu(indexed_double_offset, elements_reg, Operand(scratch2)); - uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); - __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); - __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32)); - - // Non-NaN. Allocate a new heap number and copy the double value into it. 
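Note: the removed GenerateLoadFastDoubleElement above read an unboxed double out of a FixedDoubleArray, treated the hole NaN bit pattern as a miss, and otherwise allocated a HeapNumber for the result. Sketched in JS:

    var a = [0.5, 1.5, 2.5];   // FAST_DOUBLE_ELEMENTS: doubles stored unboxed
    var x = a[1];              // the load boxes 1.5 into a fresh HeapNumber
    var b = [0.5, , 2.5];      // index 1 holds the hole NaN bit pattern
    b[1];                      // hole detected: miss path, ultimately yielding undefined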
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); - __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, - heap_number_map, &slow_allocate_heapnumber); - - // Don't need to reload the upper 32 bits of the double, it's already in - // scratch. - __ sw(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kExponentOffset)); - __ lw(scratch, FieldMemOperand(indexed_double_offset, - FixedArray::kHeaderSize)); - __ sw(scratch, FieldMemOperand(heap_number_reg, - HeapNumber::kMantissaOffset)); - - __ mov(v0, heap_number_reg); - __ Ret(); - - __ bind(&slow_allocate_heapnumber); - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ Jump(slow_ic, RelocInfo::CODE_TARGET); - - __ bind(&miss_force_generic); - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); } @@ -4537,7 +3625,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key @@ -4561,7 +3649,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic); + GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic); if (IsFastSmiElementsKind(elements_kind)) { __ JumpIfNotSmi(value_reg, &transition_elements_kind); @@ -4576,7 +3664,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); } // Compare smis. - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { __ Branch(&grow, hs, key_reg, Operand(scratch)); } else { __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch)); @@ -4620,15 +3708,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ Ret(); __ bind(&miss_force_generic); - Handle<Code> ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic_miss, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Grow the array by a single element if possible. 
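Note: KeyedAccessGrowMode is replaced by KeyedAccessStoreMode; when IsGrowStoreMode(store_mode) holds and the receiver is a JSArray, the fast-element store stub handles a store just past the current length by growing the backing store, as in the grow path that follows. Roughly:

    var arr = [1, 2, 3];
    arr[2] = 9;    // in-bounds store: plain fast path
    arr[3] = 4;    // key == length: the grow path extends the array to length 4
    arr[9] = 5;    // well out of bounds: not a single-element grow, generic path instead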
__ bind(&grow); @@ -4646,8 +3731,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ Branch(&check_capacity, ne, elements_reg, Operand(at)); int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, elements_reg, scratch, scratch2, &slow, - TAG_OBJECT); + __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT); __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); __ sw(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset)); @@ -4690,8 +3774,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ jmp(&finish_store); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ Jump(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } @@ -4699,17 +3782,18 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( MacroAssembler* masm, bool is_js_array, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- a0 : value // -- a1 : key // -- a2 : receiver // -- ra : return address - // -- a3 : scratch + // -- a3 : scratch (elements backing store) // -- t0 : scratch (elements_reg) // -- t1 : scratch (mantissa_reg) // -- t2 : scratch (exponent_reg) // -- t3 : scratch4 + // -- t4 : scratch // ----------------------------------- Label miss_force_generic, transition_elements_kind, grow, slow; Label finish_store, check_capacity; @@ -4722,13 +3806,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( Register scratch2 = t1; Register scratch3 = t2; Register scratch4 = t3; + Register scratch5 = t4; Register length_reg = t3; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic); + GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic); __ lw(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); @@ -4742,7 +3827,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( } // Compare smis, unsigned compare catches both negative and out-of-bound // indexes. - if (grow_mode == ALLOW_JSARRAY_GROWTH) { + if (IsGrowStoreMode(store_mode)) { __ Branch(&grow, hs, key_reg, Operand(scratch1)); } else { __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch1)); @@ -4752,7 +3837,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ StoreNumberToDoubleElements(value_reg, key_reg, - receiver_reg, // All registers after this are overwritten. elements_reg, scratch1, @@ -4766,15 +3850,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Handle store cache miss, replacing the ic with the generic stub. __ bind(&miss_force_generic); - Handle<Code> ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic_miss, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Grow the array by a single element if possible. 
__ bind(&grow); @@ -4800,17 +3881,34 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ Branch(&check_capacity, ne, elements_reg, Operand(at)); int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow, - TAG_OBJECT); + __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT); - // Initialize the new FixedDoubleArray. Leave elements unitialized for - // efficiency, they are guaranteed to be initialized before use. + // Initialize the new FixedDoubleArray. __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex); __ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset)); __ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements))); __ sw(scratch1, FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); + __ mov(scratch1, elements_reg); + __ StoreNumberToDoubleElements(value_reg, + key_reg, + // All registers after this are overwritten. + scratch1, + scratch2, + scratch3, + scratch4, + scratch5, + &transition_elements_kind); + + __ li(scratch1, Operand(kHoleNanLower32)); + __ li(scratch2, Operand(kHoleNanUpper32)); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { + int offset = FixedDoubleArray::OffsetOfElementAt(i); + __ sw(scratch1, FieldMemOperand(elements_reg, offset)); + __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize)); + } + // Install the new backing store in the JSArray. __ sw(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); @@ -4823,7 +3921,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); __ lw(elements_reg, FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); - __ jmp(&finish_store); + __ Ret(); __ bind(&check_capacity); // Make sure that the backing store can hold additional elements. @@ -4837,8 +3935,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ jmp(&finish_store); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ Jump(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js index a5331a014d..7f1a05aed9 100644 --- a/deps/v8/src/mirror-debugger.js +++ b/deps/v8/src/mirror-debugger.js @@ -1844,10 +1844,14 @@ function ScopeDetails(frame, fun, index) { frame.details_.frameId(), frame.details_.inlinedFrameIndex(), index); + this.frame_id_ = frame.details_.frameId(); + this.inlined_frame_id_ = frame.details_.inlinedFrameIndex(); } else { this.details_ = %GetFunctionScopeDetails(fun.value(), index); + this.fun_value_ = fun.value(); this.break_id_ = undefined; } + this.index_ = index; } @@ -1867,6 +1871,22 @@ ScopeDetails.prototype.object = function() { }; +ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) { + var raw_res; + if (!IS_UNDEFINED(this.break_id_)) { + %CheckExecutionState(this.break_id_); + raw_res = %SetScopeVariableValue(this.break_id_, this.frame_id_, + this.inlined_frame_id_, this.index_, name, new_value); + } else { + raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_, + name, new_value); + } + if (!raw_res) { + throw new Error("Failed to set variable value"); + } +}; + + /** * Mirror object for scope of frame or function. Either frame or function must * be specified. 
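Note: ScopeDetails above now remembers the frame id, inlined frame index, scope index and, for function scopes, the function value, so that the new setVariableValueImpl can call %SetScopeVariableValue. Together with ScopeMirror.setVariableValue added in the next hunk, a debug event listener could use it roughly like this (a sketch; the listener signature and the frame()/scope() accessors are the pre-existing Debug API, not part of this patch):

    function listener(event, exec_state, event_data, data) {
      if (event != Debug.DebugEvent.Break) return;
      var frame = exec_state.frame(0);    // FrameMirror for the topmost frame
      var scope = frame.scope(0);         // ScopeMirror for its innermost scope
      scope.setVariableValue("x", 42);    // forwards to %SetScopeVariableValue
    }
    // Debug.setListener(listener);  // from a context started with --expose-debug-as debug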
@@ -1914,6 +1934,11 @@ ScopeMirror.prototype.scopeObject = function() { }; +ScopeMirror.prototype.setVariableValue = function(name, new_value) { + this.details_.setVariableValueImpl(name, new_value); +}; + + /** * Mirror object for script source. * @param {Script} script The script object diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc index d7775517b7..abfe69397b 100644 --- a/deps/v8/src/mksnapshot.cc +++ b/deps/v8/src/mksnapshot.cc @@ -318,11 +318,12 @@ int main(int argc, char** argv) { "\nException thrown while compiling natives - see above.\n\n"); exit(1); } + Isolate* isolate = context->GetIsolate(); if (i::FLAG_extra_code != NULL) { context->Enter(); // Capture 100 frames if anything happens. V8::SetCaptureStackTraceForUncaughtExceptions(true, 100); - HandleScope scope; + HandleScope scope(isolate); const char* name = i::FLAG_extra_code; FILE* file = i::OS::FOpen(name, "rb"); if (file == NULL) { @@ -375,7 +376,7 @@ int main(int argc, char** argv) { context->Exit(); } // Make sure all builtin scripts are cached. - { HandleScope scope; + { HandleScope scope(isolate); for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) { i::Isolate::Current()->bootstrapper()->NativesSourceLookup(i); } @@ -384,7 +385,7 @@ int main(int argc, char** argv) { // context even after we have disposed of the context. HEAP->CollectAllGarbage(i::Heap::kNoGCFlags, "mksnapshot"); i::Object* raw_context = *(v8::Utils::OpenHandle(*context)); - context.Dispose(); + context.Dispose(context->GetIsolate()); CppByteSink sink(argv[1]); // This results in a somewhat smaller snapshot, probably because it gets rid // of some things that are cached between garbage collections. diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js new file mode 100644 index 0000000000..b35f547eda --- /dev/null +++ b/deps/v8/src/object-observe.js @@ -0,0 +1,235 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
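Note: the new object-observe.js natives file below implements the harmony Object.observe surface: observe/unobserve register callbacks per object, getNotifier().notify() enqueues synthetic change records, and deliverChangeRecords drains an observer's pending queue. A hedged usage sketch (the "new"/"updated" record types for plain property writes come from the runtime side, not from this file):

    var subject = {};
    var seen = [];
    function observer(records) {
      // records is a frozen array of { type, object, name[, oldValue] } objects
      for (var i = 0; i < records.length; i++) seen.push(records[i].type);
    }
    Object.observe(subject, observer);
    subject.x = 1;                          // enqueues a "new" change record
    subject.x = 2;                          // enqueues an "updated" record (oldValue: 1)
    Object.getNotifier(subject).notify({ type: "custom", name: "x" });
    Object.deliverChangeRecords(observer);  // synchronously drains the pending queue
    Object.unobserve(subject, observer);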
+ +"use strict"; + +var observationState = %GetObservationState(); +if (IS_UNDEFINED(observationState.observerInfoMap)) { + observationState.observerInfoMap = %ObservationWeakMapCreate(); + observationState.objectInfoMap = %ObservationWeakMapCreate(); + observationState.notifierTargetMap = %ObservationWeakMapCreate(); + observationState.pendingObservers = new InternalArray; + observationState.observerPriority = 0; +} + +function ObservationWeakMap(map) { + this.map_ = map; +} + +ObservationWeakMap.prototype = { + get: function(key) { + key = %UnwrapGlobalProxy(key); + if (!IS_SPEC_OBJECT(key)) return void 0; + return %WeakMapGet(this.map_, key); + }, + set: function(key, value) { + key = %UnwrapGlobalProxy(key); + if (!IS_SPEC_OBJECT(key)) return void 0; + %WeakMapSet(this.map_, key, value); + }, + has: function(key) { + return !IS_UNDEFINED(this.get(key)); + } +}; + +var observerInfoMap = + new ObservationWeakMap(observationState.observerInfoMap); +var objectInfoMap = new ObservationWeakMap(observationState.objectInfoMap); +var notifierTargetMap = + new ObservationWeakMap(observationState.notifierTargetMap); + +function CreateObjectInfo(object) { + var info = { + changeObservers: new InternalArray, + notifier: null, + }; + objectInfoMap.set(object, info); + return info; +} + +function ObjectObserve(object, callback) { + if (!IS_SPEC_OBJECT(object)) + throw MakeTypeError("observe_non_object", ["observe"]); + if (!IS_SPEC_FUNCTION(callback)) + throw MakeTypeError("observe_non_function", ["observe"]); + if (ObjectIsFrozen(callback)) + throw MakeTypeError("observe_callback_frozen"); + + if (!observerInfoMap.has(callback)) { + observerInfoMap.set(callback, { + pendingChangeRecords: null, + priority: observationState.observerPriority++, + }); + } + + var objectInfo = objectInfoMap.get(object); + if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object); + %SetIsObserved(object, true); + + var changeObservers = objectInfo.changeObservers; + if (changeObservers.indexOf(callback) < 0) changeObservers.push(callback); + + return object; +} + +function ObjectUnobserve(object, callback) { + if (!IS_SPEC_OBJECT(object)) + throw MakeTypeError("observe_non_object", ["unobserve"]); + if (!IS_SPEC_FUNCTION(callback)) + throw MakeTypeError("observe_non_function", ["unobserve"]); + + var objectInfo = objectInfoMap.get(object); + if (IS_UNDEFINED(objectInfo)) + return object; + + var changeObservers = objectInfo.changeObservers; + var index = changeObservers.indexOf(callback); + if (index >= 0) { + changeObservers.splice(index, 1); + if (changeObservers.length === 0) %SetIsObserved(object, false); + } + + return object; +} + +function EnqueueChangeRecord(changeRecord, observers) { + for (var i = 0; i < observers.length; i++) { + var observer = observers[i]; + var observerInfo = observerInfoMap.get(observer); + observationState.pendingObservers[observerInfo.priority] = observer; + %SetObserverDeliveryPending(); + if (IS_NULL(observerInfo.pendingChangeRecords)) { + observerInfo.pendingChangeRecords = new InternalArray(changeRecord); + } else { + observerInfo.pendingChangeRecords.push(changeRecord); + } + } +} + +function NotifyChange(type, object, name, oldValue) { + var objectInfo = objectInfoMap.get(object); + var changeRecord = (arguments.length < 4) ? 
+ { type: type, object: object, name: name } : + { type: type, object: object, name: name, oldValue: oldValue }; + ObjectFreeze(changeRecord); + EnqueueChangeRecord(changeRecord, objectInfo.changeObservers); +} + +var notifierPrototype = {}; + +function ObjectNotifierNotify(changeRecord) { + if (!IS_SPEC_OBJECT(this)) + throw MakeTypeError("called_on_non_object", ["notify"]); + + var target = notifierTargetMap.get(this); + if (IS_UNDEFINED(target)) + throw MakeTypeError("observe_notify_non_notifier"); + if (!IS_STRING(changeRecord.type)) + throw MakeTypeError("observe_type_non_string"); + + var objectInfo = objectInfoMap.get(target); + if (IS_UNDEFINED(objectInfo) || objectInfo.changeObservers.length === 0) + return; + + var newRecord = { object: target }; + for (var prop in changeRecord) { + if (prop === 'object') continue; + %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop], + READ_ONLY + DONT_DELETE); + } + ObjectFreeze(newRecord); + + EnqueueChangeRecord(newRecord, objectInfo.changeObservers); +} + +function ObjectGetNotifier(object) { + if (!IS_SPEC_OBJECT(object)) + throw MakeTypeError("observe_non_object", ["getNotifier"]); + + if (ObjectIsFrozen(object)) return null; + + var objectInfo = objectInfoMap.get(object); + if (IS_UNDEFINED(objectInfo)) objectInfo = CreateObjectInfo(object); + + if (IS_NULL(objectInfo.notifier)) { + objectInfo.notifier = { __proto__: notifierPrototype }; + notifierTargetMap.set(objectInfo.notifier, object); + } + + return objectInfo.notifier; +} + +function DeliverChangeRecordsForObserver(observer) { + var observerInfo = observerInfoMap.get(observer); + if (IS_UNDEFINED(observerInfo)) + return false; + + var pendingChangeRecords = observerInfo.pendingChangeRecords; + if (IS_NULL(pendingChangeRecords)) + return false; + + observerInfo.pendingChangeRecords = null; + delete observationState.pendingObservers[observerInfo.priority]; + var delivered = []; + %MoveArrayContents(pendingChangeRecords, delivered); + try { + %Call(void 0, delivered, observer); + } catch (ex) {} + return true; +} + +function ObjectDeliverChangeRecords(callback) { + if (!IS_SPEC_FUNCTION(callback)) + throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]); + + while (DeliverChangeRecordsForObserver(callback)) {} +} + +function DeliverChangeRecords() { + while (observationState.pendingObservers.length) { + var pendingObservers = observationState.pendingObservers; + observationState.pendingObservers = new InternalArray; + for (var i in pendingObservers) { + DeliverChangeRecordsForObserver(pendingObservers[i]); + } + } +} + +function SetupObjectObserve() { + %CheckIsBootstrapping(); + InstallFunctions($Object, DONT_ENUM, $Array( + "deliverChangeRecords", ObjectDeliverChangeRecords, + "getNotifier", ObjectGetNotifier, + "observe", ObjectObserve, + "unobserve", ObjectUnobserve + )); + InstallFunctions(notifierPrototype, DONT_ENUM, $Array( + "notify", ObjectNotifierNotify + )); +} + +SetupObjectObserve(); diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc index be96592962..82a71a5ffa 100644 --- a/deps/v8/src/objects-debug.cc +++ b/deps/v8/src/objects-debug.cc @@ -30,6 +30,7 @@ #include "disassembler.h" #include "disasm.h" #include "jsregexp.h" +#include "macro-assembler.h" #include "objects-visiting.h" namespace v8 { @@ -79,6 +80,9 @@ void HeapObject::HeapObjectVerify() { } switch (instance_type) { + case SYMBOL_TYPE: + Symbol::cast(this)->SymbolVerify(); + break; case MAP_TYPE: Map::cast(this)->MapVerify(); break; @@ -212,6 +216,13 @@ void 
HeapObject::VerifyHeapPointer(Object* p) { } +void Symbol::SymbolVerify() { + CHECK(IsSymbol()); + CHECK(HasHashCode()); + CHECK_GT(Hash(), 0); +} + + void HeapNumber::HeapNumberVerify() { CHECK(IsHeapNumber()); } @@ -311,6 +322,9 @@ void Map::MapVerify() { SLOW_ASSERT(transitions()->IsSortedNoDuplicates()); SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this)); } + ASSERT(!is_observed() || instance_type() < FIRST_JS_OBJECT_TYPE || + instance_type() > LAST_JS_OBJECT_TYPE || + has_slow_elements_kind() || has_external_array_elements()); } @@ -325,6 +339,15 @@ void Map::SharedMapVerify() { } +void Map::VerifyOmittedPrototypeChecks() { + if (!FLAG_omit_prototype_checks_for_leaf_maps) return; + if (HasTransitionArray() || is_dictionary_map()) { + CHECK_EQ(0, dependent_code()->number_of_entries( + DependentCode::kPrototypeCheckGroup)); + } +} + + void CodeCache::CodeCacheVerify() { VerifyHeapPointer(default_cache()); VerifyHeapPointer(normal_type_cache()); @@ -456,24 +479,17 @@ void JSMessageObject::JSMessageObjectVerify() { void String::StringVerify() { CHECK(IsString()); CHECK(length() >= 0 && length() <= Smi::kMaxValue); - if (IsSymbol()) { + if (IsInternalizedString()) { CHECK(!HEAP->InNewSpace(this)); } if (IsConsString()) { ConsString::cast(this)->ConsStringVerify(); } else if (IsSlicedString()) { SlicedString::cast(this)->SlicedStringVerify(); - } else if (IsSeqAsciiString()) { - SeqAsciiString::cast(this)->SeqAsciiStringVerify(); } } -void SeqAsciiString::SeqAsciiStringVerify() { - CHECK(String::IsAscii(GetChars(), length())); -} - - void ConsString::ConsStringVerify() { CHECK(this->first()->IsString()); CHECK(this->second() == GetHeap()->empty_string() || @@ -499,7 +515,8 @@ void JSFunction::JSFunctionVerify() { VerifyObjectField(kPrototypeOrInitialMapOffset); VerifyObjectField(kNextFunctionLinkOffset); CHECK(code()->IsCode()); - CHECK(next_function_link()->IsUndefined() || + CHECK(next_function_link() == NULL || + next_function_link()->IsUndefined() || next_function_link()->IsJSFunction()); } @@ -589,6 +606,22 @@ void Code::CodeVerify() { } +void Code::VerifyEmbeddedMapsDependency() { + int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(this, mode_mask); !it.done(); it.next()) { + RelocInfo::Mode mode = it.rinfo()->rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT && + it.rinfo()->target_object()->IsMap()) { + Map* map = Map::cast(it.rinfo()->target_object()); + if (map->CanTransition()) { + CHECK(map->dependent_code()->Contains( + DependentCode::kWeaklyEmbeddedGroup, this)); + } + } + } +} + + void JSArray::JSArrayVerify() { JSObjectVerify(); CHECK(length()->IsNumber() || length()->IsUndefined()); @@ -685,16 +718,34 @@ void Foreign::ForeignVerify() { void AccessorInfo::AccessorInfoVerify() { - CHECK(IsAccessorInfo()); - VerifyPointer(getter()); - VerifyPointer(setter()); VerifyPointer(name()); - VerifyPointer(data()); VerifyPointer(flag()); VerifyPointer(expected_receiver_type()); } +void ExecutableAccessorInfo::ExecutableAccessorInfoVerify() { + CHECK(IsExecutableAccessorInfo()); + AccessorInfoVerify(); + VerifyPointer(getter()); + VerifyPointer(setter()); + VerifyPointer(data()); +} + + +void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorVerify() { + CHECK(IsDeclaredAccessorDescriptor()); + VerifyPointer(serialized_data()); +} + + +void DeclaredAccessorInfo::DeclaredAccessorInfoVerify() { + CHECK(IsDeclaredAccessorInfo()); + AccessorInfoVerify(); + VerifyPointer(descriptor()); +} + + void AccessorPair::AccessorPairVerify() { 
CHECK(IsAccessorPair()); VerifyPointer(getter()); @@ -770,6 +821,13 @@ void TypeSwitchInfo::TypeSwitchInfoVerify() { } +void AllocationSiteInfo::AllocationSiteInfoVerify() { + CHECK(IsAllocationSiteInfo()); + VerifyHeapPointer(payload()); + CHECK(payload()->IsObject()); +} + + void Script::ScriptVerify() { CHECK(IsScript()); VerifyPointer(source()); @@ -855,7 +913,7 @@ void JSObject::IncrementSpillStatistics(SpillInformation* info) { info->number_of_fast_used_fields_ += map()->NextFreePropertyIndex(); info->number_of_fast_unused_fields_ += map()->unused_property_fields(); } else { - StringDictionary* dict = property_dictionary(); + NameDictionary* dict = property_dictionary(); info->number_of_slow_used_properties_ += dict->NumberOfElements(); info->number_of_slow_unused_properties_ += dict->Capacity() - dict->NumberOfElements(); @@ -946,10 +1004,10 @@ void JSObject::SpillInformation::Print() { bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) { if (valid_entries == -1) valid_entries = number_of_descriptors(); - String* current_key = NULL; + Name* current_key = NULL; uint32_t current = 0; for (int i = 0; i < number_of_descriptors(); i++) { - String* key = GetSortedKey(i); + Name* key = GetSortedKey(i); if (key == current_key) { PrintDescriptors(); return false; @@ -968,10 +1026,10 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) { bool TransitionArray::IsSortedNoDuplicates(int valid_entries) { ASSERT(valid_entries == -1); - String* current_key = NULL; + Name* current_key = NULL; uint32_t current = 0; for (int i = 0; i < number_of_transitions(); i++) { - String* key = GetSortedKey(i); + Name* key = GetSortedKey(i); if (key == current_key) { PrintTransitions(); return false; diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h index ea5a93f16b..02542612b8 100644 --- a/deps/v8/src/objects-inl.h +++ b/deps/v8/src/objects-inl.h @@ -134,6 +134,19 @@ bool Object::IsFixedArrayBase() { } +// External objects are not extensible, so the map check is enough. +bool Object::IsExternal() { + return Object::IsHeapObject() && + HeapObject::cast(this)->map() == + HeapObject::cast(this)->GetHeap()->external_map(); +} + + +bool Object::IsAccessorInfo() { + return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo(); +} + + bool Object::IsInstanceOf(FunctionTemplateInfo* expected) { // There is a constraint on the object; check. if (!this->IsJSObject()) return false; @@ -170,6 +183,7 @@ bool Object::NonFailureIsHeapObject() { TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE) +TYPE_CHECKER(Symbol, SYMBOL_TYPE) bool Object::IsString() { @@ -178,6 +192,16 @@ bool Object::IsString() { } +bool Object::IsName() { + return IsString() || IsSymbol(); +} + + +bool Object::IsUniqueName() { + return IsInternalizedString() || IsSymbol(); +} + + bool Object::IsSpecObject() { return Object::IsHeapObject() && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE; @@ -191,15 +215,15 @@ bool Object::IsSpecFunction() { } -bool Object::IsSymbol() { +bool Object::IsInternalizedString() { if (!this->IsHeapObject()) return false; uint32_t type = HeapObject::cast(this)->map()->instance_type(); - // Because the symbol tag is non-zero and no non-string types have the - // symbol bit set we can test for symbols with a very simple test - // operation. 
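Note: the old IsSymbol/kSymbolTag test above is renamed to IsInternalizedString/kInternalizedTag: what the runtime used to call "symbols" are now "internalized strings", freeing the Symbol name for the new SYMBOL_TYPE. The JS-visible effect of internalization is only a fast path; a sketch:

    var o = { width: 10 };     // "width" from the literal is an internalized string
    var k = "wid" + "th";      // freshly built string with the same characters
    // Before the lookup the runtime typically finds the internalized "width" in
    // the string table, so Name-keyed descriptor and transition searches can
    // compare pointers rather than characters.
    var same = (o[k] === o.width);   // true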
- STATIC_ASSERT(kSymbolTag != 0); - ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE); - return (type & kIsSymbolMask) != 0; + // Because the internalized tag is non-zero and no non-string types have the + // internalized bit set we can test for internalized strings with a very + // simple test operation. + STATIC_ASSERT(kInternalizedTag != 0); + ASSERT(kNotStringTag + kIsInternalizedMask > LAST_TYPE); + return (type & kIsInternalizedMask) != 0; } @@ -221,10 +245,10 @@ bool Object::IsSeqString() { } -bool Object::IsSeqAsciiString() { +bool Object::IsSeqOneByteString() { if (!IsString()) return false; return StringShape(String::cast(this)).IsSequential() && - String::cast(this)->IsAsciiRepresentation(); + String::cast(this)->IsOneByteRepresentation(); } @@ -244,7 +268,7 @@ bool Object::IsExternalString() { bool Object::IsExternalAsciiString() { if (!IsString()) return false; return StringShape(String::cast(this)).IsExternal() && - String::cast(this)->IsAsciiRepresentation(); + String::cast(this)->IsOneByteRepresentation(); } @@ -280,16 +304,16 @@ StringShape::StringShape(InstanceType t) } -bool StringShape::IsSymbol() { +bool StringShape::IsInternalized() { ASSERT(valid()); - STATIC_ASSERT(kSymbolTag != 0); - return (type_ & kIsSymbolMask) != 0; + STATIC_ASSERT(kInternalizedTag != 0); + return (type_ & kIsInternalizedMask) != 0; } -bool String::IsAsciiRepresentation() { +bool String::IsOneByteRepresentation() { uint32_t type = map()->instance_type(); - return (type & kStringEncodingMask) == kAsciiStringTag; + return (type & kStringEncodingMask) == kOneByteStringTag; } @@ -299,18 +323,18 @@ bool String::IsTwoByteRepresentation() { } -bool String::IsAsciiRepresentationUnderneath() { +bool String::IsOneByteRepresentationUnderneath() { uint32_t type = map()->instance_type(); STATIC_ASSERT(kIsIndirectStringTag != 0); STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0); ASSERT(IsFlat()); switch (type & (kIsIndirectStringMask | kStringEncodingMask)) { - case kAsciiStringTag: + case kOneByteStringTag: return true; case kTwoByteStringTag: return false; default: // Cons or sliced string. Need to go deeper. 
- return GetUnderlying()->IsAsciiRepresentation(); + return GetUnderlying()->IsOneByteRepresentation(); } } @@ -321,7 +345,7 @@ bool String::IsTwoByteRepresentationUnderneath() { STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0); ASSERT(IsFlat()); switch (type & (kIsIndirectStringMask | kStringEncodingMask)) { - case kAsciiStringTag: + case kOneByteStringTag: return false; case kTwoByteStringTag: return true; @@ -333,8 +357,12 @@ bool String::IsTwoByteRepresentationUnderneath() { bool String::HasOnlyAsciiChars() { uint32_t type = map()->instance_type(); - return (type & kStringEncodingMask) == kAsciiStringTag || - (type & kAsciiDataHintMask) == kAsciiDataHintTag; + return (type & kAsciiDataHintMask) == kAsciiDataHintTag; +} + + +bool String::IsOneByteConvertible() { + return HasOnlyAsciiChars() || IsOneByteRepresentation(); } @@ -387,7 +415,7 @@ STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) == bool StringShape::IsSequentialAscii() { - return full_representation_tag() == (kSeqStringTag | kAsciiStringTag); + return full_representation_tag() == (kSeqStringTag | kOneByteStringTag); } @@ -397,14 +425,14 @@ bool StringShape::IsSequentialTwoByte() { bool StringShape::IsExternalAscii() { - return full_representation_tag() == (kExternalStringTag | kAsciiStringTag); + return full_representation_tag() == (kExternalStringTag | kOneByteStringTag); } -STATIC_CHECK((kExternalStringTag | kAsciiStringTag) == +STATIC_CHECK((kExternalStringTag | kOneByteStringTag) == Internals::kExternalAsciiRepresentationTag); -STATIC_CHECK(v8::String::ASCII_ENCODING == kAsciiStringTag); +STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag); bool StringShape::IsExternalTwoByte() { @@ -567,6 +595,14 @@ bool Object::IsDeoptimizationOutputData() { } +bool Object::IsDependentCode() { + if (!IsFixedArray()) return false; + // There's actually no way to see the difference between a fixed array and + // a dependent codes array. 
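Note: IsAsciiRepresentation/kAsciiStringTag become IsOneByteRepresentation/kOneByteStringTag above, HasOnlyAsciiChars now relies only on the ASCII-data hint, and IsOneByteConvertible combines the two. A small sketch of the string shapes involved:

    var narrow = "hello";        // sequential one-byte (Latin-1) string
    var wide   = "hell\u05D0";   // contains a character above 0xFF: two-byte string
    var cons   = narrow + wide;  // cons string whose encoding bit says two-byte; the
                                 // *Underneath helpers look through cons/sliced strings
                                 // to the underlying representation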
+ return true; +} + + bool Object::IsTypeFeedbackCells() { if (!IsFixedArray()) return false; // There's actually no way to see the difference between a fixed array and @@ -655,13 +691,13 @@ bool Object::IsHashTable() { bool Object::IsDictionary() { return IsHashTable() && - this != HeapObject::cast(this)->GetHeap()->symbol_table(); + this != HeapObject::cast(this)->GetHeap()->string_table(); } -bool Object::IsSymbolTable() { - return IsHashTable() && this == - HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table(); +bool Object::IsStringTable() { + return IsHashTable() && + this == HeapObject::cast(this)->GetHeap()->raw_unchecked_string_table(); } @@ -718,6 +754,11 @@ bool Object::IsMapCache() { } +bool Object::IsObjectHashTable() { + return IsHashTable(); +} + + bool Object::IsPrimitive() { return IsOddball() || IsNumber() || IsString(); } @@ -856,13 +897,13 @@ Object* Object::GetElementNoExceptionThrown(uint32_t index) { } -MaybeObject* Object::GetProperty(String* key) { +MaybeObject* Object::GetProperty(Name* key) { PropertyAttributes attributes; return GetPropertyWithReceiver(this, key, &attributes); } -MaybeObject* Object::GetProperty(String* key, PropertyAttributes* attributes) { +MaybeObject* Object::GetProperty(Name* key, PropertyAttributes* attributes) { return GetPropertyWithReceiver(this, key, attributes); } @@ -1026,8 +1067,8 @@ Failure* Failure::Exception() { } -Failure* Failure::OutOfMemoryException() { - return Construct(OUT_OF_MEMORY_EXCEPTION); +Failure* Failure::OutOfMemoryException(intptr_t value) { + return Construct(OUT_OF_MEMORY_EXCEPTION, value); } @@ -1052,7 +1093,11 @@ Failure* Failure::Construct(Type type, intptr_t value) { uintptr_t info = (static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type; ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info); - return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag); + // Fill the unused bits with a pattern that's easy to recognize in crash + // dumps. + static const int kFailureMagicPattern = 0x0BAD0000; + return reinterpret_cast<Failure*>( + (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern); } @@ -1400,16 +1445,29 @@ void JSObject::initialize_elements() { MaybeObject* JSObject::ResetElements() { - Object* obj; + if (map()->is_observed()) { + // Maintain invariant that observed elements are always in dictionary mode. 
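Failure::Construct now ORs a recognizable filler into the otherwise-unused bits of the failure word. A small self-contained sketch of the packing, with made-up field widths; only the 0x0BAD pattern is taken from the hunk:

#include <cassert>
#include <cstdint>

// Pack a small failure payload into a tagged word and fill spare bits with a
// pattern that stands out when the raw word appears in a crash dump.
const uintptr_t kFailureTag          = 0x3;         // low bits mark a failure
const int       kFailureTagSize      = 2;
const int       kFailureTypeTagSize  = 2;           // room for 4 failure types
const uintptr_t kFailureMagicPattern = 0x0BAD0000;  // easy to spot in a dump

uintptr_t MakeFailure(uintptr_t type, uintptr_t value) {
  uintptr_t info = (value << kFailureTypeTagSize) | type;
  return (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern;
}

int main() {
  // With a small payload the high bits stay zero, so 0x0BAD shows through.
  uintptr_t f = MakeFailure(/*type=*/1, /*value=*/0);
  assert((f & kFailureTag) == kFailureTag);
  assert((f & 0x0FFF0000) == kFailureMagicPattern);
}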
+ SeededNumberDictionary* dictionary; + MaybeObject* maybe = SeededNumberDictionary::Allocate(GetHeap(), 0); + if (!maybe->To(&dictionary)) return maybe; + if (map() == GetHeap()->non_strict_arguments_elements_map()) { + FixedArray::cast(elements())->set(1, dictionary); + } else { + set_elements(dictionary); + } + return this; + } + ElementsKind elements_kind = GetInitialFastElementsKind(); if (!FLAG_smi_only_arrays) { elements_kind = FastSmiToObjectElementsKind(elements_kind); } - MaybeObject* maybe_obj = GetElementsTransitionMap(GetIsolate(), - elements_kind); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - set_map(Map::cast(obj)); + MaybeObject* maybe = GetElementsTransitionMap(GetIsolate(), elements_kind); + Map* map; + if (!maybe->To(&map)) return maybe; + set_map(map); initialize_elements(); + return this; } @@ -1430,7 +1488,7 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) { bool JSObject::TryTransitionToField(Handle<JSObject> object, - Handle<String> key) { + Handle<Name> key) { if (!object->map()->HasTransitionArray()) return false; Handle<TransitionArray> transitions(object->map()->transitions()); int transition = transitions->Search(*key); @@ -1500,6 +1558,10 @@ int JSObject::GetHeaderSize() { return JSDate::kSize; case JS_ARRAY_TYPE: return JSArray::kSize; + case JS_SET_TYPE: + return JSSet::kSize; + case JS_MAP_TYPE: + return JSMap::kSize; case JS_WEAK_MAP_TYPE: return JSWeakMap::kSize; case JS_REGEXP_TYPE: @@ -1705,7 +1767,7 @@ bool Object::IsStringObjectWithCharacterAt(uint32_t index) { if (!js_value->value()->IsString()) return false; String* str = String::cast(js_value->value()); - if (index >= (uint32_t)str->length()) return false; + if (index >= static_cast<uint32_t>(str->length())) return false; return true; } @@ -1850,7 +1912,7 @@ void FixedArray::set(int index, void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array, int index, Object* value) { - ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map()); + ASSERT(array->map() != HEAP->fixed_cow_array_map()); ASSERT(index >= 0 && index < array->length()); int offset = kHeaderSize + index * kPointerSize; WRITE_FIELD(array, offset, value); @@ -1864,7 +1926,7 @@ void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array, void FixedArray::NoWriteBarrierSet(FixedArray* array, int index, Object* value) { - ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map()); + ASSERT(array->map() != HEAP->fixed_cow_array_map()); ASSERT(index >= 0 && index < array->length()); ASSERT(!HEAP->InNewSpace(value)); WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value); @@ -1931,6 +1993,11 @@ void FixedArray::set_null_unchecked(Heap* heap, int index) { } +double* FixedDoubleArray::data_start() { + return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize)); +} + + Object** FixedArray::data_start() { return HeapObject::RawField(this, kHeaderSize); } @@ -1953,7 +2020,7 @@ void DescriptorArray::SetNumberOfDescriptors(int number_of_descriptors) { // there are three entries in this array it should be called with low=0 and // high=2. 
template<SearchMode search_mode, typename T> -int BinarySearch(T* array, String* name, int low, int high, int valid_entries) { +int BinarySearch(T* array, Name* name, int low, int high, int valid_entries) { uint32_t hash = name->Hash(); int limit = high; @@ -1961,7 +2028,7 @@ int BinarySearch(T* array, String* name, int low, int high, int valid_entries) { while (low != high) { int mid = (low + high) / 2; - String* mid_name = array->GetSortedKey(mid); + Name* mid_name = array->GetSortedKey(mid); uint32_t mid_hash = mid_name->Hash(); if (mid_hash >= hash) { @@ -1973,7 +2040,7 @@ int BinarySearch(T* array, String* name, int low, int high, int valid_entries) { for (; low <= limit; ++low) { int sort_index = array->GetSortedKeyIndex(low); - String* entry = array->GetKey(sort_index); + Name* entry = array->GetKey(sort_index); if (entry->Hash() != hash) break; if (entry->Equals(name)) { if (search_mode == ALL_ENTRIES || sort_index < valid_entries) { @@ -1990,12 +2057,12 @@ int BinarySearch(T* array, String* name, int low, int high, int valid_entries) { // Perform a linear search in this fixed array. len is the number of entry // indices that are valid. template<SearchMode search_mode, typename T> -int LinearSearch(T* array, String* name, int len, int valid_entries) { +int LinearSearch(T* array, Name* name, int len, int valid_entries) { uint32_t hash = name->Hash(); if (search_mode == ALL_ENTRIES) { for (int number = 0; number < len; number++) { int sorted_index = array->GetSortedKeyIndex(number); - String* entry = array->GetKey(sorted_index); + Name* entry = array->GetKey(sorted_index); uint32_t current_hash = entry->Hash(); if (current_hash > hash) break; if (current_hash == hash && entry->Equals(name)) return sorted_index; @@ -2003,7 +2070,7 @@ int LinearSearch(T* array, String* name, int len, int valid_entries) { } else { ASSERT(len >= valid_entries); for (int number = 0; number < valid_entries; number++) { - String* entry = array->GetKey(number); + Name* entry = array->GetKey(number); uint32_t current_hash = entry->Hash(); if (current_hash == hash && entry->Equals(name)) return number; } @@ -2013,7 +2080,7 @@ int LinearSearch(T* array, String* name, int len, int valid_entries) { template<SearchMode search_mode, typename T> -int Search(T* array, String* name, int valid_entries) { +int Search(T* array, Name* name, int valid_entries) { if (search_mode == VALID_ENTRIES) { SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries)); } else { @@ -2037,12 +2104,12 @@ int Search(T* array, String* name, int valid_entries) { } -int DescriptorArray::Search(String* name, int valid_descriptors) { +int DescriptorArray::Search(Name* name, int valid_descriptors) { return internal::Search<VALID_ENTRIES>(this, name, valid_descriptors); } -int DescriptorArray::SearchWithCache(String* name, Map* map) { +int DescriptorArray::SearchWithCache(Name* name, Map* map) { int number_of_own_descriptors = map->NumberOfOwnDescriptors(); if (number_of_own_descriptors == 0) return kNotFound; @@ -2059,7 +2126,7 @@ int DescriptorArray::SearchWithCache(String* name, Map* map) { void Map::LookupDescriptor(JSObject* holder, - String* name, + Name* name, LookupResult* result) { DescriptorArray* descriptors = this->instance_descriptors(); int number = descriptors->SearchWithCache(name, this); @@ -2069,7 +2136,7 @@ void Map::LookupDescriptor(JSObject* holder, void Map::LookupTransition(JSObject* holder, - String* name, + Name* name, LookupResult* result) { if (HasTransitionArray()) { TransitionArray* transition_array = transitions(); @@ 
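The BinarySearch/LinearSearch pair above searches keys kept sorted by hash: a binary search narrows to the first entry with the wanted hash, then a short scan resolves collisions by real equality. A standalone model of that lookup, using plain std:: types instead of descriptor arrays:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

struct Entry { uint32_t hash; std::string name; };

// entries must be sorted by hash; entries with equal hashes may appear in any
// order and are disambiguated by the equality check.
int FindByName(const std::vector<Entry>& entries, const std::string& name,
               uint32_t hash) {
  int low = 0, high = static_cast<int>(entries.size());
  while (low != high) {
    int mid = (low + high) / 2;
    if (entries[mid].hash >= hash) high = mid; else low = mid + 1;
  }
  for (int i = low; i < static_cast<int>(entries.size()); ++i) {
    if (entries[i].hash != hash) break;      // past the run of equal hashes
    if (entries[i].name == name) return i;   // exact match
  }
  return -1;
}

int main() {
  std::vector<Entry> d = {{1, "a"}, {5, "x"}, {5, "y"}, {9, "z"}};
  assert(FindByName(d, "y", 5) == 2);
  assert(FindByName(d, "q", 5) == -1);
}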
-2090,9 +2157,19 @@ Object** DescriptorArray::GetKeySlot(int descriptor_number) { } -String* DescriptorArray::GetKey(int descriptor_number) { +Object** DescriptorArray::GetDescriptorStartSlot(int descriptor_number) { + return GetKeySlot(descriptor_number); +} + + +Object** DescriptorArray::GetDescriptorEndSlot(int descriptor_number) { + return GetValueSlot(descriptor_number - 1) + 1; +} + + +Name* DescriptorArray::GetKey(int descriptor_number) { ASSERT(descriptor_number < number_of_descriptors()); - return String::cast(get(ToKeyIndex(descriptor_number))); + return Name::cast(get(ToKeyIndex(descriptor_number))); } @@ -2101,7 +2178,7 @@ int DescriptorArray::GetSortedKeyIndex(int descriptor_number) { } -String* DescriptorArray::GetSortedKey(int descriptor_number) { +Name* DescriptorArray::GetSortedKey(int descriptor_number) { return GetKey(GetSortedKeyIndex(descriptor_number)); } @@ -2215,7 +2292,7 @@ void DescriptorArray::Append(Descriptor* desc, int insertion; for (insertion = descriptor_number; insertion > 0; --insertion) { - String* key = GetSortedKey(insertion - 1); + Name* key = GetSortedKey(insertion - 1); if (key->Hash() <= hash) break; SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1)); } @@ -2236,7 +2313,7 @@ void DescriptorArray::Append(Descriptor* desc) { int insertion; for (insertion = descriptor_number; insertion > 0; --insertion) { - String* key = GetSortedKey(insertion - 1); + Name* key = GetSortedKey(insertion - 1); if (key->Hash() <= hash) break; SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1)); } @@ -2290,7 +2367,8 @@ int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) { // EnsureCapacity will guarantee the hash table is never full. while (true) { Object* element = KeyAt(entry); - // Empty entry. + // Empty entry. Uses raw unchecked accessors because it is called by the + // string table during bootstrapping. 
if (element == isolate->heap()->raw_unchecked_undefined_value()) break; if (element != isolate->heap()->raw_unchecked_the_hole_value() && Shape::IsMatch(key, element)) return entry; @@ -2329,8 +2407,9 @@ CAST_ACCESSOR(FixedDoubleArray) CAST_ACCESSOR(DescriptorArray) CAST_ACCESSOR(DeoptimizationInputData) CAST_ACCESSOR(DeoptimizationOutputData) +CAST_ACCESSOR(DependentCode) CAST_ACCESSOR(TypeFeedbackCells) -CAST_ACCESSOR(SymbolTable) +CAST_ACCESSOR(StringTable) CAST_ACCESSOR(JSFunctionResultCache) CAST_ACCESSOR(NormalizedMapCache) CAST_ACCESSOR(ScopeInfo) @@ -2340,13 +2419,15 @@ CAST_ACCESSOR(PolymorphicCodeCacheHashTable) CAST_ACCESSOR(MapCache) CAST_ACCESSOR(String) CAST_ACCESSOR(SeqString) -CAST_ACCESSOR(SeqAsciiString) +CAST_ACCESSOR(SeqOneByteString) CAST_ACCESSOR(SeqTwoByteString) CAST_ACCESSOR(SlicedString) CAST_ACCESSOR(ConsString) CAST_ACCESSOR(ExternalString) CAST_ACCESSOR(ExternalAsciiString) CAST_ACCESSOR(ExternalTwoByteString) +CAST_ACCESSOR(Symbol) +CAST_ACCESSOR(Name) CAST_ACCESSOR(JSReceiver) CAST_ACCESSOR(JSObject) CAST_ACCESSOR(Smi) @@ -2383,6 +2464,7 @@ CAST_ACCESSOR(ExternalFloatArray) CAST_ACCESSOR(ExternalDoubleArray) CAST_ACCESSOR(ExternalPixelArray) CAST_ACCESSOR(Struct) +CAST_ACCESSOR(AccessorInfo) #define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name) @@ -2403,12 +2485,12 @@ SMI_ACCESSORS(FreeSpace, size, kSizeOffset) SMI_ACCESSORS(String, length, kLengthOffset) -uint32_t String::hash_field() { +uint32_t Name::hash_field() { return READ_UINT32_FIELD(this, kHashFieldOffset); } -void String::set_hash_field(uint32_t value) { +void Name::set_hash_field(uint32_t value) { WRITE_UINT32_FIELD(this, kHashFieldOffset, value); #if V8_HOST_ARCH_64_BIT WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0); @@ -2416,9 +2498,16 @@ void String::set_hash_field(uint32_t value) { } +bool Name::Equals(Name* other) { + if (other == this) return true; + if (this->IsUniqueName() && other->IsUniqueName()) return false; + return String::cast(this)->SlowEquals(String::cast(other)); +} + + bool String::Equals(String* other) { if (other == this) return true; - if (StringShape(this).IsSymbol() && StringShape(other).IsSymbol()) { + if (this->IsInternalizedString() && other->IsInternalizedString()) { return false; } return SlowEquals(other); @@ -2444,18 +2533,18 @@ String* String::TryFlattenGetString(PretenureFlag pretenure) { uint16_t String::Get(int index) { ASSERT(index >= 0 && index < length()); switch (StringShape(this).full_representation_tag()) { - case kSeqStringTag | kAsciiStringTag: - return SeqAsciiString::cast(this)->SeqAsciiStringGet(index); + case kSeqStringTag | kOneByteStringTag: + return SeqOneByteString::cast(this)->SeqOneByteStringGet(index); case kSeqStringTag | kTwoByteStringTag: return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index); - case kConsStringTag | kAsciiStringTag: + case kConsStringTag | kOneByteStringTag: case kConsStringTag | kTwoByteStringTag: return ConsString::cast(this)->ConsStringGet(index); - case kExternalStringTag | kAsciiStringTag: + case kExternalStringTag | kOneByteStringTag: return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index); case kExternalStringTag | kTwoByteStringTag: return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index); - case kSlicedStringTag | kAsciiStringTag: + case kSlicedStringTag | kOneByteStringTag: case kSlicedStringTag | kTwoByteStringTag: return SlicedString::cast(this)->SlicedStringGet(index); default: @@ -2471,8 +2560,8 @@ void String::Set(int index, uint16_t value) { 
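The FindEntry loop above is the usual open-addressing probe: an "undefined" slot means the key was never stored and ends the probe, while "the hole" marks a deleted slot that must be probed past. A compact sketch with string sentinels and linear probing (V8's probe sequence differs; the sentinel handling is the same idea):

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

const std::string kEmpty   = "<empty>";    // stands in for undefined_value
const std::string kDeleted = "<deleted>";  // stands in for the_hole_value

int FindEntry(const std::vector<std::string>& keys, const std::string& key,
              uint32_t hash) {
  uint32_t mask = static_cast<uint32_t>(keys.size()) - 1;  // size is 2^n
  for (uint32_t entry = hash & mask;; entry = (entry + 1) & mask) {
    if (keys[entry] == kEmpty) return -1;   // never stored here: stop probing
    if (keys[entry] == kDeleted) continue;  // tombstone: keep probing
    if (keys[entry] == key) return static_cast<int>(entry);
  }
}

int main() {
  std::vector<std::string> keys(8, kEmpty);
  keys[3] = "foo"; keys[4] = kDeleted; keys[5] = "bar";
  assert(FindEntry(keys, "bar", 3) == 5);   // probes 3, skips tombstone at 4
  assert(FindEntry(keys, "baz", 3) == -1);  // stops at the empty slot at 6
}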
ASSERT(index >= 0 && index < length()); ASSERT(StringShape(this).IsSequential()); - return this->IsAsciiRepresentation() - ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value) + return this->IsOneByteRepresentation() + ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value) : SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value); } @@ -2494,26 +2583,116 @@ String* String::GetUnderlying() { } -uint16_t SeqAsciiString::SeqAsciiStringGet(int index) { +template<class Visitor, class ConsOp> +void String::Visit( + String* string, + unsigned offset, + Visitor& visitor, + ConsOp& cons_op, + int32_t type, + unsigned length) { + ASSERT(length == static_cast<unsigned>(string->length())); + ASSERT(offset <= length); + unsigned slice_offset = offset; + while (true) { + ASSERT(type == string->map()->instance_type()); + + switch (type & (kStringRepresentationMask | kStringEncodingMask)) { + case kSeqStringTag | kOneByteStringTag: + visitor.VisitOneByteString( + SeqOneByteString::cast(string)->GetChars() + slice_offset, + length - offset); + return; + + case kSeqStringTag | kTwoByteStringTag: + visitor.VisitTwoByteString( + SeqTwoByteString::cast(string)->GetChars() + slice_offset, + length - offset); + return; + + case kExternalStringTag | kOneByteStringTag: + visitor.VisitOneByteString( + ExternalAsciiString::cast(string)->GetChars() + slice_offset, + length - offset); + return; + + case kExternalStringTag | kTwoByteStringTag: + visitor.VisitTwoByteString( + ExternalTwoByteString::cast(string)->GetChars() + slice_offset, + length - offset); + return; + + case kSlicedStringTag | kOneByteStringTag: + case kSlicedStringTag | kTwoByteStringTag: { + SlicedString* slicedString = SlicedString::cast(string); + slice_offset += slicedString->offset(); + string = slicedString->parent(); + type = string->map()->instance_type(); + continue; + } + + case kConsStringTag | kOneByteStringTag: + case kConsStringTag | kTwoByteStringTag: + string = cons_op.Operate(string, &offset, &type, &length); + if (string == NULL) return; + slice_offset = offset; + ASSERT(length == static_cast<unsigned>(string->length())); + continue; + + default: + UNREACHABLE(); + return; + } + } +} + + +// TODO(dcarney): Remove this class after conversion to VisitFlat. 
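String::Visit switches once on the combined representation-plus-encoding bits of the instance type, looping rather than recursing into sliced and cons strings. A toy dispatch on the same kind of combined tag, with invented bit values:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Illustrative tags only; the real values live in objects.h.
const uint32_t kSeqStringTag      = 0x0;
const uint32_t kExternalStringTag = 0x1;
const uint32_t kConsStringTag     = 0x2;
const uint32_t kOneByteStringTag  = 0x4;
const uint32_t kTwoByteStringTag  = 0x0;

const char* Describe(uint32_t type) {
  switch (type & 0x7) {
    case kSeqStringTag | kOneByteStringTag:      return "in-object, 1 byte/char";
    case kSeqStringTag | kTwoByteStringTag:      return "in-object, 2 bytes/char";
    case kExternalStringTag | kOneByteStringTag: return "external, 1 byte/char";
    case kExternalStringTag | kTwoByteStringTag: return "external, 2 bytes/char";
    default:                                     return "cons/sliced: go deeper";
  }
}

int main() {
  assert(Describe(kSeqStringTag | kOneByteStringTag)[0] == 'i');
  std::printf("%s\n", Describe(kExternalStringTag | kTwoByteStringTag));
}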
+class ConsStringCaptureOp { + public: + inline ConsStringCaptureOp() : cons_string_(NULL) {} + inline String* Operate(String* string, unsigned*, int32_t*, unsigned*) { + cons_string_ = ConsString::cast(string); + return NULL; + } + ConsString* cons_string_; +}; + + +template<class Visitor> +ConsString* String::VisitFlat(Visitor* visitor, + String* string, + int offset, + int length, + int32_t type) { + ASSERT(length >= 0 && length == string->length()); + ASSERT(offset >= 0 && offset <= length); + ConsStringCaptureOp op; + Visit(string, offset, *visitor, op, type, static_cast<unsigned>(length)); + return op.cons_string_; +} + + +uint16_t SeqOneByteString::SeqOneByteStringGet(int index) { ASSERT(index >= 0 && index < length()); return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize); } -void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) { - ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode); +void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) { + ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode); WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, static_cast<byte>(value)); } -Address SeqAsciiString::GetCharsAddress() { +Address SeqOneByteString::GetCharsAddress() { return FIELD_ADDR(this, kHeaderSize); } -char* SeqAsciiString::GetChars() { - return reinterpret_cast<char*>(GetCharsAddress()); +uint8_t* SeqOneByteString::GetChars() { + return reinterpret_cast<uint8_t*>(GetCharsAddress()); } @@ -2544,7 +2723,7 @@ int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) { } -int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) { +int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) { return SizeFor(length()); } @@ -2623,8 +2802,8 @@ void ExternalAsciiString::set_resource( } -const char* ExternalAsciiString::GetChars() { - return resource()->data(); +const uint8_t* ExternalAsciiString::GetChars() { + return reinterpret_cast<const uint8_t*>(resource()->data()); } @@ -2672,6 +2851,135 @@ const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData( } +String* ConsStringNullOp::Operate(String*, unsigned*, int32_t*, unsigned*) { + return NULL; +} + + +unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) { + return depth & kDepthMask; +} + + +void ConsStringIteratorOp::PushLeft(ConsString* string) { + frames_[depth_++ & kDepthMask] = string; +} + + +void ConsStringIteratorOp::PushRight(ConsString* string) { + // Inplace update. + frames_[(depth_-1) & kDepthMask] = string; +} + + +void ConsStringIteratorOp::AdjustMaximumDepth() { + if (depth_ > maximum_depth_) maximum_depth_ = depth_; +} + + +void ConsStringIteratorOp::Pop() { + ASSERT(depth_ > 0); + ASSERT(depth_ <= maximum_depth_); + depth_--; +} + + +bool ConsStringIteratorOp::HasMore() { + return depth_ != 0; +} + + +void ConsStringIteratorOp::Reset() { + depth_ = 0; +} + + +String* ConsStringIteratorOp::ContinueOperation(int32_t* type_out, + unsigned* length_out) { + bool blew_stack = false; + String* string = NextLeaf(&blew_stack, type_out, length_out); + // String found. + if (string != NULL) { + // Verify output. + ASSERT(*length_out == static_cast<unsigned>(string->length())); + ASSERT(*type_out == string->map()->instance_type()); + return string; + } + // Traversal complete. + if (!blew_stack) return NULL; + // Restart search from root. + unsigned offset_out; + string = Search(&offset_out, type_out, length_out); + // Verify output. 
+ ASSERT(string == NULL || offset_out == 0); + ASSERT(string == NULL || + *length_out == static_cast<unsigned>(string->length())); + ASSERT(string == NULL || *type_out == string->map()->instance_type()); + return string; +} + + +uint16_t StringCharacterStream::GetNext() { + ASSERT(buffer8_ != NULL && end_ != NULL); + // Advance cursor if needed. + // TODO(dcarney): Ensure uses of the api call HasMore first and avoid this. + if (buffer8_ == end_) HasMore(); + ASSERT(buffer8_ < end_); + return is_one_byte_ ? *buffer8_++ : *buffer16_++; +} + + +StringCharacterStream::StringCharacterStream(String* string, + ConsStringIteratorOp* op, + unsigned offset) + : is_one_byte_(false), + op_(op) { + Reset(string, offset); +} + + +void StringCharacterStream::Reset(String* string, unsigned offset) { + op_->Reset(); + buffer8_ = NULL; + end_ = NULL; + int32_t type = string->map()->instance_type(); + unsigned length = string->length(); + String::Visit(string, offset, *this, *op_, type, length); +} + + +bool StringCharacterStream::HasMore() { + if (buffer8_ != end_) return true; + if (!op_->HasMore()) return false; + unsigned length; + int32_t type; + String* string = op_->ContinueOperation(&type, &length); + if (string == NULL) return false; + ASSERT(!string->IsConsString()); + ASSERT(string->length() != 0); + ConsStringNullOp null_op; + String::Visit(string, 0, *this, null_op, type, length); + ASSERT(buffer8_ != end_); + return true; +} + + +void StringCharacterStream::VisitOneByteString( + const uint8_t* chars, unsigned length) { + is_one_byte_ = true; + buffer8_ = chars; + end_ = chars + length; +} + + +void StringCharacterStream::VisitTwoByteString( + const uint16_t* chars, unsigned length) { + is_one_byte_ = false; + buffer16_ = chars; + end_ = reinterpret_cast<const uint8_t*>(chars + length); +} + + void JSFunctionResultCache::MakeZeroSize() { set_finger_index(kEntriesIndex); set_size(kEntriesIndex); @@ -2954,16 +3262,17 @@ int Map::pre_allocated_property_fields() { int HeapObject::SizeFromMap(Map* map) { int instance_size = map->instance_size(); if (instance_size != kVariableSizeSentinel) return instance_size; - // We can ignore the "symbol" bit becase it is only set for symbols - // and implies a string type. - int instance_type = static_cast<int>(map->instance_type()) & ~kIsSymbolMask; + // We can ignore the "internalized" bit because it is only set for strings + // and thus implies a string type. + int instance_type = + static_cast<int>(map->instance_type()) & ~kIsInternalizedMask; // Only inline the most frequent cases. 
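ConsStringIteratorOp and StringCharacterStream walk a cons tree with an explicit, bounded stack instead of recursion (restarting from the root if the bound is exceeded), handing out one flat segment at a time. A simplified model of the traversal with an unbounded std::vector stack and plain structs instead of heap objects:

#include <cassert>
#include <string>
#include <vector>

// A "cons" node is left + right; a leaf carries the characters directly.
struct Node {
  const Node* left;    // non-null means this is a cons node
  const Node* right;
  std::string leaf;    // used when left == nullptr
};

std::string Flatten(const Node* root) {
  std::string out;
  std::vector<const Node*> pending;  // right subtrees still to visit
  const Node* n = root;
  while (true) {
    while (n->left != nullptr) {     // descend left, remembering rights
      pending.push_back(n->right);
      n = n->left;
    }
    out += n->leaf;                  // emit one flat segment
    if (pending.empty()) return out;
    n = pending.back();
    pending.pop_back();
  }
}

int main() {
  Node a{nullptr, nullptr, "foo"}, b{nullptr, nullptr, "bar"},
       c{nullptr, nullptr, "baz"};
  Node inner{&a, &b, ""}, root{&inner, &c, ""};
  assert(Flatten(&root) == "foobarbaz");
}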
if (instance_type == FIXED_ARRAY_TYPE) { return FixedArray::BodyDescriptor::SizeOf(map, this); } if (instance_type == ASCII_STRING_TYPE) { - return SeqAsciiString::SizeFor( - reinterpret_cast<SeqAsciiString*>(this)->length()); + return SeqOneByteString::SizeFor( + reinterpret_cast<SeqOneByteString*>(this)->length()); } if (instance_type == BYTE_ARRAY_TYPE) { return reinterpret_cast<ByteArray*>(this)->ByteArraySize(); @@ -3140,6 +3449,11 @@ Code::Flags Code::flags() { } +inline bool Map::CanTrackAllocationSite() { + return instance_type() == JS_ARRAY_TYPE; +} + + void Map::set_owns_descriptors(bool is_shared) { set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared)); } @@ -3150,6 +3464,85 @@ bool Map::owns_descriptors() { } +void Map::set_is_observed(bool is_observed) { + ASSERT(instance_type() < FIRST_JS_OBJECT_TYPE || + instance_type() > LAST_JS_OBJECT_TYPE || + has_slow_elements_kind() || has_external_array_elements()); + set_bit_field3(IsObserved::update(bit_field3(), is_observed)); +} + + +bool Map::is_observed() { + return IsObserved::decode(bit_field3()); +} + + +void Map::NotifyLeafMapLayoutChange() { + dependent_code()->DeoptimizeDependentCodeGroup( + GetIsolate(), + DependentCode::kPrototypeCheckGroup); +} + + +bool Map::CanOmitPrototypeChecks() { + return !HasTransitionArray() && !is_dictionary_map() && + FLAG_omit_prototype_checks_for_leaf_maps; +} + + +void Map::AddDependentCode(DependentCode::DependencyGroup group, + Handle<Code> code) { + Handle<DependentCode> codes = + DependentCode::Insert(Handle<DependentCode>(dependent_code()), + group, code); + if (*codes != dependent_code()) { + set_dependent_code(*codes); + } +} + + +int DependentCode::number_of_entries(DependencyGroup group) { + if (length() == 0) return 0; + return Smi::cast(get(group))->value(); +} + + +void DependentCode::set_number_of_entries(DependencyGroup group, int value) { + set(group, Smi::FromInt(value)); +} + + +Code* DependentCode::code_at(int i) { + return Code::cast(get(kCodesStartIndex + i)); +} + + +void DependentCode::set_code_at(int i, Code* value) { + set(kCodesStartIndex + i, value); +} + + +Object** DependentCode::code_slot_at(int i) { + return HeapObject::RawField( + this, FixedArray::OffsetOfElementAt(kCodesStartIndex + i)); +} + + +void DependentCode::clear_code_at(int i) { + set_undefined(kCodesStartIndex + i); +} + + +void DependentCode::ExtendGroup(DependencyGroup group) { + GroupStartIndexes starts(this); + for (int g = kGroupCount - 1; g > group; g--) { + if (starts.at(g) < starts.at(g + 1)) { + set_code_at(starts.at(g + 1), code_at(starts.at(g))); + } + } +} + + void Code::set_flags(Code::Flags flags) { STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1); // Make sure that all call stubs have an arguments count. @@ -3172,14 +3565,13 @@ InlineCacheState Code::ic_state() { // a call to code object has been replaced with a debug break call. 
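DependentCode stores its entries in one flat fixed array: the first kGroupCount slots hold per-group counts and the code objects follow, grouped consecutively, so ExtendGroup only has to move one element per later group when making room. A rough std::vector model of that layout; counts live in a separate vector here purely for readability:

#include <cassert>
#include <string>
#include <vector>

const int kGroupCount = 3;

int GroupStart(const std::vector<int>& counts, int group) {
  int start = kGroupCount;            // entries begin after the count slots
  for (int g = 0; g < group; ++g) start += counts[g];
  return start;
}

void Insert(std::vector<std::string>* array, std::vector<int>* counts,
            int group, const std::string& value) {
  array->push_back("");               // grow by one slot at the very end
  // Move the first element of every later group to that group's end, highest
  // group first, so each move lands in a slot that has already been vacated.
  for (int g = kGroupCount - 1; g > group; --g) {
    int start = GroupStart(*counts, g);
    if ((*counts)[g] > 0) (*array)[start + (*counts)[g]] = (*array)[start];
  }
  (*array)[GroupStart(*counts, group) + (*counts)[group]] = value;
  ++(*counts)[group];
}

int main() {
  std::vector<std::string> array(kGroupCount, "(count slot)");
  std::vector<int> counts(kGroupCount, 0);
  Insert(&array, &counts, 1, "b0");
  Insert(&array, &counts, 0, "a0");   // shifts b0 one slot to the right
  Insert(&array, &counts, 2, "c0");
  assert(array[3] == "a0" && array[4] == "b0" && array[5] == "c0");
}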
ASSERT(is_inline_cache_stub() || result == UNINITIALIZED || - result == DEBUG_BREAK || - result == DEBUG_PREPARE_STEP_IN); + result == DEBUG_STUB); return result; } Code::ExtraICState Code::extra_ic_state() { - ASSERT(is_inline_cache_stub()); + ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB); return ExtractExtraICStateFromFlags(flags()); } @@ -3197,9 +3589,12 @@ int Code::arguments_count() { int Code::major_key() { ASSERT(kind() == STUB || + kind() == COMPILED_STUB || kind() == UNARY_OP_IC || kind() == BINARY_OP_IC || kind() == COMPARE_IC || + kind() == LOAD_IC || + kind() == KEYED_LOAD_IC || kind() == TO_BOOLEAN_IC); return StubMajorKeyField::decode( READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)); @@ -3208,9 +3603,14 @@ int Code::major_key() { void Code::set_major_key(int major) { ASSERT(kind() == STUB || + kind() == COMPILED_STUB || kind() == UNARY_OP_IC || kind() == BINARY_OP_IC || kind() == COMPARE_IC || + kind() == LOAD_IC || + kind() == KEYED_LOAD_IC || + kind() == STORE_IC || + kind() == KEYED_STORE_IC || kind() == TO_BOOLEAN_IC); ASSERT(0 <= major && major < 256); int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); @@ -3316,7 +3716,7 @@ void Code::set_profiler_ticks(int ticks) { unsigned Code::stack_slots() { - ASSERT(kind() == OPTIMIZED_FUNCTION); + ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB); return StackSlotsField::decode( READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); } @@ -3324,7 +3724,7 @@ unsigned Code::stack_slots() { void Code::set_stack_slots(unsigned slots) { CHECK(slots <= (1 << kStackSlotsBitCount)); - ASSERT(kind() == OPTIMIZED_FUNCTION); + ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB); int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); int updated = StackSlotsField::update(previous, slots); WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); @@ -3332,7 +3732,7 @@ void Code::set_stack_slots(unsigned slots) { unsigned Code::safepoint_table_offset() { - ASSERT(kind() == OPTIMIZED_FUNCTION); + ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB); return SafepointTableOffsetField::decode( READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)); } @@ -3340,7 +3740,7 @@ unsigned Code::safepoint_table_offset() { void Code::set_safepoint_table_offset(unsigned offset) { CHECK(offset <= (1 << kSafepointTableOffsetBitCount)); - ASSERT(kind() == OPTIMIZED_FUNCTION); + ASSERT(kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB); ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize))); int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); int updated = SafepointTableOffsetField::update(previous, offset); @@ -3364,6 +3764,22 @@ void Code::set_stack_check_table_offset(unsigned offset) { } +bool Code::stack_check_patched_for_osr() { + ASSERT_EQ(FUNCTION, kind()); + return StackCheckPatchedForOSRField::decode( + READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)); +} + + +void Code::set_stack_check_patched_for_osr(bool value) { + ASSERT_EQ(FUNCTION, kind()); + int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset); + int updated = StackCheckPatchedForOSRField::update(previous, value); + WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated); +} + + + CheckType Code::check_type() { ASSERT(is_call_stub() || is_keyed_call_stub()); byte type = READ_BYTE_FIELD(this, kCheckTypeOffset); @@ -3392,66 +3808,6 @@ void Code::set_unary_op_type(byte value) { } -byte Code::binary_op_type() { - ASSERT(is_binary_op_stub()); - return BinaryOpTypeField::decode( 
- READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); -} - - -void Code::set_binary_op_type(byte value) { - ASSERT(is_binary_op_stub()); - int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); - int updated = BinaryOpTypeField::update(previous, value); - WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); -} - - -byte Code::binary_op_result_type() { - ASSERT(is_binary_op_stub()); - return BinaryOpResultTypeField::decode( - READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); -} - - -void Code::set_binary_op_result_type(byte value) { - ASSERT(is_binary_op_stub()); - int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); - int updated = BinaryOpResultTypeField::update(previous, value); - WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); -} - - -byte Code::compare_state() { - ASSERT(is_compare_ic_stub()); - return CompareStateField::decode( - READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); -} - - -void Code::set_compare_state(byte value) { - ASSERT(is_compare_ic_stub()); - int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); - int updated = CompareStateField::update(previous, value); - WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); -} - - -byte Code::compare_operation() { - ASSERT(is_compare_ic_stub()); - return CompareOperationField::decode( - READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); -} - - -void Code::set_compare_operation(byte value) { - ASSERT(is_compare_ic_stub()); - int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); - int updated = CompareOperationField::update(previous, value); - WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); -} - - byte Code::to_boolean_state() { ASSERT(is_to_boolean_ic_stub()); return ToBooleanStateField::decode( @@ -3482,26 +3838,41 @@ void Code::set_has_function_cache(bool flag) { } +bool Code::marked_for_deoptimization() { + ASSERT(kind() == OPTIMIZED_FUNCTION); + return MarkedForDeoptimizationField::decode( + READ_UINT32_FIELD(this, kKindSpecificFlags1Offset)); +} + + +void Code::set_marked_for_deoptimization(bool flag) { + ASSERT(kind() == OPTIMIZED_FUNCTION); + int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset); + int updated = MarkedForDeoptimizationField::update(previous, flag); + WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated); +} + + bool Code::is_inline_cache_stub() { Kind kind = this->kind(); return kind >= FIRST_IC_KIND && kind <= LAST_IC_KIND; } +bool Code::is_debug_break() { + return ic_state() == DEBUG_STUB && extra_ic_state() == DEBUG_BREAK; +} + + Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state, ExtraICState extra_ic_state, StubType type, int argc, InlineCacheHolderFlag holder) { - // Extra IC state is only allowed for call IC stubs or for store IC - // stubs. - ASSERT(extra_ic_state == kNoExtraICState || - kind == CALL_IC || - kind == STORE_IC || - kind == KEYED_STORE_IC); + ASSERT(argc <= Code::kMaxArguments); // Compute the bit mask. 
- int bits = KindField::encode(kind) + unsigned int bits = KindField::encode(kind) | ICStateField::encode(ic_state) | TypeField::encode(type) | ExtraICStateField::encode(extra_ic_state) @@ -3512,10 +3883,10 @@ Code::Flags Code::ComputeFlags(Kind kind, Code::Flags Code::ComputeMonomorphicFlags(Kind kind, - StubType type, ExtraICState extra_ic_state, - InlineCacheHolderFlag holder, - int argc) { + StubType type, + int argc, + InlineCacheHolderFlag holder) { return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, argc, holder); } @@ -3691,7 +4062,7 @@ bool Map::CanHaveMoreTransitions() { } -MaybeObject* Map::AddTransition(String* key, +MaybeObject* Map::AddTransition(Name* key, Map* target, SimpleTransitionFlag flag) { if (HasTransitionArray()) return transitions()->CopyInsert(key, target); @@ -3805,6 +4176,7 @@ HeapObject* Map::UncheckedPrototypeTransitions() { ACCESSORS(Map, code_cache, Object, kCodeCacheOffset) +ACCESSORS(Map, dependent_code, DependentCode, kDependentCodeOffset) ACCESSORS(Map, constructor, Object, kConstructorOffset) ACCESSORS(JSFunction, shared, SharedFunctionInfo, kSharedFunctionInfoOffset) @@ -3818,14 +4190,21 @@ ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset) ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset) -ACCESSORS(AccessorInfo, getter, Object, kGetterOffset) -ACCESSORS(AccessorInfo, setter, Object, kSetterOffset) -ACCESSORS(AccessorInfo, data, Object, kDataOffset) ACCESSORS(AccessorInfo, name, Object, kNameOffset) ACCESSORS_TO_SMI(AccessorInfo, flag, kFlagOffset) ACCESSORS(AccessorInfo, expected_receiver_type, Object, kExpectedReceiverTypeOffset) +ACCESSORS(DeclaredAccessorDescriptor, serialized_data, ByteArray, + kSerializedDataOffset) + +ACCESSORS(DeclaredAccessorInfo, descriptor, DeclaredAccessorDescriptor, + kDescriptorOffset) + +ACCESSORS(ExecutableAccessorInfo, getter, Object, kGetterOffset) +ACCESSORS(ExecutableAccessorInfo, setter, Object, kSetterOffset) +ACCESSORS(ExecutableAccessorInfo, data, Object, kDataOffset) + ACCESSORS(AccessorPair, getter, Object, kGetterOffset) ACCESSORS(AccessorPair, setter, Object, kSetterOffset) @@ -3876,6 +4255,8 @@ ACCESSORS(SignatureInfo, args, Object, kArgsOffset) ACCESSORS(TypeSwitchInfo, types, Object, kTypesOffset) +ACCESSORS(AllocationSiteInfo, payload, Object, kPayloadOffset) + ACCESSORS(Script, source, Object, kSourceOffset) ACCESSORS(Script, name, Object, kNameOffset) ACCESSORS(Script, id, Object, kIdOffset) @@ -3920,6 +4301,7 @@ ACCESSORS(SharedFunctionInfo, this_property_assignments, Object, SMI_ACCESSORS(SharedFunctionInfo, ast_node_count, kAstNodeCountOffset) +SMI_ACCESSORS(FunctionTemplateInfo, length, kLengthOffset) BOOL_ACCESSORS(FunctionTemplateInfo, flag, hidden_prototype, kHiddenPrototypeBit) BOOL_ACCESSORS(FunctionTemplateInfo, flag, undetectable, kUndetectableBit) @@ -3931,6 +4313,7 @@ BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression, kIsExpressionBit) BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel, kIsTopLevelBit) + BOOL_GETTER(SharedFunctionInfo, compiler_hints, has_only_simple_this_property_assignments, @@ -4125,11 +4508,10 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache) void SharedFunctionInfo::BeforeVisitingPointers() { if (IsInobjectSlackTrackingInProgress()) DetachInitialMap(); +} + - // Flush optimized code map on major GC. 
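ComputeFlags packs kind, IC state, type, extra IC state, argument count and holder into one word via BitField-style helpers. A minimal reimplementation of that encode/decode pattern, with invented field positions and enums:

#include <cassert>
#include <cstdint>

template <typename T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    uint32_t mask = ((1u << size) - 1) << shift;
    return static_cast<T>((flags & mask) >> shift);
  }
};

enum Kind  { STUB = 0, LOAD_IC = 1, STORE_IC = 2 };
enum State { UNINITIALIZED = 0, MONOMORPHIC = 1, MEGAMORPHIC = 2 };

typedef BitField<Kind, 0, 4>  KindField;   // positions/widths are made up
typedef BitField<State, 4, 3> StateField;
typedef BitField<int, 7, 5>   ArgcField;

int main() {
  uint32_t flags = KindField::encode(LOAD_IC)
                 | StateField::encode(MONOMORPHIC)
                 | ArgcField::encode(2);
  assert(KindField::decode(flags) == LOAD_IC);
  assert(StateField::decode(flags) == MONOMORPHIC);
  assert(ArgcField::decode(flags) == 2);
}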
- // Note: we may experiment with rebuilding it or retaining entries - // which should survive as we iterate through optimized functions - // anyway. +void SharedFunctionInfo::ClearOptimizedCodeMap() { set_optimized_code_map(Smi::FromInt(0)); } @@ -4144,7 +4526,7 @@ bool Script::HasValidSource() { if (!src->IsString()) return true; String* src_str = String::cast(src); if (!StringShape(src_str).IsExternal()) return true; - if (src_str->IsAsciiRepresentation()) { + if (src_str->IsOneByteRepresentation()) { return ExternalAsciiString::cast(src)->resource() != NULL; } else if (src_str->IsTwoByteRepresentation()) { return ExternalTwoByteString::cast(src)->resource() != NULL; @@ -4186,6 +4568,19 @@ void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) { } +void SharedFunctionInfo::ReplaceCode(Code* value) { + // If the GC metadata field is already used then the function was + // enqueued as a code flushing candidate and we remove it now. + if (code()->gc_metadata() != NULL) { + CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher(); + flusher->EvictCandidate(this); + } + + ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL); + set_code(value); +} + + ScopeInfo* SharedFunctionInfo::scope_info() { return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset)); } @@ -4325,9 +4720,15 @@ bool JSFunction::IsMarkedForLazyRecompilation() { } +bool JSFunction::IsMarkedForInstallingRecompiledCode() { + return code() == GetIsolate()->builtins()->builtin( + Builtins::kInstallRecompiledCode); +} + + bool JSFunction::IsMarkedForParallelRecompilation() { - return code() == - GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile); + return code() == GetIsolate()->builtins()->builtin( + Builtins::kParallelRecompile); } @@ -4359,6 +4760,13 @@ void JSFunction::set_code(Code* value) { } +void JSFunction::set_code_no_write_barrier(Code* value) { + ASSERT(!HEAP->InNewSpace(value)); + Address entry = value->entry(); + WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry)); +} + + void JSFunction::ReplaceCode(Code* code) { bool was_optimized = IsOptimized(); bool is_optimized = code->kind() == Code::OPTIMIZED_FUNCTION; @@ -4603,13 +5011,67 @@ JSMessageObject* JSMessageObject::cast(Object* obj) { INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset) +INT_ACCESSORS(Code, prologue_offset, kPrologueOffset) ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset) ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset) ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset) -ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset) + + +// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs. 
+void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) { + WRITE_FIELD(this, kTypeFeedbackInfoOffset, value); +} + + +Object* Code::type_feedback_info() { + ASSERT(kind() == FUNCTION); + return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset)); +} + + +void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) { + ASSERT(kind() == FUNCTION); + WRITE_FIELD(this, kTypeFeedbackInfoOffset, value); + CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset, + value, mode); +} + + +int Code::stub_info() { + ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC || kind() == LOAD_IC); + Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset); + return Smi::cast(value)->value(); +} + + +void Code::set_stub_info(int value) { + ASSERT(kind() == COMPARE_IC || + kind() == BINARY_OP_IC || + kind() == STUB || + kind() == LOAD_IC || + kind() == KEYED_LOAD_IC || + kind() == STORE_IC || + kind() == KEYED_STORE_IC); + WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value)); +} + + +void Code::set_deoptimizing_functions(Object* value) { + ASSERT(kind() == OPTIMIZED_FUNCTION); + WRITE_FIELD(this, kTypeFeedbackInfoOffset, value); +} + + +Object* Code::deoptimizing_functions() { + ASSERT(kind() == OPTIMIZED_FUNCTION); + return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset)); +} + + ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset) INT_ACCESSORS(Code, ic_age, kICAgeOffset) + byte* Code::instruction_start() { return FIELD_ADDR(this, kHeaderSize); } @@ -4790,6 +5252,11 @@ bool JSObject::HasFastHoleyElements() { } +bool JSObject::HasFastElements() { + return IsFastElementsKind(GetElementsKind()); +} + + bool JSObject::HasDictionaryElements() { return GetElementsKind() == DICTIONARY_ELEMENTS; } @@ -4860,9 +5327,9 @@ MaybeObject* JSObject::EnsureWritableFastElements() { } -StringDictionary* JSObject::property_dictionary() { +NameDictionary* JSObject::property_dictionary() { ASSERT(!HasFastProperties()); - return StringDictionary::cast(properties()); + return NameDictionary::cast(properties()); } @@ -4872,22 +5339,22 @@ SeededNumberDictionary* JSObject::element_dictionary() { } -bool String::IsHashFieldComputed(uint32_t field) { +bool Name::IsHashFieldComputed(uint32_t field) { return (field & kHashNotComputedMask) == 0; } -bool String::HasHashCode() { +bool Name::HasHashCode() { return IsHashFieldComputed(hash_field()); } -uint32_t String::Hash() { +uint32_t Name::Hash() { // Fast case: has hash code already been computed? uint32_t field = hash_field(); if (IsHashFieldComputed(field)) return field >> kHashShift; - // Slow case: compute hash code and set it. - return ComputeAndSetHash(); + // Slow case: compute hash code and set it. Has to be a string. + return String::cast(this)->ComputeAndSetHash(); } @@ -4906,7 +5373,7 @@ bool StringHasher::has_trivial_hash() { } -uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint32_t c) { +uint32_t StringHasher::AddCharacterCore(uint32_t running_hash, uint16_t c) { running_hash += c; running_hash += (running_hash << 10); running_hash ^= (running_hash >> 6); @@ -4925,70 +5392,71 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) { } -void StringHasher::AddCharacter(uint32_t c) { - if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { - AddSurrogatePair(c); // Not inlined. - return; - } +void StringHasher::AddCharacter(uint16_t c) { // Use the Jenkins one-at-a-time hash function to update the hash // for the given character. 
raw_running_hash_ = AddCharacterCore(raw_running_hash_, c); - // Incremental array index computation. - if (is_array_index_) { - if (c < '0' || c > '9') { - is_array_index_ = false; - } else { - int d = c - '0'; - if (is_first_char_) { - is_first_char_ = false; - if (c == '0' && length_ > 1) { - is_array_index_ = false; - return; - } - } - if (array_index_ > 429496729U - ((d + 2) >> 3)) { - is_array_index_ = false; - } else { - array_index_ = array_index_ * 10 + d; - } - } - } } -void StringHasher::AddCharacterNoIndex(uint32_t c) { - ASSERT(!is_array_index()); - if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) { - AddSurrogatePairNoIndex(c); // Not inlined. - return; +bool StringHasher::UpdateIndex(uint16_t c) { + ASSERT(is_array_index_); + if (c < '0' || c > '9') { + is_array_index_ = false; + return false; } - raw_running_hash_ = AddCharacterCore(raw_running_hash_, c); + int d = c - '0'; + if (is_first_char_) { + is_first_char_ = false; + if (c == '0' && length_ > 1) { + is_array_index_ = false; + return false; + } + } + if (array_index_ > 429496729U - ((d + 2) >> 3)) { + is_array_index_ = false; + return false; + } + array_index_ = array_index_ * 10 + d; + return true; } -uint32_t StringHasher::GetHash() { - // Get the calculated raw hash value and do some more bit ops to distribute - // the hash further. Ensure that we never return zero as the hash value. - return GetHashCore(raw_running_hash_); +template<typename Char> +inline void StringHasher::AddCharacters(const Char* chars, int length) { + ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2); + int i = 0; + if (is_array_index_) { + for (; i < length; i++) { + AddCharacter(chars[i]); + if (!UpdateIndex(chars[i])) { + i++; + break; + } + } + } + for (; i < length; i++) { + ASSERT(!is_array_index_); + AddCharacter(chars[i]); + } } template <typename schar> -uint32_t HashSequentialString(const schar* chars, int length, uint32_t seed) { +uint32_t StringHasher::HashSequentialString(const schar* chars, + int length, + uint32_t seed) { StringHasher hasher(length, seed); - if (!hasher.has_trivial_hash()) { - int i; - for (i = 0; hasher.is_array_index() && (i < length); i++) { - hasher.AddCharacter(chars[i]); - } - for (; i < length; i++) { - hasher.AddCharacterNoIndex(chars[i]); - } - } + if (!hasher.has_trivial_hash()) hasher.AddCharacters(chars, length); return hasher.GetHashField(); } +bool Name::AsArrayIndex(uint32_t* index) { + return IsString() && String::cast(this)->AsArrayIndex(index); +} + + bool String::AsArrayIndex(uint32_t* index) { uint32_t field = hash_field(); if (IsHashFieldComputed(field) && (field & kIsNotArrayIndexMask)) { @@ -5008,7 +5476,7 @@ Object* JSReceiver::GetConstructor() { } -bool JSReceiver::HasProperty(String* name) { +bool JSReceiver::HasProperty(Name* name) { if (IsJSProxy()) { return JSProxy::cast(this)->HasPropertyWithHandler(name); } @@ -5016,7 +5484,7 @@ bool JSReceiver::HasProperty(String* name) { } -bool JSReceiver::HasLocalProperty(String* name) { +bool JSReceiver::HasLocalProperty(Name* name) { if (IsJSProxy()) { return JSProxy::cast(this)->HasPropertyWithHandler(name); } @@ -5024,10 +5492,24 @@ bool JSReceiver::HasLocalProperty(String* name) { } -PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) { +PropertyAttributes JSReceiver::GetPropertyAttribute(Name* key) { + uint32_t index; + if (IsJSObject() && key->AsArrayIndex(&index)) { + return GetElementAttribute(index); + } return GetPropertyAttributeWithReceiver(this, key); } + +PropertyAttributes JSReceiver::GetElementAttribute(uint32_t 
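StringHasher now folds the array-index computation into the same pass as the Jenkins one-at-a-time hash: UpdateIndex keeps a running decimal value and bails out on a non-digit or on overflow (the 429496729U guard is UINT32_MAX / 10). A standalone sketch of both halves; the finalization follows the same shape as GetHashCore, but the leading-zero handling and the zero-hash fallback are simplified here:

#include <cassert>
#include <cstdint>
#include <string>

struct SimpleHasher {
  uint32_t running = 0;
  uint32_t index = 0;
  bool is_index = true;

  void Add(uint16_t c) {
    // Jenkins one-at-a-time mixing step.
    running += c;
    running += running << 10;
    running ^= running >> 6;
    // Incremental array-index computation on the side.
    if (!is_index) return;
    if (c < '0' || c > '9') { is_index = false; return; }
    uint32_t d = c - '0';
    // (The real hasher also rejects leading zeros using the known length.)
    if (index > 429496729U - ((d + 2) >> 3)) { is_index = false; return; }
    index = index * 10 + d;
  }

  uint32_t Finish() {
    uint32_t h = running;              // final avalanche steps
    h += h << 3;
    h ^= h >> 11;
    h += h << 15;
    return h == 0 ? 27 : h;            // never hand out zero as a hash
  }
};

int main() {
  SimpleHasher h;
  for (char c : std::string("123")) h.Add(static_cast<uint16_t>(c));
  assert(h.is_index && h.index == 123);
  assert(h.Finish() != 0);
  SimpleHasher g;
  for (char c : std::string("12a")) g.Add(static_cast<uint16_t>(c));
  assert(!g.is_index);
}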
index) { + if (IsJSProxy()) { + return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index); + } + return JSObject::cast(this)->GetElementAttributeWithReceiver( + this, index, true); +} + + // TODO(504): this may be useful in other places too where JSGlobalProxy // is used. Object* JSObject::BypassGlobalProxy() { @@ -5052,7 +5534,26 @@ bool JSReceiver::HasElement(uint32_t index) { if (IsJSProxy()) { return JSProxy::cast(this)->HasElementWithHandler(index); } - return JSObject::cast(this)->HasElementWithReceiver(this, index); + return JSObject::cast(this)->GetElementAttributeWithReceiver( + this, index, true) != ABSENT; +} + + +bool JSReceiver::HasLocalElement(uint32_t index) { + if (IsJSProxy()) { + return JSProxy::cast(this)->HasElementWithHandler(index); + } + return JSObject::cast(this)->GetElementAttributeWithReceiver( + this, index, false) != ABSENT; +} + + +PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) { + if (IsJSProxy()) { + return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index); + } + return JSObject::cast(this)->GetElementAttributeWithReceiver( + this, index, false); } @@ -5116,7 +5617,7 @@ void Dictionary<Shape, Key>::SetEntry(int entry, Object* key, Object* value, PropertyDetails details) { - ASSERT(!key->IsString() || + ASSERT(!key->IsName() || details.IsDeleted() || details.dictionary_index() > 0); int index = HashTable<Shape, Key>::EntryToIndex(entry); @@ -5156,30 +5657,30 @@ uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key, return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed); } -MaybeObject* NumberDictionaryShape::AsObject(uint32_t key) { - return Isolate::Current()->heap()->NumberFromUint32(key); +MaybeObject* NumberDictionaryShape::AsObject(Heap* heap, uint32_t key) { + return heap->NumberFromUint32(key); } -bool StringDictionaryShape::IsMatch(String* key, Object* other) { +bool NameDictionaryShape::IsMatch(Name* key, Object* other) { // We know that all entries in a hash table had their hash keys created. // Use that knowledge to have fast failure. 
- if (key->Hash() != String::cast(other)->Hash()) return false; - return key->Equals(String::cast(other)); + if (key->Hash() != Name::cast(other)->Hash()) return false; + return key->Equals(Name::cast(other)); } -uint32_t StringDictionaryShape::Hash(String* key) { +uint32_t NameDictionaryShape::Hash(Name* key) { return key->Hash(); } -uint32_t StringDictionaryShape::HashForObject(String* key, Object* other) { - return String::cast(other)->Hash(); +uint32_t NameDictionaryShape::HashForObject(Name* key, Object* other) { + return Name::cast(other)->Hash(); } -MaybeObject* StringDictionaryShape::AsObject(String* key) { +MaybeObject* NameDictionaryShape::AsObject(Heap* heap, Name* key) { return key; } @@ -5206,7 +5707,8 @@ uint32_t ObjectHashTableShape<entrysize>::HashForObject(Object* key, template <int entrysize> -MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Object* key) { +MaybeObject* ObjectHashTableShape<entrysize>::AsObject(Heap* heap, + Object* key) { return key; } @@ -5216,8 +5718,8 @@ void Map::ClearCodeCache(Heap* heap) { // Please note this function is used during marking: // - MarkCompactCollector::MarkUnmarkedObject // - IncrementalMarking::Step - ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array())); - WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array()); + ASSERT(!heap->InNewSpace(heap->empty_fixed_array())); + WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array()); } @@ -5310,8 +5812,14 @@ Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) { } +Handle<Object> TypeFeedbackCells::MonomorphicArraySentinel(Isolate* isolate, + ElementsKind elements_kind) { + return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate); +} + + Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) { - return heap->raw_unchecked_the_hole_value(); + return heap->the_hole_value(); } diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc index b1118de9c4..4522ee43e5 100644 --- a/deps/v8/src/objects-printer.cc +++ b/deps/v8/src/objects-printer.cc @@ -69,13 +69,16 @@ void HeapObject::PrintHeader(FILE* out, const char* id) { void HeapObject::HeapObjectPrint(FILE* out) { InstanceType instance_type = map()->instance_type(); - HandleScope scope; + HandleScope scope(GetIsolate()); if (instance_type < FIRST_NONSTRING_TYPE) { String::cast(this)->StringPrint(out); return; } switch (instance_type) { + case SYMBOL_TYPE: + Symbol::cast(this)->SymbolPrint(out); + break; case MAP_TYPE: Map::cast(this)->MapPrint(out); break; @@ -256,7 +259,7 @@ void JSObject::PrintProperties(FILE* out) { DescriptorArray* descs = map()->instance_descriptors(); for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) { PrintF(out, " "); - descs->GetKey(i)->StringPrint(out); + descs->GetKey(i)->NamePrint(out); PrintF(out, ": "); switch (descs->GetType(i)) { case FIELD: { @@ -384,7 +387,7 @@ void JSObject::PrintElements(FILE* out) { case EXTERNAL_DOUBLE_ELEMENTS: { ExternalDoubleArray* p = ExternalDoubleArray::cast(elements()); for (int i = 0; i < p->length(); i++) { - PrintF(out, " %d: %f\n", i, p->get_scalar(i)); + PrintF(out, " %d: %f\n", i, p->get_scalar(i)); } break; } @@ -393,11 +396,16 @@ void JSObject::PrintElements(FILE* out) { break; case NON_STRICT_ARGUMENTS_ELEMENTS: { FixedArray* p = FixedArray::cast(elements()); + PrintF(out, " parameter map:"); for (int i = 2; i < p->length(); i++) { - PrintF(out, " %d: ", i); + PrintF(out, " %d:", i - 2); p->get(i)->ShortPrint(out); - PrintF(out, "\n"); } + PrintF(out, 
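NameDictionaryShape::IsMatch above compares precomputed hashes before falling back to full equality, so most misses are rejected with one integer compare. A tiny sketch of the same fast-fail pattern:

#include <cassert>
#include <cstdint>
#include <string>

struct Key { uint32_t hash; std::string chars; };

bool IsMatch(const Key& key, const Key& other) {
  if (key.hash != other.hash) return false;  // cheap rejection first
  return key.chars == other.chars;           // full comparison only on a hit
}

int main() {
  Key a{42, "length"}, b{42, "length"}, c{7, "name"};
  assert(IsMatch(a, b));
  assert(!IsMatch(a, c));
}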
"\n context: "); + p->get(0)->ShortPrint(out); + PrintF(out, "\n arguments: "); + p->get(1)->ShortPrint(out); + PrintF(out, "\n"); break; } } @@ -409,7 +417,7 @@ void JSObject::PrintTransitions(FILE* out) { TransitionArray* transitions = map()->transitions(); for (int i = 0; i < transitions->number_of_transitions(); i++) { PrintF(out, " "); - transitions->GetKey(i)->StringPrint(out); + transitions->GetKey(i)->NamePrint(out); PrintF(out, ": "); switch (transitions->GetTargetDetails(i).type()) { case FIELD: { @@ -473,25 +481,32 @@ static const char* TypeToString(InstanceType type) { case MAP_TYPE: return "MAP"; case HEAP_NUMBER_TYPE: return "HEAP_NUMBER"; case SYMBOL_TYPE: return "SYMBOL"; - case ASCII_SYMBOL_TYPE: return "ASCII_SYMBOL"; - case CONS_SYMBOL_TYPE: return "CONS_SYMBOL"; - case CONS_ASCII_SYMBOL_TYPE: return "CONS_ASCII_SYMBOL"; - case EXTERNAL_ASCII_SYMBOL_TYPE: - case EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE: - case EXTERNAL_SYMBOL_TYPE: return "EXTERNAL_SYMBOL"; - case SHORT_EXTERNAL_ASCII_SYMBOL_TYPE: - case SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE: - case SHORT_EXTERNAL_SYMBOL_TYPE: return "SHORT_EXTERNAL_SYMBOL"; - case ASCII_STRING_TYPE: return "ASCII_STRING"; case STRING_TYPE: return "TWO_BYTE_STRING"; + case ASCII_STRING_TYPE: return "ASCII_STRING"; case CONS_STRING_TYPE: - case CONS_ASCII_STRING_TYPE: return "CONS_STRING"; + case CONS_ASCII_STRING_TYPE: + return "CONS_STRING"; + case EXTERNAL_STRING_TYPE: case EXTERNAL_ASCII_STRING_TYPE: case EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: - case EXTERNAL_STRING_TYPE: return "EXTERNAL_STRING"; + return "EXTERNAL_STRING"; + case SHORT_EXTERNAL_STRING_TYPE: case SHORT_EXTERNAL_ASCII_STRING_TYPE: case SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE: - case SHORT_EXTERNAL_STRING_TYPE: return "SHORT_EXTERNAL_STRING"; + return "SHORT_EXTERNAL_STRING"; + case INTERNALIZED_STRING_TYPE: return "INTERNALIZED_STRING"; + case ASCII_INTERNALIZED_STRING_TYPE: return "ASCII_INTERNALIZED_STRING"; + case CONS_INTERNALIZED_STRING_TYPE: return "CONS_INTERNALIZED_STRING"; + case CONS_ASCII_INTERNALIZED_STRING_TYPE: + return "CONS_ASCII_INTERNALIZED_STRING"; + case EXTERNAL_INTERNALIZED_STRING_TYPE: + case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE: + case EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE: + return "EXTERNAL_INTERNALIZED_STRING"; + case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE: + case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE: + case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE: + return "SHORT_EXTERNAL_INTERNALIZED_STRING"; case FIXED_ARRAY_TYPE: return "FIXED_ARRAY"; case BYTE_ARRAY_TYPE: return "BYTE_ARRAY"; case FREE_SPACE_TYPE: return "FREE_SPACE"; @@ -534,6 +549,12 @@ static const char* TypeToString(InstanceType type) { } +void Symbol::SymbolPrint(FILE* out) { + HeapObject::PrintHeader(out, "Symbol"); + PrintF(out, " - hash: %d\n", Hash()); +} + + void Map::MapPrint(FILE* out) { HeapObject::PrintHeader(out, "Map"); PrintF(out, " - type: %s\n", TypeToString(instance_type())); @@ -578,6 +599,8 @@ void Map::MapPrint(FILE* out) { constructor()->ShortPrint(out); PrintF(out, "\n - code cache: "); code_cache()->ShortPrint(out); + PrintF(out, "\n - dependent code: "); + dependent_code()->ShortPrint(out); PrintF(out, "\n"); } @@ -663,7 +686,7 @@ void JSMessageObject::JSMessageObjectPrint(FILE* out) { void String::StringPrint(FILE* out) { - if (StringShape(this).IsSymbol()) { + if (StringShape(this).IsInternalized()) { PrintF(out, "#"); } else if (StringShape(this).IsCons()) { PrintF(out, "c\""); @@ -685,7 +708,15 @@ void 
String::StringPrint(FILE* out) { PrintF(out, "%s", truncated_epilogue); } - if (!StringShape(this).IsSymbol()) PrintF(out, "\""); + if (!StringShape(this).IsInternalized()) PrintF(out, "\""); +} + + +void Name::NamePrint(FILE* out) { + if (IsString()) + String::cast(this)->StringPrint(out); + else + ShortPrint(); } @@ -698,7 +729,7 @@ char* String::ToAsciiArray() { static char* buffer = NULL; if (buffer != NULL) free(buffer); buffer = new char[length()+1]; - WriteToFlat(this, buffer, 0, length()); + WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length()); buffer[length()] = 0; return buffer; } @@ -869,18 +900,36 @@ void Foreign::ForeignPrint(FILE* out) { } -void AccessorInfo::AccessorInfoPrint(FILE* out) { - HeapObject::PrintHeader(out, "AccessorInfo"); +void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(FILE* out) { + HeapObject::PrintHeader(out, "ExecutableAccessorInfo"); + PrintF(out, "\n - name: "); + name()->ShortPrint(out); + PrintF(out, "\n - flag: "); + flag()->ShortPrint(out); PrintF(out, "\n - getter: "); getter()->ShortPrint(out); PrintF(out, "\n - setter: "); setter()->ShortPrint(out); - PrintF(out, "\n - name: "); - name()->ShortPrint(out); PrintF(out, "\n - data: "); data()->ShortPrint(out); +} + + +void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(FILE* out) { + HeapObject::PrintHeader(out, "DeclaredAccessorInfo"); + PrintF(out, "\n - name: "); + name()->ShortPrint(out); PrintF(out, "\n - flag: "); flag()->ShortPrint(out); + PrintF(out, "\n - descriptor: "); + descriptor()->ShortPrint(out); +} + + +void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) { + HeapObject::PrintHeader(out, "DeclaredAccessorDescriptor"); + PrintF(out, "\n - internal field: "); + serialized_data()->ShortPrint(out); } @@ -997,6 +1046,33 @@ void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) { } +void AllocationSiteInfo::AllocationSiteInfoPrint(FILE* out) { + HeapObject::PrintHeader(out, "AllocationSiteInfo"); + PrintF(out, " - payload: "); + if (payload()->IsJSGlobalPropertyCell()) { + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload()); + Object* cell_contents = cell->value(); + if (cell_contents->IsSmi()) { + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(cell_contents)->value()); + PrintF(out, "Array allocation with ElementsKind "); + PrintElementsKind(out, kind); + PrintF(out, "\n"); + return; + } + } else if (payload()->IsJSArray()) { + PrintF(out, "Array literal "); + payload()->ShortPrint(out); + PrintF(out, "\n"); + return; + } + + PrintF(out, "unknown payload "); + payload()->ShortPrint(out); + PrintF(out, "\n"); +} + + void Script::ScriptPrint(FILE* out) { HeapObject::PrintHeader(out, "Script"); PrintF(out, "\n - source: "); @@ -1070,7 +1146,7 @@ void TransitionArray::PrintTransitions(FILE* out) { PrintF(out, "Transition array %d\n", number_of_transitions()); for (int i = 0; i < number_of_transitions(); i++) { PrintF(out, " %d: ", i); - GetKey(i)->StringPrint(out); + GetKey(i)->NamePrint(out); PrintF(out, ": "); switch (GetTargetDetails(i).type()) { case FIELD: { diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h index d698a8df06..beb07b5644 100644 --- a/deps/v8/src/objects-visiting-inl.h +++ b/deps/v8/src/objects-visiting-inl.h @@ -68,7 +68,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() { SharedFunctionInfo::BodyDescriptor, int>::Visit); - table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString); + table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString); 
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString); @@ -110,10 +110,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() { SlicedString::BodyDescriptor, void>::Visit); - table_.Register(kVisitFixedArray, - &FlexibleBodyVisitor<StaticVisitor, - FixedArray::BodyDescriptor, - void>::Visit); + table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit); table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit); @@ -123,7 +120,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() { table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit); - table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit); + table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit); table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit); @@ -178,8 +175,12 @@ void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer( ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); ASSERT(!rinfo->target_object()->IsConsString()); HeapObject* object = HeapObject::cast(rinfo->target_object()); - heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); - StaticVisitor::MarkObject(heap, object); + if (!FLAG_weak_embedded_maps_in_optimized_code || !FLAG_collect_maps || + rinfo->host()->kind() != Code::OPTIMIZED_FUNCTION || + !object->IsMap() || !Map::cast(object)->CanTransition()) { + heap->mark_compact_collector()->RecordRelocSlot(rinfo, object); + StaticVisitor::MarkObject(heap, object); + } } @@ -214,7 +215,8 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget( // when they might be keeping a Context alive, or when the heap is about // to be serialized. if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() - && (target->ic_state() == MEGAMORPHIC || heap->flush_monomorphic_ics() || + && (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC || + target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() || Serializer::enabled() || target->ic_age() != heap->global_ic_age())) { IC::Clear(rinfo->pc()); target = Code::GetCodeFromTargetAddress(rinfo->target_address()); @@ -225,6 +227,17 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget( template<typename StaticVisitor> +void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence( + Heap* heap, RelocInfo* rinfo) { + ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); + Code* target = rinfo->code_age_stub(); + ASSERT(target != NULL); + heap->mark_compact_collector()->RecordRelocSlot(rinfo, target); + StaticVisitor::MarkObject(heap, target); +} + + +template<typename StaticVisitor> void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext( Map* map, HeapObject* object) { FixedBodyVisitor<StaticVisitor, @@ -253,12 +266,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitMap( map_object->ClearCodeCache(heap); } - // When map collection is enabled we have to mark through map's - // transitions and back pointers in a special way to make these links - // weak. Only maps for subclasses of JSReceiver can have transitions. - STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); - if (FLAG_collect_maps && - map_object->instance_type() >= FIRST_JS_RECEIVER_TYPE) { + // When map collection is enabled we have to mark through map's transitions + // and back pointers in a special way to make these links weak. 
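// The new guard in VisitEmbeddedPointer only records and marks the embedded object
// when it is not a transitionable map referenced from optimized code, which makes such
// map references effectively weak so unused maps can still be collected. A reduced
// sketch of that decision as a standalone predicate; the parameter names paraphrase
// the flags and checks in the hunk above and are not real V8 symbols.
#include <cstdio>

// Treat the reference as strong unless every condition for a weak map reference holds.
static bool ShouldMarkStrongly(bool weak_maps_in_optimized_code_enabled,
                               bool collect_maps_enabled,
                               bool host_is_optimized_code,
                               bool target_is_map,
                               bool map_can_transition) {
  return !weak_maps_in_optimized_code_enabled ||
         !collect_maps_enabled ||
         !host_is_optimized_code ||
         !target_is_map ||
         !map_can_transition;
}

int main() {
  // A transitionable map embedded in optimized code is not marked here, i.e. the
  // reference is weak; everything else is marked as before.
  std::printf("%d\n", ShouldMarkStrongly(true, true, true, true, true));   // 0
  std::printf("%d\n", ShouldMarkStrongly(true, true, false, true, true));  // 1
  return 0;
}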
+ if (FLAG_collect_maps && map_object->CanTransition()) { MarkMapContents(heap, map_object); } else { StaticVisitor::VisitPointers(heap, @@ -276,6 +286,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode( if (FLAG_cleanup_code_caches_at_gc) { code->ClearTypeFeedbackCells(heap); } + if (FLAG_age_code && !Serializer::enabled()) { + code->MakeOlder(heap->mark_compact_collector()->marking_parity()); + } code->CodeIterateBody<StaticVisitor>(heap); } @@ -288,6 +301,13 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo( if (shared->ic_age() != heap->global_ic_age()) { shared->ResetForNewContext(heap->global_ic_age()); } + if (FLAG_cache_optimized_code) { + // Flush optimized code map on major GC. + // TODO(mstarzinger): We may experiment with rebuilding it or with + // retaining entries which should survive as we iterate through + // optimized functions anyway. + shared->ClearOptimizedCodeMap(); + } MarkCompactCollector* collector = heap->mark_compact_collector(); if (collector->is_code_flushing_enabled()) { if (IsFlushable(heap, shared)) { @@ -376,6 +396,41 @@ void StaticMarkingVisitor<StaticVisitor>::MarkMapContents( ASSERT(transitions->IsMap() || transitions->IsUndefined()); } + // Since descriptor arrays are potentially shared, ensure that only the + // descriptors that appeared for this map are marked. The first time a + // non-empty descriptor array is marked, its header is also visited. The slot + // holding the descriptor array will be implicitly recorded when the pointer + // fields of this map are visited. + DescriptorArray* descriptors = map->instance_descriptors(); + if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) && + descriptors->length() > 0) { + StaticVisitor::VisitPointers(heap, + descriptors->GetFirstElementAddress(), + descriptors->GetDescriptorEndSlot(0)); + } + int start = 0; + int end = map->NumberOfOwnDescriptors(); + Object* back_pointer = map->GetBackPointer(); + if (!back_pointer->IsUndefined()) { + Map* parent_map = Map::cast(back_pointer); + if (descriptors == parent_map->instance_descriptors()) { + start = parent_map->NumberOfOwnDescriptors(); + } + } + if (start < end) { + StaticVisitor::VisitPointers(heap, + descriptors->GetDescriptorStartSlot(start), + descriptors->GetDescriptorEndSlot(end)); + } + + // Mark prototype dependent codes array but do not push it onto marking + // stack, this will make references from it weak. We will clean dead + // codes when we iterate over maps in ClearNonLiveTransitions. + Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset); + HeapObject* obj = HeapObject::cast(*slot); + heap->mark_compact_collector()->RecordSlot(slot, slot, obj); + StaticVisitor::MarkObjectWithoutPush(heap, obj); + // Mark the pointer fields of the Map. Since the transitions array has // been marked already, it is fine that one of these fields contains a // pointer to it. @@ -449,8 +504,10 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable( // by optimized version of function. MarkBit code_mark = Marking::MarkBitFrom(function->code()); if (code_mark.Get()) { - if (!Marking::MarkBitFrom(shared_info).Get()) { - shared_info->set_code_age(0); + if (!FLAG_age_code) { + if (!Marking::MarkBitFrom(shared_info).Get()) { + shared_info->set_code_age(0); + } } return false; } @@ -460,11 +517,16 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable( return false; } - // We do not flush code for optimized functions. + // We do not (yet) flush code for optimized functions. 
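// MarkMapContents in this hunk marks a shared descriptor array in two pieces: its
// header (plus slot 0) the first time the array is reached, and then only the
// descriptors this particular map owns, which begin where the parent map's own
// descriptors end. A standalone sketch of that ownership-range computation, with plain
// vectors standing in for DescriptorArray and Map.
#include <cstdio>
#include <string>
#include <vector>

// Simplified stand-ins: a map owns a prefix of a (possibly shared) descriptor vector;
// a child map reusing its parent's vector only owns the descriptors appended after
// the parent's prefix.
struct MapSketch {
  const std::vector<std::string>* descriptors;
  int number_of_own_descriptors;
  const MapSketch* back_pointer;  // parent map, or nullptr
};

// Returns the half-open range [start, end) of descriptors this map has to visit
// itself, mirroring the start/end computation in MarkMapContents.
static void OwnDescriptorRange(const MapSketch& map, int* start, int* end) {
  *start = 0;
  *end = map.number_of_own_descriptors;
  if (map.back_pointer != nullptr &&
      map.back_pointer->descriptors == map.descriptors) {
    *start = map.back_pointer->number_of_own_descriptors;
  }
}

int main() {
  std::vector<std::string> shared = {"x", "y", "z"};
  MapSketch parent = {&shared, 2, nullptr};
  MapSketch child = {&shared, 3, &parent};
  int start, end;
  OwnDescriptorRange(child, &start, &end);
  std::printf("child marks descriptors [%d, %d)\n", start, end);  // [2, 3)
  return 0;
}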
if (function->code() != shared_info->code()) { return false; } + // Check age of optimized code. + if (FLAG_age_code && !function->code()->IsOld()) { + return false; + } + return IsFlushable(heap, shared_info); } @@ -506,20 +568,20 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable( return false; } - // TODO(mstarzinger): The following will soon be replaced by a new way of - // aging code, that is based on an aging stub in the function prologue. - - // How many collections newly compiled code object will survive before being - // flushed. - static const int kCodeAgeThreshold = 5; - - // Age this shared function info. - if (shared_info->code_age() < kCodeAgeThreshold) { - shared_info->set_code_age(shared_info->code_age() + 1); - return false; + if (FLAG_age_code) { + return shared_info->code()->IsOld(); + } else { + // How many collections newly compiled code object will survive before being + // flushed. + static const int kCodeAgeThreshold = 5; + + // Age this shared function info. + if (shared_info->code_age() < kCodeAgeThreshold) { + shared_info->set_code_age(shared_info->code_age() + 1); + return false; + } + return true; } - - return true; } @@ -613,7 +675,7 @@ void Code::CodeIterateBody(ObjectVisitor* v) { RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); // There are two places where we iterate code bodies: here and the - // templated CodeIterateBody (below). They should be kept in sync. + // templated CodeIterateBody (below). They should be kept in sync. IteratePointer(v, kRelocationInfoOffset); IteratePointer(v, kHandlerTableOffset); IteratePointer(v, kDeoptimizationDataOffset); @@ -636,8 +698,8 @@ void Code::CodeIterateBody(Heap* heap) { RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); - // There are two places where we iterate code bodies: here and the - // non-templated CodeIterateBody (above). They should be kept in sync. + // There are two places where we iterate code bodies: here and the non- + // templated CodeIterateBody (above). They should be kept in sync. 
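// With FLAG_age_code the flushing decision above moves from a per-SharedFunctionInfo
// counter to an age recorded on the code object itself; without the flag, the old
// policy keeps counting collections until a threshold of five is reached. A minimal
// sketch of the two policies side by side; kCodeAgeThreshold is taken from the hunk,
// everything else is an illustrative stand-in.
#include <cstdio>

static const int kCodeAgeThreshold = 5;

struct SharedInfoSketch {
  int code_age;      // GC survival counter used when code aging is off
  bool code_is_old;  // result of Code::IsOld() when code aging is on
};

// Mirrors the tail of IsFlushable: either consult the aging stub's verdict, or age
// the counter and flush only once it crosses the threshold.
static bool IsFlushable(SharedInfoSketch* shared, bool age_code_enabled) {
  if (age_code_enabled) return shared->code_is_old;
  if (shared->code_age < kCodeAgeThreshold) {
    shared->code_age++;
    return false;
  }
  return true;
}

int main() {
  SharedInfoSketch shared = {0, false};
  int survived = 0;
  while (!IsFlushable(&shared, false)) survived++;
  std::printf("flushed after %d collections\n", survived);  // 5
  return 0;
}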
StaticVisitor::VisitPointer( heap, reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset)); diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc index a2dc43e247..088f5ebdeb 100644 --- a/deps/v8/src/objects-visiting.cc +++ b/deps/v8/src/objects-visiting.cc @@ -45,8 +45,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId( if (instance_type < FIRST_NONSTRING_TYPE) { switch (instance_type & kStringRepresentationMask) { case kSeqStringTag: - if ((instance_type & kStringEncodingMask) == kAsciiStringTag) { - return kVisitSeqAsciiString; + if ((instance_type & kStringEncodingMask) == kOneByteStringTag) { + return kVisitSeqOneByteString; } else { return kVisitSeqTwoByteString; } @@ -128,6 +128,11 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId( kVisitDataObjectGeneric, Foreign::kSize); + case SYMBOL_TYPE: + return GetVisitorIdForSize(kVisitDataObject, + kVisitDataObjectGeneric, + Symbol::kSize); + case FILLER_TYPE: return kVisitDataObjectGeneric; diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h index 26d1b121d2..9b2422ca22 100644 --- a/deps/v8/src/objects-visiting.h +++ b/deps/v8/src/objects-visiting.h @@ -47,7 +47,7 @@ namespace internal { class StaticVisitorBase : public AllStatic { public: #define VISITOR_ID_LIST(V) \ - V(SeqAsciiString) \ + V(SeqOneByteString) \ V(SeqTwoByteString) \ V(ShortcutCandidate) \ V(ByteArray) \ @@ -221,7 +221,7 @@ class BodyVisitorBase : public AllStatic { template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType> class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> { public: - static inline ReturnType Visit(Map* map, HeapObject* object) { + INLINE(static ReturnType Visit(Map* map, HeapObject* object)) { int object_size = BodyDescriptor::SizeOf(map, object); BodyVisitorBase<StaticVisitor>::IteratePointers( map->GetHeap(), @@ -247,7 +247,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> { template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType> class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> { public: - static inline ReturnType Visit(Map* map, HeapObject* object) { + INLINE(static ReturnType Visit(Map* map, HeapObject* object)) { BodyVisitorBase<StaticVisitor>::IteratePointers( map->GetHeap(), object, @@ -279,16 +279,16 @@ class StaticNewSpaceVisitor : public StaticVisitorBase { public: static void Initialize(); - static inline int IterateBody(Map* map, HeapObject* obj) { + INLINE(static int IterateBody(Map* map, HeapObject* obj)) { return table_.GetVisitor(map)(map, obj); } - static inline void VisitPointers(Heap* heap, Object** start, Object** end) { + INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p); } private: - static inline int VisitJSFunction(Map* map, HeapObject* object) { + INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) { Heap* heap = map->GetHeap(); VisitPointers(heap, HeapObject::RawField(object, JSFunction::kPropertiesOffset), @@ -305,30 +305,30 @@ class StaticNewSpaceVisitor : public StaticVisitorBase { return JSFunction::kSize; } - static inline int VisitByteArray(Map* map, HeapObject* object) { + INLINE(static int VisitByteArray(Map* map, HeapObject* object)) { return reinterpret_cast<ByteArray*>(object)->ByteArraySize(); } - static inline int VisitFixedDoubleArray(Map* map, HeapObject* object) { + INLINE(static int VisitFixedDoubleArray(Map* map, 
HeapObject* object)) { int length = reinterpret_cast<FixedDoubleArray*>(object)->length(); return FixedDoubleArray::SizeFor(length); } - static inline int VisitJSObject(Map* map, HeapObject* object) { + INLINE(static int VisitJSObject(Map* map, HeapObject* object)) { return JSObjectVisitor::Visit(map, object); } - static inline int VisitSeqAsciiString(Map* map, HeapObject* object) { - return SeqAsciiString::cast(object)-> - SeqAsciiStringSize(map->instance_type()); + INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) { + return SeqOneByteString::cast(object)-> + SeqOneByteStringSize(map->instance_type()); } - static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) { + INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) { return SeqTwoByteString::cast(object)-> SeqTwoByteStringSize(map->instance_type()); } - static inline int VisitFreeSpace(Map* map, HeapObject* object) { + INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) { return FreeSpace::cast(object)->Size(); } @@ -339,7 +339,7 @@ class StaticNewSpaceVisitor : public StaticVisitorBase { return object_size; } - static inline int Visit(Map* map, HeapObject* object) { + INLINE(static int Visit(Map* map, HeapObject* object)) { return map->instance_size(); } }; @@ -382,20 +382,18 @@ class StaticMarkingVisitor : public StaticVisitorBase { public: static void Initialize(); - static inline void IterateBody(Map* map, HeapObject* obj) { + INLINE(static void IterateBody(Map* map, HeapObject* obj)) { table_.GetVisitor(map)(map, obj); } - static inline void VisitCodeEntry(Heap* heap, Address entry_address); - static inline void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo); - static inline void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo); - static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo); - static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo); - static inline void VisitExternalReference(RelocInfo* rinfo) { } - static inline void VisitRuntimeEntry(RelocInfo* rinfo) { } - - // TODO(mstarzinger): This should be made protected once refactoring is done. - static inline void VisitNativeContext(Map* map, HeapObject* object); + INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address)); + INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo)); + INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { } + INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { } // TODO(mstarzinger): This should be made protected once refactoring is done. 
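// The header changes here wrap the visitor entry points in an INLINE(...) macro
// instead of writing static inline directly, so hot GC paths can be force-inlined
// where the compiler supports it. A hedged sketch of how such a macro is commonly
// defined and used; this is an assumption about the general pattern, not a copy of
// V8's actual macro definition.
#include <cstdio>

// Illustrative force-inline macro: prefer a compiler-specific attribute, fall back
// to plain "inline" elsewhere.
#if defined(__GNUC__)
#define INLINE(declarator) inline __attribute__((always_inline)) declarator
#elif defined(_MSC_VER)
#define INLINE(declarator) __forceinline declarator
#else
#define INLINE(declarator) inline declarator
#endif

class Visitor {
 public:
  INLINE(static int Visit(int size)) { return size; }
};

int main() {
  std::printf("%d\n", Visitor::Visit(16));
  return 0;
}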
// Mark non-optimize code for functions inlined into the given optimized @@ -403,11 +401,12 @@ class StaticMarkingVisitor : public StaticVisitorBase { static void MarkInlinedFunctionsCode(Heap* heap, Code* code); protected: - static inline void VisitMap(Map* map, HeapObject* object); - static inline void VisitCode(Map* map, HeapObject* object); - static inline void VisitSharedFunctionInfo(Map* map, HeapObject* object); - static inline void VisitJSFunction(Map* map, HeapObject* object); - static inline void VisitJSRegExp(Map* map, HeapObject* object); + INLINE(static void VisitMap(Map* map, HeapObject* object)); + INLINE(static void VisitCode(Map* map, HeapObject* object)); + INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object)); + INLINE(static void VisitJSFunction(Map* map, HeapObject* object)); + INLINE(static void VisitJSRegExp(Map* map, HeapObject* object)); + INLINE(static void VisitNativeContext(Map* map, HeapObject* object)); // Mark pointers in a Map and its TransitionArray together, possibly // treating transitions or back pointers weak. @@ -415,8 +414,8 @@ class StaticMarkingVisitor : public StaticVisitorBase { static void MarkTransitionArray(Heap* heap, TransitionArray* transitions); // Code flushing support. - static inline bool IsFlushable(Heap* heap, JSFunction* function); - static inline bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info); + INLINE(static bool IsFlushable(Heap* heap, JSFunction* function)); + INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info)); // Helpers used by code flushing support that visit pointer fields and treat // references to code objects either strongly or weakly. @@ -431,11 +430,15 @@ class StaticMarkingVisitor : public StaticVisitorBase { static inline void VisitSpecialized(Map* map, HeapObject* object) { } - static inline void Visit(Map* map, HeapObject* object) { + INLINE(static void Visit(Map* map, HeapObject* object)) { } }; typedef FlexibleBodyVisitor<StaticVisitor, + FixedArray::BodyDescriptor, + void> FixedArrayVisitor; + + typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void> JSObjectVisitor; diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc index 37f8361d8f..00d00d5ee6 100644 --- a/deps/v8/src/objects.cc +++ b/deps/v8/src/objects.cc @@ -1,4 +1,4 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2013 the V8 project authors. All rights reserved. 
// Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -27,6 +27,7 @@ #include "v8.h" +#include "accessors.h" #include "api.h" #include "arguments.h" #include "bootstrapper.h" @@ -104,43 +105,31 @@ MaybeObject* Object::ToObject() { } -Object* Object::ToBoolean() { - if (IsTrue()) return this; - if (IsFalse()) return this; - if (IsSmi()) { - return Isolate::Current()->heap()->ToBoolean(Smi::cast(this)->value() != 0); - } - HeapObject* heap_object = HeapObject::cast(this); - if (heap_object->IsUndefined() || heap_object->IsNull()) { - return heap_object->GetHeap()->false_value(); - } - // Undetectable object is false - if (heap_object->IsUndetectableObject()) { - return heap_object->GetHeap()->false_value(); - } - if (heap_object->IsString()) { - return heap_object->GetHeap()->ToBoolean( - String::cast(this)->length() != 0); - } - if (heap_object->IsHeapNumber()) { - return HeapNumber::cast(this)->HeapNumberToBoolean(); - } - return heap_object->GetHeap()->true_value(); +bool Object::BooleanValue() { + if (IsBoolean()) return IsTrue(); + if (IsSmi()) return Smi::cast(this)->value() != 0; + if (IsUndefined() || IsNull()) return false; + if (IsUndetectableObject()) return false; // Undetectable object is false. + if (IsString()) return String::cast(this)->length() != 0; + if (IsHeapNumber()) return HeapNumber::cast(this)->HeapNumberBooleanValue(); + return true; } -void Object::Lookup(String* name, LookupResult* result) { +void Object::Lookup(Name* name, LookupResult* result) { Object* holder = NULL; if (IsJSReceiver()) { holder = this; } else { - Context* native_context = Isolate::Current()->context()->native_context(); + Context* native_context = result->isolate()->context()->native_context(); if (IsNumber()) { holder = native_context->number_function()->instance_prototype(); } else if (IsString()) { holder = native_context->string_function()->instance_prototype(); } else if (IsBoolean()) { holder = native_context->boolean_function()->instance_prototype(); + } else if (IsSymbol()) { + holder = native_context->symbol_delegate(); } else { Isolate::Current()->PushStackTraceAndDie( 0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001); @@ -152,7 +141,7 @@ void Object::Lookup(String* name, LookupResult* result) { MaybeObject* Object::GetPropertyWithReceiver(Object* receiver, - String* name, + Name* name, PropertyAttributes* attributes) { LookupResult result(name->GetIsolate()); Lookup(name, &result); @@ -162,9 +151,149 @@ MaybeObject* Object::GetPropertyWithReceiver(Object* receiver, } +template<typename To> +static inline To* CheckedCast(void *from) { + uintptr_t temp = reinterpret_cast<uintptr_t>(from); + ASSERT(temp % sizeof(To) == 0); + return reinterpret_cast<To*>(temp); +} + + +static MaybeObject* PerformCompare(const BitmaskCompareDescriptor& descriptor, + char* ptr, + Heap* heap) { + uint32_t bitmask = descriptor.bitmask; + uint32_t compare_value = descriptor.compare_value; + uint32_t value; + switch (descriptor.size) { + case 1: + value = static_cast<uint32_t>(*CheckedCast<uint8_t>(ptr)); + compare_value &= 0xff; + bitmask &= 0xff; + break; + case 2: + value = static_cast<uint32_t>(*CheckedCast<uint16_t>(ptr)); + compare_value &= 0xffff; + bitmask &= 0xffff; + break; + case 4: + value = *CheckedCast<uint32_t>(ptr); + break; + default: + UNREACHABLE(); + return NULL; + } + return heap->ToBoolean((bitmask & value) == (bitmask & compare_value)); +} + + +static MaybeObject* 
PerformCompare(const PointerCompareDescriptor& descriptor, + char* ptr, + Heap* heap) { + uintptr_t compare_value = + reinterpret_cast<uintptr_t>(descriptor.compare_value); + uintptr_t value = *CheckedCast<uintptr_t>(ptr); + return heap->ToBoolean(compare_value == value); +} + + +static MaybeObject* GetPrimitiveValue( + const PrimitiveValueDescriptor& descriptor, + char* ptr, + Heap* heap) { + int32_t int32_value = 0; + switch (descriptor.data_type) { + case kDescriptorInt8Type: + int32_value = *CheckedCast<int8_t>(ptr); + break; + case kDescriptorUint8Type: + int32_value = *CheckedCast<uint8_t>(ptr); + break; + case kDescriptorInt16Type: + int32_value = *CheckedCast<int16_t>(ptr); + break; + case kDescriptorUint16Type: + int32_value = *CheckedCast<uint16_t>(ptr); + break; + case kDescriptorInt32Type: + int32_value = *CheckedCast<int32_t>(ptr); + break; + case kDescriptorUint32Type: { + uint32_t value = *CheckedCast<uint32_t>(ptr); + return heap->NumberFromUint32(value); + } + case kDescriptorBoolType: { + uint8_t byte = *CheckedCast<uint8_t>(ptr); + return heap->ToBoolean(byte & (0x1 << descriptor.bool_offset)); + } + case kDescriptorFloatType: { + float value = *CheckedCast<float>(ptr); + return heap->NumberFromDouble(value); + } + case kDescriptorDoubleType: { + double value = *CheckedCast<double>(ptr); + return heap->NumberFromDouble(value); + } + } + return heap->NumberFromInt32(int32_value); +} + + +static MaybeObject* GetDeclaredAccessorProperty(Object* receiver, + DeclaredAccessorInfo* info, + Isolate* isolate) { + char* current = reinterpret_cast<char*>(receiver); + DeclaredAccessorDescriptorIterator iterator(info->descriptor()); + while (true) { + const DeclaredAccessorDescriptorData* data = iterator.Next(); + switch (data->type) { + case kDescriptorReturnObject: { + ASSERT(iterator.Complete()); + current = *CheckedCast<char*>(current); + return *CheckedCast<Object*>(current); + } + case kDescriptorPointerDereference: + ASSERT(!iterator.Complete()); + current = *reinterpret_cast<char**>(current); + break; + case kDescriptorPointerShift: + ASSERT(!iterator.Complete()); + current += data->pointer_shift_descriptor.byte_offset; + break; + case kDescriptorObjectDereference: { + ASSERT(!iterator.Complete()); + Object* object = CheckedCast<Object>(current); + int field = data->object_dereference_descriptor.internal_field; + Object* smi = JSObject::cast(object)->GetInternalField(field); + ASSERT(smi->IsSmi()); + current = reinterpret_cast<char*>(smi); + break; + } + case kDescriptorBitmaskCompare: + ASSERT(iterator.Complete()); + return PerformCompare(data->bitmask_compare_descriptor, + current, + isolate->heap()); + case kDescriptorPointerCompare: + ASSERT(iterator.Complete()); + return PerformCompare(data->pointer_compare_descriptor, + current, + isolate->heap()); + case kDescriptorPrimitiveValue: + ASSERT(iterator.Complete()); + return GetPrimitiveValue(data->primitive_value_descriptor, + current, + isolate->heap()); + } + } + UNREACHABLE(); + return NULL; +} + + MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, Object* structure, - String* name) { + Name* name) { Isolate* isolate = name->GetIsolate(); // To accommodate both the old and the new api we switch on the // data structure used to store the callbacks. Eventually foreign @@ -180,10 +309,9 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, // api style callbacks. 
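// GetDeclaredAccessorProperty above interprets a small sequence of descriptor
// operations over the raw bytes of the receiver: dereference a pointer, shift by a
// byte offset, then read a primitive (or run a masked compare) at the final position.
// A self-contained sketch of that interpretation loop over an ordinary C++ struct;
// the op names and layout are simplified stand-ins, not the real descriptor encoding.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Simplified descriptor ops: follow a pointer, add a byte offset, or terminate by
// loading a 32-bit value at the current position.
enum OpType { kPointerDereference, kPointerShift, kLoadInt32 };
struct Op { OpType type; int byte_offset; };

struct Inner { int32_t padding; int32_t answer; };
struct Outer { Inner* inner; };

static int32_t Interpret(const Op* ops, int count, const void* receiver) {
  const char* current = static_cast<const char*>(receiver);
  for (int i = 0; i < count; i++) {
    switch (ops[i].type) {
      case kPointerDereference: {
        const void* next;
        std::memcpy(&next, current, sizeof(next));  // read a pointer at current
        current = static_cast<const char*>(next);
        break;
      }
      case kPointerShift:
        current += ops[i].byte_offset;
        break;
      case kLoadInt32: {
        int32_t value;
        std::memcpy(&value, current, sizeof(value));
        return value;
      }
    }
  }
  return 0;
}

int main() {
  Inner inner = {0, 42};
  Outer outer = {&inner};
  // Equivalent to: load 32 bits at offsetof(Inner, answer) of *outer.inner.
  Op ops[] = {{kPointerDereference, 0},
              {kPointerShift, static_cast<int>(offsetof(Inner, answer))},
              {kLoadInt32, 0}};
  std::printf("%d\n", Interpret(ops, 3, &outer));  // 42
  return 0;
}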
if (structure->IsAccessorInfo()) { - AccessorInfo* data = AccessorInfo::cast(structure); - if (!data->IsCompatibleReceiver(receiver)) { - Handle<Object> name_handle(name); - Handle<Object> receiver_handle(receiver); + if (!AccessorInfo::cast(structure)->IsCompatibleReceiver(receiver)) { + Handle<Object> name_handle(name, isolate); + Handle<Object> receiver_handle(receiver, isolate); Handle<Object> args[2] = { name_handle, receiver_handle }; Handle<Object> error = isolate->factory()->NewTypeError("incompatible_method_receiver", @@ -191,12 +319,21 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, ARRAY_SIZE(args))); return isolate->Throw(*error); } + // TODO(rossberg): Handling symbols in the API requires changing the API, + // so we do not support it for now. + if (name->IsSymbol()) return isolate->heap()->undefined_value(); + if (structure->IsDeclaredAccessorInfo()) { + return GetDeclaredAccessorProperty(receiver, + DeclaredAccessorInfo::cast(structure), + isolate); + } + ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure); Object* fun_obj = data->getter(); v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj); if (call_fun == NULL) return isolate->heap()->undefined_value(); HandleScope scope(isolate); JSObject* self = JSObject::cast(receiver); - Handle<String> key(name); + Handle<String> key(String::cast(name)); LOG(isolate, ApiNamedPropertyAccess("load", self, name)); CustomArguments args(isolate, data->data(), self, this); v8::AccessorInfo info(args.end()); @@ -232,11 +369,11 @@ MaybeObject* JSObject::GetPropertyWithCallback(Object* receiver, MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw, - String* name_raw) { + Name* name_raw) { Isolate* isolate = GetIsolate(); HandleScope scope(isolate); - Handle<Object> receiver(receiver_raw); - Handle<Object> name(name_raw); + Handle<Object> receiver(receiver_raw, isolate); + Handle<Object> name(name_raw, isolate); Handle<Object> args[] = { receiver, name }; Handle<Object> result = CallTrap( @@ -247,6 +384,19 @@ MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw, } +Handle<Object> Object::GetProperty(Handle<Object> object, Handle<Name> name) { + // TODO(rossberg): The index test should not be here but in the GetProperty + // method (or somewhere else entirely). Needs more global clean-up. + uint32_t index; + if (name->AsArrayIndex(&index)) + return GetElement(object, index); + Isolate* isolate = object->IsHeapObject() + ? Handle<HeapObject>::cast(object)->GetIsolate() + : Isolate::Current(); + CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object); +} + + Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) { Isolate* isolate = object->IsHeapObject() ? Handle<HeapObject>::cast(object)->GetIsolate() @@ -285,11 +435,12 @@ bool JSProxy::HasElementWithHandler(uint32_t index) { MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver, JSReceiver* getter) { - HandleScope scope; + Isolate* isolate = getter->GetIsolate(); + HandleScope scope(isolate); Handle<JSReceiver> fun(getter); - Handle<Object> self(receiver); + Handle<Object> self(receiver, isolate); #ifdef ENABLE_DEBUGGER_SUPPORT - Debug* debug = fun->GetHeap()->isolate()->debug(); + Debug* debug = isolate->debug(); // Handle stepping into a getter if step into is active. // TODO(rossberg): should this apply to getters that are function proxies? 
if (debug->StepInActive() && fun->IsJSFunction()) { @@ -311,7 +462,7 @@ MaybeObject* Object::GetPropertyWithDefinedGetter(Object* receiver, MaybeObject* JSObject::GetPropertyWithFailedAccessCheck( Object* receiver, LookupResult* result, - String* name, + Name* name, PropertyAttributes* attributes) { if (result->IsProperty()) { switch (result->type()) { @@ -371,7 +522,7 @@ MaybeObject* JSObject::GetPropertyWithFailedAccessCheck( PropertyAttributes JSObject::GetPropertyAttributeWithFailedAccessCheck( Object* receiver, LookupResult* result, - String* name, + Name* name, bool continue_search) { if (result->IsProperty()) { switch (result->type()) { @@ -457,7 +608,7 @@ Object* JSObject::SetNormalizedProperty(LookupResult* result, Object* value) { Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object, - Handle<String> key, + Handle<Name> key, Handle<Object> value, PropertyDetails details) { CALL_HEAP_FUNCTION(object->GetIsolate(), @@ -466,12 +617,12 @@ Handle<Object> JSObject::SetNormalizedProperty(Handle<JSObject> object, } -MaybeObject* JSObject::SetNormalizedProperty(String* name, +MaybeObject* JSObject::SetNormalizedProperty(Name* name, Object* value, PropertyDetails details) { ASSERT(!HasFastProperties()); int entry = property_dictionary()->FindEntry(name); - if (entry == StringDictionary::kNotFound) { + if (entry == NameDictionary::kNotFound) { Object* store_value = value; if (IsGlobalObject()) { Heap* heap = name->GetHeap(); @@ -484,7 +635,7 @@ MaybeObject* JSObject::SetNormalizedProperty(String* name, property_dictionary()->Add(name, store_value, details); if (!maybe_dict->ToObject(&dict)) return maybe_dict; } - set_properties(StringDictionary::cast(dict)); + set_properties(NameDictionary::cast(dict)); return value; } @@ -515,11 +666,11 @@ MaybeObject* JSObject::SetNormalizedProperty(String* name, } -MaybeObject* JSObject::DeleteNormalizedProperty(String* name, DeleteMode mode) { +MaybeObject* JSObject::DeleteNormalizedProperty(Name* name, DeleteMode mode) { ASSERT(!HasFastProperties()); - StringDictionary* dictionary = property_dictionary(); + NameDictionary* dictionary = property_dictionary(); int entry = dictionary->FindEntry(name); - if (entry != StringDictionary::kNotFound) { + if (entry != NameDictionary::kNotFound) { // If we have a global object set the cell to the hole. if (IsGlobalObject()) { PropertyDetails details = dictionary->DetailsAt(entry); @@ -575,7 +726,7 @@ bool JSObject::IsDirty() { Handle<Object> Object::GetProperty(Handle<Object> object, Handle<Object> receiver, LookupResult* result, - Handle<String> key, + Handle<Name> key, PropertyAttributes* attributes) { Isolate* isolate = object->IsHeapObject() ? Handle<HeapObject>::cast(object)->GetIsolate() @@ -589,12 +740,13 @@ Handle<Object> Object::GetProperty(Handle<Object> object, MaybeObject* Object::GetProperty(Object* receiver, LookupResult* result, - String* name, + Name* name, PropertyAttributes* attributes) { // Make sure that the top context does not change when doing // callbacks or interceptor calls. AssertNoContextChange ncc; - Heap* heap = name->GetHeap(); + Isolate* isolate = name->GetIsolate(); + Heap* heap = isolate->heap(); // Traverse the prototype chain from the current object (this) to // the holder and check for access rights. This avoids traversing the @@ -604,11 +756,13 @@ MaybeObject* Object::GetProperty(Object* receiver, // holder in the prototype chain. // Proxy handlers do not use the proxy's prototype, so we can skip this. 
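// The GetProperty path in this file now threads the isolate through
// GetPrototype(isolate) while walking from the receiver toward the holder and stops
// at the null value. A stripped-down sketch of that prototype-chain walk, with a
// simple linked structure and a name-to-value table standing in for JSObject and
// its map.
#include <cstdio>
#include <map>
#include <string>

// Minimal stand-in for an object with named properties and a prototype link.
struct ObjectSketch {
  std::map<std::string, int> properties;
  const ObjectSketch* prototype;  // nullptr plays the role of the null value
};

// Walk the chain from the receiver until a holder with the property is found,
// mirroring the loop shape in Object::GetProperty.
static bool Lookup(const ObjectSketch* receiver, const std::string& name,
                   int* value) {
  for (const ObjectSketch* current = receiver; current != nullptr;
       current = current->prototype) {
    std::map<std::string, int>::const_iterator it =
        current->properties.find(name);
    if (it != current->properties.end()) {
      *value = it->second;
      return true;
    }
  }
  return false;  // fell off the chain: undefined
}

int main() {
  ObjectSketch proto = {{{"x", 1}}, nullptr};
  ObjectSketch receiver = {{{"y", 2}}, &proto};
  int value = 0;
  std::printf("found x: %d, value %d\n", Lookup(&receiver, "x", &value), value);
  return 0;
}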
if (!result->IsHandler()) { - Object* last = result->IsProperty() + Object* last = result->IsProperty() && !receiver->IsSymbol() ? result->holder() : Object::cast(heap->null_value()); - ASSERT(this != this->GetPrototype()); - for (Object* current = this; true; current = current->GetPrototype()) { + ASSERT(this != this->GetPrototype(isolate)); + for (Object* current = this; + true; + current = current->GetPrototype(isolate)) { if (current->IsAccessCheckNeeded()) { // Check if we're allowed to read from the current object. Note // that even though we may not actually end up loading the named @@ -641,7 +795,8 @@ MaybeObject* Object::GetProperty(Object* receiver, ASSERT(!value->IsTheHole() || result->IsReadOnly()); return value->IsTheHole() ? heap->undefined_value() : value; case FIELD: - value = result->holder()->FastPropertyAt(result->GetFieldIndex()); + value = result->holder()->FastPropertyAt( + result->GetFieldIndex().field_index()); ASSERT(!value->IsTheHole() || result->IsReadOnly()); return value->IsTheHole() ? heap->undefined_value() : value; case CONSTANT_FUNCTION: @@ -665,18 +820,18 @@ MaybeObject* Object::GetProperty(Object* receiver, MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) { - Heap* heap = IsSmi() - ? Isolate::Current()->heap() - : HeapObject::cast(this)->GetHeap(); + Isolate* isolate = IsSmi() + ? Isolate::Current() + : HeapObject::cast(this)->GetIsolate(); + Heap* heap = isolate->heap(); Object* holder = this; // Iterate up the prototype chain until an element is found or the null // prototype is encountered. for (holder = this; holder != heap->null_value(); - holder = holder->GetPrototype()) { + holder = holder->GetPrototype(isolate)) { if (!holder->IsJSObject()) { - Isolate* isolate = heap->isolate(); Context* native_context = isolate->context()->native_context(); if (holder->IsNumber()) { holder = native_context->number_function()->instance_prototype(); @@ -684,6 +839,8 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) { holder = native_context->string_function()->instance_prototype(); } else if (holder->IsBoolean()) { holder = native_context->boolean_function()->instance_prototype(); + } else if (holder->IsSymbol()) { + holder = native_context->symbol_delegate(); } else if (holder->IsJSProxy()) { return JSProxy::cast(holder)->GetElementWithHandler(receiver, index); } else { @@ -722,10 +879,9 @@ MaybeObject* Object::GetElementWithReceiver(Object* receiver, uint32_t index) { } -Object* Object::GetPrototype() { +Object* Object::GetPrototype(Isolate* isolate) { if (IsSmi()) { - Heap* heap = Isolate::Current()->heap(); - Context* context = heap->isolate()->context()->native_context(); + Context* context = isolate->context()->native_context(); return context->number_function()->instance_prototype(); } @@ -736,8 +892,7 @@ Object* Object::GetPrototype() { if (heap_object->IsJSReceiver()) { return heap_object->map()->prototype(); } - Heap* heap = heap_object->GetHeap(); - Context* context = heap->isolate()->context()->native_context(); + Context* context = isolate->context()->native_context(); if (heap_object->IsHeapNumber()) { return context->number_function()->instance_prototype(); @@ -748,20 +903,30 @@ Object* Object::GetPrototype() { if (heap_object->IsBoolean()) { return context->boolean_function()->instance_prototype(); } else { - return heap->null_value(); + return isolate->heap()->null_value(); + } +} + + +Object* Object::GetDelegate(Isolate* isolate) { + if (IsSymbol()) { + Heap* heap = 
Symbol::cast(this)->GetHeap(); + Context* context = heap->isolate()->context()->native_context(); + return context->symbol_delegate(); } + return GetPrototype(isolate); } MaybeObject* Object::GetHash(CreationFlag flag) { - // The object is either a number, a string, an odd-ball, + // The object is either a number, a name, an odd-ball, // a real JS object, or a Harmony proxy. if (IsNumber()) { uint32_t hash = ComputeLongHash(double_to_uint64(Number())); return Smi::FromInt(hash & Smi::kMaxValue); } - if (IsString()) { - uint32_t hash = String::cast(this)->Hash(); + if (IsName()) { + uint32_t hash = Name::cast(this)->Hash(); return Smi::FromInt(hash); } if (IsOddball()) { @@ -780,7 +945,7 @@ MaybeObject* Object::GetHash(CreationFlag flag) { bool Object::SameValue(Object* other) { if (other == this) return true; - // The object is either a number, a string, an odd-ball, + // The object is either a number, a name, an odd-ball, // a real JS object, or a Harmony proxy. if (IsNumber() && other->IsNumber()) { double this_value = Number(); @@ -881,14 +1046,15 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) { int len = length(); Object* object; String* result; - if (IsAsciiRepresentation()) { - { MaybeObject* maybe_object = heap->AllocateRawAsciiString(len, tenure); + if (IsOneByteRepresentation()) { + { MaybeObject* maybe_object = + heap->AllocateRawOneByteString(len, tenure); if (!maybe_object->ToObject(&object)) return maybe_object; } result = String::cast(object); String* first = cs->first(); int first_length = first->length(); - char* dest = SeqAsciiString::cast(result)->GetChars(); + uint8_t* dest = SeqOneByteString::cast(result)->GetChars(); WriteToFlat(first, dest, 0, first_length); String* second = cs->second(); WriteToFlat(second, @@ -941,29 +1107,34 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) { if (size < ExternalString::kShortSize) { return false; } - bool is_ascii = this->IsAsciiRepresentation(); - bool is_symbol = this->IsSymbol(); + bool is_ascii = this->IsOneByteRepresentation(); + bool is_internalized = this->IsInternalizedString(); // Morph the object to an external string by adjusting the map and // reinitializing the fields. if (size >= ExternalString::kSize) { this->set_map_no_write_barrier( - is_symbol - ? (is_ascii ? heap->external_symbol_with_ascii_data_map() - : heap->external_symbol_map()) - : (is_ascii ? heap->external_string_with_ascii_data_map() - : heap->external_string_map())); + is_internalized + ? (is_ascii + ? heap->external_internalized_string_with_ascii_data_map() + : heap->external_internalized_string_map()) + : (is_ascii + ? heap->external_string_with_ascii_data_map() + : heap->external_string_map())); } else { this->set_map_no_write_barrier( - is_symbol - ? (is_ascii ? heap->short_external_symbol_with_ascii_data_map() - : heap->short_external_symbol_map()) - : (is_ascii ? heap->short_external_string_with_ascii_data_map() - : heap->short_external_string_map())); + is_internalized + ? (is_ascii + ? heap-> + short_external_internalized_string_with_ascii_data_map() + : heap->short_external_internalized_string_map()) + : (is_ascii + ? heap->short_external_string_with_ascii_data_map() + : heap->short_external_string_map())); } ExternalTwoByteString* self = ExternalTwoByteString::cast(this); self->set_resource(resource); - if (is_symbol) self->Hash(); // Force regeneration of the hash value. + if (is_internalized) self->Hash(); // Force regeneration of the hash value. // Fill the remainder of the string with dead wood. 
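// SlowTryFlatten above allocates one sequential string and uses WriteToFlat to copy
// the first and second halves of a cons string into it. A small sketch of that
// flattening step using std::string pieces; purely illustrative, since the real code
// writes into heap-allocated SeqOneByteString/SeqTwoByteString payloads.
#include <cstdio>
#include <string>

// A cons node is either a leaf with flat contents or a pair of children.
struct ConsSketch {
  std::string flat;  // used when first/second are null
  const ConsSketch* first;
  const ConsSketch* second;
};

// Analogue of WriteToFlat: append the characters of a (possibly nested) cons tree
// to one contiguous destination buffer.
static void WriteToFlat(const ConsSketch* str, std::string* dest) {
  if (str->first == nullptr) {
    dest->append(str->flat);
    return;
  }
  WriteToFlat(str->first, dest);
  WriteToFlat(str->second, dest);
}

int main() {
  ConsSketch hello = {"hello, ", nullptr, nullptr};
  ConsSketch world = {"world", nullptr, nullptr};
  ConsSketch cons = {"", &hello, &world};
  std::string flattened;
  flattened.reserve(hello.flat.size() + world.flat.size());  // one allocation
  WriteToFlat(&cons, &flattened);
  std::printf("%s\n", flattened.c_str());  // hello, world
  return 0;
}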
int new_size = this->Size(); // Byte size of the external String object. @@ -993,22 +1164,22 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) { if (size < ExternalString::kShortSize) { return false; } - bool is_symbol = this->IsSymbol(); + bool is_internalized = this->IsInternalizedString(); // Morph the object to an external string by adjusting the map and // reinitializing the fields. Use short version if space is limited. if (size >= ExternalString::kSize) { this->set_map_no_write_barrier( - is_symbol ? heap->external_ascii_symbol_map() - : heap->external_ascii_string_map()); + is_internalized ? heap->external_ascii_internalized_string_map() + : heap->external_ascii_string_map()); } else { this->set_map_no_write_barrier( - is_symbol ? heap->short_external_ascii_symbol_map() - : heap->short_external_ascii_string_map()); + is_internalized ? heap->short_external_ascii_internalized_string_map() + : heap->short_external_ascii_string_map()); } ExternalAsciiString* self = ExternalAsciiString::cast(this); self->set_resource(resource); - if (is_symbol) self->Hash(); // Force regeneration of the hash value. + if (is_internalized) self->Hash(); // Force regeneration of the hash value. // Fill the remainder of the string with dead wood. int new_size = this->Size(); // Byte size of the external String object. @@ -1033,7 +1204,8 @@ void String::StringShortPrint(StringStream* accumulator) { return; } - StringInputBuffer buf(this); + ConsStringIteratorOp op; + StringCharacterStream stream(this, &op); bool truncated = false; if (len > kMaxShortPrintLength) { @@ -1042,17 +1214,17 @@ void String::StringShortPrint(StringStream* accumulator) { } bool ascii = true; for (int i = 0; i < len; i++) { - int c = buf.GetNext(); + uint16_t c = stream.GetNext(); if (c < 32 || c >= 127) { ascii = false; } } - buf.Reset(this); + stream.Reset(this); if (ascii) { accumulator->Add("<String[%u]: ", length()); for (int i = 0; i < len; i++) { - accumulator->Put(buf.GetNext()); + accumulator->Put(static_cast<char>(stream.GetNext())); } accumulator->Put('>'); } else { @@ -1060,7 +1232,7 @@ void String::StringShortPrint(StringStream* accumulator) { // characters and that backslashes are therefore escaped. accumulator->Add("<String[%u]\\: ", length()); for (int i = 0; i < len; i++) { - int c = buf.GetNext(); + uint16_t c = stream.GetNext(); if (c == '\n') { accumulator->Add("\\n"); } else if (c == '\r') { @@ -1070,7 +1242,7 @@ void String::StringShortPrint(StringStream* accumulator) { } else if (c < 32 || c > 126) { accumulator->Add("\\x%02x", c); } else { - accumulator->Put(c); + accumulator->Put(static_cast<char>(c)); } } if (truncated) { @@ -1118,6 +1290,10 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) { } break; } + case JS_MODULE_TYPE: { + accumulator->Add("<JS Module>"); + break; + } // All other JSObjects are rather similar to each other (JSObject, // JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue). 
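// HeapNumberBooleanValue now returns a plain bool but still classifies the double by
// its IEEE-754 bit pattern: an all-ones exponent with a non-zero mantissa is NaN, a
// zero exponent with a zero mantissa is +0 or -0, and both are false. A standalone
// sketch of the same test using a 64-bit mask instead of the bit-field union.
#include <cstdint>
#include <cstdio>
#include <cstring>

// false for NaN, +0 and -0; true for every other double, mirroring
// HeapNumber::HeapNumberBooleanValue but using masks instead of bit-fields.
static bool DoubleBooleanValue(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint64_t exponent = (bits >> 52) & 0x7ff;
  uint64_t mantissa = bits & 0xfffffffffffffULL;
  if (exponent == 0x7ff && mantissa != 0) return false;  // NaN
  if (exponent == 0 && mantissa == 0) return false;      // +0 or -0
  return true;
}

int main() {
  std::printf("%d %d %d %d\n",
              DoubleBooleanValue(0.0), DoubleBooleanValue(-0.0),
              DoubleBooleanValue(0.0 / 0.0), DoubleBooleanValue(1.5));
  return 0;  // prints: 0 0 0 1
}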
default: { @@ -1173,7 +1349,7 @@ void JSObject::PrintElementsTransition( PrintF(file, " -> "); PrintElementsKind(file, to_kind); PrintF(file, "] in "); - JavaScriptFrame::PrintTop(file, false, true); + JavaScriptFrame::PrintTop(GetIsolate(), file, false, true); PrintF(file, " for "); ShortPrint(file); PrintF(file, " from "); @@ -1291,6 +1467,9 @@ void HeapObject::HeapObjectShortPrint(StringStream* accumulator) { accumulator->Add("<Odd Oddball>"); break; } + case SYMBOL_TYPE: + accumulator->Add("<Symbol: %d>", Symbol::cast(this)->Hash()); + break; case HEAP_NUMBER_TYPE: accumulator->Add("<Number: "); HeapNumber::cast(this)->HeapNumberPrint(accumulator); @@ -1340,7 +1519,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size, SlicedString::BodyDescriptor::IterateBody(this, v); break; case kExternalStringTag: - if ((type & kStringEncodingMask) == kAsciiStringTag) { + if ((type & kStringEncodingMask) == kOneByteStringTag) { reinterpret_cast<ExternalAsciiString*>(this)-> ExternalAsciiStringIterateBody(v); } else { @@ -1399,6 +1578,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size, case JS_GLOBAL_PROPERTY_CELL_TYPE: JSGlobalPropertyCell::BodyDescriptor::IterateBody(this, v); break; + case SYMBOL_TYPE: case HEAP_NUMBER_TYPE: case FILLER_TYPE: case BYTE_ARRAY_TYPE: @@ -1431,7 +1611,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size, } -Object* HeapNumber::HeapNumberToBoolean() { +bool HeapNumber::HeapNumberBooleanValue() { // NaN, +0, and -0 should return the false object #if __BYTE_ORDER == __LITTLE_ENDIAN union IeeeDoubleLittleEndianArchType u; @@ -1441,15 +1621,13 @@ Object* HeapNumber::HeapNumberToBoolean() { u.d = value(); if (u.bits.exp == 2047) { // Detect NaN for IEEE double precision floating point. - if ((u.bits.man_low | u.bits.man_high) != 0) - return GetHeap()->false_value(); + if ((u.bits.man_low | u.bits.man_high) != 0) return false; } if (u.bits.exp == 0) { // Detect +0, and -0 for IEEE double precision floating point. - if ((u.bits.man_low | u.bits.man_high) == 0) - return GetHeap()->false_value(); + if ((u.bits.man_low | u.bits.man_high) == 0) return false; } - return GetHeap()->true_value(); + return true; } @@ -1473,14 +1651,14 @@ void HeapNumber::HeapNumberPrint(StringStream* accumulator) { String* JSReceiver::class_name() { if (IsJSFunction() && IsJSFunctionProxy()) { - return GetHeap()->function_class_symbol(); + return GetHeap()->function_class_string(); } if (map()->constructor()->IsJSFunction()) { JSFunction* constructor = JSFunction::cast(map()->constructor()); return String::cast(constructor->shared()->instance_class_name()); } // If the constructor is not present, return "Object". - return GetHeap()->Object_symbol(); + return GetHeap()->Object_string(); } @@ -1496,12 +1674,12 @@ String* JSReceiver::constructor_name() { } // TODO(rossberg): what about proxies? // If the constructor is not present, return "Object". - return GetHeap()->Object_symbol(); + return GetHeap()->Object_string(); } MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map, - String* name, + Name* name, Object* value, int field_index) { if (map()->unused_property_fields() == 0) { @@ -1518,15 +1696,18 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* new_map, } -static bool IsIdentifier(UnicodeCache* cache, - unibrow::CharacterStream* buffer) { +static bool IsIdentifier(UnicodeCache* cache, Name* name) { // Checks whether the buffer contains an identifier (no escape). 
- if (!buffer->has_more()) return false; - if (!cache->IsIdentifierStart(buffer->GetNext())) { + if (!name->IsString()) return false; + String* string = String::cast(name); + if (string->length() == 0) return false; + ConsStringIteratorOp op; + StringCharacterStream stream(string, &op); + if (!cache->IsIdentifierStart(stream.GetNext())) { return false; } - while (buffer->has_more()) { - if (!cache->IsIdentifierPart(buffer->GetNext())) { + while (stream.HasMore()) { + if (!cache->IsIdentifierPart(stream.GetNext())) { return false; } } @@ -1534,7 +1715,7 @@ static bool IsIdentifier(UnicodeCache* cache, } -MaybeObject* JSObject::AddFastProperty(String* name, +MaybeObject* JSObject::AddFastProperty(Name* name, Object* value, PropertyAttributes attributes, StoreFromKeyed store_mode) { @@ -1543,13 +1724,12 @@ MaybeObject* JSObject::AddFastProperty(String* name, map()->instance_descriptors()->Search( name, map()->NumberOfOwnDescriptors())); - // Normalize the object if the name is an actual string (not the - // hidden symbols) and is not a real identifier. + // Normalize the object if the name is an actual name (not the + // hidden strings) and is not a real identifier. // Normalize the object if it will have too many fast properties. Isolate* isolate = GetHeap()->isolate(); - StringInputBuffer buffer(name); - if ((!IsIdentifier(isolate->unicode_cache(), &buffer) - && name != isolate->heap()->hidden_symbol()) || + if ((!IsIdentifier(isolate->unicode_cache(), name) + && name != isolate->heap()->hidden_string()) || (map()->unused_property_fields() == 0 && TooManyFastProperties(properties()->length(), store_mode))) { Object* obj; @@ -1579,10 +1759,7 @@ MaybeObject* JSObject::AddFastProperty(String* name, if (!maybe_values->To(&values)) return maybe_values; } - // Only allow map transition if the object isn't the global object. - TransitionFlag flag = isolate->empty_object_map() != map() - ? INSERT_TRANSITION - : OMIT_TRANSITION; + TransitionFlag flag = INSERT_TRANSITION; Map* new_map; MaybeObject* maybe_new_map = map()->CopyAddDescriptor(&new_field, flag); @@ -1602,21 +1779,17 @@ MaybeObject* JSObject::AddFastProperty(String* name, MaybeObject* JSObject::AddConstantFunctionProperty( - String* name, + Name* name, JSFunction* function, PropertyAttributes attributes) { // Allocate new instance descriptors with (name, function) added ConstantFunctionDescriptor d(name, function, attributes, 0); - Heap* heap = GetHeap(); TransitionFlag flag = - // Do not add transitions to the empty object map (map of "new Object()"), - // nor to global objects. - (map() == heap->isolate()->empty_object_map() || IsGlobalObject() || + // Do not add transitions to global objects. + (IsGlobalObject() || // Don't add transitions to special properties with non-trivial // attributes. - // TODO(verwaest): Once we support attribute changes, these transitions - // should be kept as well. attributes != NONE) ? OMIT_TRANSITION : INSERT_TRANSITION; @@ -1631,16 +1804,16 @@ MaybeObject* JSObject::AddConstantFunctionProperty( // Add property in slow mode -MaybeObject* JSObject::AddSlowProperty(String* name, +MaybeObject* JSObject::AddSlowProperty(Name* name, Object* value, PropertyAttributes attributes) { ASSERT(!HasFastProperties()); - StringDictionary* dict = property_dictionary(); + NameDictionary* dict = property_dictionary(); Object* store_value = value; if (IsGlobalObject()) { // In case name is an orphaned property reuse the cell. 
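// The rewritten IsIdentifier helper earlier in this hunk streams the name's
// characters, checking the first as an identifier start and the rest as identifier
// parts, to decide whether the property can keep a fast representation. A simplified
// ASCII-only sketch of the same shape of check; the real code defers to UnicodeCache
// for the character classes.
#include <cctype>
#include <cstdio>
#include <string>

// ASCII approximations of the identifier-start / identifier-part classes.
static bool IsIdentifierStart(char c) {
  return std::isalpha(static_cast<unsigned char>(c)) || c == '_' || c == '$';
}
static bool IsIdentifierPart(char c) {
  return IsIdentifierStart(c) || std::isdigit(static_cast<unsigned char>(c));
}

// Same structure as the helper above: non-empty, valid start character, then only
// valid part characters.
static bool IsIdentifier(const std::string& name) {
  if (name.empty()) return false;
  if (!IsIdentifierStart(name[0])) return false;
  for (size_t i = 1; i < name.size(); i++) {
    if (!IsIdentifierPart(name[i])) return false;
  }
  return true;
}

int main() {
  std::printf("%d %d %d\n", IsIdentifier("fooBar1"), IsIdentifier("1foo"),
              IsIdentifier("a-b"));  // 1 0 0
  return 0;
}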
int entry = dict->FindEntry(name); - if (entry != StringDictionary::kNotFound) { + if (entry != NameDictionary::kNotFound) { store_value = dict->ValueAt(entry); JSGlobalPropertyCell::cast(store_value)->set_value(value); // Assign an enumeration index to the property and update @@ -1663,12 +1836,12 @@ MaybeObject* JSObject::AddSlowProperty(String* name, { MaybeObject* maybe_result = dict->Add(name, store_value, details); if (!maybe_result->ToObject(&result)) return maybe_result; } - if (dict != result) set_properties(StringDictionary::cast(result)); + if (dict != result) set_properties(NameDictionary::cast(result)); return value; } -MaybeObject* JSObject::AddProperty(String* name, +MaybeObject* JSObject::AddProperty(Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -1677,44 +1850,93 @@ MaybeObject* JSObject::AddProperty(String* name, ASSERT(!IsJSGlobalProxy()); Map* map_of_this = map(); Heap* heap = GetHeap(); + Isolate* isolate = heap->isolate(); + MaybeObject* result; if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK && !map_of_this->is_extensible()) { if (strict_mode == kNonStrictMode) { return value; } else { - Handle<Object> args[1] = {Handle<String>(name)}; - return heap->isolate()->Throw( + Handle<Object> args[1] = {Handle<Name>(name)}; + return isolate->Throw( *FACTORY->NewTypeError("object_not_extensible", HandleVector(args, 1))); } } + if (HasFastProperties()) { // Ensure the descriptor array does not get too big. if (map_of_this->NumberOfOwnDescriptors() < DescriptorArray::kMaxNumberOfDescriptors) { if (value->IsJSFunction()) { - return AddConstantFunctionProperty(name, - JSFunction::cast(value), - attributes); + result = AddConstantFunctionProperty(name, + JSFunction::cast(value), + attributes); } else { - return AddFastProperty(name, value, attributes, store_mode); + result = AddFastProperty(name, value, attributes, store_mode); } } else { // Normalize the object to prevent very large instance descriptors. // This eliminates unwanted N^2 allocation and lookup behavior. Object* obj; - { MaybeObject* maybe_obj = - NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); + if (!maybe->To(&obj)) return maybe; + result = AddSlowProperty(name, value, attributes); } + } else { + result = AddSlowProperty(name, value, attributes); + } + + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + if (FLAG_harmony_observation && map()->is_observed()) { + EnqueueChangeRecord(handle(this, isolate), + "new", + handle(name, isolate), + handle(heap->the_hole_value(), isolate)); + } + + return *hresult; +} + + +void JSObject::EnqueueChangeRecord(Handle<JSObject> object, + const char* type_str, + Handle<Name> name, + Handle<Object> old_value) { + Isolate* isolate = object->GetIsolate(); + HandleScope scope(isolate); + Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str); + if (object->IsJSGlobalObject()) { + object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate); } - return AddSlowProperty(name, value, attributes); + Handle<Object> args[] = { type, object, name, old_value }; + bool threw; + Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()), + isolate->factory()->undefined_value(), + old_value->IsTheHole() ? 
3 : 4, args, + &threw); + ASSERT(!threw); +} + + +void JSObject::DeliverChangeRecords(Isolate* isolate) { + ASSERT(isolate->observer_delivery_pending()); + bool threw = false; + Execution::Call( + isolate->observers_deliver_changes(), + isolate->factory()->undefined_value(), + 0, + NULL, + &threw); + ASSERT(!threw); + isolate->set_observer_delivery_pending(false); } MaybeObject* JSObject::SetPropertyPostInterceptor( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -1739,10 +1961,10 @@ MaybeObject* JSObject::SetPropertyPostInterceptor( } -MaybeObject* JSObject::ReplaceSlowProperty(String* name, +MaybeObject* JSObject::ReplaceSlowProperty(Name* name, Object* value, PropertyAttributes attributes) { - StringDictionary* dictionary = property_dictionary(); + NameDictionary* dictionary = property_dictionary(); int old_index = dictionary->FindEntry(name); int new_enumeration_index = 0; // 0 means "Use the next available index." if (old_index != -1) { @@ -1757,7 +1979,7 @@ MaybeObject* JSObject::ReplaceSlowProperty(String* name, MaybeObject* JSObject::ConvertTransitionToMapTransition( int transition_index, - String* name, + Name* name, Object* new_value, PropertyAttributes attributes) { Map* old_map = map(); @@ -1770,10 +1992,8 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition( if (!HasFastProperties()) return result; - // This method should only be used to convert existing transitions. Objects - // with the map of "new Object()" cannot have transitions in the first place. + // This method should only be used to convert existing transitions. Map* new_map = map(); - ASSERT(new_map != GetIsolate()->empty_object_map()); // TODO(verwaest): From here on we lose existing map transitions, causing // invalid back pointers. This will change once we can store multiple @@ -1805,7 +2025,7 @@ MaybeObject* JSObject::ConvertTransitionToMapTransition( } -MaybeObject* JSObject::ConvertDescriptorToField(String* name, +MaybeObject* JSObject::ConvertDescriptorToField(Name* name, Object* new_value, PropertyAttributes attributes) { if (map()->unused_property_fields() == 0 && @@ -1848,14 +2068,16 @@ MaybeObject* JSObject::ConvertDescriptorToField(String* name, MaybeObject* JSObject::SetPropertyWithInterceptor( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode) { + // TODO(rossberg): Support symbols in the API. 
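// EnqueueChangeRecord and DeliverChangeRecords above are the harmony observation
// plumbing: each observed mutation produces a record of roughly {type, name, oldValue}
// (the old value is omitted for "new" properties), and pending records are later
// delivered in one batch. A rough standalone sketch of that enqueue/deliver split,
// with a plain vector and callback standing in for the JS-side observer machinery.
#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// One pending notification, loosely matching {type, name, oldValue}.
struct ChangeRecord {
  std::string type;  // e.g. "new" or "updated"
  std::string name;
  bool has_old_value;
  int old_value;
};

class ChangeObserver {
 public:
  void Enqueue(const ChangeRecord& record) { pending_.push_back(record); }
  // Analogue of DeliverChangeRecords: flush everything in one batch.
  void Deliver(const std::function<void(const ChangeRecord&)>& callback) {
    for (size_t i = 0; i < pending_.size(); i++) callback(pending_[i]);
    pending_.clear();
  }
 private:
  std::vector<ChangeRecord> pending_;
};

int main() {
  ChangeObserver observer;
  observer.Enqueue({"new", "x", false, 0});
  observer.Enqueue({"updated", "x", true, 1});
  observer.Deliver([](const ChangeRecord& r) {
    std::printf("%s %s\n", r.type.c_str(), r.name.c_str());
  });
  return 0;
}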
+ if (name->IsSymbol()) return value; Isolate* isolate = GetIsolate(); HandleScope scope(isolate); Handle<JSObject> this_handle(this); - Handle<String> name_handle(name); + Handle<String> name_handle(String::cast(name)); Handle<Object> value_handle(value, isolate); Handle<InterceptorInfo> interceptor(GetNamedInterceptor()); if (!interceptor->setter()->IsUndefined()) { @@ -1891,7 +2113,7 @@ MaybeObject* JSObject::SetPropertyWithInterceptor( Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object, - Handle<String> key, + Handle<Name> key, Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode) { @@ -1901,13 +2123,13 @@ Handle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object, } -MaybeObject* JSReceiver::SetProperty(String* name, +MaybeObject* JSReceiver::SetProperty(Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, JSReceiver::StoreFromKeyed store_mode) { LookupResult result(GetIsolate()); - LocalLookup(name, &result); + LocalLookup(name, &result, true); if (!result.IsFound()) { map()->LookupTransition(JSObject::cast(this), name, &result); } @@ -1916,7 +2138,7 @@ MaybeObject* JSReceiver::SetProperty(String* name, MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, - String* name, + Name* name, Object* value, JSObject* holder, StrictModeFlag strict_mode) { @@ -1941,12 +2163,12 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, return *value_handle; } - if (structure->IsAccessorInfo()) { + if (structure->IsExecutableAccessorInfo()) { // api style callbacks - AccessorInfo* data = AccessorInfo::cast(structure); + ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(structure); if (!data->IsCompatibleReceiver(this)) { - Handle<Object> name_handle(name); - Handle<Object> receiver_handle(this); + Handle<Object> name_handle(name, isolate); + Handle<Object> receiver_handle(this, isolate); Handle<Object> args[2] = { name_handle, receiver_handle }; Handle<Object> error = isolate->factory()->NewTypeError("incompatible_method_receiver", @@ -1954,10 +2176,12 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, ARRAY_SIZE(args))); return isolate->Throw(*error); } + // TODO(rossberg): Support symbols in the API. + if (name->IsSymbol()) return value; Object* call_obj = data->setter(); v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj); if (call_fun == NULL) return value; - Handle<String> key(name); + Handle<String> key(String::cast(name)); LOG(isolate, ApiNamedPropertyAccess("store", this, name)); CustomArguments args(isolate, data->data(), this, JSObject::cast(holder)); v8::AccessorInfo info(args.end()); @@ -1981,7 +2205,7 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, if (strict_mode == kNonStrictMode) { return value; } - Handle<String> key(name); + Handle<Name> key(name); Handle<Object> holder_handle(holder, isolate); Handle<Object> args[2] = { key, holder_handle }; return isolate->Throw( @@ -1990,6 +2214,11 @@ MaybeObject* JSObject::SetPropertyWithCallback(Object* structure, } } + // TODO(dcarney): Handle correctly. 
+ if (structure->IsDeclaredAccessorInfo()) { + return value; + } + UNREACHABLE(); return NULL; } @@ -2027,10 +2256,10 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes( Heap* heap = GetHeap(); for (Object* pt = GetPrototype(); pt != heap->null_value(); - pt = pt->GetPrototype()) { + pt = pt->GetPrototype(GetIsolate())) { if (pt->IsJSProxy()) { String* name; - MaybeObject* maybe = GetHeap()->Uint32ToString(index); + MaybeObject* maybe = heap->Uint32ToString(index); if (!maybe->To<String>(&name)) { *found = true; // Force abort return maybe; @@ -2061,7 +2290,7 @@ MaybeObject* JSObject::SetElementWithCallbackSetterInPrototypes( } MaybeObject* JSObject::SetPropertyViaPrototypes( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -2110,7 +2339,8 @@ MaybeObject* JSObject::SetPropertyViaPrototypes( if (!FLAG_es5_readonly) *done = false; if (*done) { if (strict_mode == kNonStrictMode) return value; - Handle<Object> args[] = { Handle<Object>(name), Handle<Object>(this)}; + Handle<Object> args[] = { Handle<Object>(name, isolate), + Handle<Object>(this, isolate)}; return isolate->Throw(*isolate->factory()->NewTypeError( "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)))); } @@ -2193,15 +2423,17 @@ void Map::AppendCallbackDescriptors(Handle<Map> map, ASSERT(array->NumberOfSlackDescriptors() >= nof_callbacks); - // Ensure the keys are symbols before writing them into the instance - // descriptor. Since it may cause a GC, it has to be done before we + // Ensure the keys are unique names before writing them into the + // instance descriptor. Since it may cause a GC, it has to be done before we // temporarily put the heap in an invalid state while appending descriptors. for (int i = 0; i < nof_callbacks; ++i) { Handle<AccessorInfo> entry(AccessorInfo::cast(callbacks.get(i))); - Handle<String> key = - isolate->factory()->SymbolFromString( - Handle<String>(String::cast(entry->name()))); - entry->set_name(*key); + if (!entry->name()->IsUniqueName()) { + Handle<String> key = + isolate->factory()->InternalizedStringFromString( + Handle<String>(String::cast(entry->name()))); + entry->set_name(*key); + } } int nof = map->NumberOfOwnDescriptors(); @@ -2211,7 +2443,7 @@ void Map::AppendCallbackDescriptors(Handle<Map> map, // precedence over previously added callbacks with that name. for (int i = nof_callbacks - 1; i >= 0; i--) { AccessorInfo* entry = AccessorInfo::cast(callbacks.get(i)); - String* key = String::cast(entry->name()); + Name* key = Name::cast(entry->name()); // Check if a descriptor with this name already exists before writing. if (array->Search(key, nof) == DescriptorArray::kNotFound) { CallbacksDescriptor desc(key, entry, entry->property_attributes()); @@ -2344,10 +2576,8 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) { } bool allow_store_transition = - // Only remember the map transition if the object's map is NOT equal to - // the global object_function's map and there is not an already existing + // Only remember the map transition if there is not an already existing // non-matching element transition. 
- (GetIsolate()->empty_object_map() != map()) && !start_map->IsUndefined() && !start_map->is_shared() && IsFastElementsKind(from_kind); @@ -2372,8 +2602,7 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) { } -void JSObject::LocalLookupRealNamedProperty(String* name, - LookupResult* result) { +void JSObject::LocalLookupRealNamedProperty(Name* name, LookupResult* result) { if (IsJSGlobalProxy()) { Object* proto = GetPrototype(); if (proto->IsNull()) return result->NotFound(); @@ -2393,14 +2622,14 @@ void JSObject::LocalLookupRealNamedProperty(String* name, // occur as fields. if (result->IsField() && result->IsReadOnly() && - FastPropertyAt(result->GetFieldIndex())->IsTheHole()) { + FastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) { result->DisallowCaching(); } return; } int entry = property_dictionary()->FindEntry(name); - if (entry != StringDictionary::kNotFound) { + if (entry != NameDictionary::kNotFound) { Object* value = property_dictionary()->ValueAt(entry); if (IsGlobalObject()) { PropertyDetails d = property_dictionary()->DetailsAt(entry); @@ -2421,7 +2650,7 @@ void JSObject::LocalLookupRealNamedProperty(String* name, } -void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) { +void JSObject::LookupRealNamedProperty(Name* name, LookupResult* result) { LocalLookupRealNamedProperty(name, result); if (result->IsFound()) return; @@ -2429,12 +2658,13 @@ void JSObject::LookupRealNamedProperty(String* name, LookupResult* result) { } -void JSObject::LookupRealNamedPropertyInPrototypes(String* name, +void JSObject::LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result) { - Heap* heap = GetHeap(); + Isolate* isolate = GetIsolate(); + Heap* heap = isolate->heap(); for (Object* pt = GetPrototype(); pt != heap->null_value(); - pt = pt->GetPrototype()) { + pt = pt->GetPrototype(isolate)) { if (pt->IsJSProxy()) { return result->HandlerResult(JSProxy::cast(pt)); } @@ -2449,7 +2679,7 @@ void JSObject::LookupRealNamedPropertyInPrototypes(String* name, // We only need to deal with CALLBACKS and INTERCEPTORS MaybeObject* JSObject::SetPropertyWithFailedAccessCheck( LookupResult* result, - String* name, + Name* name, Object* value, bool check_prototype, StrictModeFlag strict_mode) { @@ -2497,14 +2727,14 @@ MaybeObject* JSObject::SetPropertyWithFailedAccessCheck( Isolate* isolate = GetIsolate(); HandleScope scope(isolate); - Handle<Object> value_handle(value); + Handle<Object> value_handle(value, isolate); isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET); return *value_handle; } MaybeObject* JSReceiver::SetProperty(LookupResult* result, - String* key, + Name* key, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -2519,32 +2749,32 @@ MaybeObject* JSReceiver::SetProperty(LookupResult* result, } -bool JSProxy::HasPropertyWithHandler(String* name_raw) { +bool JSProxy::HasPropertyWithHandler(Name* name_raw) { Isolate* isolate = GetIsolate(); HandleScope scope(isolate); - Handle<Object> receiver(this); - Handle<Object> name(name_raw); + Handle<Object> receiver(this, isolate); + Handle<Object> name(name_raw, isolate); Handle<Object> args[] = { name }; Handle<Object> result = CallTrap( "has", isolate->derived_has_trap(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return false; - return result->ToBoolean()->IsTrue(); + return result->BooleanValue(); } MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler( JSReceiver* receiver_raw, - String* name_raw, + Name* name_raw, 
Object* value_raw, PropertyAttributes attributes, StrictModeFlag strict_mode) { Isolate* isolate = GetIsolate(); HandleScope scope(isolate); Handle<JSReceiver> receiver(receiver_raw); - Handle<Object> name(name_raw); - Handle<Object> value(value_raw); + Handle<Object> name(name_raw, isolate); + Handle<Object> value(value_raw, isolate); Handle<Object> args[] = { receiver, name, value }; CallTrap("set", isolate->derived_set_trap(), ARRAY_SIZE(args), args); @@ -2556,7 +2786,7 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyWithHandler( MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( JSReceiver* receiver_raw, - String* name_raw, + Name* name_raw, Object* value_raw, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -2564,9 +2794,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( Isolate* isolate = GetIsolate(); Handle<JSProxy> proxy(this); Handle<JSReceiver> receiver(receiver_raw); - Handle<String> name(name_raw); - Handle<Object> value(value_raw); - Handle<Object> handler(this->handler()); // Trap might morph proxy. + Handle<Name> name(name_raw); + Handle<Object> value(value_raw, isolate); + Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy. *done = true; // except where redefined... Handle<Object> args[] = { name }; @@ -2589,14 +2819,16 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( // [[GetProperty]] requires to check that all properties are configurable. Handle<String> configurable_name = - isolate->factory()->LookupAsciiSymbol("configurable_"); + isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("configurable_")); Handle<Object> configurable( - v8::internal::GetProperty(desc, configurable_name)); + v8::internal::GetProperty(isolate, desc, configurable_name)); ASSERT(!isolate->has_pending_exception()); ASSERT(configurable->IsTrue() || configurable->IsFalse()); if (configurable->IsFalse()) { Handle<String> trap = - isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor"); + isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("getPropertyDescriptor")); Handle<Object> args[] = { handler, trap, name }; Handle<Object> error = isolate->factory()->NewTypeError( "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args))); @@ -2606,14 +2838,18 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( // Check for DataDescriptor. Handle<String> hasWritable_name = - isolate->factory()->LookupAsciiSymbol("hasWritable_"); - Handle<Object> hasWritable(v8::internal::GetProperty(desc, hasWritable_name)); + isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("hasWritable_")); + Handle<Object> hasWritable( + v8::internal::GetProperty(isolate, desc, hasWritable_name)); ASSERT(!isolate->has_pending_exception()); ASSERT(hasWritable->IsTrue() || hasWritable->IsFalse()); if (hasWritable->IsTrue()) { Handle<String> writable_name = - isolate->factory()->LookupAsciiSymbol("writable_"); - Handle<Object> writable(v8::internal::GetProperty(desc, writable_name)); + isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("writable_")); + Handle<Object> writable( + v8::internal::GetProperty(isolate, desc, writable_name)); ASSERT(!isolate->has_pending_exception()); ASSERT(writable->IsTrue() || writable->IsFalse()); *done = writable->IsFalse(); @@ -2626,8 +2862,9 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( } // We have an AccessorDescriptor. 
- Handle<String> set_name = isolate->factory()->LookupAsciiSymbol("set_"); - Handle<Object> setter(v8::internal::GetProperty(desc, set_name)); + Handle<String> set_name = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("set_")); + Handle<Object> setter(v8::internal::GetProperty(isolate, desc, set_name)); ASSERT(!isolate->has_pending_exception()); if (!setter->IsUndefined()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... @@ -2644,28 +2881,29 @@ MUST_USE_RESULT MaybeObject* JSProxy::SetPropertyViaPrototypesWithHandler( MUST_USE_RESULT MaybeObject* JSProxy::DeletePropertyWithHandler( - String* name_raw, DeleteMode mode) { + Name* name_raw, DeleteMode mode) { Isolate* isolate = GetIsolate(); HandleScope scope(isolate); Handle<JSProxy> receiver(this); - Handle<Object> name(name_raw); + Handle<Object> name(name_raw, isolate); Handle<Object> args[] = { name }; Handle<Object> result = CallTrap( "delete", Handle<Object>(), ARRAY_SIZE(args), args); if (isolate->has_pending_exception()) return Failure::Exception(); - Object* bool_result = result->ToBoolean(); - if (mode == STRICT_DELETION && bool_result == GetHeap()->false_value()) { - Handle<Object> handler(receiver->handler()); - Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol("delete"); + bool result_bool = result->BooleanValue(); + if (mode == STRICT_DELETION && !result_bool) { + Handle<Object> handler(receiver->handler(), isolate); + Handle<String> trap_name = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("delete")); Handle<Object> args[] = { handler, trap_name }; Handle<Object> error = isolate->factory()->NewTypeError( "handler_failed", HandleVector(args, ARRAY_SIZE(args))); isolate->Throw(*error); return Failure::Exception(); } - return bool_result; + return isolate->heap()->ToBoolean(result_bool); } @@ -2681,13 +2919,13 @@ MUST_USE_RESULT MaybeObject* JSProxy::DeleteElementWithHandler( MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler( JSReceiver* receiver_raw, - String* name_raw) { + Name* name_raw) { Isolate* isolate = GetIsolate(); HandleScope scope(isolate); Handle<JSProxy> proxy(this); - Handle<Object> handler(this->handler()); // Trap might morph proxy. + Handle<Object> handler(this->handler(), isolate); // Trap might morph proxy. Handle<JSReceiver> receiver(receiver_raw); - Handle<Object> name(name_raw); + Handle<Object> name(name_raw, isolate); Handle<Object> args[] = { name }; Handle<Object> result = CallTrap( @@ -2704,19 +2942,22 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler( if (has_pending_exception) return NONE; // Convert result to PropertyAttributes. 
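
DeletePropertyWithHandler and GetPropertyAttributeWithHandler above call user traps on the old, non-direct harmony proxies: the "delete" trap's result (now coerced with BooleanValue rather than ToBoolean) decides whether a strict-mode delete throws handler_failed, and getPropertyDescriptor's enumerable/configurable/writable fields are folded into the internal attribute bits. A rough sketch against the old Proxy.create API; the flag (--harmony-proxies) and the exact trap semantics are assumptions here; only the trap names come from the code above:

// Sketch only: pre-direct-proxies API, Proxy.create(handler).
var handler = {
  getPropertyDescriptor: function (name) {
    // Reported as READ_ONLY | DONT_DELETE, but still enumerable.
    return { value: 1, writable: false, enumerable: true, configurable: false };
  },
  "delete": function (name) {
    return false;          // refuse every deletion
  }
};
var p = Proxy.create(handler);

print(delete p.foo);       // sloppy mode: the trap's false is simply returned

(function () {
  "use strict";
  try {
    delete p.foo;          // strict mode: trap returned false -> TypeError
  } catch (e) {
    print(e instanceof TypeError);  // true
  }
})();
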
- Handle<String> enum_n = isolate->factory()->LookupAsciiSymbol("enumerable"); - Handle<Object> enumerable(v8::internal::GetProperty(desc, enum_n)); + Handle<String> enum_n = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("enumerable")); + Handle<Object> enumerable(v8::internal::GetProperty(isolate, desc, enum_n)); if (isolate->has_pending_exception()) return NONE; - Handle<String> conf_n = isolate->factory()->LookupAsciiSymbol("configurable"); - Handle<Object> configurable(v8::internal::GetProperty(desc, conf_n)); + Handle<String> conf_n = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("configurable")); + Handle<Object> configurable(v8::internal::GetProperty(isolate, desc, conf_n)); if (isolate->has_pending_exception()) return NONE; - Handle<String> writ_n = isolate->factory()->LookupAsciiSymbol("writable"); - Handle<Object> writable(v8::internal::GetProperty(desc, writ_n)); + Handle<String> writ_n = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("writable")); + Handle<Object> writable(v8::internal::GetProperty(isolate, desc, writ_n)); if (isolate->has_pending_exception()) return NONE; if (configurable->IsFalse()) { - Handle<String> trap = - isolate->factory()->LookupAsciiSymbol("getPropertyDescriptor"); + Handle<String> trap = isolate->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR("getPropertyDescriptor")); Handle<Object> args[] = { handler, trap, name }; Handle<Object> error = isolate->factory()->NewTypeError( "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args))); @@ -2725,20 +2966,22 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler( } int attributes = NONE; - if (enumerable->ToBoolean()->IsFalse()) attributes |= DONT_ENUM; - if (configurable->ToBoolean()->IsFalse()) attributes |= DONT_DELETE; - if (writable->ToBoolean()->IsFalse()) attributes |= READ_ONLY; + if (!enumerable->BooleanValue()) attributes |= DONT_ENUM; + if (!configurable->BooleanValue()) attributes |= DONT_DELETE; + if (!writable->BooleanValue()) attributes |= READ_ONLY; return static_cast<PropertyAttributes>(attributes); } MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler( - JSReceiver* receiver, + JSReceiver* receiver_raw, uint32_t index) { Isolate* isolate = GetIsolate(); HandleScope scope(isolate); + Handle<JSProxy> proxy(this); + Handle<JSReceiver> receiver(receiver_raw); Handle<String> name = isolate->factory()->Uint32ToString(index); - return GetPropertyAttributeWithHandler(receiver, *name); + return proxy->GetPropertyAttributeWithHandler(*receiver, *name); } @@ -2772,10 +3015,10 @@ MUST_USE_RESULT Handle<Object> JSProxy::CallTrap(const char* name, int argc, Handle<Object> argv[]) { Isolate* isolate = GetIsolate(); - Handle<Object> handler(this->handler()); + Handle<Object> handler(this->handler(), isolate); - Handle<String> trap_name = isolate->factory()->LookupAsciiSymbol(name); - Handle<Object> trap(v8::internal::GetProperty(handler, trap_name)); + Handle<String> trap_name = isolate->factory()->InternalizeUtf8String(name); + Handle<Object> trap(v8::internal::GetProperty(isolate, handler, trap_name)); if (isolate->has_pending_exception()) return trap; if (trap->IsUndefined()) { @@ -2802,34 +3045,37 @@ void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object, } -MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, - String* name_raw, +MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup, + Name* name_raw, Object* value_raw, 
PropertyAttributes attributes, StrictModeFlag strict_mode, StoreFromKeyed store_mode) { Heap* heap = GetHeap(); + Isolate* isolate = heap->isolate(); // Make sure that the top context does not change when doing callbacks or // interceptor calls. AssertNoContextChange ncc; // Optimization for 2-byte strings often used as keys in a decompression - // dictionary. We make these short keys into symbols to avoid constantly + // dictionary. We internalize these short keys to avoid constantly // reallocating them. - if (!name_raw->IsSymbol() && name_raw->length() <= 2) { - Object* symbol_version; - { MaybeObject* maybe_symbol_version = heap->LookupSymbol(name_raw); - if (maybe_symbol_version->ToObject(&symbol_version)) { - name_raw = String::cast(symbol_version); + if (name_raw->IsString() && !name_raw->IsInternalizedString() && + String::cast(name_raw)->length() <= 2) { + Object* internalized_version; + { MaybeObject* maybe_string_version = + heap->InternalizeString(String::cast(name_raw)); + if (maybe_string_version->ToObject(&internalized_version)) { + name_raw = String::cast(internalized_version); } } } // Check access rights if needed. if (IsAccessCheckNeeded()) { - if (!heap->isolate()->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) { + if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) { return SetPropertyWithFailedAccessCheck( - result, name_raw, value_raw, true, strict_mode); + lookup, name_raw, value_raw, true, strict_mode); } } @@ -2838,66 +3084,78 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, if (proto->IsNull()) return value_raw; ASSERT(proto->IsJSGlobalObject()); return JSObject::cast(proto)->SetPropertyForResult( - result, name_raw, value_raw, attributes, strict_mode, store_mode); + lookup, name_raw, value_raw, attributes, strict_mode, store_mode); } + ASSERT(!lookup->IsFound() || lookup->holder() == this || + lookup->holder()->map()->is_hidden_prototype()); + // From this point on everything needs to be handlified, because // SetPropertyViaPrototypes might call back into JavaScript. - HandleScope scope(GetIsolate()); + HandleScope scope(isolate); Handle<JSObject> self(this); - Handle<String> name(name_raw); - Handle<Object> value(value_raw); + Handle<Name> name(name_raw); + Handle<Object> value(value_raw, isolate); - if (!result->IsProperty() && !self->IsJSContextExtensionObject()) { + if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) { bool done = false; MaybeObject* result_object = self->SetPropertyViaPrototypes( *name, *value, attributes, strict_mode, &done); if (done) return result_object; } - if (!result->IsFound()) { + if (!lookup->IsFound()) { // Neither properties nor transitions found. return self->AddProperty( *name, *value, attributes, strict_mode, store_mode); } - if (result->IsProperty() && result->IsReadOnly()) { + + if (lookup->IsProperty() && lookup->IsReadOnly()) { if (strict_mode == kStrictMode) { Handle<Object> args[] = { name, self }; - return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError( + return isolate->Throw(*isolate->factory()->NewTypeError( "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)))); } else { return *value; } } + Handle<Object> old_value(heap->the_hole_value(), isolate); + if (FLAG_harmony_observation && + map()->is_observed() && lookup->IsDataProperty()) { + old_value = Object::GetProperty(self, name); + } + // This is a real property that is not read-only, or it is a // transition or null descriptor and there are no setters in the prototypes. 
- switch (result->type()) { + MaybeObject* result = *value; + switch (lookup->type()) { case NORMAL: - return self->SetNormalizedProperty(result, *value); + result = lookup->holder()->SetNormalizedProperty(lookup, *value); + break; case FIELD: - return self->FastPropertyAtPut(result->GetFieldIndex(), *value); + result = lookup->holder()->FastPropertyAtPut( + lookup->GetFieldIndex().field_index(), *value); + break; case CONSTANT_FUNCTION: // Only replace the function if necessary. - if (*value == result->GetConstantFunction()) return *value; + if (*value == lookup->GetConstantFunction()) return *value; // Preserve the attributes of this existing property. - attributes = result->GetAttributes(); - return self->ConvertDescriptorToField(*name, *value, attributes); + attributes = lookup->GetAttributes(); + result = + lookup->holder()->ConvertDescriptorToField(*name, *value, attributes); + break; case CALLBACKS: { - Object* callback_object = result->GetCallbackObject(); - return self->SetPropertyWithCallback(callback_object, - *name, - *value, - result->holder(), - strict_mode); + Object* callback_object = lookup->GetCallbackObject(); + return self->SetPropertyWithCallback( + callback_object, *name, *value, lookup->holder(), strict_mode); } case INTERCEPTOR: - return self->SetPropertyWithInterceptor(*name, - *value, - attributes, - strict_mode); + result = lookup->holder()->SetPropertyWithInterceptor( + *name, *value, attributes, strict_mode); + break; case TRANSITION: { - Map* transition_map = result->GetTransitionTarget(); + Map* transition_map = lookup->GetTransitionTarget(); int descriptor = transition_map->LastAdded(); DescriptorArray* descriptors = transition_map->instance_descriptors(); @@ -2906,37 +3164,55 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, if (details.type() == FIELD) { if (attributes == details.attributes()) { int field_index = descriptors->GetFieldIndex(descriptor); - return self->AddFastPropertyUsingMap(transition_map, - *name, - *value, - field_index); + result = lookup->holder()->AddFastPropertyUsingMap( + transition_map, *name, *value, field_index); + } else { + result = lookup->holder()->ConvertDescriptorToField( + *name, *value, attributes); } - return self->ConvertDescriptorToField(*name, *value, attributes); } else if (details.type() == CALLBACKS) { - return ConvertDescriptorToField(*name, *value, attributes); - } - - ASSERT(details.type() == CONSTANT_FUNCTION); - - Object* constant_function = descriptors->GetValue(descriptor); - // If the same constant function is being added we can simply - // transition to the target map. - if (constant_function == *value) { - self->set_map(transition_map); - return constant_function; + result = lookup->holder()->ConvertDescriptorToField( + *name, *value, attributes); + } else { + ASSERT(details.type() == CONSTANT_FUNCTION); + + Object* constant_function = descriptors->GetValue(descriptor); + if (constant_function == *value) { + // If the same constant function is being added we can simply + // transition to the target map. + lookup->holder()->set_map(transition_map); + result = constant_function; + } else { + // Otherwise, replace with a map transition to a new map with a FIELD, + // even if the value is a constant function. + result = lookup->holder()->ConvertTransitionToMapTransition( + lookup->GetTransitionIndex(), *name, *value, attributes); + } } - // Otherwise, replace with a map transition to a new map with a FIELD, - // even if the value is a constant function. 
- return ConvertTransitionToMapTransition( - result->GetTransitionIndex(), *name, *value, attributes); + break; } case HANDLER: case NONEXISTENT: UNREACHABLE(); - return *value; } - UNREACHABLE(); // keep the compiler happy - return *value; + + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + if (FLAG_harmony_observation && map()->is_observed()) { + if (lookup->IsTransition()) { + EnqueueChangeRecord(self, "new", name, old_value); + } else { + LookupResult new_lookup(isolate); + self->LocalLookup(*name, &new_lookup, true); + if (new_lookup.IsDataProperty() && + !Object::GetProperty(self, name)->SameValue(*old_value)) { + EnqueueChangeRecord(self, "updated", name, old_value); + } + } + } + + return *hresult; } @@ -2951,7 +3227,7 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result, // doesn't handle function prototypes correctly. Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes( Handle<JSObject> object, - Handle<String> key, + Handle<Name> key, Handle<Object> value, PropertyAttributes attributes) { CALL_HEAP_FUNCTION( @@ -2962,22 +3238,22 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes( MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes( - String* name, - Object* value, + Name* name_raw, + Object* value_raw, PropertyAttributes attributes) { // Make sure that the top context does not change when doing callbacks or // interceptor calls. AssertNoContextChange ncc; Isolate* isolate = GetIsolate(); - LookupResult result(isolate); - LocalLookup(name, &result); - if (!result.IsFound()) map()->LookupTransition(this, name, &result); + LookupResult lookup(isolate); + LocalLookup(name_raw, &lookup, true); + if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup); // Check access rights if needed. if (IsAccessCheckNeeded()) { - if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) { - return SetPropertyWithFailedAccessCheck(&result, - name, - value, + if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) { + return SetPropertyWithFailedAccessCheck(&lookup, + name_raw, + value_raw, false, kNonStrictMode); } @@ -2985,40 +3261,61 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes( if (IsJSGlobalProxy()) { Object* proto = GetPrototype(); - if (proto->IsNull()) return value; + if (proto->IsNull()) return value_raw; ASSERT(proto->IsJSGlobalObject()); return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes( - name, - value, + name_raw, + value_raw, attributes); } // Check for accessor in prototype chain removed here in clone. - if (!result.IsFound()) { + if (!lookup.IsFound()) { // Neither properties nor transitions found. - return AddProperty(name, value, attributes, kNonStrictMode); + return AddProperty(name_raw, value_raw, attributes, kNonStrictMode); + } + + // From this point on everything needs to be handlified. + HandleScope scope(isolate); + Handle<JSObject> self(this); + Handle<Name> name(name_raw); + Handle<Object> value(value_raw, isolate); + + Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate); + PropertyAttributes old_attributes = ABSENT; + bool is_observed = FLAG_harmony_observation && self->map()->is_observed(); + if (is_observed) { + if (lookup.IsDataProperty()) old_value = Object::GetProperty(self, name); + old_attributes = lookup.GetAttributes(); } // Check of IsReadOnly removed from here in clone. 
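
Note that in the switch above the CALLBACKS case returns straight out of SetPropertyForResult, so an assignment that lands on an accessor property never reaches the observation block; only data-property writes are looked up again and compared against old_value to decide between "new" and "updated". A sketch of the visible difference, under the same Object.observe assumptions as the earlier example:

// Sketch: data-property writes are observable, accessor writes are not
// (their setter simply runs).
var o = { data: 1 };
Object.defineProperty(o, "accessor", {
  get: function () { return 0; },
  set: function (v) { /* side effects only */ },
  configurable: true
});

var seen = [];
function observer(changes) { seen = seen.concat(changes); }
Object.observe(o, observer);

o.data = 2;       // "updated" record, oldValue 1
o.accessor = 5;   // setter runs, nothing is enqueued on this path

Object.deliverChangeRecords(observer);
seen.forEach(function (r) { print(r.type + " " + r.name); });
// Expected under these assumptions: a single "updated data" record.
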
- switch (result.type()) { + MaybeObject* result = *value; + switch (lookup.type()) { case NORMAL: { PropertyDetails details = PropertyDetails(attributes, NORMAL); - return SetNormalizedProperty(name, value, details); + result = self->SetNormalizedProperty(*name, *value, details); + break; } case FIELD: - return FastPropertyAtPut(result.GetFieldIndex(), value); + result = self->FastPropertyAtPut( + lookup.GetFieldIndex().field_index(), *value); + break; case CONSTANT_FUNCTION: // Only replace the function if necessary. - if (value == result.GetConstantFunction()) return value; - // Preserve the attributes of this existing property. - attributes = result.GetAttributes(); - return ConvertDescriptorToField(name, value, attributes); + if (*value != lookup.GetConstantFunction()) { + // Preserve the attributes of this existing property. + attributes = lookup.GetAttributes(); + result = self->ConvertDescriptorToField(*name, *value, attributes); + } + break; case CALLBACKS: case INTERCEPTOR: // Override callback in clone - return ConvertDescriptorToField(name, value, attributes); + result = self->ConvertDescriptorToField(*name, *value, attributes); + break; case TRANSITION: { - Map* transition_map = result.GetTransitionTarget(); + Map* transition_map = lookup.GetTransitionTarget(); int descriptor = transition_map->LastAdded(); DescriptorArray* descriptors = transition_map->instance_descriptors(); @@ -3027,35 +3324,57 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes( if (details.type() == FIELD) { if (attributes == details.attributes()) { int field_index = descriptors->GetFieldIndex(descriptor); - return AddFastPropertyUsingMap(transition_map, - name, - value, - field_index); + result = self->AddFastPropertyUsingMap( + transition_map, *name, *value, field_index); + } else { + result = self->ConvertDescriptorToField(*name, *value, attributes); } - return ConvertDescriptorToField(name, value, attributes); } else if (details.type() == CALLBACKS) { - return ConvertDescriptorToField(name, value, attributes); - } - - ASSERT(details.type() == CONSTANT_FUNCTION); + result = self->ConvertDescriptorToField(*name, *value, attributes); + } else { + ASSERT(details.type() == CONSTANT_FUNCTION); - // Replace transition to CONSTANT FUNCTION with a map transition to a new - // map with a FIELD, even if the value is a function. - return ConvertTransitionToMapTransition( - result.GetTransitionIndex(), name, value, attributes); + // Replace transition to CONSTANT FUNCTION with a map transition to a + // new map with a FIELD, even if the value is a function. 
+ result = self->ConvertTransitionToMapTransition( + lookup.GetTransitionIndex(), *name, *value, attributes); + } + break; } case HANDLER: case NONEXISTENT: UNREACHABLE(); } - UNREACHABLE(); // keep the compiler happy - return value; + + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + if (is_observed) { + if (lookup.IsTransition()) { + EnqueueChangeRecord(self, "new", name, old_value); + } else if (old_value->IsTheHole()) { + EnqueueChangeRecord(self, "reconfigured", name, old_value); + } else { + LookupResult new_lookup(isolate); + self->LocalLookup(*name, &new_lookup, true); + bool value_changed = new_lookup.IsDataProperty() && + !old_value->SameValue(*Object::GetProperty(self, name)); + if (new_lookup.GetAttributes() != old_attributes) { + if (!value_changed) old_value = isolate->factory()->the_hole_value(); + EnqueueChangeRecord(self, "reconfigured", name, old_value); + } else if (value_changed) { + EnqueueChangeRecord(self, "updated", name, old_value); + } + } + } + + return *hresult; } PropertyAttributes JSObject::GetPropertyAttributePostInterceptor( JSObject* receiver, - String* name, + Name* name, bool continue_search) { // Check local property, ignore interceptor. LookupResult result(GetIsolate()); @@ -3076,8 +3395,11 @@ PropertyAttributes JSObject::GetPropertyAttributePostInterceptor( PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor( JSObject* receiver, - String* name, + Name* name, bool continue_search) { + // TODO(rossberg): Support symbols in the API. + if (name->IsSymbol()) return ABSENT; + Isolate* isolate = GetIsolate(); // Make sure that the top context does not change when doing @@ -3088,7 +3410,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor( Handle<InterceptorInfo> interceptor(GetNamedInterceptor()); Handle<JSObject> receiver_handle(receiver); Handle<JSObject> holder_handle(this); - Handle<String> name_handle(name); + Handle<String> name_handle(String::cast(name)); CustomArguments args(isolate, interceptor->data(), receiver, this); v8::AccessorInfo info(args.end()); if (!interceptor->query()->IsUndefined()) { @@ -3127,45 +3449,46 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor( PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver( JSReceiver* receiver, - String* key) { + Name* key) { uint32_t index = 0; if (IsJSObject() && key->AsArrayIndex(&index)) { - return JSObject::cast(this)->HasElementWithReceiver(receiver, index) - ? NONE : ABSENT; + return JSObject::cast(this)->GetElementAttributeWithReceiver( + receiver, index, true); } // Named property. - LookupResult result(GetIsolate()); - Lookup(key, &result); - return GetPropertyAttribute(receiver, &result, key, true); + LookupResult lookup(GetIsolate()); + Lookup(key, &lookup); + return GetPropertyAttributeForResult(receiver, &lookup, key, true); } -PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver, - LookupResult* result, - String* name, - bool continue_search) { +PropertyAttributes JSReceiver::GetPropertyAttributeForResult( + JSReceiver* receiver, + LookupResult* lookup, + Name* name, + bool continue_search) { // Check access rights if needed. 
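
The observation tail of SetLocalPropertyIgnoreAttributes above distinguishes three record types: "new" for a transition, "reconfigured" when the attributes change (dropping oldValue unless the value changed too), and "updated" for a plain value change. Reaching it through Object.defineProperty, roughly; that defineProperty takes this path, plus the usual Object.observe flag, are the assumptions:

// Sketch: attribute-only redefinition vs. value-only redefinition.
var o = {};
Object.defineProperty(o, "p", { value: 1, writable: true,
                                enumerable: true, configurable: true });

var seen = [];
function observer(changes) { seen = seen.concat(changes); }
Object.observe(o, observer);

// Same value, different attributes -> "reconfigured".
Object.defineProperty(o, "p", { enumerable: false });

// Same attributes, different value -> "updated".
Object.defineProperty(o, "p", { value: 2 });

Object.deliverChangeRecords(observer);
seen.forEach(function (r) { print(r.type + " " + r.name); });
// Expected under these assumptions: "reconfigured p", then "updated p".
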
if (IsAccessCheckNeeded()) { JSObject* this_obj = JSObject::cast(this); Heap* heap = GetHeap(); if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) { return this_obj->GetPropertyAttributeWithFailedAccessCheck( - receiver, result, name, continue_search); + receiver, lookup, name, continue_search); } } - if (result->IsFound()) { - switch (result->type()) { + if (lookup->IsFound()) { + switch (lookup->type()) { case NORMAL: // fall through case FIELD: case CONSTANT_FUNCTION: case CALLBACKS: - return result->GetAttributes(); + return lookup->GetAttributes(); case HANDLER: { - return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler( + return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler( receiver, name); } case INTERCEPTOR: - return result->holder()->GetPropertyAttributeWithInterceptor( + return lookup->holder()->GetPropertyAttributeWithInterceptor( JSObject::cast(receiver), name, continue_search); case TRANSITION: case NONEXISTENT: @@ -3176,17 +3499,114 @@ PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver, } -PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) { +PropertyAttributes JSReceiver::GetLocalPropertyAttribute(Name* name) { // Check whether the name is an array index. uint32_t index = 0; if (IsJSObject() && name->AsArrayIndex(&index)) { - if (JSObject::cast(this)->HasLocalElement(index)) return NONE; - return ABSENT; + return GetLocalElementAttribute(index); } // Named property. - LookupResult result(GetIsolate()); - LocalLookup(name, &result); - return GetPropertyAttribute(this, &result, name, false); + LookupResult lookup(GetIsolate()); + LocalLookup(name, &lookup, true); + return GetPropertyAttributeForResult(this, &lookup, name, false); +} + + +PropertyAttributes JSObject::GetElementAttributeWithReceiver( + JSReceiver* receiver, uint32_t index, bool continue_search) { + Isolate* isolate = GetIsolate(); + + // Check access rights if needed. + if (IsAccessCheckNeeded()) { + if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) { + isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS); + return ABSENT; + } + } + + if (IsJSGlobalProxy()) { + Object* proto = GetPrototype(); + if (proto->IsNull()) return ABSENT; + ASSERT(proto->IsJSGlobalObject()); + return JSObject::cast(proto)->GetElementAttributeWithReceiver( + receiver, index, continue_search); + } + + // Check for lookup interceptor except when bootstrapping. + if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) { + return GetElementAttributeWithInterceptor(receiver, index, continue_search); + } + + return GetElementAttributeWithoutInterceptor( + receiver, index, continue_search); +} + + +PropertyAttributes JSObject::GetElementAttributeWithInterceptor( + JSReceiver* receiver, uint32_t index, bool continue_search) { + Isolate* isolate = GetIsolate(); + // Make sure that the top context does not change when doing + // callbacks or interceptor calls. 
+ AssertNoContextChange ncc; + HandleScope scope(isolate); + Handle<InterceptorInfo> interceptor(GetIndexedInterceptor()); + Handle<JSReceiver> hreceiver(receiver); + Handle<JSObject> holder(this); + CustomArguments args(isolate, interceptor->data(), receiver, this); + v8::AccessorInfo info(args.end()); + if (!interceptor->query()->IsUndefined()) { + v8::IndexedPropertyQuery query = + v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query()); + LOG(isolate, + ApiIndexedPropertyAccess("interceptor-indexed-has", this, index)); + v8::Handle<v8::Integer> result; + { + // Leaving JavaScript. + VMState state(isolate, EXTERNAL); + result = query(index, info); + } + if (!result.IsEmpty()) + return static_cast<PropertyAttributes>(result->Int32Value()); + } else if (!interceptor->getter()->IsUndefined()) { + v8::IndexedPropertyGetter getter = + v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter()); + LOG(isolate, + ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index)); + v8::Handle<v8::Value> result; + { + // Leaving JavaScript. + VMState state(isolate, EXTERNAL); + result = getter(index, info); + } + if (!result.IsEmpty()) return NONE; + } + + return holder->GetElementAttributeWithoutInterceptor( + *hreceiver, index, continue_search); +} + + +PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor( + JSReceiver* receiver, uint32_t index, bool continue_search) { + PropertyAttributes attr = GetElementsAccessor()->GetAttributes( + receiver, this, index); + if (attr != ABSENT) return attr; + + // Handle [] on String objects. + if (IsStringObjectWithCharacterAt(index)) { + return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE); + } + + if (!continue_search) return ABSENT; + + Object* pt = GetPrototype(); + if (pt->IsJSProxy()) { + // We need to follow the spec and simulate a call to [[GetOwnProperty]]. + return JSProxy::cast(pt)->GetElementAttributeWithHandler(receiver, index); + } + if (pt->IsNull()) return ABSENT; + return JSObject::cast(pt)->GetElementAttributeWithReceiver( + receiver, index, true); } @@ -3215,7 +3635,9 @@ MaybeObject* NormalizedMapCache::Get(JSObject* obj, ASSERT(memcmp(Map::cast(fresh)->address(), Map::cast(result)->address(), Map::kCodeCacheOffset) == 0); - int offset = Map::kCodeCacheOffset + kPointerSize; + STATIC_ASSERT(Map::kDependentCodeOffset == + Map::kCodeCacheOffset + kPointerSize); + int offset = Map::kDependentCodeOffset + kPointerSize; ASSERT(memcmp(Map::cast(fresh)->address() + offset, Map::cast(result)->address() + offset, Map::kSize - offset) == 0); @@ -3246,7 +3668,7 @@ void NormalizedMapCache::Clear() { void JSObject::UpdateMapCodeCache(Handle<JSObject> object, - Handle<String> name, + Handle<Name> name, Handle<Code> code) { Isolate* isolate = object->GetIsolate(); CALL_HEAP_FUNCTION_VOID(isolate, @@ -3254,7 +3676,7 @@ void JSObject::UpdateMapCodeCache(Handle<JSObject> object, } -MaybeObject* JSObject::UpdateMapCodeCache(String* name, Code* code) { +MaybeObject* JSObject::UpdateMapCodeCache(Name* name, Code* code) { if (map()->is_shared()) { // Fast case maps are never marked as shared. ASSERT(!HasFastProperties()); @@ -3300,8 +3722,9 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, } else { property_count += 2; // Make space for two more properties. 
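
GetElementAttributeWithoutInterceptor above reports indexed characters of String wrappers as READ_ONLY | DONT_DELETE, matching the ES5 attributes of String index properties (non-writable, non-configurable, but enumerable). In plain JavaScript:

// Sketch: attributes of a String wrapper's character elements.
var s = new String("hi");
var d = Object.getOwnPropertyDescriptor(s, "0");
print(d.value + " " + d.writable + " " + d.enumerable + " " + d.configurable);
// h false true false

delete s[0];     // no effect: DONT_DELETE
s[0] = "x";      // no effect: READ_ONLY
print(s[0]);     // still "h"
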
} - StringDictionary* dictionary; - MaybeObject* maybe_dictionary = StringDictionary::Allocate(property_count); + NameDictionary* dictionary; + MaybeObject* maybe_dictionary = + NameDictionary::Allocate(GetHeap(), property_count); if (!maybe_dictionary->To(&dictionary)) return maybe_dictionary; DescriptorArray* descs = map_of_this->instance_descriptors(); @@ -3374,6 +3797,7 @@ MaybeObject* JSObject::NormalizeProperties(PropertyNormalizationMode mode, } set_map(new_map); + map_of_this->NotifyLeafMapLayoutChange(); set_properties(dictionary); @@ -3438,7 +3862,8 @@ MaybeObject* JSObject::NormalizeElements() { GetElementsCapacityAndUsage(&old_capacity, &used_elements); SeededNumberDictionary* dictionary = NULL; { Object* object; - MaybeObject* maybe = SeededNumberDictionary::Allocate(used_elements); + MaybeObject* maybe = + SeededNumberDictionary::Allocate(GetHeap(), used_elements); if (!maybe->ToObject(&object)) return maybe; dictionary = SeededNumberDictionary::cast(object); } @@ -3521,7 +3946,7 @@ Smi* JSReceiver::GenerateIdentityHash() { MaybeObject* JSObject::SetIdentityHash(Smi* hash, CreationFlag flag) { - MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_symbol(), + MaybeObject* maybe = SetHiddenProperty(GetHeap()->identity_hash_string(), hash); if (maybe->IsFailure()) return maybe; return this; @@ -3537,14 +3962,14 @@ int JSObject::GetIdentityHash(Handle<JSObject> obj) { MaybeObject* JSObject::GetIdentityHash(CreationFlag flag) { - Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_symbol()); + Object* stored_value = GetHiddenProperty(GetHeap()->identity_hash_string()); if (stored_value->IsSmi()) return stored_value; // Do not generate permanent identity hash code if not requested. if (flag == OMIT_CREATION) return GetHeap()->undefined_value(); Smi* hash = GenerateIdentityHash(); - MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_symbol(), + MaybeObject* result = SetHiddenProperty(GetHeap()->identity_hash_string(), hash); if (result->IsFailure()) return result; if (result->ToObjectUnchecked()->IsUndefined()) { @@ -3565,8 +3990,8 @@ MaybeObject* JSProxy::GetIdentityHash(CreationFlag flag) { } -Object* JSObject::GetHiddenProperty(String* key) { - ASSERT(key->IsSymbol()); +Object* JSObject::GetHiddenProperty(Name* key) { + ASSERT(key->IsUniqueName()); if (IsJSGlobalProxy()) { // For a proxy, use the prototype as target object. Object* proxy_parent = GetPrototype(); @@ -3582,7 +4007,7 @@ Object* JSObject::GetHiddenProperty(String* key) { if (inline_value->IsSmi()) { // Handle inline-stored identity hash. - if (key == GetHeap()->identity_hash_symbol()) { + if (key == GetHeap()->identity_hash_string()) { return inline_value; } else { return GetHeap()->undefined_value(); @@ -3599,7 +4024,7 @@ Object* JSObject::GetHiddenProperty(String* key) { Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj, - Handle<String> key, + Handle<Name> key, Handle<Object> value) { CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->SetHiddenProperty(*key, *value), @@ -3607,8 +4032,8 @@ Handle<Object> JSObject::SetHiddenProperty(Handle<JSObject> obj, } -MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) { - ASSERT(key->IsSymbol()); +MaybeObject* JSObject::SetHiddenProperty(Name* key, Object* value) { + ASSERT(key->IsUniqueName()); if (IsJSGlobalProxy()) { // For a proxy, use the prototype as target object. 
Object* proxy_parent = GetPrototype(); @@ -3624,7 +4049,7 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) { // If there is no backing store yet, store the identity hash inline. if (value->IsSmi() && - key == GetHeap()->identity_hash_symbol() && + key == GetHeap()->identity_hash_string() && (inline_value->IsUndefined() || inline_value->IsSmi())) { return SetHiddenPropertiesHashTable(value); } @@ -3648,8 +4073,8 @@ MaybeObject* JSObject::SetHiddenProperty(String* key, Object* value) { } -void JSObject::DeleteHiddenProperty(String* key) { - ASSERT(key->IsSymbol()); +void JSObject::DeleteHiddenProperty(Name* key) { + ASSERT(key->IsUniqueName()); if (IsJSGlobalProxy()) { // For a proxy, use the prototype as target object. Object* proxy_parent = GetPrototype(); @@ -3665,7 +4090,7 @@ void JSObject::DeleteHiddenProperty(String* key) { Object* inline_value = hidden_lookup->ToObjectUnchecked(); // We never delete (inline-stored) identity hashes. - ASSERT(key != GetHeap()->identity_hash_symbol()); + ASSERT(key != GetHeap()->identity_hash_string()); if (inline_value->IsUndefined() || inline_value->IsSmi()) return; ObjectHashTable* hashtable = ObjectHashTable::cast(inline_value); @@ -3677,7 +4102,7 @@ void JSObject::DeleteHiddenProperty(String* key) { bool JSObject::HasHiddenProperties() { return GetPropertyAttributePostInterceptor(this, - GetHeap()->hidden_symbol(), + GetHeap()->hidden_string(), false) != ABSENT; } @@ -3688,13 +4113,13 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable( Object* inline_value; if (HasFastProperties()) { // If the object has fast properties, check whether the first slot - // in the descriptor array matches the hidden symbol. Since the - // hidden symbols hash code is zero (and no other string has hash + // in the descriptor array matches the hidden string. Since the + // hidden strings hash code is zero (and no other name has hash // code zero) it will always occupy the first entry if present. DescriptorArray* descriptors = this->map()->instance_descriptors(); if (descriptors->number_of_descriptors() > 0) { int sorted_index = descriptors->GetSortedKeyIndex(0); - if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() && + if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() && sorted_index < map()->NumberOfOwnDescriptors()) { ASSERT(descriptors->GetType(sorted_index) == FIELD); inline_value = @@ -3707,12 +4132,12 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable( } } else { PropertyAttributes attributes; - // You can't install a getter on a property indexed by the hidden symbol, + // You can't install a getter on a property indexed by the hidden string, // so we can be sure that GetLocalPropertyPostInterceptor returns a real // object. inline_value = GetLocalPropertyPostInterceptor(this, - GetHeap()->hidden_symbol(), + GetHeap()->hidden_string(), &attributes)->ToObjectUnchecked(); } @@ -3724,7 +4149,8 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable( ObjectHashTable* hashtable; static const int kInitialCapacity = 4; MaybeObject* maybe_obj = - ObjectHashTable::Allocate(kInitialCapacity, + ObjectHashTable::Allocate(GetHeap(), + kInitialCapacity, ObjectHashTable::USE_CUSTOM_MINIMUM_CAPACITY); if (!maybe_obj->To<ObjectHashTable>(&hashtable)) return maybe_obj; @@ -3732,7 +4158,7 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable( // We were storing the identity hash inline and now allocated an actual // dictionary. Put the identity hash into the new dictionary. 
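
The identity hash managed above (stored inline or in the hidden-properties ObjectHashTable under identity_hash_string) is what lets arbitrary objects act as hash-table keys by identity; the harmony collections are one consumer of it, which is an assumption of this sketch rather than something visible in this hunk. Assuming a build with the collections flag enabled (e.g. d8 --harmony-collections):

// Sketch: object identity, not structure, determines the key.
var m = new Map();
var k1 = {};
var k2 = {};            // structurally identical, different identity
m.set(k1, "one");
print(m.get(k1));       // "one"
print(m.get(k2));       // undefined: a different object, even if it looks the same
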
MaybeObject* insert_result = - hashtable->Put(GetHeap()->identity_hash_symbol(), inline_value); + hashtable->Put(GetHeap()->identity_hash_string(), inline_value); ObjectHashTable* new_table; if (!insert_result->To(&new_table)) return insert_result; // We expect no resizing for the first insert. @@ -3740,7 +4166,7 @@ MaybeObject* JSObject::GetHiddenPropertiesHashTable( } MaybeObject* store_result = - SetPropertyPostInterceptor(GetHeap()->hidden_symbol(), + SetPropertyPostInterceptor(GetHeap()->hidden_string(), hashtable, DONT_ENUM, kNonStrictMode, @@ -3757,13 +4183,13 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) { ASSERT(HasHiddenProperties() != value->IsSmi()); if (HasFastProperties()) { // If the object has fast properties, check whether the first slot - // in the descriptor array matches the hidden symbol. Since the - // hidden symbols hash code is zero (and no other string has hash + // in the descriptor array matches the hidden string. Since the + // hidden strings hash code is zero (and no other name has hash // code zero) it will always occupy the first entry if present. DescriptorArray* descriptors = this->map()->instance_descriptors(); if (descriptors->number_of_descriptors() > 0) { int sorted_index = descriptors->GetSortedKeyIndex(0); - if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_symbol() && + if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() && sorted_index < map()->NumberOfOwnDescriptors()) { ASSERT(descriptors->GetType(sorted_index) == FIELD); this->FastPropertyAtPut(descriptors->GetFieldIndex(sorted_index), @@ -3773,7 +4199,7 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) { } } MaybeObject* store_result = - SetPropertyPostInterceptor(GetHeap()->hidden_symbol(), + SetPropertyPostInterceptor(GetHeap()->hidden_string(), value, DONT_ENUM, kNonStrictMode, @@ -3783,7 +4209,7 @@ MaybeObject* JSObject::SetHiddenPropertiesHashTable(Object* value) { } -MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name, +MaybeObject* JSObject::DeletePropertyPostInterceptor(Name* name, DeleteMode mode) { // Check local property, ignore interceptor. LookupResult result(GetIsolate()); @@ -3800,11 +4226,14 @@ MaybeObject* JSObject::DeletePropertyPostInterceptor(String* name, } -MaybeObject* JSObject::DeletePropertyWithInterceptor(String* name) { +MaybeObject* JSObject::DeletePropertyWithInterceptor(Name* name) { + // TODO(rossberg): Support symbols in the API. + if (name->IsSymbol()) return GetHeap()->false_value(); + Isolate* isolate = GetIsolate(); HandleScope scope(isolate); Handle<InterceptorInfo> interceptor(GetNamedInterceptor()); - Handle<String> name_handle(name); + Handle<String> name_handle(String::cast(name)); Handle<JSObject> this_handle(this); if (!interceptor->deleter()->IsUndefined()) { v8::NamedPropertyDeleter deleter = @@ -3893,7 +4322,7 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) { if (mode == STRICT_DELETION) { // Deleting a non-configurable property in strict mode. HandleScope scope(isolate); - Handle<Object> holder(this); + Handle<Object> holder(this, isolate); Handle<Object> name = isolate->factory()->NewNumberFromUint(index); Handle<Object> args[2] = { name, holder }; Handle<Object> error = @@ -3911,30 +4340,53 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) { return JSGlobalObject::cast(proto)->DeleteElement(index, mode); } - if (HasIndexedInterceptor()) { - // Skip interceptor if forcing deletion. 
- if (mode != FORCE_DELETION) { - return DeleteElementWithInterceptor(index); + // From this point on everything needs to be handlified. + HandleScope scope(isolate); + Handle<JSObject> self(this); + + Handle<Object> old_value; + bool should_enqueue_change_record = false; + if (FLAG_harmony_observation && self->map()->is_observed()) { + should_enqueue_change_record = self->HasLocalElement(index); + if (should_enqueue_change_record) { + old_value = self->GetLocalElementAccessorPair(index) != NULL + ? Handle<Object>::cast(isolate->factory()->the_hole_value()) + : Object::GetElement(self, index); } - mode = JSReceiver::FORCE_DELETION; } - return GetElementsAccessor()->Delete(this, index, mode); + MaybeObject* result; + // Skip interceptor if forcing deletion. + if (self->HasIndexedInterceptor() && mode != FORCE_DELETION) { + result = self->DeleteElementWithInterceptor(index); + } else { + result = self->GetElementsAccessor()->Delete(*self, index, mode); + } + + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + if (should_enqueue_change_record && !self->HasLocalElement(index)) { + Handle<String> name = isolate->factory()->Uint32ToString(index); + EnqueueChangeRecord(self, "deleted", name, old_value); + } + + return *hresult; } Handle<Object> JSObject::DeleteProperty(Handle<JSObject> obj, - Handle<String> prop) { + Handle<Name> prop) { CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DeleteProperty(*prop, JSObject::NORMAL_DELETION), Object); } -MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) { +MaybeObject* JSObject::DeleteProperty(Name* name, DeleteMode mode) { Isolate* isolate = GetIsolate(); // ECMA-262, 3rd, 8.6.2.5 - ASSERT(name->IsString()); + ASSERT(name->IsName()); // Check access rights if needed. if (IsAccessCheckNeeded() && @@ -3953,38 +4405,61 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) { uint32_t index = 0; if (name->AsArrayIndex(&index)) { return DeleteElement(index, mode); - } else { - LookupResult result(isolate); - LocalLookup(name, &result); - if (!result.IsFound()) return isolate->heap()->true_value(); - // Ignore attributes if forcing a deletion. - if (result.IsDontDelete() && mode != FORCE_DELETION) { - if (mode == STRICT_DELETION) { - // Deleting a non-configurable property in strict mode. - HandleScope scope(isolate); - Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) }; - return isolate->Throw(*isolate->factory()->NewTypeError( - "strict_delete_property", HandleVector(args, 2))); - } - return isolate->heap()->false_value(); - } - // Check for interceptor. - if (result.IsInterceptor()) { - // Skip interceptor if forcing a deletion. - if (mode == FORCE_DELETION) { - return DeletePropertyPostInterceptor(name, mode); - } - return DeletePropertyWithInterceptor(name); + } + + LookupResult lookup(isolate); + LocalLookup(name, &lookup, true); + if (!lookup.IsFound()) return isolate->heap()->true_value(); + // Ignore attributes if forcing a deletion. + if (lookup.IsDontDelete() && mode != FORCE_DELETION) { + if (mode == STRICT_DELETION) { + // Deleting a non-configurable property in strict mode. + HandleScope scope(isolate); + Handle<Object> args[2] = { Handle<Object>(name, isolate), + Handle<Object>(this, isolate) }; + return isolate->Throw(*isolate->factory()->NewTypeError( + "strict_delete_property", HandleVector(args, 2))); + } + return isolate->heap()->false_value(); + } + + // From this point on everything needs to be handlified. 
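
DeleteProperty and DeleteElement now share the same observable shape: a non-configurable property makes a strict-mode delete throw strict_delete_property, and a successful deletion on an observed object enqueues a "deleted" record carrying the old value (with an element index stringified via Uint32ToString as the record name). A sketch, reusing the earlier Object.observe assumptions:

// Sketch: strict-mode delete failure and "deleted" change records.
var o = { gone: 1 };
Object.defineProperty(o, "keep", { value: 2, configurable: false });

(function () {
  "use strict";
  try {
    delete o.keep;       // non-configurable: TypeError in strict mode
  } catch (e) {
    print(e instanceof TypeError);  // true
  }
})();

var seen = [];
function observer(changes) { seen = seen.concat(changes); }
Object.observe(o, observer);

delete o.gone;           // configurable: succeeds, record is enqueued

Object.deliverChangeRecords(observer);
print(seen[0].type + " " + seen[0].name + " " + seen[0].oldValue);
// Expected under these assumptions: deleted gone 1
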
+ HandleScope scope(isolate); + Handle<JSObject> self(this); + Handle<Name> hname(name); + + Handle<Object> old_value = isolate->factory()->the_hole_value(); + bool is_observed = FLAG_harmony_observation && self->map()->is_observed(); + if (is_observed && lookup.IsDataProperty()) { + old_value = Object::GetProperty(self, hname); + } + MaybeObject* result; + + // Check for interceptor. + if (lookup.IsInterceptor()) { + // Skip interceptor if forcing a deletion. + if (mode == FORCE_DELETION) { + result = self->DeletePropertyPostInterceptor(*hname, mode); + } else { + result = self->DeletePropertyWithInterceptor(*hname); } + } else { // Normalize object if needed. Object* obj; - { MaybeObject* maybe_obj = - NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); - if (!maybe_obj->ToObject(&obj)) return maybe_obj; - } + result = self->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0); + if (!result->To(&obj)) return result; // Make sure the properties are normalized before removing the entry. - return DeleteNormalizedProperty(name, mode); + result = self->DeleteNormalizedProperty(*hname, mode); } + + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + if (is_observed && !self->HasLocalProperty(*hname)) { + EnqueueChangeRecord(self, "deleted", hname, old_value); + } + + return *hresult; } @@ -3996,7 +4471,7 @@ MaybeObject* JSReceiver::DeleteElement(uint32_t index, DeleteMode mode) { } -MaybeObject* JSReceiver::DeleteProperty(String* name, DeleteMode mode) { +MaybeObject* JSReceiver::DeleteProperty(Name* name, DeleteMode mode) { if (IsJSProxy()) { return JSProxy::cast(this)->DeletePropertyWithHandler(name, mode); } @@ -4160,7 +4635,7 @@ MaybeObject* JSObject::PreventExtensions() { // It's not possible to seal objects with external array elements if (HasExternalArrayElements()) { HandleScope scope(isolate); - Handle<Object> object(this); + Handle<Object> object(this, isolate); Handle<Object> error = isolate->factory()->NewTypeError( "cant_prevent_ext_external_array_elements", @@ -4222,13 +4697,16 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which, ? 
descs->number_of_descriptors() : NumberOfOwnDescriptors(); for (int i = 0; i < limit; i++) { - if ((descs->GetDetails(i).attributes() & filter) == 0) result++; + if ((descs->GetDetails(i).attributes() & filter) == 0 && + ((filter & SYMBOLIC) == 0 || !descs->GetKey(i)->IsSymbol())) { + result++; + } } return result; } -int Map::PropertyIndexFor(String* name) { +int Map::PropertyIndexFor(Name* name) { DescriptorArray* descs = instance_descriptors(); int limit = NumberOfOwnDescriptors(); for (int i = 0; i < limit; i++) { @@ -4252,7 +4730,7 @@ int Map::NextFreePropertyIndex() { } -AccessorDescriptor* Map::FindAccessor(String* name) { +AccessorDescriptor* Map::FindAccessor(Name* name) { DescriptorArray* descs = instance_descriptors(); int number_of_own_descriptors = NumberOfOwnDescriptors(); for (int i = 0; i < number_of_own_descriptors; i++) { @@ -4264,8 +4742,9 @@ AccessorDescriptor* Map::FindAccessor(String* name) { } -void JSReceiver::LocalLookup(String* name, LookupResult* result) { - ASSERT(name->IsString()); +void JSReceiver::LocalLookup( + Name* name, LookupResult* result, bool search_hidden_prototypes) { + ASSERT(name->IsName()); Heap* heap = GetHeap(); @@ -4273,7 +4752,8 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result) { Object* proto = GetPrototype(); if (proto->IsNull()) return result->NotFound(); ASSERT(proto->IsJSGlobalObject()); - return JSReceiver::cast(proto)->LocalLookup(name, result); + return JSReceiver::cast(proto)->LocalLookup( + name, result, search_hidden_prototypes); } if (IsJSProxy()) { @@ -4289,12 +4769,6 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result) { JSObject* js_object = JSObject::cast(this); - // Check __proto__ before interceptor. - if (name->Equals(heap->Proto_symbol()) && !IsJSContextExtensionObject()) { - result->ConstantResult(js_object); - return; - } - // Check for lookup interceptor except when bootstrapping. if (js_object->HasNamedInterceptor() && !heap->isolate()->bootstrapper()->IsActive()) { @@ -4303,16 +4777,24 @@ void JSReceiver::LocalLookup(String* name, LookupResult* result) { } js_object->LocalLookupRealNamedProperty(name, result); + if (result->IsFound() || !search_hidden_prototypes) return; + + Object* proto = js_object->GetPrototype(); + if (!proto->IsJSReceiver()) return; + JSReceiver* receiver = JSReceiver::cast(proto); + if (receiver->map()->is_hidden_prototype()) { + receiver->LocalLookup(name, result, search_hidden_prototypes); + } } -void JSReceiver::Lookup(String* name, LookupResult* result) { +void JSReceiver::Lookup(Name* name, LookupResult* result) { // Ecma-262 3rd 8.6.2.4 Heap* heap = GetHeap(); for (Object* current = this; current != heap->null_value(); current = JSObject::cast(current)->GetPrototype()) { - JSReceiver::cast(current)->LocalLookup(name, result); + JSReceiver::cast(current)->LocalLookup(name, result, false); if (result->IsFound()) return; } result->NotFound(); @@ -4320,7 +4802,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) { // Search object and its prototype chain for callback properties. 
-void JSObject::LookupCallbackProperty(String* name, LookupResult* result) { +void JSObject::LookupCallbackProperty(Name* name, LookupResult* result) { Heap* heap = GetHeap(); for (Object* current = this; current != heap->null_value() && current->IsJSObject(); @@ -4426,7 +4908,7 @@ MaybeObject* JSObject::DefineElementAccessor(uint32_t index, } -MaybeObject* JSObject::CreateAccessorPairFor(String* name) { +MaybeObject* JSObject::CreateAccessorPairFor(Name* name) { LookupResult result(GetHeap()->isolate()); LocalLookupRealNamedProperty(name, &result); if (result.IsPropertyCallbacks()) { @@ -4445,7 +4927,7 @@ MaybeObject* JSObject::CreateAccessorPairFor(String* name) { } -MaybeObject* JSObject::DefinePropertyAccessor(String* name, +MaybeObject* JSObject::DefinePropertyAccessor(Name* name, Object* getter, Object* setter, PropertyAttributes attributes) { @@ -4453,7 +4935,9 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name, // to do a lookup, which seems to be a bit of overkill. Heap* heap = GetHeap(); bool only_attribute_changes = getter->IsNull() && setter->IsNull(); - if (HasFastProperties() && !only_attribute_changes) { + if (HasFastProperties() && !only_attribute_changes && + (map()->NumberOfOwnDescriptors() < + DescriptorArray::kMaxNumberOfDescriptors)) { MaybeObject* getterOk = heap->undefined_value(); if (!getter->IsNull()) { getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes); @@ -4480,7 +4964,7 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name, } -bool JSObject::CanSetCallback(String* name) { +bool JSObject::CanSetCallback(Name* name) { ASSERT(!IsAccessCheckNeeded() || GetIsolate()->MayNamedAccess(this, name, v8::ACCESS_SET)); @@ -4542,7 +5026,7 @@ MaybeObject* JSObject::SetElementCallback(uint32_t index, } -MaybeObject* JSObject::SetPropertyCallback(String* name, +MaybeObject* JSObject::SetPropertyCallback(Name* name, Object* structure, PropertyAttributes attributes) { // Normalize object to make this operation simple. @@ -4574,7 +5058,7 @@ MaybeObject* JSObject::SetPropertyCallback(String* name, void JSObject::DefineAccessor(Handle<JSObject> object, - Handle<String> name, + Handle<Name> name, Handle<Object> getter, Handle<Object> setter, PropertyAttributes attributes) { @@ -4583,14 +5067,14 @@ void JSObject::DefineAccessor(Handle<JSObject> object, object->DefineAccessor(*name, *getter, *setter, attributes)); } -MaybeObject* JSObject::DefineAccessor(String* name, - Object* getter, - Object* setter, +MaybeObject* JSObject::DefineAccessor(Name* name_raw, + Object* getter_raw, + Object* setter_raw, PropertyAttributes attributes) { Isolate* isolate = GetIsolate(); // Check access rights if needed. if (IsAccessCheckNeeded() && - !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) { + !isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) { isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET); return isolate->heap()->undefined_value(); } @@ -4600,7 +5084,7 @@ MaybeObject* JSObject::DefineAccessor(String* name, if (proto->IsNull()) return this; ASSERT(proto->IsJSGlobalObject()); return JSObject::cast(proto)->DefineAccessor( - name, getter, setter, attributes); + name_raw, getter_raw, setter_raw, attributes); } // Make sure that the top context does not change when doing callbacks or @@ -4608,14 +5092,52 @@ MaybeObject* JSObject::DefineAccessor(String* name, AssertNoContextChange ncc; // Try to flatten before operating on the string. 
- name->TryFlatten(); + if (name_raw->IsString()) String::cast(name_raw)->TryFlatten(); - if (!CanSetCallback(name)) return isolate->heap()->undefined_value(); + if (!CanSetCallback(name_raw)) return isolate->heap()->undefined_value(); + + // From this point on everything needs to be handlified. + HandleScope scope(isolate); + Handle<JSObject> self(this); + Handle<Name> name(name_raw); + Handle<Object> getter(getter_raw, isolate); + Handle<Object> setter(setter_raw, isolate); uint32_t index = 0; - return name->AsArrayIndex(&index) ? - DefineElementAccessor(index, getter, setter, attributes) : - DefinePropertyAccessor(name, getter, setter, attributes); + bool is_element = name->AsArrayIndex(&index); + + Handle<Object> old_value = isolate->factory()->the_hole_value(); + bool is_observed = FLAG_harmony_observation && self->map()->is_observed(); + bool preexists = false; + if (is_observed) { + if (is_element) { + preexists = HasLocalElement(index); + if (preexists && self->GetLocalElementAccessorPair(index) == NULL) { + old_value = Object::GetElement(self, index); + } + } else { + LookupResult lookup(isolate); + LocalLookup(*name, &lookup, true); + preexists = lookup.IsProperty(); + if (preexists && lookup.IsDataProperty()) { + old_value = Object::GetProperty(self, name); + } + } + } + + MaybeObject* result = is_element ? + self->DefineElementAccessor(index, *getter, *setter, attributes) : + self->DefinePropertyAccessor(*name, *getter, *setter, attributes); + + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + if (is_observed) { + const char* type = preexists ? "reconfigured" : "new"; + EnqueueChangeRecord(self, type, name, old_value); + } + + return *hresult; } @@ -4648,7 +5170,7 @@ static MaybeObject* TryAccessorTransition(JSObject* self, } -MaybeObject* JSObject::DefineFastAccessor(String* name, +MaybeObject* JSObject::DefineFastAccessor(Name* name, AccessorComponent component, Object* accessor, PropertyAttributes attributes) { @@ -4695,7 +5217,8 @@ MaybeObject* JSObject::DefineFastAccessor(String* name, if (result.IsFound()) { Map* target = result.GetTransitionTarget(); int descriptor_number = target->LastAdded(); - ASSERT(target->instance_descriptors()->GetKey(descriptor_number) == name); + ASSERT(target->instance_descriptors()->GetKey(descriptor_number) + ->Equals(name)); return TryAccessorTransition( this, target, descriptor_number, component, accessor, attributes); } @@ -4730,7 +5253,7 @@ MaybeObject* JSObject::DefineFastAccessor(String* name, MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { Isolate* isolate = GetIsolate(); - String* name = String::cast(info->name()); + Name* name = Name::cast(info->name()); // Check access rights if needed. if (IsAccessCheckNeeded() && !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) { @@ -4750,7 +5273,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { AssertNoContextChange ncc; // Try to flatten before operating on the string. - name->TryFlatten(); + if (name->IsString()) String::cast(name)->TryFlatten(); if (!CanSetCallback(name)) return isolate->heap()->undefined_value(); @@ -4794,7 +5317,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { } else { // Lookup the name. LookupResult result(isolate); - LocalLookup(name, &result); + LocalLookup(name, &result, true); // ES5 forbids turning a property into an accessor if it's not // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5). 
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) { @@ -4810,7 +5333,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) { } -Object* JSObject::LookupAccessor(String* name, AccessorComponent component) { +Object* JSObject::LookupAccessor(Name* name, AccessorComponent component) { Heap* heap = GetHeap(); // Make sure that the top context does not change when doing callbacks or @@ -4896,7 +5419,6 @@ MaybeObject* Map::RawCopy(int instance_size) { result->set_constructor(constructor()); result->set_bit_field(bit_field()); result->set_bit_field2(bit_field2()); - result->set_bit_field3(bit_field3()); int new_bit_field3 = bit_field3(); new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true); new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0); @@ -4921,7 +5443,6 @@ MaybeObject* Map::CopyNormalized(PropertyNormalizationMode mode, result->set_inobject_properties(inobject_properties()); } - result->set_code_cache(code_cache()); result->set_is_shared(sharing == SHARED_NORMALIZED_MAP); result->set_dictionary_map(true); @@ -4947,6 +5468,7 @@ MaybeObject* Map::CopyDropDescriptors() { result->set_pre_allocated_property_fields(pre_allocated_property_fields()); result->set_is_shared(false); result->ClearCodeCache(GetHeap()); + NotifyLeafMapLayoutChange(); return result; } @@ -4962,7 +5484,7 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors, MaybeObject* maybe_result = CopyDropDescriptors(); if (!maybe_result->To(&result)) return maybe_result; - String* name = descriptor->GetKey(); + Name* name = descriptor->GetKey(); TransitionArray* transitions; MaybeObject* maybe_transitions = @@ -5027,7 +5549,7 @@ MaybeObject* Map::ShareDescriptor(DescriptorArray* descriptors, MaybeObject* Map::CopyReplaceDescriptors(DescriptorArray* descriptors, - String* name, + Name* name, TransitionFlag flag, int descriptor_index) { ASSERT(descriptors->IsSortedNoDuplicates()); @@ -5143,8 +5665,8 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor, TransitionFlag flag) { DescriptorArray* descriptors = instance_descriptors(); - // Ensure the key is a symbol. - MaybeObject* maybe_failure = descriptor->KeyToSymbol(); + // Ensure the key is unique. + MaybeObject* maybe_failure = descriptor->KeyToUniqueName(); if (maybe_failure->IsFailure()) return maybe_failure; int old_size = NumberOfOwnDescriptors(); @@ -5176,7 +5698,7 @@ MaybeObject* Map::CopyAddDescriptor(Descriptor* descriptor, new_descriptors->Append(descriptor, witness); } - String* key = descriptor->GetKey(); + Name* key = descriptor->GetKey(); int insertion_index = new_descriptors->number_of_descriptors() - 1; return CopyReplaceDescriptors(new_descriptors, key, flag, insertion_index); @@ -5187,8 +5709,8 @@ MaybeObject* Map::CopyInsertDescriptor(Descriptor* descriptor, TransitionFlag flag) { DescriptorArray* old_descriptors = instance_descriptors(); - // Ensure the key is a symbol. - MaybeObject* maybe_result = descriptor->KeyToSymbol(); + // Ensure the key is unique. + MaybeObject* maybe_result = descriptor->KeyToUniqueName(); if (maybe_result->IsFailure()) return maybe_result; // We replace the key if it is already present. @@ -5224,11 +5746,11 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors, Descriptor* descriptor, int insertion_index, TransitionFlag flag) { - // Ensure the key is a symbol. - MaybeObject* maybe_failure = descriptor->KeyToSymbol(); + // Ensure the key is unique. 
+ MaybeObject* maybe_failure = descriptor->KeyToUniqueName(); if (maybe_failure->IsFailure()) return maybe_failure; - String* key = descriptor->GetKey(); + Name* key = descriptor->GetKey(); ASSERT(key == descriptors->GetKey(insertion_index)); int new_size = NumberOfOwnDescriptors(); @@ -5259,7 +5781,7 @@ MaybeObject* Map::CopyReplaceDescriptor(DescriptorArray* descriptors, void Map::UpdateCodeCache(Handle<Map> map, - Handle<String> name, + Handle<Name> name, Handle<Code> code) { Isolate* isolate = map->GetIsolate(); CALL_HEAP_FUNCTION_VOID(isolate, @@ -5267,7 +5789,7 @@ void Map::UpdateCodeCache(Handle<Map> map, } -MaybeObject* Map::UpdateCodeCache(String* name, Code* code) { +MaybeObject* Map::UpdateCodeCache(Name* name, Code* code) { ASSERT(!is_shared() || code->allowed_in_shared_map_code_cache()); // Allocate the code cache if not present. @@ -5284,7 +5806,7 @@ MaybeObject* Map::UpdateCodeCache(String* name, Code* code) { } -Object* Map::FindInCodeCache(String* name, Code::Flags flags) { +Object* Map::FindInCodeCache(Name* name, Code::Flags flags) { // Do a lookup if a code cache exists. if (!code_cache()->IsFixedArray()) { return CodeCache::cast(code_cache())->Lookup(name, flags); @@ -5303,7 +5825,7 @@ int Map::IndexInCodeCache(Object* name, Code* code) { } -void Map::RemoveFromCodeCache(String* name, Code* code, int index) { +void Map::RemoveFromCodeCache(Name* name, Code* code, int index) { // No GC is supposed to happen between a call to IndexInCodeCache and // RemoveFromCodeCache so the code cache must be there. ASSERT(!code_cache()->IsFixedArray()); @@ -5506,7 +6028,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) { } -MaybeObject* CodeCache::Update(String* name, Code* code) { +MaybeObject* CodeCache::Update(Name* name, Code* code) { // The number of monomorphic stubs for normal load/store/call IC's can grow to // a large number and therefore they need to go into a hash table. They are // used to load global properties from cells. @@ -5515,7 +6037,8 @@ MaybeObject* CodeCache::Update(String* name, Code* code) { if (normal_type_cache()->IsUndefined()) { Object* result; { MaybeObject* maybe_result = - CodeCacheHashTable::Allocate(CodeCacheHashTable::kInitialSize); + CodeCacheHashTable::Allocate(GetHeap(), + CodeCacheHashTable::kInitialSize); if (!maybe_result->ToObject(&result)) return maybe_result; } set_normal_type_cache(result); @@ -5528,7 +6051,7 @@ MaybeObject* CodeCache::Update(String* name, Code* code) { } -MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) { +MaybeObject* CodeCache::UpdateDefaultCache(Name* name, Code* code) { // When updating the default code cache we disregard the type encoded in the // flags. This allows call constant stubs to overwrite call field // stubs, etc. @@ -5551,7 +6074,7 @@ MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) { cache->set(i + kCodeCacheEntryCodeOffset, code); return this; } - if (name->Equals(String::cast(key))) { + if (name->Equals(Name::cast(key))) { Code::Flags found = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset))->flags(); if (Code::RemoveTypeFromFlags(found) == flags) { @@ -5588,7 +6111,7 @@ MaybeObject* CodeCache::UpdateDefaultCache(String* name, Code* code) { } -MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) { +MaybeObject* CodeCache::UpdateNormalTypeCache(Name* name, Code* code) { // Adding a new entry can cause a new cache to be allocated. 
CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache()); Object* new_cache; @@ -5600,7 +6123,7 @@ MaybeObject* CodeCache::UpdateNormalTypeCache(String* name, Code* code) { } -Object* CodeCache::Lookup(String* name, Code::Flags flags) { +Object* CodeCache::Lookup(Name* name, Code::Flags flags) { if (Code::ExtractTypeFromFlags(flags) == Code::NORMAL) { return LookupNormalTypeCache(name, flags); } else { @@ -5609,7 +6132,7 @@ Object* CodeCache::Lookup(String* name, Code::Flags flags) { } -Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) { +Object* CodeCache::LookupDefaultCache(Name* name, Code::Flags flags) { FixedArray* cache = default_cache(); int length = cache->length(); for (int i = 0; i < length; i += kCodeCacheEntrySize) { @@ -5617,7 +6140,7 @@ Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) { // Skip deleted elements. if (key->IsNull()) continue; if (key->IsUndefined()) return key; - if (name->Equals(String::cast(key))) { + if (name->Equals(Name::cast(key))) { Code* code = Code::cast(cache->get(i + kCodeCacheEntryCodeOffset)); if (code->flags() == flags) { return code; @@ -5628,7 +6151,7 @@ Object* CodeCache::LookupDefaultCache(String* name, Code::Flags flags) { } -Object* CodeCache::LookupNormalTypeCache(String* name, Code::Flags flags) { +Object* CodeCache::LookupNormalTypeCache(Name* name, Code::Flags flags) { if (!normal_type_cache()->IsUndefined()) { CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache()); return cache->Lookup(name, flags); @@ -5642,7 +6165,7 @@ int CodeCache::GetIndex(Object* name, Code* code) { if (code->type() == Code::NORMAL) { if (normal_type_cache()->IsUndefined()) return -1; CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache()); - return cache->GetIndex(String::cast(name), code->flags()); + return cache->GetIndex(Name::cast(name), code->flags()); } FixedArray* array = default_cache(); @@ -5658,7 +6181,7 @@ void CodeCache::RemoveByIndex(Object* name, Code* code, int index) { if (code->type() == Code::NORMAL) { ASSERT(!normal_type_cache()->IsUndefined()); CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache()); - ASSERT(cache->GetIndex(String::cast(name), code->flags()) == index); + ASSERT(cache->GetIndex(Name::cast(name), code->flags()) == index); cache->RemoveByIndex(index); } else { FixedArray* array = default_cache(); @@ -5679,10 +6202,10 @@ void CodeCache::RemoveByIndex(Object* name, Code* code, int index) { // lookup not to create a new entry. 
class CodeCacheHashTableKey : public HashTableKey { public: - CodeCacheHashTableKey(String* name, Code::Flags flags) + CodeCacheHashTableKey(Name* name, Code::Flags flags) : name_(name), flags_(flags), code_(NULL) { } - CodeCacheHashTableKey(String* name, Code* code) + CodeCacheHashTableKey(Name* name, Code* code) : name_(name), flags_(code->flags()), code_(code) { } @@ -5691,7 +6214,7 @@ class CodeCacheHashTableKey : public HashTableKey { bool IsMatch(Object* other) { if (!other->IsFixedArray()) return false; FixedArray* pair = FixedArray::cast(other); - String* name = String::cast(pair->get(0)); + Name* name = Name::cast(pair->get(0)); Code::Flags flags = Code::cast(pair->get(1))->flags(); if (flags != flags_) { return false; @@ -5699,7 +6222,7 @@ class CodeCacheHashTableKey : public HashTableKey { return name_->Equals(name); } - static uint32_t NameFlagsHashHelper(String* name, Code::Flags flags) { + static uint32_t NameFlagsHashHelper(Name* name, Code::Flags flags) { return name->Hash() ^ flags; } @@ -5707,15 +6230,15 @@ class CodeCacheHashTableKey : public HashTableKey { uint32_t HashForObject(Object* obj) { FixedArray* pair = FixedArray::cast(obj); - String* name = String::cast(pair->get(0)); + Name* name = Name::cast(pair->get(0)); Code* code = Code::cast(pair->get(1)); return NameFlagsHashHelper(name, code->flags()); } - MUST_USE_RESULT MaybeObject* AsObject() { + MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) { ASSERT(code_ != NULL); Object* obj; - { MaybeObject* maybe_obj = code_->GetHeap()->AllocateFixedArray(2); + { MaybeObject* maybe_obj = heap->AllocateFixedArray(2); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* pair = FixedArray::cast(obj); @@ -5725,14 +6248,14 @@ class CodeCacheHashTableKey : public HashTableKey { } private: - String* name_; + Name* name_; Code::Flags flags_; // TODO(jkummerow): We should be able to get by without this. Code* code_; }; -Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) { +Object* CodeCacheHashTable::Lookup(Name* name, Code::Flags flags) { CodeCacheHashTableKey key(name, flags); int entry = FindEntry(&key); if (entry == kNotFound) return GetHeap()->undefined_value(); @@ -5740,7 +6263,7 @@ Object* CodeCacheHashTable::Lookup(String* name, Code::Flags flags) { } -MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) { +MaybeObject* CodeCacheHashTable::Put(Name* name, Code* code) { CodeCacheHashTableKey key(name, code); Object* obj; { MaybeObject* maybe_obj = EnsureCapacity(1, &key); @@ -5752,7 +6275,7 @@ MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) { int entry = cache->FindInsertionEntry(key.Hash()); Object* k; - { MaybeObject* maybe_k = key.AsObject(); + { MaybeObject* maybe_k = key.AsObject(GetHeap()); if (!maybe_k->ToObject(&k)) return maybe_k; } @@ -5763,7 +6286,7 @@ MaybeObject* CodeCacheHashTable::Put(String* name, Code* code) { } -int CodeCacheHashTable::GetIndex(String* name, Code::Flags flags) { +int CodeCacheHashTable::GetIndex(Name* name, Code::Flags flags) { CodeCacheHashTableKey key(name, flags); int entry = FindEntry(&key); return (entry == kNotFound) ? 
-1 : entry; @@ -5796,6 +6319,7 @@ MaybeObject* PolymorphicCodeCache::Update(MapHandleList* maps, Object* result; { MaybeObject* maybe_result = PolymorphicCodeCacheHashTable::Allocate( + GetHeap(), PolymorphicCodeCacheHashTable::kInitialSize); if (!maybe_result->ToObject(&result)) return maybe_result; } @@ -5821,7 +6345,7 @@ Handle<Object> PolymorphicCodeCache::Lookup(MapHandleList* maps, if (!cache()->IsUndefined()) { PolymorphicCodeCacheHashTable* hash_table = PolymorphicCodeCacheHashTable::cast(cache()); - return Handle<Object>(hash_table->Lookup(maps, flags)); + return Handle<Object>(hash_table->Lookup(maps, flags), GetIsolate()); } else { return GetIsolate()->factory()->undefined_value(); } @@ -5884,13 +6408,13 @@ class PolymorphicCodeCacheHashTableKey : public HashTableKey { return MapsHashHelper(&other_maps, other_flags); } - MUST_USE_RESULT MaybeObject* AsObject() { + MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) { Object* obj; // The maps in |maps_| must be copied to a newly allocated FixedArray, // both because the referenced MapList is short-lived, and because C++ // objects can't be stored in the heap anyway. { MaybeObject* maybe_obj = - HEAP->AllocateUninitializedFixedArray(maps_->length() + 1); + heap->AllocateUninitializedFixedArray(maps_->length() + 1); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* list = FixedArray::cast(obj); @@ -5940,7 +6464,7 @@ MaybeObject* PolymorphicCodeCacheHashTable::Put(MapHandleList* maps, PolymorphicCodeCacheHashTable* cache = reinterpret_cast<PolymorphicCodeCacheHashTable*>(obj); int entry = cache->FindInsertionEntry(key.Hash()); - { MaybeObject* maybe_obj = key.AsObject(); + { MaybeObject* maybe_obj = key.AsObject(GetHeap()); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } cache->set(EntryToIndex(entry), obj); @@ -5960,7 +6484,7 @@ MaybeObject* FixedArray::AddKeysFromJSArray(JSArray* array) { if (FLAG_enable_slow_asserts) { for (int i = 0; i < result->length(); i++) { Object* current = result->get(i); - ASSERT(current->IsNumber() || current->IsString()); + ASSERT(current->IsNumber() || current->IsName()); } } #endif @@ -5978,7 +6502,7 @@ MaybeObject* FixedArray::UnionOfKeys(FixedArray* other) { if (FLAG_enable_slow_asserts) { for (int i = 0; i < result->length(); i++) { Object* current = result->get(i); - ASSERT(current->IsNumber() || current->IsString()); + ASSERT(current->IsNumber() || current->IsName()); } } #endif @@ -6209,14 +6733,14 @@ String::FlatContent String::GetFlatContent() { ASSERT(shape.representation_tag() != kConsStringTag && shape.representation_tag() != kSlicedStringTag); } - if (shape.encoding_tag() == kAsciiStringTag) { - const char* start; + if (shape.encoding_tag() == kOneByteStringTag) { + const uint8_t* start; if (shape.representation_tag() == kSeqStringTag) { - start = SeqAsciiString::cast(string)->GetChars(); + start = SeqOneByteString::cast(string)->GetChars(); } else { start = ExternalAsciiString::cast(string)->GetChars(); } - return FlatContent(Vector<const char>(start + offset, length)); + return FlatContent(Vector<const uint8_t>(start + offset, length)); } else { ASSERT(shape.encoding_tag() == kTwoByteStringTag); const uc16* start; @@ -6244,14 +6768,14 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls, if (length < 0) length = kMaxInt - offset; // Compute the size of the UTF-8 string. Start at the specified offset. 
- Access<StringInputBuffer> buffer( - heap->isolate()->objects_string_input_buffer()); - buffer->Reset(offset, this); + Access<ConsStringIteratorOp> op( + heap->isolate()->objects_string_iterator()); + StringCharacterStream stream(this, op.value(), offset); int character_position = offset; int utf8_bytes = 0; int last = unibrow::Utf16::kNoPreviousCharacter; - while (buffer->has_more() && character_position++ < offset + length) { - uint16_t character = buffer->GetNext(); + while (stream.HasMore() && character_position++ < offset + length) { + uint16_t character = stream.GetNext(); utf8_bytes += unibrow::Utf8::Length(character, last); last = character; } @@ -6263,13 +6787,12 @@ SmartArrayPointer<char> String::ToCString(AllowNullsFlag allow_nulls, char* result = NewArray<char>(utf8_bytes + 1); // Convert the UTF-16 string to a UTF-8 buffer. Start at the specified offset. - buffer->Rewind(); - buffer->Seek(offset); + stream.Reset(this, offset); character_position = offset; int utf8_byte_position = 0; last = unibrow::Utf16::kNoPreviousCharacter; - while (buffer->has_more() && character_position++ < offset + length) { - uint16_t character = buffer->GetNext(); + while (stream.HasMore() && character_position++ < offset + length) { + uint16_t character = stream.GetNext(); if (allow_nulls == DISALLOW_NULLS && character == 0) { character = ' '; } @@ -6295,7 +6818,7 @@ const uc16* String::GetTwoByteData() { const uc16* String::GetTwoByteData(unsigned start) { - ASSERT(!IsAsciiRepresentationUnderneath()); + ASSERT(!IsOneByteRepresentationUnderneath()); switch (StringShape(this).representation_tag()) { case kSeqStringTag: return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start); @@ -6321,15 +6844,15 @@ SmartArrayPointer<uc16> String::ToWideCString(RobustnessFlag robust_flag) { } Heap* heap = GetHeap(); - Access<StringInputBuffer> buffer( - heap->isolate()->objects_string_input_buffer()); - buffer->Reset(this); + Access<ConsStringIteratorOp> op( + heap->isolate()->objects_string_iterator()); + StringCharacterStream stream(this, op.value()); uc16* result = NewArray<uc16>(length() + 1); int i = 0; - while (buffer->has_more()) { - uint16_t character = buffer->GetNext(); + while (stream.HasMore()) { + uint16_t character = stream.GetNext(); result[i++] = character; } result[i] = 0; @@ -6343,252 +6866,6 @@ const uc16* SeqTwoByteString::SeqTwoByteStringGetData(unsigned start) { } -void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - unsigned chars_read = 0; - unsigned offset = *offset_ptr; - while (chars_read < max_chars) { - uint16_t c = *reinterpret_cast<uint16_t*>( - reinterpret_cast<char*>(this) - - kHeapObjectTag + kHeaderSize + offset * kShortSize); - if (c <= kMaxAsciiCharCode) { - // Fast case for ASCII characters. Cursor is an input output argument. 
- if (!unibrow::CharacterStream::EncodeAsciiCharacter(c, - rbb->util_buffer, - rbb->capacity, - rbb->cursor)) { - break; - } - } else { - if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c, - rbb->util_buffer, - rbb->capacity, - rbb->cursor)) { - break; - } - } - offset++; - chars_read++; - } - *offset_ptr = offset; - rbb->remaining += chars_read; -} - - -const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock( - unsigned* remaining, - unsigned* offset_ptr, - unsigned max_chars) { - const unibrow::byte* b = reinterpret_cast<unibrow::byte*>(this) - - kHeapObjectTag + kHeaderSize + *offset_ptr * kCharSize; - *remaining = max_chars; - *offset_ptr += max_chars; - return b; -} - - -// This will iterate unless the block of string data spans two 'halves' of -// a ConsString, in which case it will recurse. Since the block of string -// data to be read has a maximum size this limits the maximum recursion -// depth to something sane. Since C++ does not have tail call recursion -// elimination, the iteration must be explicit. Since this is not an -// -IntoBuffer method it can delegate to one of the efficient -// *AsciiStringReadBlock routines. -const unibrow::byte* ConsString::ConsStringReadBlock(ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - ConsString* current = this; - unsigned offset = *offset_ptr; - int offset_correction = 0; - - while (true) { - String* left = current->first(); - unsigned left_length = (unsigned)left->length(); - if (left_length > offset && - (max_chars <= left_length - offset || - (rbb->capacity <= left_length - offset && - (max_chars = left_length - offset, true)))) { // comma operator! - // Left hand side only - iterate unless we have reached the bottom of - // the cons tree. The assignment on the left of the comma operator is - // in order to make use of the fact that the -IntoBuffer routines can - // produce at most 'capacity' characters. This enables us to postpone - // the point where we switch to the -IntoBuffer routines (below) in order - // to maximize the chances of delegating a big chunk of work to the - // efficient *AsciiStringReadBlock routines. - if (StringShape(left).IsCons()) { - current = ConsString::cast(left); - continue; - } else { - const unibrow::byte* answer = - String::ReadBlock(left, rbb, &offset, max_chars); - *offset_ptr = offset + offset_correction; - return answer; - } - } else if (left_length <= offset) { - // Right hand side only - iterate unless we have reached the bottom of - // the cons tree. - String* right = current->second(); - offset -= left_length; - offset_correction += left_length; - if (StringShape(right).IsCons()) { - current = ConsString::cast(right); - continue; - } else { - const unibrow::byte* answer = - String::ReadBlock(right, rbb, &offset, max_chars); - *offset_ptr = offset + offset_correction; - return answer; - } - } else { - // The block to be read spans two sides of the ConsString, so we call the - // -IntoBuffer version, which will recurse. The -IntoBuffer methods - // are able to assemble data from several part strings because they use - // the util_buffer to store their data and never return direct pointers - // to their storage. We don't try to read more than the buffer capacity - // here or we can get too much recursion. - ASSERT(rbb->remaining == 0); - ASSERT(rbb->cursor == 0); - current->ConsStringReadBlockIntoBuffer( - rbb, - &offset, - max_chars > rbb->capacity ? 
rbb->capacity : max_chars); - *offset_ptr = offset + offset_correction; - return rbb->util_buffer; - } - } -} - - -const unibrow::byte* ExternalAsciiString::ExternalAsciiStringReadBlock( - unsigned* remaining, - unsigned* offset_ptr, - unsigned max_chars) { - // Cast const char* to unibrow::byte* (signedness difference). - const unibrow::byte* b = - reinterpret_cast<const unibrow::byte*>(GetChars()) + *offset_ptr; - *remaining = max_chars; - *offset_ptr += max_chars; - return b; -} - - -void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer( - ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - unsigned chars_read = 0; - unsigned offset = *offset_ptr; - const uint16_t* data = GetChars(); - while (chars_read < max_chars) { - uint16_t c = data[offset]; - if (c <= kMaxAsciiCharCode) { - // Fast case for ASCII characters. Cursor is an input output argument. - if (!unibrow::CharacterStream::EncodeAsciiCharacter(c, - rbb->util_buffer, - rbb->capacity, - rbb->cursor)) - break; - } else { - if (!unibrow::CharacterStream::EncodeNonAsciiCharacter(c, - rbb->util_buffer, - rbb->capacity, - rbb->cursor)) - break; - } - offset++; - chars_read++; - } - *offset_ptr = offset; - rbb->remaining += chars_read; -} - - -void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - unsigned capacity = rbb->capacity - rbb->cursor; - if (max_chars > capacity) max_chars = capacity; - memcpy(rbb->util_buffer + rbb->cursor, - reinterpret_cast<char*>(this) - kHeapObjectTag + kHeaderSize + - *offset_ptr * kCharSize, - max_chars); - rbb->remaining += max_chars; - *offset_ptr += max_chars; - rbb->cursor += max_chars; -} - - -void ExternalAsciiString::ExternalAsciiStringReadBlockIntoBuffer( - ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - unsigned capacity = rbb->capacity - rbb->cursor; - if (max_chars > capacity) max_chars = capacity; - memcpy(rbb->util_buffer + rbb->cursor, GetChars() + *offset_ptr, max_chars); - rbb->remaining += max_chars; - *offset_ptr += max_chars; - rbb->cursor += max_chars; -} - - -// This method determines the type of string involved and then copies -// a whole chunk of characters into a buffer, or returns a pointer to a buffer -// where they can be found. The pointer is not necessarily valid across a GC -// (see AsciiStringReadBlock). 
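The buffer-based ReadBlock helpers deleted here are superseded further down by ConsStringIteratorOp, which walks the cons tree iteratively with an explicit stack of pending right children instead of recursing. A rough stand-alone sketch of that traversal idea (Rope and Flatten are invented names for illustration, not V8 code):

    #include <stack>
    #include <string>

    // Toy cons-string: a leaf holds text, an inner node concatenates two children.
    struct Rope {
      std::string leaf;            // used when left == nullptr
      const Rope* left = nullptr;
      const Rope* right = nullptr;
    };

    // Visit every leaf left-to-right without recursion, keeping the not-yet-visited
    // right branches on an explicit stack -- the same idea ConsStringIteratorOp
    // implements with its fixed-size frames_ array.
    std::string Flatten(const Rope* root) {
      std::string out;
      std::stack<const Rope*> pending;
      const Rope* node = root;
      while (node != nullptr) {
        if (node->left == nullptr) {        // leaf: emit it, then resume a right branch
          out += node->leaf;
          if (pending.empty()) break;
          node = pending.top();
          pending.pop();
        } else {                            // inner node: remember right, go left
          pending.push(node->right);
          node = node->left;
        }
      }
      return out;
    }

The real iterator bounds its stack (a frames_ array of kStackSize entries) and reports overflow through NextLeaf's blew_stack flag rather than growing without limit.
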
-const unibrow::byte* String::ReadBlock(String* input, - ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - ASSERT(*offset_ptr <= static_cast<unsigned>(input->length())); - if (max_chars == 0) { - rbb->remaining = 0; - return NULL; - } - switch (StringShape(input).representation_tag()) { - case kSeqStringTag: - if (input->IsAsciiRepresentation()) { - SeqAsciiString* str = SeqAsciiString::cast(input); - return str->SeqAsciiStringReadBlock(&rbb->remaining, - offset_ptr, - max_chars); - } else { - SeqTwoByteString* str = SeqTwoByteString::cast(input); - str->SeqTwoByteStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - return rbb->util_buffer; - } - case kConsStringTag: - return ConsString::cast(input)->ConsStringReadBlock(rbb, - offset_ptr, - max_chars); - case kExternalStringTag: - if (input->IsAsciiRepresentation()) { - return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock( - &rbb->remaining, - offset_ptr, - max_chars); - } else { - ExternalTwoByteString::cast(input)-> - ExternalTwoByteStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - return rbb->util_buffer; - } - case kSlicedStringTag: - return SlicedString::cast(input)->SlicedStringReadBlock(rbb, - offset_ptr, - max_chars); - default: - break; - } - - UNREACHABLE(); - return 0; -} - - void Relocatable::PostGarbageCollectionProcessing() { Isolate* isolate = Isolate::Current(); Relocatable* current = isolate->relocatable_top(); @@ -6666,168 +6943,145 @@ void FlatStringReader::PostGarbageCollection() { ASSERT(content.IsFlat()); is_ascii_ = content.IsAscii(); if (is_ascii_) { - start_ = content.ToAsciiVector().start(); + start_ = content.ToOneByteVector().start(); } else { start_ = content.ToUC16Vector().start(); } } -void StringInputBuffer::Seek(unsigned pos) { - Reset(pos, input_); -} - - -void SafeStringInputBuffer::Seek(unsigned pos) { - Reset(pos, input_); +String* ConsStringIteratorOp::Operate(String* string, + unsigned* offset_out, + int32_t* type_out, + unsigned* length_out) { + ASSERT(string->IsConsString()); + ConsString* cons_string = ConsString::cast(string); + // Set up search data. + root_ = cons_string; + consumed_ = *offset_out; + // Now search. + return Search(offset_out, type_out, length_out); } -// This method determines the type of string involved and then copies -// a whole chunk of characters into a buffer. It can be used with strings -// that have been glued together to form a ConsString and which must cooperate -// to fill up a buffer. -void String::ReadBlockIntoBuffer(String* input, - ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - ASSERT(*offset_ptr <= (unsigned)input->length()); - if (max_chars == 0) return; - - switch (StringShape(input).representation_tag()) { - case kSeqStringTag: - if (input->IsAsciiRepresentation()) { - SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - return; - } else { - SeqTwoByteString::cast(input)->SeqTwoByteStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - return; +String* ConsStringIteratorOp::Search(unsigned* offset_out, + int32_t* type_out, + unsigned* length_out) { + ConsString* cons_string = root_; + // Reset the stack, pushing the root string. + depth_ = 1; + maximum_depth_ = 1; + frames_[0] = cons_string; + const unsigned consumed = consumed_; + unsigned offset = 0; + while (true) { + // Loop until the string is found which contains the target offset. 
+ String* string = cons_string->first(); + unsigned length = string->length(); + int32_t type; + if (consumed < offset + length) { + // Target offset is in the left branch. + // Keep going if we're still in a ConString. + type = string->map()->instance_type(); + if ((type & kStringRepresentationMask) == kConsStringTag) { + cons_string = ConsString::cast(string); + PushLeft(cons_string); + continue; } - case kConsStringTag: - ConsString::cast(input)->ConsStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - return; - case kExternalStringTag: - if (input->IsAsciiRepresentation()) { - ExternalAsciiString::cast(input)-> - ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars); - } else { - ExternalTwoByteString::cast(input)-> - ExternalTwoByteStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - } - return; - case kSlicedStringTag: - SlicedString::cast(input)->SlicedStringReadBlockIntoBuffer(rbb, - offset_ptr, - max_chars); - return; - default: - break; + // Tell the stack we're done decending. + AdjustMaximumDepth(); + } else { + // Descend right. + // Update progress through the string. + offset += length; + // Keep going if we're still in a ConString. + string = cons_string->second(); + type = string->map()->instance_type(); + if ((type & kStringRepresentationMask) == kConsStringTag) { + cons_string = ConsString::cast(string); + PushRight(cons_string); + // TODO(dcarney) Add back root optimization. + continue; + } + // Need this to be updated for the current string. + length = string->length(); + // Account for the possibility of an empty right leaf. + // This happens only if we have asked for an offset outside the string. + if (length == 0) { + // Reset depth so future operations will return null immediately. + Reset(); + return NULL; + } + // Tell the stack we're done decending. + AdjustMaximumDepth(); + // Pop stack so next iteration is in correct place. + Pop(); + } + ASSERT(length != 0); + // Adjust return values and exit. + consumed_ = offset + length; + *offset_out = consumed - offset; + *type_out = type; + *length_out = length; + return string; } - UNREACHABLE(); - return; + return NULL; } -const unibrow::byte* String::ReadBlock(String* input, - unibrow::byte* util_buffer, - unsigned capacity, - unsigned* remaining, - unsigned* offset_ptr) { - ASSERT(*offset_ptr <= (unsigned)input->length()); - unsigned chars = input->length() - *offset_ptr; - ReadBlockBuffer rbb(util_buffer, 0, capacity, 0); - const unibrow::byte* answer = ReadBlock(input, &rbb, offset_ptr, chars); - ASSERT(rbb.remaining <= static_cast<unsigned>(input->length())); - *remaining = rbb.remaining; - return answer; -} - - -const unibrow::byte* String::ReadBlock(String** raw_input, - unibrow::byte* util_buffer, - unsigned capacity, - unsigned* remaining, - unsigned* offset_ptr) { - Handle<String> input(raw_input); - ASSERT(*offset_ptr <= (unsigned)input->length()); - unsigned chars = input->length() - *offset_ptr; - if (chars > capacity) chars = capacity; - ReadBlockBuffer rbb(util_buffer, 0, capacity, 0); - ReadBlockIntoBuffer(*input, &rbb, offset_ptr, chars); - ASSERT(rbb.remaining <= static_cast<unsigned>(input->length())); - *remaining = rbb.remaining; - return rbb.util_buffer; -} - - -// This will iterate unless the block of string data spans two 'halves' of -// a ConsString, in which case it will recurse. Since the block of string -// data to be read has a maximum size this limits the maximum recursion -// depth to something sane. 
Since C++ does not have tail call recursion -// elimination, the iteration must be explicit. -void ConsString::ConsStringReadBlockIntoBuffer(ReadBlockBuffer* rbb, - unsigned* offset_ptr, - unsigned max_chars) { - ConsString* current = this; - unsigned offset = *offset_ptr; - int offset_correction = 0; - +String* ConsStringIteratorOp::NextLeaf(bool* blew_stack, + int32_t* type_out, + unsigned* length_out) { while (true) { - String* left = current->first(); - unsigned left_length = (unsigned)left->length(); - if (left_length > offset && - max_chars <= left_length - offset) { - // Left hand side only - iterate unless we have reached the bottom of - // the cons tree. - if (StringShape(left).IsCons()) { - current = ConsString::cast(left); - continue; - } else { - String::ReadBlockIntoBuffer(left, rbb, &offset, max_chars); - *offset_ptr = offset + offset_correction; - return; - } - } else if (left_length <= offset) { - // Right hand side only - iterate unless we have reached the bottom of - // the cons tree. - offset -= left_length; - offset_correction += left_length; - String* right = current->second(); - if (StringShape(right).IsCons()) { - current = ConsString::cast(right); - continue; - } else { - String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars); - *offset_ptr = offset + offset_correction; - return; - } - } else { - // The block to be read spans two sides of the ConsString, so we recurse. - // First recurse on the left. - max_chars -= left_length - offset; - String::ReadBlockIntoBuffer(left, rbb, &offset, left_length - offset); - // We may have reached the max or there may not have been enough space - // in the buffer for the characters in the left hand side. - if (offset == left_length) { - // Recurse on the right. - String* right = String::cast(current->second()); - offset -= left_length; - offset_correction += left_length; - String::ReadBlockIntoBuffer(right, rbb, &offset, max_chars); - } - *offset_ptr = offset + offset_correction; - return; + // Tree traversal complete. + if (depth_ == 0) { + *blew_stack = false; + return NULL; + } + // We've lost track of higher nodes. + if (maximum_depth_ - depth_ == kStackSize) { + *blew_stack = true; + return NULL; + } + // Go right. + ConsString* cons_string = frames_[OffsetForDepth(depth_ - 1)]; + String* string = cons_string->second(); + int32_t type = string->map()->instance_type(); + if ((type & kStringRepresentationMask) != kConsStringTag) { + // Pop stack so next iteration is in correct place. + Pop(); + unsigned length = static_cast<unsigned>(string->length()); + // Could be a flattened ConsString. + if (length == 0) continue; + *length_out = length; + *type_out = type; + consumed_ += length; + return string; + } + cons_string = ConsString::cast(string); + // TODO(dcarney) Add back root optimization. + PushRight(cons_string); + // Need to traverse all the way left. + while (true) { + // Continue left. 
+ string = cons_string->first(); + type = string->map()->instance_type(); + if ((type & kStringRepresentationMask) != kConsStringTag) { + AdjustMaximumDepth(); + unsigned length = static_cast<unsigned>(string->length()); + ASSERT(length != 0); + *length_out = length; + *type_out = type; + consumed_ += length; + return string; + } + cons_string = ConsString::cast(string); + PushLeft(cons_string); } } + UNREACHABLE(); + return NULL; } @@ -6867,26 +7121,6 @@ uint16_t SlicedString::SlicedStringGet(int index) { } -const unibrow::byte* SlicedString::SlicedStringReadBlock( - ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) { - unsigned offset = this->offset(); - *offset_ptr += offset; - const unibrow::byte* answer = String::ReadBlock(String::cast(parent()), - buffer, offset_ptr, chars); - *offset_ptr -= offset; - return answer; -} - - -void SlicedString::SlicedStringReadBlockIntoBuffer( - ReadBlockBuffer* buffer, unsigned* offset_ptr, unsigned chars) { - unsigned offset = this->offset(); - *offset_ptr += offset; - String::ReadBlockIntoBuffer(String::cast(parent()), - buffer, offset_ptr, chars); - *offset_ptr -= offset; -} - template <typename sinkchar> void String::WriteToFlat(String* src, sinkchar* sink, @@ -6898,7 +7132,7 @@ void String::WriteToFlat(String* src, while (true) { ASSERT(0 <= from && from <= to && to <= source->length()); switch (StringShape(source).full_representation_tag()) { - case kAsciiStringTag | kExternalStringTag: { + case kOneByteStringTag | kExternalStringTag: { CopyChars(sink, ExternalAsciiString::cast(source)->GetChars() + from, to - from); @@ -6912,9 +7146,9 @@ void String::WriteToFlat(String* src, to - from); return; } - case kAsciiStringTag | kSeqStringTag: { + case kOneByteStringTag | kSeqStringTag: { CopyChars(sink, - SeqAsciiString::cast(source)->GetChars() + from, + SeqOneByteString::cast(source)->GetChars() + from, to - from); return; } @@ -6924,7 +7158,7 @@ void String::WriteToFlat(String* src, to - from); return; } - case kAsciiStringTag | kConsStringTag: + case kOneByteStringTag | kConsStringTag: case kTwoByteStringTag | kConsStringTag: { ConsString* cons_string = ConsString::cast(source); String* first = cons_string->first(); @@ -6949,9 +7183,9 @@ void String::WriteToFlat(String* src, // common case of sequential ascii right child. if (to - boundary == 1) { sink[boundary - from] = static_cast<sinkchar>(second->Get(0)); - } else if (second->IsSeqAsciiString()) { + } else if (second->IsSeqOneByteString()) { CopyChars(sink + boundary - from, - SeqAsciiString::cast(second)->GetChars(), + SeqOneByteString::cast(second)->GetChars(), to - boundary); } else { WriteToFlat(second, @@ -6965,7 +7199,7 @@ void String::WriteToFlat(String* src, } break; } - case kAsciiStringTag | kSlicedStringTag: + case kOneByteStringTag | kSlicedStringTag: case kTwoByteStringTag | kSlicedStringTag: { SlicedString* slice = SlicedString::cast(source); unsigned offset = slice->offset(); @@ -6977,46 +7211,28 @@ void String::WriteToFlat(String* src, } -template <typename IteratorA, typename IteratorB> -static inline bool CompareStringContents(IteratorA* ia, IteratorB* ib) { - // General slow case check. We know that the ia and ib iterators - // have the same length. 
- while (ia->has_more()) { - uint32_t ca = ia->GetNext(); - uint32_t cb = ib->GetNext(); - ASSERT(ca <= unibrow::Utf16::kMaxNonSurrogateCharCode); - ASSERT(cb <= unibrow::Utf16::kMaxNonSurrogateCharCode); - if (ca != cb) - return false; - } - return true; -} - - // Compares the contents of two strings by reading and comparing // int-sized blocks of characters. template <typename Char> -static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) { - int length = a.length(); - ASSERT_EQ(length, b.length()); - const Char* pa = a.start(); - const Char* pb = b.start(); +static inline bool CompareRawStringContents(const Char* const a, + const Char* const b, + int length) { int i = 0; #ifndef V8_HOST_CAN_READ_UNALIGNED // If this architecture isn't comfortable reading unaligned ints // then we have to check that the strings are aligned before // comparing them blockwise. const int kAlignmentMask = sizeof(uint32_t) - 1; // NOLINT - uint32_t pa_addr = reinterpret_cast<uint32_t>(pa); - uint32_t pb_addr = reinterpret_cast<uint32_t>(pb); + uint32_t pa_addr = reinterpret_cast<uint32_t>(a); + uint32_t pb_addr = reinterpret_cast<uint32_t>(b); if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) { #endif const int kStepSize = sizeof(int) / sizeof(Char); // NOLINT int endpoint = length - kStepSize; // Compare blocks until we reach near the end of the string. for (; i <= endpoint; i += kStepSize) { - uint32_t wa = *reinterpret_cast<const uint32_t*>(pa + i); - uint32_t wb = *reinterpret_cast<const uint32_t*>(pb + i); + uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i); + uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i); if (wa != wb) { return false; } @@ -7034,25 +7250,145 @@ static inline bool CompareRawStringContents(Vector<Char> a, Vector<Char> b) { } -template <typename IteratorA> -static inline bool CompareStringContentsPartial(Isolate* isolate, - IteratorA* ia, - String* b) { - String::FlatContent content = b->GetFlatContent(); - if (content.IsFlat()) { - if (content.IsAscii()) { - VectorIterator<char> ib(content.ToAsciiVector()); - return CompareStringContents(ia, &ib); - } else { - VectorIterator<uc16> ib(content.ToUC16Vector()); - return CompareStringContents(ia, &ib); +template<typename Chars1, typename Chars2> +class RawStringComparator : public AllStatic { + public: + static inline bool compare(const Chars1* a, const Chars2* b, int len) { + ASSERT(sizeof(Chars1) != sizeof(Chars2)); + for (int i = 0; i < len; i++) { + if (a[i] != b[i]) { + return false; + } } - } else { - isolate->objects_string_compare_buffer_b()->Reset(0, b); - return CompareStringContents(ia, - isolate->objects_string_compare_buffer_b()); + return true; } -} +}; + + +template<> +class RawStringComparator<uint16_t, uint16_t> { + public: + static inline bool compare(const uint16_t* a, const uint16_t* b, int len) { + return CompareRawStringContents(a, b, len); + } +}; + + +template<> +class RawStringComparator<uint8_t, uint8_t> { + public: + static inline bool compare(const uint8_t* a, const uint8_t* b, int len) { + return CompareRawStringContents(a, b, len); + } +}; + + +class StringComparator { + class State { + public: + explicit inline State(ConsStringIteratorOp* op) + : op_(op), is_one_byte_(true), length_(0), buffer8_(NULL) {} + + inline void Init(String* string, unsigned len) { + op_->Reset(); + int32_t type = string->map()->instance_type(); + String::Visit(string, 0, *this, *op_, type, len); + } + + inline void VisitOneByteString(const uint8_t* chars, unsigned length) { + 
is_one_byte_ = true; + buffer8_ = chars; + length_ = length; + } + + inline void VisitTwoByteString(const uint16_t* chars, unsigned length) { + is_one_byte_ = false; + buffer16_ = chars; + length_ = length; + } + + void Advance(unsigned consumed) { + ASSERT(consumed <= length_); + // Still in buffer. + if (length_ != consumed) { + if (is_one_byte_) { + buffer8_ += consumed; + } else { + buffer16_ += consumed; + } + length_ -= consumed; + return; + } + // Advance state. + ASSERT(op_->HasMore()); + int32_t type = 0; + unsigned length = 0; + String* next = op_->ContinueOperation(&type, &length); + ASSERT(next != NULL); + ConsStringNullOp null_op; + String::Visit(next, 0, *this, null_op, type, length); + } + + ConsStringIteratorOp* const op_; + bool is_one_byte_; + unsigned length_; + union { + const uint8_t* buffer8_; + const uint16_t* buffer16_; + }; + DISALLOW_IMPLICIT_CONSTRUCTORS(State); + }; + + public: + inline StringComparator(ConsStringIteratorOp* op_1, + ConsStringIteratorOp* op_2) + : state_1_(op_1), + state_2_(op_2) { + } + + template<typename Chars1, typename Chars2> + static inline bool Equals(State* state_1, State* state_2, unsigned to_check) { + const Chars1* a = reinterpret_cast<const Chars1*>(state_1->buffer8_); + const Chars2* b = reinterpret_cast<const Chars2*>(state_2->buffer8_); + return RawStringComparator<Chars1, Chars2>::compare(a, b, to_check); + } + + bool Equals(unsigned length, String* string_1, String* string_2) { + ASSERT(length != 0); + state_1_.Init(string_1, length); + state_2_.Init(string_2, length); + while (true) { + unsigned to_check = Min(state_1_.length_, state_2_.length_); + ASSERT(to_check > 0 && to_check <= length); + bool is_equal; + if (state_1_.is_one_byte_) { + if (state_2_.is_one_byte_) { + is_equal = Equals<uint8_t, uint8_t>(&state_1_, &state_2_, to_check); + } else { + is_equal = Equals<uint8_t, uint16_t>(&state_1_, &state_2_, to_check); + } + } else { + if (state_2_.is_one_byte_) { + is_equal = Equals<uint16_t, uint8_t>(&state_1_, &state_2_, to_check); + } else { + is_equal = Equals<uint16_t, uint16_t>(&state_1_, &state_2_, to_check); + } + } + // Looping done. + if (!is_equal) return false; + length -= to_check; + // Exit condition. Strings are equal. + if (length == 0) return true; + state_1_.Advance(to_check); + state_2_.Advance(to_check); + } + } + + private: + State state_1_; + State state_2_; + DISALLOW_IMPLICIT_CONSTRUCTORS(StringComparator); +}; bool String::SlowEquals(String* other) { @@ -7088,63 +7424,24 @@ bool String::SlowEquals(String* other) { String* lhs = this->TryFlattenGetString(); String* rhs = other->TryFlattenGetString(); + // TODO(dcarney): Compare all types of flat strings with a Visitor. 
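With this change, String::SlowEquals delegates the non-flat cases to StringComparator, which repeatedly compares the overlap of whatever flat chunks the two iterators currently expose and then advances both sides by the amount checked. A simplified sketch of that loop over plain byte segments (SegmentedEquals and Segments are illustrative names, not V8's API):

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <vector>

    // Each string is a sequence of flat segments, like the leaves of a cons tree.
    using Segments = std::vector<std::string>;

    // Compare two segmented strings by checking the overlap of the current
    // segments and advancing both cursors -- the loop structure of
    // StringComparator::Equals, minus the one-byte/two-byte dispatch.
    bool SegmentedEquals(const Segments& a, const Segments& b) {
      std::size_t ia = 0, ib = 0;   // segment indexes
      std::size_t pa = 0, pb = 0;   // offsets within the current segments
      while (ia < a.size() && ib < b.size()) {
        std::size_t to_check = std::min(a[ia].size() - pa, b[ib].size() - pb);
        if (a[ia].compare(pa, to_check, b[ib], pb, to_check) != 0) return false;
        pa += to_check;
        pb += to_check;
        if (pa == a[ia].size()) { ++ia; pa = 0; }   // segment exhausted, move on
        if (pb == b[ib].size()) { ++ib; pb = 0; }
      }
      return ia == a.size() && ib == b.size();      // equal only if both exhausted
    }

The real comparator additionally dispatches on one-byte versus two-byte chunks through RawStringComparator, which this sketch omits.
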
if (StringShape(lhs).IsSequentialAscii() && StringShape(rhs).IsSequentialAscii()) { - const char* str1 = SeqAsciiString::cast(lhs)->GetChars(); - const char* str2 = SeqAsciiString::cast(rhs)->GetChars(); - return CompareRawStringContents(Vector<const char>(str1, len), - Vector<const char>(str2, len)); + const uint8_t* str1 = SeqOneByteString::cast(lhs)->GetChars(); + const uint8_t* str2 = SeqOneByteString::cast(rhs)->GetChars(); + return CompareRawStringContents(str1, str2, len); } Isolate* isolate = GetIsolate(); - String::FlatContent lhs_content = lhs->GetFlatContent(); - String::FlatContent rhs_content = rhs->GetFlatContent(); - if (lhs_content.IsFlat()) { - if (lhs_content.IsAscii()) { - Vector<const char> vec1 = lhs_content.ToAsciiVector(); - if (rhs_content.IsFlat()) { - if (rhs_content.IsAscii()) { - Vector<const char> vec2 = rhs_content.ToAsciiVector(); - return CompareRawStringContents(vec1, vec2); - } else { - VectorIterator<char> buf1(vec1); - VectorIterator<uc16> ib(rhs_content.ToUC16Vector()); - return CompareStringContents(&buf1, &ib); - } - } else { - VectorIterator<char> buf1(vec1); - isolate->objects_string_compare_buffer_b()->Reset(0, rhs); - return CompareStringContents(&buf1, - isolate->objects_string_compare_buffer_b()); - } - } else { - Vector<const uc16> vec1 = lhs_content.ToUC16Vector(); - if (rhs_content.IsFlat()) { - if (rhs_content.IsAscii()) { - VectorIterator<uc16> buf1(vec1); - VectorIterator<char> ib(rhs_content.ToAsciiVector()); - return CompareStringContents(&buf1, &ib); - } else { - Vector<const uc16> vec2(rhs_content.ToUC16Vector()); - return CompareRawStringContents(vec1, vec2); - } - } else { - VectorIterator<uc16> buf1(vec1); - isolate->objects_string_compare_buffer_b()->Reset(0, rhs); - return CompareStringContents(&buf1, - isolate->objects_string_compare_buffer_b()); - } - } - } else { - isolate->objects_string_compare_buffer_a()->Reset(0, lhs); - return CompareStringContentsPartial(isolate, - isolate->objects_string_compare_buffer_a(), rhs); - } + StringComparator comparator(isolate->objects_string_compare_iterator_a(), + isolate->objects_string_compare_iterator_b()); + + return comparator.Equals(static_cast<unsigned>(len), lhs, rhs); } bool String::MarkAsUndetectable() { - if (StringShape(this).IsSymbol()) return false; + if (StringShape(this).IsInternalized()) return false; Map* map = this->map(); Heap* heap = GetHeap(); @@ -7160,15 +7457,21 @@ bool String::MarkAsUndetectable() { } -bool String::IsEqualTo(Vector<const char> str) { - Isolate* isolate = GetIsolate(); +bool String::IsUtf8EqualTo(Vector<const char> str) { int slen = length(); - Access<UnicodeCache::Utf8Decoder> - decoder(isolate->unicode_cache()->utf8_decoder()); - decoder->Reset(str.start(), str.length()); + // Can't check exact length equality, but we can check bounds. 
+ int str_len = str.length(); + if (str_len < slen || + str_len > slen*static_cast<int>(unibrow::Utf8::kMaxEncodedSize)) { + return false; + } int i; - for (i = 0; i < slen && decoder->has_more(); i++) { - uint32_t r = decoder->GetNext(); + unsigned remaining_in_str = static_cast<unsigned>(str_len); + const uint8_t* utf8_data = reinterpret_cast<const uint8_t*>(str.start()); + for (i = 0; i < slen && remaining_in_str > 0; i++) { + unsigned cursor = 0; + uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor); + ASSERT(cursor > 0 && cursor <= remaining_in_str); if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) { if (i > slen - 1) return false; if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false; @@ -7176,17 +7479,19 @@ bool String::IsEqualTo(Vector<const char> str) { } else { if (Get(i) != r) return false; } + utf8_data += cursor; + remaining_in_str -= cursor; } - return i == slen && !decoder->has_more(); + return i == slen && remaining_in_str == 0; } -bool String::IsAsciiEqualTo(Vector<const char> str) { +bool String::IsOneByteEqualTo(Vector<const uint8_t> str) { int slen = length(); if (str.length() != slen) return false; FlatContent content = GetFlatContent(); if (content.IsAscii()) { - return CompareChars(content.ToAsciiVector().start(), + return CompareChars(content.ToOneByteVector().start(), str.start(), slen) == 0; } for (int i = 0; i < slen; i++) { @@ -7210,28 +7515,62 @@ bool String::IsTwoByteEqualTo(Vector<const uc16> str) { } +class IteratingStringHasher: public StringHasher { + public: + static inline uint32_t Hash(String* string, uint32_t seed) { + const unsigned len = static_cast<unsigned>(string->length()); + IteratingStringHasher hasher(len, seed); + if (hasher.has_trivial_hash()) { + return hasher.GetHashField(); + } + int32_t type = string->map()->instance_type(); + ConsStringNullOp null_op; + String::Visit(string, 0, hasher, null_op, type, len); + // Flat strings terminate immediately. + if (hasher.consumed_ == len) { + ASSERT(!string->IsConsString()); + return hasher.GetHashField(); + } + ASSERT(string->IsConsString()); + // This is a ConsString, iterate across it. + ConsStringIteratorOp op; + unsigned offset = 0; + unsigned leaf_length = len; + string = op.Operate(string, &offset, &type, &leaf_length); + while (true) { + ASSERT(hasher.consumed_ < len); + String::Visit(string, 0, hasher, null_op, type, leaf_length); + if (hasher.consumed_ == len) break; + string = op.ContinueOperation(&type, &leaf_length); + // This should be taken care of by the length check. + ASSERT(string != NULL); + } + return hasher.GetHashField(); + } + inline void VisitOneByteString(const uint8_t* chars, unsigned length) { + AddCharacters(chars, static_cast<int>(length)); + consumed_ += length; + } + inline void VisitTwoByteString(const uint16_t* chars, unsigned length) { + AddCharacters(chars, static_cast<int>(length)); + consumed_ += length; + } + + private: + inline IteratingStringHasher(int len, uint32_t seed) + : StringHasher(len, seed), + consumed_(0) {} + unsigned consumed_; + DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher); +}; + + uint32_t String::ComputeAndSetHash() { // Should only be called if hash code has not yet been computed. ASSERT(!HasHashCode()); - const int len = length(); - - // Compute the hash code. 
- uint32_t field = 0; - if (StringShape(this).IsSequentialAscii()) { - field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(), - len, - GetHeap()->HashSeed()); - } else if (StringShape(this).IsSequentialTwoByte()) { - field = HashSequentialString(SeqTwoByteString::cast(this)->GetChars(), - len, - GetHeap()->HashSeed()); - } else { - StringInputBuffer buffer(this); - field = ComputeHashField(&buffer, len, GetHeap()->HashSeed()); - } - // Store the hash code in the object. + uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed()); set_hash_field(field); // Check the hash code is there. @@ -7242,11 +7581,12 @@ uint32_t String::ComputeAndSetHash() { } -bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer, - uint32_t* index, - int length) { +bool String::ComputeArrayIndex(uint32_t* index) { + int length = this->length(); if (length == 0 || length > kMaxArrayIndexSize) return false; - uc32 ch = buffer->GetNext(); + ConsStringIteratorOp op; + StringCharacterStream stream(this, &op); + uint16_t ch = stream.GetNext(); // If the string begins with a '0' character, it must only consist // of it to be a legal array index. @@ -7259,8 +7599,8 @@ bool String::ComputeArrayIndex(unibrow::CharacterStream* buffer, int d = ch - '0'; if (d < 0 || d > 9) return false; uint32_t result = d; - while (buffer->has_more()) { - d = buffer->GetNext() - '0'; + while (stream.HasMore()) { + d = stream.GetNext() - '0'; if (d < 0 || d > 9) return false; // Check that the new result is below the 32 bit limit. if (result > 429496729U - ((d > 5) ? 1 : 0)) return false; @@ -7281,12 +7621,106 @@ bool String::SlowAsArrayIndex(uint32_t* index) { *index = (kArrayIndexHashMask & field) >> kHashShift; return true; } else { - StringInputBuffer buffer(this); - return ComputeArrayIndex(&buffer, index, length()); + return ComputeArrayIndex(index); } } +String* SeqString::Truncate(int new_length) { + Heap* heap = GetHeap(); + if (new_length <= 0) return heap->empty_string(); + + int string_size, allocated_string_size; + int old_length = length(); + if (old_length <= new_length) return this; + + if (IsSeqOneByteString()) { + allocated_string_size = SeqOneByteString::SizeFor(old_length); + string_size = SeqOneByteString::SizeFor(new_length); + } else { + allocated_string_size = SeqTwoByteString::SizeFor(old_length); + string_size = SeqTwoByteString::SizeFor(new_length); + } + + int delta = allocated_string_size - string_size; + set_length(new_length); + + // String sizes are pointer size aligned, so that we can use filler objects + // that are a multiple of pointer size. + Address end_of_string = address() + string_size; + heap->CreateFillerObjectAt(end_of_string, delta); + if (Marking::IsBlack(Marking::MarkBitFrom(this))) { + MemoryChunk::IncrementLiveBytesFromMutator(address(), -delta); + } + return this; +} + + +AllocationSiteInfo* AllocationSiteInfo::FindForJSObject(JSObject* object) { + // Currently, AllocationSiteInfo objects are only allocated immediately + // after JSArrays in NewSpace, and detecting whether a JSArray has one + // involves carefully checking the object immediately after the JSArray + // (if there is one) to see if it's an AllocationSiteInfo. + if (FLAG_track_allocation_sites && object->GetHeap()->InNewSpace(object)) { + Address ptr_end = (reinterpret_cast<Address>(object) - kHeapObjectTag) + + object->Size(); + if ((ptr_end + AllocationSiteInfo::kSize) <= + object->GetHeap()->NewSpaceTop()) { + // There is room in newspace for allocation info. Do we have some? 
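
// ComputeArrayIndex above now walks the string's own characters through a
// StringCharacterStream rather than a CharacterStream buffer; the parsing
// rules are the usual array-index rules. A standalone sketch of those rules,
// using a plain std::string and a 64-bit accumulator as the overflow guard:
#include <cstdint>
#include <string>

// Returns true and stores the value when `s` is a canonical array index:
// decimal digits only, no leading zero (except "0" itself), value <= 2^32 - 2.
bool ParseArrayIndex(const std::string& s, uint32_t* index) {
  if (s.empty() || s.size() > 10) return false;     // 2^32 - 2 has 10 digits
  if (s[0] == '0' && s.size() > 1) return false;    // no leading zeros
  uint64_t value = 0;
  for (char c : s) {
    if (c < '0' || c > '9') return false;
    value = value * 10 + static_cast<uint64_t>(c - '0');
  }
  if (value > 4294967294ull) return false;          // largest valid index
  *index = static_cast<uint32_t>(value);
  return true;
}
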
+ Map** possible_allocation_site_info_map = + reinterpret_cast<Map**>(ptr_end); + if (*possible_allocation_site_info_map == + object->GetHeap()->allocation_site_info_map()) { + AllocationSiteInfo* info = AllocationSiteInfo::cast( + reinterpret_cast<Object*>(ptr_end + 1)); + return info; + } + } + } + return NULL; +} + + +bool AllocationSiteInfo::GetElementsKindPayload(ElementsKind* kind) { + ASSERT(kind != NULL); + if (payload()->IsJSGlobalPropertyCell()) { + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(payload()); + Object* cell_contents = cell->value(); + if (cell_contents->IsSmi()) { + *kind = static_cast<ElementsKind>( + Smi::cast(cell_contents)->value()); + return true; + } + } + return false; +} + + +// Heuristic: We only need to create allocation site info if the boilerplate +// elements kind is the initial elements kind. +AllocationSiteMode AllocationSiteInfo::GetMode( + ElementsKind boilerplate_elements_kind) { + if (FLAG_track_allocation_sites && + IsFastSmiElementsKind(boilerplate_elements_kind)) { + return TRACK_ALLOCATION_SITE; + } + + return DONT_TRACK_ALLOCATION_SITE; +} + + +AllocationSiteMode AllocationSiteInfo::GetMode(ElementsKind from, + ElementsKind to) { + if (FLAG_track_allocation_sites && + IsFastSmiElementsKind(from) && + (IsFastObjectElementsKind(to) || IsFastDoubleElementsKind(to))) { + return TRACK_ALLOCATION_SITE; + } + + return DONT_TRACK_ALLOCATION_SITE; +} + + uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) { // For array indexes mix the length into the hash as an array index could // be zero. @@ -7305,57 +7739,64 @@ uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) { } -void StringHasher::AddSurrogatePair(uc32 c) { - uint16_t lead = unibrow::Utf16::LeadSurrogate(c); - AddCharacter(lead); - uint16_t trail = unibrow::Utf16::TrailSurrogate(c); - AddCharacter(trail); -} - - -void StringHasher::AddSurrogatePairNoIndex(uc32 c) { - uint16_t lead = unibrow::Utf16::LeadSurrogate(c); - AddCharacterNoIndex(lead); - uint16_t trail = unibrow::Utf16::TrailSurrogate(c); - AddCharacterNoIndex(trail); -} - - uint32_t StringHasher::GetHashField() { if (length_ <= String::kMaxHashCalcLength) { - if (is_array_index()) { - return MakeArrayIndexHash(array_index(), length_); + if (is_array_index_) { + return MakeArrayIndexHash(array_index_, length_); } - return (GetHash() << String::kHashShift) | String::kIsNotArrayIndexMask; + return (GetHashCore(raw_running_hash_) << String::kHashShift) | + String::kIsNotArrayIndexMask; } else { return (length_ << String::kHashShift) | String::kIsNotArrayIndexMask; } } -uint32_t String::ComputeHashField(unibrow::CharacterStream* buffer, - int length, - uint32_t seed) { - StringHasher hasher(length, seed); - - // Very long strings have a trivial hash that doesn't inspect the - // string contents. - if (hasher.has_trivial_hash()) { - return hasher.GetHashField(); - } - - // Do the iterative array index computation as long as there is a - // chance this is an array index. - while (buffer->has_more() && hasher.is_array_index()) { - hasher.AddCharacter(buffer->GetNext()); - } - - // Process the remaining characters without updating the array - // index. 
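
// The two AllocationSiteInfo::GetMode overloads above encode a small
// heuristic: a site is only worth tracking while the boilerplate still has the
// initial (fast SMI-only) elements kind, or when a transition leaves that kind
// for object or double elements. A standalone restatement with plain enums;
// the names approximate V8's but are not its declarations.
enum SketchElementsKind { FAST_SMI, FAST_OBJECT, FAST_DOUBLE, DICTIONARY };
enum SketchSiteMode { DONT_TRACK_SITE, TRACK_SITE };

// Track a freshly created boilerplate only while it is still SMI-only.
SketchSiteMode ModeForBoilerplate(bool track_sites, SketchElementsKind kind) {
  return (track_sites && kind == FAST_SMI) ? TRACK_SITE : DONT_TRACK_SITE;
}

// Track a transition only when it leaves SMI-only elements for object or
// double elements; other transitions carry no feedback for later allocations.
SketchSiteMode ModeForTransition(bool track_sites,
                                 SketchElementsKind from,
                                 SketchElementsKind to) {
  bool interesting =
      from == FAST_SMI && (to == FAST_OBJECT || to == FAST_DOUBLE);
  return (track_sites && interesting) ? TRACK_SITE : DONT_TRACK_SITE;
}
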
- while (buffer->has_more()) { - hasher.AddCharacterNoIndex(buffer->GetNext()); +uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars, + uint32_t seed, + int* utf16_length_out) { + int vector_length = chars.length(); + // Handle some edge cases + if (vector_length <= 1) { + ASSERT(vector_length == 0 || + static_cast<uint8_t>(chars.start()[0]) <= + unibrow::Utf8::kMaxOneByteChar); + *utf16_length_out = vector_length; + return HashSequentialString(chars.start(), vector_length, seed); + } + // Start with a fake length which won't affect computation. + // It will be updated later. + StringHasher hasher(String::kMaxArrayIndexSize, seed); + unsigned remaining = static_cast<unsigned>(vector_length); + const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start()); + int utf16_length = 0; + bool is_index = true; + ASSERT(hasher.is_array_index_); + while (remaining > 0) { + unsigned consumed = 0; + uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed); + ASSERT(consumed > 0 && consumed <= remaining); + stream += consumed; + remaining -= consumed; + bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode; + utf16_length += is_two_characters ? 2 : 1; + // No need to keep hashing. But we do need to calculate utf16_length. + if (utf16_length > String::kMaxHashCalcLength) continue; + if (is_two_characters) { + uint16_t c1 = unibrow::Utf16::LeadSurrogate(c); + uint16_t c2 = unibrow::Utf16::TrailSurrogate(c); + hasher.AddCharacter(c1); + hasher.AddCharacter(c2); + if (is_index) is_index = hasher.UpdateIndex(c1); + if (is_index) is_index = hasher.UpdateIndex(c2); + } else { + hasher.AddCharacter(c); + if (is_index) is_index = hasher.UpdateIndex(c); + } } - + *utf16_length_out = static_cast<int>(utf16_length); + // Must set length here so that hash computation is correct. 
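
// ComputeUtf8Hash has to report the UTF-16 length of its input as a side
// effect: hashing stops early for very long strings, but the length is still
// needed. For well-formed UTF-8 that length can be derived from byte patterns
// alone, since every lead byte contributes one UTF-16 code unit and every
// four-byte sequence (a code point outside the BMP) contributes a second one
// for the trail surrogate. A minimal sketch, assuming valid input:
#include <cstddef>
#include <cstdint>

// UTF-16 code units needed to represent a well-formed UTF-8 buffer.
size_t Utf16LengthOfUtf8(const uint8_t* data, size_t length) {
  size_t units = 0;
  for (size_t i = 0; i < length; i++) {
    uint8_t b = data[i];
    if ((b & 0xC0) != 0x80) units++;   // lead byte (or ASCII): one unit
    if (b >= 0xF0) units++;            // 4-byte sequence: surrogate pair
  }
  return units;
}
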
+ hasher.length_ = utf16_length; return hasher.GetHashField(); } @@ -7399,11 +7840,12 @@ static void TrimDescriptorArray(Heap* heap, Map* map, DescriptorArray* descriptors, int number_of_own_descriptors) { - int number_of_descriptors = descriptors->number_of_descriptors(); + int number_of_descriptors = descriptors->number_of_descriptors_storage(); int to_trim = number_of_descriptors - number_of_own_descriptors; - if (to_trim <= 0) return; + if (to_trim == 0) return; - RightTrimFixedArray<FROM_GC>(heap, descriptors, to_trim); + RightTrimFixedArray<FROM_GC>( + heap, descriptors, to_trim * DescriptorArray::kDescriptorSize); descriptors->SetNumberOfDescriptors(number_of_own_descriptors); if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors); @@ -7443,11 +7885,10 @@ void Map::ClearNonLiveTransitions(Heap* heap) { if (ClearBackPointer(heap, target)) { if (target->instance_descriptors() == descriptors) { descriptors_owner_died = true; - descriptors_owner_died = true; } } else { if (i != transition_index) { - String* key = t->GetKey(i); + Name* key = t->GetKey(i); t->SetKey(transition_index, key); Object** key_slot = t->GetKeySlot(transition_index); collector->RecordSlot(key_slot, key_slot, key); @@ -7519,6 +7960,7 @@ bool Map::EquivalentToForNormalization(Map* other, instance_type() == other->instance_type() && bit_field() == other->bit_field() && bit_field2() == other->bit_field2() && + is_observed() == other->is_observed() && function_with_prototype() == other->function_with_prototype(); } @@ -7536,23 +7978,52 @@ void JSFunction::MarkForLazyRecompilation() { ASSERT(is_compiled() && !IsOptimized()); ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); - Builtins* builtins = GetIsolate()->builtins(); - ReplaceCode(builtins->builtin(Builtins::kLazyRecompile)); + set_code_no_write_barrier( + GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile)); + // No write barrier required, since the builtin is part of the root set. } + void JSFunction::MarkForParallelRecompilation() { ASSERT(is_compiled() && !IsOptimized()); ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); - Builtins* builtins = GetIsolate()->builtins(); - ReplaceCode(builtins->builtin(Builtins::kParallelRecompile)); + ASSERT(FLAG_parallel_recompilation); + if (FLAG_trace_parallel_recompilation) { + PrintF(" ** Marking "); + PrintName(); + PrintF(" for parallel recompilation.\n"); + } + set_code_no_write_barrier( + GetIsolate()->builtins()->builtin(Builtins::kParallelRecompile)); + // No write barrier required, since the builtin is part of the root set. +} + - // Unlike MarkForLazyRecompilation, after queuing a function for - // recompilation on the compiler thread, we actually tail-call into - // the full code. We reset the profiler ticks here so that the - // function doesn't bother the runtime profiler too much. - shared()->code()->set_profiler_ticks(0); +void JSFunction::MarkForInstallingRecompiledCode() { + ASSERT(is_compiled() && !IsOptimized()); + ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); + ASSERT(FLAG_parallel_recompilation); + set_code_no_write_barrier( + GetIsolate()->builtins()->builtin(Builtins::kInstallRecompiledCode)); + // No write barrier required, since the builtin is part of the root set. 
} + +void JSFunction::MarkInRecompileQueue() { + ASSERT(is_compiled() && !IsOptimized()); + ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); + ASSERT(FLAG_parallel_recompilation); + if (FLAG_trace_parallel_recompilation) { + PrintF(" ** Queueing "); + PrintName(); + PrintF(" for parallel recompilation.\n"); + } + set_code_no_write_barrier( + GetIsolate()->builtins()->builtin(Builtins::kInRecompileQueue)); + // No write barrier required, since the builtin is part of the root set. +} + + static bool CompileLazyHelper(CompilationInfo* info, ClearExceptionFlag flag) { // Compile the source information to a code object. @@ -7575,11 +8046,6 @@ bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared, } -void SharedFunctionInfo::ClearOptimizedCodeMap() { - set_optimized_code_map(Smi::FromInt(0)); -} - - void SharedFunctionInfo::AddToOptimizedCodeMap( Handle<SharedFunctionInfo> shared, Handle<Context> native_context, @@ -7843,12 +8309,13 @@ Context* JSFunction::NativeContextFromLiterals(FixedArray* literals) { MaybeObject* Oddball::Initialize(const char* to_string, Object* to_number, byte kind) { - String* symbol; - { MaybeObject* maybe_symbol = - Isolate::Current()->heap()->LookupAsciiSymbol(to_string); - if (!maybe_symbol->To(&symbol)) return maybe_symbol; + String* internalized_to_string; + { MaybeObject* maybe_string = + Isolate::Current()->heap()->InternalizeUtf8String( + CStrVector(to_string)); + if (!maybe_string->To(&internalized_to_string)) return maybe_string; } - set_to_string(symbol); + set_to_string(internalized_to_string); set_to_number(to_number); set_kind(kind); return this; @@ -7904,13 +8371,14 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) { return false; } - Heap* heap = GetHeap(); + Isolate* isolate = GetIsolate(); + Heap* heap = isolate->heap(); // Traverse the proposed prototype chain looking for properties of the // same names as are set by the inline constructor. for (Object* obj = prototype; obj != heap->null_value(); - obj = obj->GetPrototype()) { + obj = obj->GetPrototype(isolate)) { JSReceiver* receiver = JSReceiver::cast(obj); for (int i = 0; i < this_property_assignments_count(); i++) { LookupResult result(heap->isolate()); @@ -8069,7 +8537,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) { // old code, we have to replace it. We should try to avoid this // altogether because it flushes valuable type feedback by // effectively resetting all IC state. - set_code(recompiled); + ReplaceCode(recompiled); } ASSERT(has_deoptimization_support()); } @@ -8149,7 +8617,7 @@ void SharedFunctionInfo::DetachInitialMap() { // constructor is called. The countdown will continue and (possibly after // several more GCs) CompleteInobjectSlackTracking will eventually be called. 
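
// TrimDescriptorArray, a little further up in this hunk, now trims in units of
// whole descriptors: the backing array packs kDescriptorSize slots per entry,
// so dropping `to_trim` descriptors means dropping to_trim * kDescriptorSize
// slots from the end. A standalone sketch of the same record-granular trim on
// a flat vector; the slot count per record is illustrative, not V8's layout.
#include <cstddef>
#include <vector>

constexpr size_t kSlotsPerRecord = 3;   // e.g. key, details, value per entry

// Drop trailing records from a flat array that packs fixed-size records,
// never cutting a record in half.
void RightTrimRecords(std::vector<int>* slots, size_t records_to_keep) {
  size_t records = slots->size() / kSlotsPerRecord;
  if (records_to_keep >= records) return;
  slots->resize(records_to_keep * kSlotsPerRecord);
}
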
Heap* heap = map->GetHeap(); - set_initial_map(heap->raw_unchecked_undefined_value()); + set_initial_map(heap->undefined_value()); Builtins* builtins = heap->isolate()->builtins(); ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown), *RawField(this, kConstructStubOffset)); @@ -8277,6 +8745,15 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) { } +void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) { + ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode())); + Object* stub = rinfo->code_age_stub(); + if (stub) { + VisitPointer(&stub); + } +} + + void ObjectVisitor::VisitCodeEntry(Address entry_address) { Object* code = Code::GetObjectFromEntryAddress(entry_address); Object* old_code = code; @@ -8336,9 +8813,16 @@ void Code::CopyFrom(const CodeDesc& desc) { ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT); // copy code + CHECK(IsCode()); + CHECK(relocation_info()->IsByteArray()); + CHECK(reinterpret_cast<intptr_t>(instruction_start()) == + reinterpret_cast<intptr_t>(this) + Code::kHeaderSize - kHeapObjectTag); memmove(instruction_start(), desc.buffer, desc.instr_size); // copy reloc info + // TODO(mstarzinger): Remove once we found the bug. + CHECK(IsCode()); + CHECK(relocation_info()->IsByteArray()); memmove(relocation_start(), desc.buffer + desc.buffer_size - desc.reloc_size, desc.reloc_size); @@ -8348,8 +8832,10 @@ void Code::CopyFrom(const CodeDesc& desc) { int mode_mask = RelocInfo::kCodeTargetMask | RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) | RelocInfo::ModeMask(RelocInfo::GLOBAL_PROPERTY_CELL) | + RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) | RelocInfo::kApplyMask; - Assembler* origin = desc.origin; // Needed to find target_object on X64. + // Needed to find target_object and runtime_entry on X64 + Assembler* origin = desc.origin; for (RelocIterator it(this, mode_mask); !it.done(); it.next()) { RelocInfo::Mode mode = it.rinfo()->rmode(); if (mode == RelocInfo::EMBEDDED_OBJECT) { @@ -8365,6 +8851,9 @@ void Code::CopyFrom(const CodeDesc& desc) { Code* code = Code::cast(*p); it.rinfo()->set_target_address(code->instruction_start(), SKIP_WRITE_BARRIER); + } else if (RelocInfo::IsRuntimeEntry(mode)) { + Address p = it.rinfo()->target_runtime_entry(origin); + it.rinfo()->set_target_runtime_entry(p, SKIP_WRITE_BARRIER); } else { it.rinfo()->apply(delta); } @@ -8455,6 +8944,46 @@ Map* Code::FindFirstMap() { } +void Code::FindAllMaps(MapHandleList* maps) { + ASSERT(is_inline_cache_stub()); + AssertNoAllocation no_allocation; + int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(this, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + Object* object = info->target_object(); + if (object->IsMap()) maps->Add(Handle<Map>(Map::cast(object))); + } +} + + +Code* Code::FindFirstCode() { + ASSERT(is_inline_cache_stub()); + AssertNoAllocation no_allocation; + int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET); + for (RelocIterator it(this, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + return Code::GetCodeFromTargetAddress(info->target_address()); + } + return NULL; +} + + +void Code::FindAllCode(CodeHandleList* code_list, int length) { + ASSERT(is_inline_cache_stub()); + AssertNoAllocation no_allocation; + int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET); + int i = 0; + for (RelocIterator it(this, mask); !it.done(); it.next()) { + if (i++ == length) return; + RelocInfo* info = it.rinfo(); + Code* code = Code::GetCodeFromTargetAddress(info->target_address()); + ASSERT(code->kind() == 
Code::STUB); + code_list->Add(Handle<Code>(code)); + } + UNREACHABLE(); +} + + void Code::ClearInlineCaches() { int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) | RelocInfo::ModeMask(RelocInfo::CONSTRUCT_CALL) | @@ -8471,6 +9000,7 @@ void Code::ClearInlineCaches() { void Code::ClearTypeFeedbackCells(Heap* heap) { + if (kind() != FUNCTION) return; Object* raw_info = type_feedback_info(); if (raw_info->IsTypeFeedbackInfo()) { TypeFeedbackCells* type_feedback_cells = @@ -8485,7 +9015,135 @@ void Code::ClearTypeFeedbackCells(Heap* heap) { bool Code::allowed_in_shared_map_code_cache() { return is_keyed_load_stub() || is_keyed_store_stub() || - (is_compare_ic_stub() && compare_state() == CompareIC::KNOWN_OBJECTS); + (is_compare_ic_stub() && + ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECT); +} + + +void Code::MakeCodeAgeSequenceYoung(byte* sequence) { + PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY); +} + + +void Code::MakeOlder(MarkingParity current_parity) { + byte* sequence = FindCodeAgeSequence(); + if (sequence != NULL) { + Age age; + MarkingParity code_parity; + GetCodeAgeAndParity(sequence, &age, &code_parity); + if (age != kLastCodeAge && code_parity != current_parity) { + PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1), + current_parity); + } + } +} + + +bool Code::IsOld() { + byte* sequence = FindCodeAgeSequence(); + if (sequence == NULL) return false; + Age age; + MarkingParity parity; + GetCodeAgeAndParity(sequence, &age, &parity); + return age >= kSexagenarianCodeAge; +} + + +byte* Code::FindCodeAgeSequence() { + return FLAG_age_code && + prologue_offset() != kPrologueOffsetNotSet && + (kind() == OPTIMIZED_FUNCTION || + (kind() == FUNCTION && !has_debug_break_slots())) + ? instruction_start() + prologue_offset() + : NULL; +} + + +void Code::GetCodeAgeAndParity(Code* code, Age* age, + MarkingParity* parity) { + Isolate* isolate = Isolate::Current(); + Builtins* builtins = isolate->builtins(); + Code* stub = NULL; +#define HANDLE_CODE_AGE(AGE) \ + stub = *builtins->Make##AGE##CodeYoungAgainEvenMarking(); \ + if (code == stub) { \ + *age = k##AGE##CodeAge; \ + *parity = EVEN_MARKING_PARITY; \ + return; \ + } \ + stub = *builtins->Make##AGE##CodeYoungAgainOddMarking(); \ + if (code == stub) { \ + *age = k##AGE##CodeAge; \ + *parity = ODD_MARKING_PARITY; \ + return; \ + } + CODE_AGE_LIST(HANDLE_CODE_AGE) +#undef HANDLE_CODE_AGE + UNREACHABLE(); +} + + +Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) { + Isolate* isolate = Isolate::Current(); + Builtins* builtins = isolate->builtins(); + switch (age) { +#define HANDLE_CODE_AGE(AGE) \ + case k##AGE##CodeAge: { \ + Code* stub = parity == EVEN_MARKING_PARITY \ + ? 
*builtins->Make##AGE##CodeYoungAgainEvenMarking() \ + : *builtins->Make##AGE##CodeYoungAgainOddMarking(); \ + return stub; \ + } + CODE_AGE_LIST(HANDLE_CODE_AGE) +#undef HANDLE_CODE_AGE + default: + UNREACHABLE(); + break; + } + return NULL; +} + + +void Code::PrintDeoptLocation(int bailout_id) { + const char* last_comment = NULL; + int mask = RelocInfo::ModeMask(RelocInfo::COMMENT) + | RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY); + for (RelocIterator it(this, mask); !it.done(); it.next()) { + RelocInfo* info = it.rinfo(); + if (info->rmode() == RelocInfo::COMMENT) { + last_comment = reinterpret_cast<const char*>(info->data()); + } else if (last_comment != NULL && + bailout_id == Deoptimizer::GetDeoptimizationId( + GetIsolate(), info->target_address(), Deoptimizer::EAGER)) { + CHECK(RelocInfo::IsRuntimeEntry(info->rmode())); + PrintF(" %s\n", last_comment); + return; + } + } +} + + +// Identify kind of code. +const char* Code::Kind2String(Kind kind) { + switch (kind) { + case FUNCTION: return "FUNCTION"; + case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION"; + case COMPILED_STUB: return "COMPILED_STUB"; + case STUB: return "STUB"; + case BUILTIN: return "BUILTIN"; + case LOAD_IC: return "LOAD_IC"; + case KEYED_LOAD_IC: return "KEYED_LOAD_IC"; + case STORE_IC: return "STORE_IC"; + case KEYED_STORE_IC: return "KEYED_STORE_IC"; + case CALL_IC: return "CALL_IC"; + case KEYED_CALL_IC: return "KEYED_CALL_IC"; + case UNARY_OP_IC: return "UNARY_OP_IC"; + case BINARY_OP_IC: return "BINARY_OP_IC"; + case COMPARE_IC: return "COMPARE_IC"; + case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC"; + } + UNREACHABLE(); + return NULL; } @@ -8548,6 +9206,12 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { break; } + case Translation::COMPILED_STUB_FRAME: { + Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next()); + PrintF(out, "{kind=%d}", stub_kind); + break; + } + case Translation::ARGUMENTS_ADAPTOR_FRAME: case Translation::CONSTRUCT_STUB_FRAME: { int function_id = iterator.Next(); @@ -8588,8 +9252,7 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { case Translation::UINT32_REGISTER: { int reg_code = iterator.Next(); - PrintF(out, - "{input=%s (unsigned)}", + PrintF(out, "{input=%s (unsigned)}", converter.NameOfCPURegister(reg_code)); break; } @@ -8631,8 +9294,14 @@ void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) { break; } - case Translation::ARGUMENTS_OBJECT: + case Translation::ARGUMENTS_OBJECT: { + bool args_known = iterator.Next(); + int args_index = iterator.Next(); + int args_length = iterator.Next(); + PrintF(out, "{index=%d, length=%d, known=%d}", + args_index, args_length, args_known); break; + } } PrintF(out, "\n"); } @@ -8657,38 +9326,16 @@ void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) { } -// Identify kind of code. 
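
// Code::MakeOlder above only bumps the age when the parity recorded in the
// code differs from the current GC's marking parity, so a code object ages at
// most one step per full GC even if the marker visits it several times. A
// simplified sketch of that state machine with plain enums; the real encoding
// goes through the Make*CodeYoungAgain builtins rather than stored fields.
enum SketchAge { kNoAge, kQuadragenarian, kQuinquagenarian,
                 kSexagenarian, kLastAge };
enum SketchParity { EVEN_PARITY, ODD_PARITY };

struct CodeAgeState {
  SketchAge age = kNoAge;
  SketchParity parity = EVEN_PARITY;
};

// Advance the age once per GC cycle; repeated visits within the same cycle
// (same parity) are no-ops.
void MakeOlder(CodeAgeState* state, SketchParity current_parity) {
  if (state->age != kLastAge && state->parity != current_parity) {
    state->age = static_cast<SketchAge>(state->age + 1);
    state->parity = current_parity;
  }
}

bool IsOld(const CodeAgeState& state) { return state.age >= kSexagenarian; }
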
-const char* Code::Kind2String(Kind kind) { - switch (kind) { - case FUNCTION: return "FUNCTION"; - case OPTIMIZED_FUNCTION: return "OPTIMIZED_FUNCTION"; - case STUB: return "STUB"; - case BUILTIN: return "BUILTIN"; - case LOAD_IC: return "LOAD_IC"; - case KEYED_LOAD_IC: return "KEYED_LOAD_IC"; - case STORE_IC: return "STORE_IC"; - case KEYED_STORE_IC: return "KEYED_STORE_IC"; - case CALL_IC: return "CALL_IC"; - case KEYED_CALL_IC: return "KEYED_CALL_IC"; - case UNARY_OP_IC: return "UNARY_OP_IC"; - case BINARY_OP_IC: return "BINARY_OP_IC"; - case COMPARE_IC: return "COMPARE_IC"; - case TO_BOOLEAN_IC: return "TO_BOOLEAN_IC"; - } - UNREACHABLE(); - return NULL; -} - - const char* Code::ICState2String(InlineCacheState state) { switch (state) { case UNINITIALIZED: return "UNINITIALIZED"; case PREMONOMORPHIC: return "PREMONOMORPHIC"; case MONOMORPHIC: return "MONOMORPHIC"; case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE"; + case POLYMORPHIC: return "POLYMORPHIC"; case MEGAMORPHIC: return "MEGAMORPHIC"; - case DEBUG_BREAK: return "DEBUG_BREAK"; - case DEBUG_PREPARE_STEP_IN: return "DEBUG_PREPARE_STEP_IN"; + case GENERIC: return "GENERIC"; + case DEBUG_STUB: return "DEBUG_STUB"; } UNREACHABLE(); return NULL; @@ -8747,11 +9394,15 @@ void Code::Disassemble(const char* name, FILE* out) { PrintF(out, "argc = %d\n", arguments_count()); } if (is_compare_ic_stub()) { - CompareIC::State state = CompareIC::ComputeState(this); - PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state)); - } - if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) { - Token::Value op = CompareIC::ComputeOperation(this); + ASSERT(major_key() == CodeStub::CompareIC); + CompareIC::State left_state, right_state, handler_state; + Token::Value op; + ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state, + &handler_state, &op); + PrintF(out, "compare_state = %s*%s -> %s\n", + CompareIC::GetStateName(left_state), + CompareIC::GetStateName(right_state), + CompareIC::GetStateName(handler_state)); PrintF(out, "compare_operation = %s\n", Token::Name(op)); } } @@ -8777,7 +9428,7 @@ void Code::Disassemble(const char* name, FILE* out) { } PrintF("\n"); - if (kind() == OPTIMIZED_FUNCTION) { + if (kind() == OPTIMIZED_FUNCTION || kind() == COMPILED_STUB) { SafepointTable table(this); PrintF(out, "Safepoints (size = %u)\n", table.size()); for (unsigned i = 0; i < table.length(); i++) { @@ -8797,8 +9448,6 @@ void Code::Disassemble(const char* name, FILE* out) { PrintF(out, "\n"); } PrintF(out, "\n"); - // Just print if type feedback info is ever used for optimized code. - ASSERT(type_feedback_info()->IsUndefined()); } else if (kind() == FUNCTION) { unsigned offset = stack_check_table_offset(); // If there is no stack check table, the "table start" will at or after @@ -8824,7 +9473,9 @@ void Code::Disassemble(const char* name, FILE* out) { } PrintF("RelocInfo (size = %d)\n", relocation_size()); - for (RelocIterator it(this); !it.done(); it.next()) it.rinfo()->Print(out); + for (RelocIterator it(this); !it.done(); it.next()) { + it.rinfo()->Print(GetIsolate(), out); + } PrintF(out, "\n"); } #endif // ENABLE_DISASSEMBLER @@ -8837,12 +9488,12 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength( Heap* heap = GetHeap(); // We should never end in here with a pixel or external array. ASSERT(!HasExternalArrayElements()); + ASSERT(!map()->is_observed()); // Allocate a new fast elements backing store. 
FixedArray* new_elements; - { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity); - if (!maybe->To(&new_elements)) return maybe; - } + MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity); + if (!maybe->To(&new_elements)) return maybe; ElementsKind elements_kind = GetElementsKind(); ElementsKind new_elements_kind; @@ -8865,11 +9516,11 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength( } } FixedArrayBase* old_elements = elements(); - ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind); - { MaybeObject* maybe_obj = - accessor->CopyElements(this, new_elements, new_elements_kind); - if (maybe_obj->IsFailure()) return maybe_obj; - } + ElementsAccessor* accessor = ElementsAccessor::ForKind(new_elements_kind); + MaybeObject* maybe_obj = + accessor->CopyElements(this, new_elements, elements_kind); + if (maybe_obj->IsFailure()) return maybe_obj; + if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) { Map* new_map = map(); if (new_elements_kind != elements_kind) { @@ -8902,6 +9553,7 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( Heap* heap = GetHeap(); // We should never end in here with a pixel or external array. ASSERT(!HasExternalArrayElements()); + ASSERT(!map()->is_observed()); FixedArrayBase* elems; { MaybeObject* maybe_obj = @@ -8924,9 +9576,9 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( } FixedArrayBase* old_elements = elements(); - ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind); + ElementsAccessor* accessor = ElementsAccessor::ForKind(FAST_DOUBLE_ELEMENTS); { MaybeObject* maybe_obj = - accessor->CopyElements(this, elems, FAST_DOUBLE_ELEMENTS); + accessor->CopyElements(this, elems, elements_kind); if (maybe_obj->IsFailure()) return maybe_obj; } if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) { @@ -8950,19 +9602,10 @@ MaybeObject* JSObject::SetFastDoubleElementsCapacityAndLength( } -MaybeObject* JSArray::Initialize(int capacity) { - Heap* heap = GetHeap(); +MaybeObject* JSArray::Initialize(int capacity, int length) { ASSERT(capacity >= 0); - set_length(Smi::FromInt(0)); - FixedArray* new_elements; - if (capacity == 0) { - new_elements = heap->empty_fixed_array(); - } else { - MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(capacity); - if (!maybe_obj->To(&new_elements)) return maybe_obj; - } - set_elements(new_elements); - return this; + return GetHeap()->AllocateJSArrayStorage(this, length, capacity, + INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE); } @@ -8972,10 +9615,84 @@ void JSArray::Expand(int required_size) { } +// Returns false if the passed-in index is marked non-configurable, +// which will cause the ES5 truncation operation to halt, and thus +// no further old values need be collected. +static bool GetOldValue(Isolate* isolate, + Handle<JSObject> object, + uint32_t index, + List<Handle<Object> >* old_values, + List<Handle<String> >* indices) { + PropertyAttributes attributes = object->GetLocalElementAttribute(index); + ASSERT(attributes != ABSENT); + if (attributes == DONT_DELETE) return false; + old_values->Add(object->GetLocalElementAccessorPair(index) == NULL + ? Object::GetElement(object, index) + : Handle<Object>::cast(isolate->factory()->the_hole_value())); + indices->Add(isolate->factory()->Uint32ToString(index)); + return true; +} + + MaybeObject* JSArray::SetElementsLength(Object* len) { // We should never end in here with a pixel or external array. 
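
// When an observed JSArray's length shrinks (the GetOldValue helper above and
// the SetElementsLength body that follows), the old values of the removed own
// elements are captured first, walking down from the old length, and only
// afterwards one "deleted" record per removed index plus a single "updated"
// record for "length" are enqueued. A standalone sketch of that bookkeeping
// over a sparse map; the record type and names are stand-ins, and the
// non-configurable-element early exit of the real code is not modelled.
#include <cstdint>
#include <map>
#include <string>
#include <vector>

struct ChangeRecord {
  std::string type;   // "deleted" or "updated"
  std::string name;   // element index or "length"
};

// Shrinks a sparse "array" to new_length and reports what observers would see.
std::vector<ChangeRecord> SetLengthObserved(std::map<uint32_t, int>* elements,
                                            uint32_t* length,
                                            uint32_t new_length) {
  std::vector<ChangeRecord> records;
  uint32_t old_length = *length;
  // Walk removed indices from high to low, recording each existing element.
  for (auto it = elements->rbegin();
       it != elements->rend() && it->first >= new_length; ++it) {
    records.push_back({"deleted", std::to_string(it->first)});
  }
  elements->erase(elements->lower_bound(new_length), elements->end());
  *length = new_length;
  if (old_length != new_length) {
    records.push_back({"updated", "length"});
  }
  return records;
}
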
ASSERT(AllowsSetElementsLength()); - return GetElementsAccessor()->SetLength(this, len); + if (!(FLAG_harmony_observation && map()->is_observed())) + return GetElementsAccessor()->SetLength(this, len); + + Isolate* isolate = GetIsolate(); + HandleScope scope(isolate); + Handle<JSArray> self(this); + List<Handle<String> > indices; + List<Handle<Object> > old_values; + Handle<Object> old_length_handle(self->length(), isolate); + Handle<Object> new_length_handle(len, isolate); + uint32_t old_length = 0; + CHECK(old_length_handle->ToArrayIndex(&old_length)); + uint32_t new_length = 0; + if (!new_length_handle->ToArrayIndex(&new_length)) + return Failure::InternalError(); + + // Observed arrays should always be in dictionary mode; + // if they were in fast mode, the below is slower than necessary + // as it iterates over the array backing store multiple times. + ASSERT(self->HasDictionaryElements()); + static const PropertyAttributes kNoAttrFilter = NONE; + int num_elements = self->NumberOfLocalElements(kNoAttrFilter); + if (num_elements > 0) { + if (old_length == static_cast<uint32_t>(num_elements)) { + // Simple case for arrays without holes. + for (uint32_t i = old_length - 1; i + 1 > new_length; --i) { + if (!GetOldValue(isolate, self, i, &old_values, &indices)) break; + } + } else { + // For sparse arrays, only iterate over existing elements. + Handle<FixedArray> keys = isolate->factory()->NewFixedArray(num_elements); + self->GetLocalElementKeys(*keys, kNoAttrFilter); + while (num_elements-- > 0) { + uint32_t index = NumberToUint32(keys->get(num_elements)); + if (index < new_length) break; + if (!GetOldValue(isolate, self, index, &old_values, &indices)) break; + } + } + } + + MaybeObject* result = + self->GetElementsAccessor()->SetLength(*self, *new_length_handle); + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + CHECK(self->length()->ToArrayIndex(&new_length)); + if (old_length != new_length) { + for (int i = 0; i < indices.length(); ++i) { + JSObject::EnqueueChangeRecord( + self, "deleted", indices[i], old_values[i]); + } + JSObject::EnqueueChangeRecord( + self, "updated", isolate->factory()->length_string(), + old_length_handle); + } + return *hresult; } @@ -9055,13 +9772,117 @@ void Map::ZapPrototypeTransitions() { } +DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) { + Recompute(entries); +} + + +void DependentCode::GroupStartIndexes::Recompute(DependentCode* entries) { + start_indexes_[0] = 0; + for (int g = 1; g <= kGroupCount; g++) { + int count = entries->number_of_entries(static_cast<DependencyGroup>(g - 1)); + start_indexes_[g] = start_indexes_[g - 1] + count; + } +} + + +Handle<DependentCode> DependentCode::Insert(Handle<DependentCode> entries, + DependencyGroup group, + Handle<Code> value) { + GroupStartIndexes starts(*entries); + int start = starts.at(group); + int end = starts.at(group + 1); + int number_of_entries = starts.number_of_entries(); + if (start < end && entries->code_at(end - 1) == *value) { + // Do not append the code if it is already in the array. + // It is sufficient to just check only the last element because + // we process embedded maps of an optimized code in one batch. 
+ return entries; + } + if (entries->length() < kCodesStartIndex + number_of_entries + 1) { + Factory* factory = entries->GetIsolate()->factory(); + int capacity = kCodesStartIndex + number_of_entries + 1; + if (capacity > 5) capacity = capacity * 5 / 4; + Handle<DependentCode> new_entries = Handle<DependentCode>::cast( + factory->CopySizeFixedArray(entries, capacity)); + // The number of codes can change after GC. + starts.Recompute(*entries); + start = starts.at(group); + end = starts.at(group + 1); + number_of_entries = starts.number_of_entries(); + for (int i = 0; i < number_of_entries; i++) { + entries->clear_code_at(i); + } + // If the old fixed array was empty, we need to reset counters of the + // new array. + if (number_of_entries == 0) { + for (int g = 0; g < kGroupCount; g++) { + new_entries->set_number_of_entries(static_cast<DependencyGroup>(g), 0); + } + } + entries = new_entries; + } + entries->ExtendGroup(group); + entries->set_code_at(end, *value); + entries->set_number_of_entries(group, end + 1 - start); + return entries; +} + + +bool DependentCode::Contains(DependencyGroup group, Code* code) { + GroupStartIndexes starts(this); + int number_of_entries = starts.at(kGroupCount); + for (int i = 0; i < number_of_entries; i++) { + if (code_at(i) == code) return true; + } + return false; +} + + +class DeoptimizeDependentCodeFilter : public OptimizedFunctionFilter { + public: + virtual bool TakeFunction(JSFunction* function) { + return function->code()->marked_for_deoptimization(); + } +}; + + +void DependentCode::DeoptimizeDependentCodeGroup( + Isolate* isolate, + DependentCode::DependencyGroup group) { + AssertNoAllocation no_allocation_scope; + DependentCode::GroupStartIndexes starts(this); + int start = starts.at(group); + int end = starts.at(group + 1); + int number_of_entries = starts.at(DependentCode::kGroupCount); + if (start == end) return; + for (int i = start; i < end; i++) { + Code* code = code_at(i); + code->set_marked_for_deoptimization(true); + } + // Compact the array by moving all subsequent groups to fill in the new holes. + for (int src = end, dst = start; src < number_of_entries; src++, dst++) { + set_code_at(dst, code_at(src)); + } + // Now the holes are at the end of the array, zap them for heap-verifier. + int removed = end - start; + for (int i = number_of_entries - removed; i < number_of_entries; i++) { + clear_code_at(i); + } + set_number_of_entries(group, 0); + DeoptimizeDependentCodeFilter filter; + Deoptimizer::DeoptimizeAllFunctionsWith(isolate, &filter); +} + + MaybeObject* JSReceiver::SetPrototype(Object* value, bool skip_hidden_prototypes) { #ifdef DEBUG int size = Size(); #endif - Heap* heap = GetHeap(); + Isolate* isolate = GetIsolate(); + Heap* heap = isolate->heap(); // Silently ignore the change if value is not a JSObject or null. // SpiderMonkey behaves this way. if (!value->IsJSReceiver() && !value->IsNull()) return value; @@ -9075,22 +9896,24 @@ MaybeObject* JSReceiver::SetPrototype(Object* value, // or [[Extensible]] must not violate the invariants defined in the preceding // paragraph. 
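
// DependentCode stores the code objects of all dependency groups back to back
// in a single array, with per-group counts from which start indexes are
// recomputed on demand. Removing one group, as DeoptimizeDependentCodeGroup
// does above, means shifting every later group left over the hole and clearing
// the vacated tail slots so stale entries are not kept alive. A standalone
// sketch over a std::vector; the layout and names are simplified stand-ins.
#include <cstddef>
#include <vector>

// counts[g] holds how many entries belong to group g; entries of all groups
// are stored contiguously, in group order, inside `entries`.
void RemoveGroup(std::vector<int>* entries, std::vector<size_t>* counts,
                 size_t group) {
  size_t start = 0;
  for (size_t g = 0; g < group; g++) start += (*counts)[g];
  size_t end = start + (*counts)[group];
  size_t total = 0;
  for (size_t c : *counts) total += c;
  // Shift all later groups left to fill the hole.
  for (size_t src = end, dst = start; src < total; src++, dst++) {
    (*entries)[dst] = (*entries)[src];
  }
  // Clear the now-unused tail.
  size_t removed = end - start;
  for (size_t i = total - removed; i < total; i++) (*entries)[i] = 0;
  (*counts)[group] = 0;
}
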
if (!this->map()->is_extensible()) { - HandleScope scope(heap->isolate()); - Handle<Object> handle(this, heap->isolate()); - return heap->isolate()->Throw( - *FACTORY->NewTypeError("non_extensible_proto", - HandleVector<Object>(&handle, 1))); + HandleScope scope(isolate); + Handle<Object> handle(this, isolate); + return isolate->Throw( + *isolate->factory()->NewTypeError("non_extensible_proto", + HandleVector<Object>(&handle, 1))); } // Before we can set the prototype we need to be sure // prototype cycles are prevented. // It is sufficient to validate that the receiver is not in the new prototype // chain. - for (Object* pt = value; pt != heap->null_value(); pt = pt->GetPrototype()) { + for (Object* pt = value; + pt != heap->null_value(); + pt = pt->GetPrototype(isolate)) { if (JSReceiver::cast(pt) == this) { // Cycle detected. - HandleScope scope(heap->isolate()); - return heap->isolate()->Throw( + HandleScope scope(isolate); + return isolate->Throw( *FACTORY->NewError("cyclic_proto", HandleVector<Object>(NULL, 0))); } } @@ -9104,7 +9927,7 @@ MaybeObject* JSReceiver::SetPrototype(Object* value, while (current_proto->IsJSObject() && JSReceiver::cast(current_proto)->map()->is_hidden_prototype()) { real_receiver = JSReceiver::cast(current_proto); - current_proto = current_proto->GetPrototype(); + current_proto = current_proto->GetPrototype(isolate); } } @@ -9152,203 +9975,51 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args, } -bool JSObject::HasElementWithInterceptor(JSReceiver* receiver, uint32_t index) { - Isolate* isolate = GetIsolate(); - // Make sure that the top context does not change when doing - // callbacks or interceptor calls. - AssertNoContextChange ncc; - HandleScope scope(isolate); - Handle<InterceptorInfo> interceptor(GetIndexedInterceptor()); - Handle<JSReceiver> receiver_handle(receiver); - Handle<JSObject> holder_handle(this); - CustomArguments args(isolate, interceptor->data(), receiver, this); - v8::AccessorInfo info(args.end()); - if (!interceptor->query()->IsUndefined()) { - v8::IndexedPropertyQuery query = - v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query()); - LOG(isolate, - ApiIndexedPropertyAccess("interceptor-indexed-has", this, index)); - v8::Handle<v8::Integer> result; - { - // Leaving JavaScript. - VMState state(isolate, EXTERNAL); - result = query(index, info); - } - if (!result.IsEmpty()) { - ASSERT(result->IsInt32()); - return true; // absence of property is signaled by empty handle. - } - } else if (!interceptor->getter()->IsUndefined()) { - v8::IndexedPropertyGetter getter = - v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter()); - LOG(isolate, - ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index)); - v8::Handle<v8::Value> result; - { - // Leaving JavaScript. - VMState state(isolate, EXTERNAL); - result = getter(index, info); - } - if (!result.IsEmpty()) return true; - } - - if (holder_handle->GetElementsAccessor()->HasElement( - *receiver_handle, *holder_handle, index)) { - return true; - } - - if (holder_handle->IsStringObjectWithCharacterAt(index)) return true; - Object* pt = holder_handle->GetPrototype(); - if (pt->IsJSProxy()) { - // We need to follow the spec and simulate a call to [[GetOwnProperty]]. 
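
// SetPrototype above rejects a new prototype if the receiver itself appears
// anywhere on the proposed chain, which is what keeps prototype chains
// acyclic. A minimal standalone sketch of the same walk over a toy object
// type, where a null prototype terminates the chain:
struct SketchObj {
  const SketchObj* prototype = nullptr;
};

// True if installing `candidate` as receiver's prototype would create a cycle.
bool WouldCreateCycle(const SketchObj* receiver, const SketchObj* candidate) {
  for (const SketchObj* p = candidate; p != nullptr; p = p->prototype) {
    if (p == receiver) return true;   // receiver found on the new chain
  }
  return false;
}
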
- return JSProxy::cast(pt)->GetElementAttributeWithHandler( - receiver, index) != ABSENT; +PropertyType JSObject::GetLocalPropertyType(Name* name) { + uint32_t index = 0; + if (name->AsArrayIndex(&index)) { + return GetLocalElementType(index); } - if (pt->IsNull()) return false; - return JSObject::cast(pt)->HasElementWithReceiver(*receiver_handle, index); + LookupResult lookup(GetIsolate()); + LocalLookup(name, &lookup, true); + return lookup.type(); } -JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) { - // Check access rights if needed. - if (IsAccessCheckNeeded()) { - Heap* heap = GetHeap(); - if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) { - heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS); - return UNDEFINED_ELEMENT; - } - } +PropertyType JSObject::GetLocalElementType(uint32_t index) { + return GetElementsAccessor()->GetType(this, this, index); +} - if (IsJSGlobalProxy()) { - Object* proto = GetPrototype(); - if (proto->IsNull()) return UNDEFINED_ELEMENT; - ASSERT(proto->IsJSGlobalObject()); - return JSObject::cast(proto)->HasLocalElement(index); - } - // Check for lookup interceptor - if (HasIndexedInterceptor()) { - return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT - : UNDEFINED_ELEMENT; +AccessorPair* JSObject::GetLocalPropertyAccessorPair(Name* name) { + uint32_t index = 0; + if (name->AsArrayIndex(&index)) { + return GetLocalElementAccessorPair(index); } - // Handle [] on String objects. - if (this->IsStringObjectWithCharacterAt(index)) { - return STRING_CHARACTER_ELEMENT; - } + LookupResult lookup(GetIsolate()); + LocalLookupRealNamedProperty(name, &lookup); - switch (GetElementsKind()) { - case FAST_SMI_ELEMENTS: - case FAST_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_ELEMENTS: { - uint32_t length = IsJSArray() ? - static_cast<uint32_t> - (Smi::cast(JSArray::cast(this)->length())->value()) : - static_cast<uint32_t>(FixedArray::cast(elements())->length()); - if ((index < length) && - !FixedArray::cast(elements())->get(index)->IsTheHole()) { - return FAST_ELEMENT; - } - break; - } - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: { - uint32_t length = IsJSArray() ? - static_cast<uint32_t> - (Smi::cast(JSArray::cast(this)->length())->value()) : - static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length()); - if ((index < length) && - !FixedDoubleArray::cast(elements())->is_the_hole(index)) { - return FAST_ELEMENT; - } - break; - } - case EXTERNAL_PIXEL_ELEMENTS: { - ExternalPixelArray* pixels = ExternalPixelArray::cast(elements()); - if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT; - break; - } - case EXTERNAL_BYTE_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_SHORT_ELEMENTS: - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - case EXTERNAL_INT_ELEMENTS: - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: { - ExternalArray* array = ExternalArray::cast(elements()); - if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT; - break; - } - case DICTIONARY_ELEMENTS: { - if (element_dictionary()->FindEntry(index) != - SeededNumberDictionary::kNotFound) { - return DICTIONARY_ELEMENT; - } - break; - } - case NON_STRICT_ARGUMENTS_ELEMENTS: { - // Aliased parameters and non-aliased elements in a fast backing store - // behave as FAST_ELEMENT. Non-aliased elements in a dictionary - // backing store behave as DICTIONARY_ELEMENT. 
- FixedArray* parameter_map = FixedArray::cast(elements()); - uint32_t length = parameter_map->length(); - Object* probe = - index < (length - 2) ? parameter_map->get(index + 2) : NULL; - if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT; - // If not aliased, check the arguments. - FixedArray* arguments = FixedArray::cast(parameter_map->get(1)); - if (arguments->IsDictionary()) { - SeededNumberDictionary* dictionary = - SeededNumberDictionary::cast(arguments); - if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) { - return DICTIONARY_ELEMENT; - } - } else { - length = arguments->length(); - probe = (index < length) ? arguments->get(index) : NULL; - if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT; - } - break; - } + if (lookup.IsPropertyCallbacks() && + lookup.GetCallbackObject()->IsAccessorPair()) { + return AccessorPair::cast(lookup.GetCallbackObject()); } - - return UNDEFINED_ELEMENT; + return NULL; } -bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) { - // Check access rights if needed. - if (IsAccessCheckNeeded()) { - Heap* heap = GetHeap(); - if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) { - heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS); - return false; - } - } - - // Check for lookup interceptor - if (HasIndexedInterceptor()) { - return HasElementWithInterceptor(receiver, index); - } - - ElementsAccessor* accessor = GetElementsAccessor(); - if (accessor->HasElement(receiver, this, index)) { - return true; +AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) { + if (IsJSGlobalProxy()) { + Object* proto = GetPrototype(); + if (proto->IsNull()) return NULL; + ASSERT(proto->IsJSGlobalObject()); + return JSObject::cast(proto)->GetLocalElementAccessorPair(index); } - // Handle [] on String objects. - if (this->IsStringObjectWithCharacterAt(index)) return true; + // Check for lookup interceptor. + if (HasIndexedInterceptor()) return NULL; - Object* pt = GetPrototype(); - if (pt->IsNull()) return false; - if (pt->IsJSProxy()) { - // We need to follow the spec and simulate a call to [[GetOwnProperty]]. - return JSProxy::cast(pt)->GetElementAttributeWithHandler( - receiver, index) != ABSENT; - } - return JSObject::cast(pt)->HasElementWithReceiver(receiver, index); + return GetElementsAccessor()->GetAccessorPair(this, this, index); } @@ -9402,8 +10073,9 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver, ASSERT(!structure->IsForeign()); // api style callbacks. - if (structure->IsAccessorInfo()) { - Handle<AccessorInfo> data(AccessorInfo::cast(structure)); + if (structure->IsExecutableAccessorInfo()) { + Handle<ExecutableAccessorInfo> data( + ExecutableAccessorInfo::cast(structure)); Object* fun_obj = data->getter(); v8::AccessorGetter call_fun = v8::ToCData<v8::AccessorGetter>(fun_obj); if (call_fun == NULL) return isolate->heap()->undefined_value(); @@ -9439,6 +10111,12 @@ MaybeObject* JSObject::GetElementWithCallback(Object* receiver, return isolate->heap()->undefined_value(); } + if (structure->IsDeclaredAccessorInfo()) { + return GetDeclaredAccessorProperty(receiver, + DeclaredAccessorInfo::cast(structure), + isolate); + } + UNREACHABLE(); return NULL; } @@ -9462,11 +10140,12 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, // callbacks should be phased out. 
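
// GetElementWithCallback above (and SetElementWithCallback just below)
// dispatch on what kind of "structure" backs an accessor: an API callback
// (ExecutableAccessorInfo), a JavaScript getter/setter pair (AccessorPair), or
// a declared accessor. A standalone sketch of that dispatch with std::variant
// standing in for the tagged V8 objects; the types and names are stand-ins.
#include <functional>
#include <optional>
#include <variant>

using Value = int;                                         // stand-in JS value
struct ApiCallback { std::function<Value()> getter; };     // C++-side accessor
struct JsAccessorPair { std::function<Value()> getter; };  // JS getter closure
struct DeclaredAccessor {};                                // data-driven accessor

using AccessorStructure =
    std::variant<ApiCallback, JsAccessorPair, DeclaredAccessor>;

// Value produced by whichever accessor kind is installed, or nothing when the
// accessor has no getter at all.
std::optional<Value> GetWithCallback(const AccessorStructure& structure) {
  if (const auto* api = std::get_if<ApiCallback>(&structure)) {
    if (api->getter) return api->getter();    // call the API getter
    return std::nullopt;                      // setter-only accessor
  }
  if (const auto* pair = std::get_if<JsAccessorPair>(&structure)) {
    if (pair->getter) return pair->getter();  // call the JS getter
    return std::nullopt;
  }
  return std::nullopt;  // declared accessors are not modelled in this sketch
}
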
ASSERT(!structure->IsForeign()); - if (structure->IsAccessorInfo()) { + if (structure->IsExecutableAccessorInfo()) { // api style callbacks Handle<JSObject> self(this); Handle<JSObject> holder_handle(JSObject::cast(holder)); - Handle<AccessorInfo> data(AccessorInfo::cast(structure)); + Handle<ExecutableAccessorInfo> data( + ExecutableAccessorInfo::cast(structure)); Object* call_obj = data->setter(); v8::AccessorSetter call_fun = v8::ToCData<v8::AccessorSetter>(call_obj); if (call_fun == NULL) return value; @@ -9487,7 +10166,7 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, } if (structure->IsAccessorPair()) { - Handle<Object> setter(AccessorPair::cast(structure)->setter()); + Handle<Object> setter(AccessorPair::cast(structure)->setter(), isolate); if (setter->IsSpecFunction()) { // TODO(rossberg): nicer would be to cast to some JSCallable here... return SetPropertyWithDefinedSetter(JSReceiver::cast(*setter), value); @@ -9504,6 +10183,9 @@ MaybeObject* JSObject::SetElementWithCallback(Object* structure, } } + // TODO(dcarney): Handle correctly. + if (structure->IsDeclaredAccessorInfo()) return value; + UNREACHABLE(); return NULL; } @@ -9609,6 +10291,14 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, } // Convert to fast double elements if appropriate. if (HasFastSmiElements() && !value->IsSmi() && value->IsNumber()) { + // Consider fixing the boilerplate as well if we have one. + ElementsKind to_kind = IsHoleyElementsKind(elements_kind) + ? FAST_HOLEY_DOUBLE_ELEMENTS + : FAST_DOUBLE_ELEMENTS; + + MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind); + if (maybe_failure->IsFailure()) return maybe_failure; + MaybeObject* maybe = SetFastDoubleElementsCapacityAndLength(new_capacity, array_length); if (maybe->IsFailure()) return maybe; @@ -9622,6 +10312,10 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, ElementsKind kind = HasFastHoleyElements() ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; + + MaybeObject* maybe_failure = UpdateAllocationSiteInfo(kind); + if (maybe_failure->IsFailure()) return maybe_failure; + MaybeObject* maybe_new_map = GetElementsTransitionMap(GetIsolate(), kind); if (!maybe_new_map->To(&new_map)) return maybe_new_map; @@ -9657,7 +10351,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index, MaybeObject* JSObject::SetDictionaryElement(uint32_t index, - Object* value, + Object* value_raw, PropertyAttributes attributes, StrictModeFlag strict_mode, bool check_prototype, @@ -9665,24 +10359,23 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements()); Isolate* isolate = GetIsolate(); Heap* heap = isolate->heap(); + Handle<JSObject> self(this); + Handle<Object> value(value_raw, isolate); // Insert element in the dictionary. - FixedArray* elements = FixedArray::cast(this->elements()); + Handle<FixedArray> elements(FixedArray::cast(this->elements())); bool is_arguments = (elements->map() == heap->non_strict_arguments_elements_map()); - SeededNumberDictionary* dictionary = NULL; - if (is_arguments) { - dictionary = SeededNumberDictionary::cast(elements->get(1)); - } else { - dictionary = SeededNumberDictionary::cast(elements); - } + Handle<SeededNumberDictionary> dictionary(is_arguments + ? 
SeededNumberDictionary::cast(elements->get(1)) + : SeededNumberDictionary::cast(*elements)); int entry = dictionary->FindEntry(index); if (entry != SeededNumberDictionary::kNotFound) { Object* element = dictionary->ValueAt(entry); PropertyDetails details = dictionary->DetailsAt(entry); if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) { - return SetElementWithCallback(element, index, value, this, strict_mode); + return SetElementWithCallback(element, index, *value, this, strict_mode); } else { dictionary->UpdateMaxNumberKey(index); // If a value has not been initialized we allow writing to it even if it @@ -9696,7 +10389,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, if (strict_mode == kNonStrictMode) { return isolate->heap()->undefined_value(); } else { - Handle<Object> holder(this); + Handle<Object> holder(this, isolate); Handle<Object> number = isolate->factory()->NewNumberFromUint(index); Handle<Object> args[2] = { number, holder }; Handle<Object> error = @@ -9711,24 +10404,24 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, Context* context = Context::cast(elements->get(0)); int context_index = entry->aliased_context_slot(); ASSERT(!context->get(context_index)->IsTheHole()); - context->set(context_index, value); + context->set(context_index, *value); // For elements that are still writable we keep slow aliasing. - if (!details.IsReadOnly()) value = element; + if (!details.IsReadOnly()) value = handle(element, isolate); } - dictionary->ValueAtPut(entry, value); + dictionary->ValueAtPut(entry, *value); } } else { // Index not already used. Look for an accessor in the prototype chain. + // Can cause GC! if (check_prototype) { bool found; - MaybeObject* result = - SetElementWithCallbackSetterInPrototypes( - index, value, &found, strict_mode); + MaybeObject* result = SetElementWithCallbackSetterInPrototypes( + index, *value, &found, strict_mode); if (found) return result; } // When we set the is_extensible flag to false we always force the // element into dictionary mode (and force them to stay there). - if (!map()->is_extensible()) { + if (!self->map()->is_extensible()) { if (strict_mode == kNonStrictMode) { return isolate->heap()->undefined_value(); } else { @@ -9743,30 +10436,31 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, } FixedArrayBase* new_dictionary; PropertyDetails details = PropertyDetails(attributes, NORMAL); - MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details); + MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details); if (!maybe->To(&new_dictionary)) return maybe; - if (dictionary != SeededNumberDictionary::cast(new_dictionary)) { + if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) { if (is_arguments) { elements->set(1, new_dictionary); } else { - set_elements(new_dictionary); + self->set_elements(new_dictionary); } - dictionary = SeededNumberDictionary::cast(new_dictionary); + dictionary = + handle(SeededNumberDictionary::cast(new_dictionary), isolate); } } // Update the array length if this JSObject is an array. - if (IsJSArray()) { + if (self->IsJSArray()) { MaybeObject* result = - JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value); + JSArray::cast(*self)->JSArrayUpdateLengthFromIndex(index, *value); if (result->IsFailure()) return result; } // Attempt to put this object back in fast case. 
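
// A store into fast elements can force an elements-kind transition: in the
// SetFastElement changes a little further up, writing a non-Smi number into
// SMI-only elements moves the array to double elements, and writing any other
// object moves it to tagged object elements, preserving holeyness either way
// (after updating the allocation site). A standalone restatement of just that
// decision; the enum names approximate V8's, and kinds other than the SMI-only
// ones are deliberately left unchanged here.
enum StoreKind {
  STORE_FAST_SMI, STORE_FAST_HOLEY_SMI,
  STORE_FAST_DOUBLE, STORE_FAST_HOLEY_DOUBLE,
  STORE_FAST_OBJECT, STORE_FAST_HOLEY_OBJECT
};

struct StoredValue { bool is_smi; bool is_number; };

// Elements kind required after storing `v` into SMI-only elements of kind
// `kind`; holey variants stay holey. Non-SMI kinds are returned unchanged.
StoreKind KindAfterStore(StoreKind kind, StoredValue v) {
  bool holey = kind == STORE_FAST_HOLEY_SMI;
  bool smi_only = kind == STORE_FAST_SMI || kind == STORE_FAST_HOLEY_SMI;
  if (!smi_only || v.is_smi) return kind;          // no transition needed
  if (v.is_number) return holey ? STORE_FAST_HOLEY_DOUBLE : STORE_FAST_DOUBLE;
  return holey ? STORE_FAST_HOLEY_OBJECT : STORE_FAST_OBJECT;
}
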
- if (ShouldConvertToFastElements()) { + if (self->ShouldConvertToFastElements()) { uint32_t new_length = 0; - if (IsJSArray()) { - CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length)); + if (self->IsJSArray()) { + CHECK(JSArray::cast(*self)->length()->ToArrayIndex(&new_length)); } else { new_length = dictionary->max_number_key() + 1; } @@ -9775,16 +10469,15 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, : kDontAllowSmiElements; bool has_smi_only_elements = false; bool should_convert_to_fast_double_elements = - ShouldConvertToFastDoubleElements(&has_smi_only_elements); + self->ShouldConvertToFastDoubleElements(&has_smi_only_elements); if (has_smi_only_elements) { smi_mode = kForceSmiElements; } MaybeObject* result = should_convert_to_fast_double_elements - ? SetFastDoubleElementsCapacityAndLength(new_length, new_length) - : SetFastElementsCapacityAndLength(new_length, - new_length, - smi_mode); - ValidateElements(); + ? self->SetFastDoubleElementsCapacityAndLength(new_length, new_length) + : self->SetFastElementsCapacityAndLength( + new_length, new_length, smi_mode); + self->ValidateElements(); if (result->IsFailure()) return result; #ifdef DEBUG if (FLAG_trace_normalization) { @@ -9793,7 +10486,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index, } #endif } - return value; + return *value; } @@ -9949,28 +10642,27 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object, MaybeObject* JSObject::SetElement(uint32_t index, - Object* value, + Object* value_raw, PropertyAttributes attributes, StrictModeFlag strict_mode, bool check_prototype, SetPropertyMode set_mode) { + Isolate* isolate = GetIsolate(); + // Check access rights if needed. if (IsAccessCheckNeeded()) { - Heap* heap = GetHeap(); - if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) { - HandleScope scope(heap->isolate()); - Handle<Object> value_handle(value); - heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET); - return *value_handle; + if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) { + isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET); + return value_raw; } } if (IsJSGlobalProxy()) { Object* proto = GetPrototype(); - if (proto->IsNull()) return value; + if (proto->IsNull()) return value_raw; ASSERT(proto->IsJSGlobalObject()); return JSObject::cast(proto)->SetElement(index, - value, + value_raw, attributes, strict_mode, check_prototype, @@ -9979,10 +10671,8 @@ MaybeObject* JSObject::SetElement(uint32_t index, // Don't allow element properties to be redefined for external arrays. if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) { - Isolate* isolate = GetHeap()->isolate(); - Handle<Object> receiver(this); Handle<Object> number = isolate->factory()->NewNumberFromUint(index); - Handle<Object> args[] = { receiver, number }; + Handle<Object> args[] = { handle(this, isolate), number }; Handle<Object> error = isolate->factory()->NewTypeError( "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args))); return isolate->Throw(*error); @@ -9997,22 +10687,62 @@ MaybeObject* JSObject::SetElement(uint32_t index, dictionary->set_requires_slow_elements(); } + if (!(FLAG_harmony_observation && map()->is_observed())) { + return HasIndexedInterceptor() + ? SetElementWithInterceptor( + index, value_raw, attributes, strict_mode, check_prototype, set_mode) + : SetElementWithoutInterceptor( + index, value_raw, attributes, strict_mode, check_prototype, set_mode); + } + + // From here on, everything has to be handlified. 
+ Handle<JSObject> self(this); + Handle<Object> value(value_raw, isolate); + PropertyAttributes old_attributes = self->GetLocalElementAttribute(index); + Handle<Object> old_value = isolate->factory()->the_hole_value(); + Handle<Object> old_length; + + if (old_attributes != ABSENT) { + if (self->GetLocalElementAccessorPair(index) == NULL) + old_value = Object::GetElement(self, index); + } else if (self->IsJSArray()) { + // Store old array length in case adding an element grows the array. + old_length = handle(Handle<JSArray>::cast(self)->length(), isolate); + } + // Check for lookup interceptor - if (HasIndexedInterceptor()) { - return SetElementWithInterceptor(index, - value, - attributes, - strict_mode, - check_prototype, - set_mode); + MaybeObject* result = self->HasIndexedInterceptor() + ? self->SetElementWithInterceptor( + index, *value, attributes, strict_mode, check_prototype, set_mode) + : self->SetElementWithoutInterceptor( + index, *value, attributes, strict_mode, check_prototype, set_mode); + + Handle<Object> hresult; + if (!result->ToHandle(&hresult, isolate)) return result; + + Handle<String> name = isolate->factory()->Uint32ToString(index); + PropertyAttributes new_attributes = self->GetLocalElementAttribute(index); + if (old_attributes == ABSENT) { + EnqueueChangeRecord(self, "new", name, old_value); + if (self->IsJSArray() && + !old_length->SameValue(Handle<JSArray>::cast(self)->length())) { + EnqueueChangeRecord( + self, "updated", isolate->factory()->length_string(), old_length); + } + } else if (old_value->IsTheHole()) { + EnqueueChangeRecord(self, "reconfigured", name, old_value); + } else { + bool value_changed = + !old_value->SameValue(*Object::GetElement(self, index)); + if (old_attributes != new_attributes) { + if (!value_changed) old_value = isolate->factory()->the_hole_value(); + EnqueueChangeRecord(self, "reconfigured", name, old_value); + } else if (value_changed) { + EnqueueChangeRecord(self, "updated", name, old_value); + } } - return SetElementWithoutInterceptor(index, - value, - attributes, - strict_mode, - check_prototype, - set_mode); + return *hresult; } @@ -10026,6 +10756,16 @@ MaybeObject* JSObject::SetElementWithoutInterceptor(uint32_t index, HasDictionaryArgumentsElements() || (attr & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0); Isolate* isolate = GetIsolate(); + if (FLAG_trace_external_array_abuse && + IsExternalArrayElementsKind(GetElementsKind())) { + CheckArrayAbuse(this, "external elements write", index); + } + if (FLAG_trace_js_array_abuse && + !IsExternalArrayElementsKind(GetElementsKind())) { + if (IsJSArray()) { + CheckArrayAbuse(this, "elements write", index, true); + } + } switch (GetElementsKind()) { case FAST_SMI_ELEMENTS: case FAST_ELEMENTS: @@ -10121,13 +10861,69 @@ Handle<Object> JSObject::TransitionElementsKind(Handle<JSObject> object, } +MaybeObject* JSObject::UpdateAllocationSiteInfo(ElementsKind to_kind) { + if (!FLAG_track_allocation_sites || !IsJSArray()) { + return this; + } + + AllocationSiteInfo* info = AllocationSiteInfo::FindForJSObject(this); + if (info == NULL) { + return this; + } + + if (info->payload()->IsJSArray()) { + JSArray* payload = JSArray::cast(info->payload()); + ElementsKind kind = payload->GetElementsKind(); + if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) { + // If the array is huge, it's not likely to be defined in a local + // function, so we shouldn't make new instances of it very often. 
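// Illustrative sketch (assumed names, simplified) of the change-record
// classification that JSObject::SetElement performs above for observed
// objects: absent elements report "new", elements whose old value could not
// be read or whose attributes changed report "reconfigured", and plain value
// changes report "updated". The array-length special case is omitted.
#include <optional>
#include <string>

struct ElementChange {
  bool was_present;         // old attributes were not ABSENT
  bool old_value_readable;  // old value was not "the hole" (accessor-backed)
  bool attributes_changed;
  bool value_changed;
};

std::optional<std::string> ClassifyChange(const ElementChange& c) {
  if (!c.was_present) return "new";
  if (!c.old_value_readable) return "reconfigured";
  if (c.attributes_changed) return "reconfigured";
  if (c.value_changed) return "updated";
  return std::nullopt;      // nothing observable to report
}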
+ uint32_t length = 0; + CHECK(payload->length()->ToArrayIndex(&length)); + if (length <= AllocationSiteInfo::kMaximumArrayBytesToPretransition) { + if (FLAG_trace_track_allocation_sites) { + PrintF( + "AllocationSiteInfo: JSArray %p boilerplate updated %s->%s\n", + reinterpret_cast<void*>(this), + ElementsKindToString(kind), + ElementsKindToString(to_kind)); + } + return payload->TransitionElementsKind(to_kind); + } + } + } else if (info->payload()->IsJSGlobalPropertyCell()) { + JSGlobalPropertyCell* cell = JSGlobalPropertyCell::cast(info->payload()); + Object* cell_contents = cell->value(); + if (cell_contents->IsSmi()) { + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(cell_contents)->value()); + if (AllocationSiteInfo::GetMode(kind, to_kind) == TRACK_ALLOCATION_SITE) { + if (FLAG_trace_track_allocation_sites) { + PrintF("AllocationSiteInfo: JSArray %p info updated %s->%s\n", + reinterpret_cast<void*>(this), + ElementsKindToString(kind), + ElementsKindToString(to_kind)); + } + cell->set_value(Smi::FromInt(to_kind)); + } + } + } + return this; +} + + MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) { + ASSERT(!map()->is_observed()); ElementsKind from_kind = map()->elements_kind(); if (IsFastHoleyElementsKind(from_kind)) { to_kind = GetHoleyElementsKind(to_kind); } + if (from_kind == to_kind) return this; + + MaybeObject* maybe_failure = UpdateAllocationSiteInfo(to_kind); + if (maybe_failure->IsFailure()) return maybe_failure; + Isolate* isolate = GetIsolate(); if (elements() == isolate->heap()->empty_fixed_array() || (IsFastSmiOrObjectElementsKind(from_kind) && @@ -10376,6 +11172,9 @@ bool JSObject::ShouldConvertToFastElements() { // An object requiring access checks is never allowed to have fast // elements. If it had fast elements we would skip security checks. if (IsAccessCheckNeeded()) return false; + // Observed objects may not go to fast mode because they rely on map checks, + // and for fast element accesses we sometimes check element kinds only. + if (FLAG_harmony_observation && map()->is_observed()) return false; FixedArray* elements = FixedArray::cast(this->elements()); SeededNumberDictionary* dictionary = NULL; @@ -10493,7 +11292,7 @@ InterceptorInfo* JSObject::GetIndexedInterceptor() { MaybeObject* JSObject::GetPropertyPostInterceptor( Object* receiver, - String* name, + Name* name, PropertyAttributes* attributes) { // Check local property in holder, ignore interceptor. LookupResult result(GetIsolate()); @@ -10511,7 +11310,7 @@ MaybeObject* JSObject::GetPropertyPostInterceptor( MaybeObject* JSObject::GetLocalPropertyPostInterceptor( Object* receiver, - String* name, + Name* name, PropertyAttributes* attributes) { // Check local property in holder, ignore interceptor. LookupResult result(GetIsolate()); @@ -10525,14 +11324,17 @@ MaybeObject* JSObject::GetLocalPropertyPostInterceptor( MaybeObject* JSObject::GetPropertyWithInterceptor( Object* receiver, - String* name, + Name* name, PropertyAttributes* attributes) { + // TODO(rossberg): Support symbols in the API. 
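// Illustrative sketch (assumed names, simplified) of UpdateAllocationSiteInfo
// above: when a tracked array changes elements kind, the transition is fed
// back to its allocation site so later allocations start out in the better
// kind. The site holds either a boilerplate array, which is pretransitioned
// in place, or a bare kind value, which is simply overwritten. The real code
// also checks a tracking mode and a maximum pretransition size, omitted here.
#include <variant>

enum ElementsKind { SMI_ELEMENTS, DOUBLE_ELEMENTS, OBJECT_ELEMENTS };

struct Boilerplate { ElementsKind kind; };                 // template array
using SitePayload = std::variant<Boilerplate*, ElementsKind>;

void UpdateSite(SitePayload& payload, ElementsKind to_kind) {
  if (std::holds_alternative<Boilerplate*>(payload)) {
    std::get<Boilerplate*>(payload)->kind = to_kind;       // pretransition it
  } else {
    payload = to_kind;                                     // remember the kind
  }
}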
+ if (name->IsSymbol()) return GetHeap()->undefined_value(); + Isolate* isolate = GetIsolate(); InterceptorInfo* interceptor = GetNamedInterceptor(); HandleScope scope(isolate); - Handle<Object> receiver_handle(receiver); + Handle<Object> receiver_handle(receiver, isolate); Handle<JSObject> holder_handle(this); - Handle<String> name_handle(name); + Handle<String> name_handle(String::cast(name)); if (!interceptor->getter()->IsUndefined()) { v8::NamedPropertyGetter getter = @@ -10565,7 +11367,7 @@ MaybeObject* JSObject::GetPropertyWithInterceptor( } -bool JSObject::HasRealNamedProperty(String* key) { +bool JSObject::HasRealNamedProperty(Name* key) { // Check access rights if needed. Isolate* isolate = GetIsolate(); if (IsAccessCheckNeeded()) { @@ -10645,7 +11447,7 @@ bool JSObject::HasRealElementProperty(uint32_t index) { } -bool JSObject::HasRealNamedCallbackProperty(String* key) { +bool JSObject::HasRealNamedCallbackProperty(Name* key) { // Check access rights if needed. Isolate* isolate = GetIsolate(); if (IsAccessCheckNeeded()) { @@ -10665,7 +11467,7 @@ int JSObject::NumberOfLocalProperties(PropertyAttributes filter) { if (HasFastProperties()) { Map* map = this->map(); if (filter == NONE) return map->NumberOfOwnDescriptors(); - if (filter == DONT_ENUM) { + if (filter & DONT_ENUM) { int result = map->EnumLength(); if (result != Map::kInvalidEnumCache) return result; } @@ -10803,7 +11605,7 @@ void JSObject::GetLocalPropertyNames(FixedArray* storage, int index) { } else { property_dictionary()->CopyKeysTo(storage, index, - StringDictionary::UNSORTED); + NameDictionary::UNSORTED); } } @@ -10989,7 +11791,7 @@ class StringKey : public HashTableKey { uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); } - Object* AsObject() { return string_; } + Object* AsObject(Heap* heap) { return string_; } String* string_; uint32_t hash_; @@ -11064,9 +11866,9 @@ class StringSharedKey : public HashTableKey { source, shared, language_mode, scope_position); } - MUST_USE_RESULT MaybeObject* AsObject() { + MUST_USE_RESULT MaybeObject* AsObject(Heap* heap) { Object* obj; - { MaybeObject* maybe_obj = source_->GetHeap()->AllocateFixedArray(4); + { MaybeObject* maybe_obj = heap->AllocateFixedArray(4); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } FixedArray* other_array = FixedArray::cast(obj); @@ -11104,7 +11906,7 @@ class RegExpKey : public HashTableKey { uint32_t Hash() { return RegExpHash(string_, flags_); } - Object* AsObject() { + Object* AsObject(Heap* heap) { // Plain hash maps, which is where regexp keys are used, don't // use this function. UNREACHABLE(); @@ -11125,22 +11927,19 @@ class RegExpKey : public HashTableKey { Smi* flags_; }; -// Utf8SymbolKey carries a vector of chars as key. -class Utf8SymbolKey : public HashTableKey { +// Utf8StringKey carries a vector of chars as key. 
+class Utf8StringKey : public HashTableKey { public: - explicit Utf8SymbolKey(Vector<const char> string, uint32_t seed) + explicit Utf8StringKey(Vector<const char> string, uint32_t seed) : string_(string), hash_field_(0), seed_(seed) { } bool IsMatch(Object* string) { - return String::cast(string)->IsEqualTo(string_); + return String::cast(string)->IsUtf8EqualTo(string_); } uint32_t Hash() { if (hash_field_ != 0) return hash_field_ >> String::kHashShift; - unibrow::Utf8InputBuffer<> buffer(string_.start(), - static_cast<unsigned>(string_.length())); - chars_ = buffer.Utf16Length(); - hash_field_ = String::ComputeHashField(&buffer, chars_, seed_); + hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_); uint32_t result = hash_field_ >> String::kHashShift; ASSERT(result != 0); // Ensure that the hash value of 0 is never computed. return result; @@ -11150,10 +11949,11 @@ class Utf8SymbolKey : public HashTableKey { return String::cast(other)->Hash(); } - MaybeObject* AsObject() { + MaybeObject* AsObject(Heap* heap) { if (hash_field_ == 0) Hash(); - return Isolate::Current()->heap()->AllocateSymbol( - string_, chars_, hash_field_); + return heap->AllocateInternalizedStringFromUtf8(string_, + chars_, + hash_field_); } Vector<const char> string_; @@ -11164,35 +11964,15 @@ class Utf8SymbolKey : public HashTableKey { template <typename Char> -class SequentialSymbolKey : public HashTableKey { +class SequentialStringKey : public HashTableKey { public: - explicit SequentialSymbolKey(Vector<const Char> string, uint32_t seed) + explicit SequentialStringKey(Vector<const Char> string, uint32_t seed) : string_(string), hash_field_(0), seed_(seed) { } uint32_t Hash() { - StringHasher hasher(string_.length(), seed_); - - // Very long strings have a trivial hash that doesn't inspect the - // string contents. - if (hasher.has_trivial_hash()) { - hash_field_ = hasher.GetHashField(); - } else { - int i = 0; - // Do the iterative array index computation as long as there is a - // chance this is an array index. - while (i < string_.length() && hasher.is_array_index()) { - hasher.AddCharacter(static_cast<uc32>(string_[i])); - i++; - } - - // Process the remaining characters without updating the array - // index. - while (i < string_.length()) { - hasher.AddCharacterNoIndex(static_cast<uc32>(string_[i])); - i++; - } - hash_field_ = hasher.GetHashField(); - } + hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(), + string_.length(), + seed_); uint32_t result = hash_field_ >> String::kHashShift; ASSERT(result != 0); // Ensure that the hash value of 0 is never computed. 
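// Illustrative sketch (assumed types) of the lookup-key shape shared by the
// string-table keys above: a key can compare itself against a stored entry,
// hash itself, and materialize the entry to insert on a miss. The recurring
// change of passing Heap* into AsObject() is mirrored here by an explicit
// allocator argument instead of hidden global state.
#include <functional>
#include <string>
#include <vector>

struct Arena {
  std::vector<std::string*> owned;
  std::string* New(const std::string& s) {
    owned.push_back(new std::string(s));
    return owned.back();
  }
};

struct LookupKey {
  std::string chars;
  bool IsMatch(const std::string* stored) const { return *stored == chars; }
  size_t Hash() const { return std::hash<std::string>{}(chars); }
  std::string* AsObject(Arena* arena) const {  // allocator is passed in,
    return arena->New(chars);                  // never pulled from a global
  }
};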
@@ -11211,59 +11991,35 @@ class SequentialSymbolKey : public HashTableKey { -class AsciiSymbolKey : public SequentialSymbolKey<char> { +class OneByteStringKey : public SequentialStringKey<uint8_t> { public: - AsciiSymbolKey(Vector<const char> str, uint32_t seed) - : SequentialSymbolKey<char>(str, seed) { } + OneByteStringKey(Vector<const uint8_t> str, uint32_t seed) + : SequentialStringKey<uint8_t>(str, seed) { } bool IsMatch(Object* string) { - return String::cast(string)->IsAsciiEqualTo(string_); + return String::cast(string)->IsOneByteEqualTo(string_); } - MaybeObject* AsObject() { + MaybeObject* AsObject(Heap* heap) { if (hash_field_ == 0) Hash(); - return HEAP->AllocateAsciiSymbol(string_, hash_field_); + return heap->AllocateOneByteInternalizedString(string_, hash_field_); } }; -class SubStringAsciiSymbolKey : public HashTableKey { +class SubStringOneByteStringKey : public HashTableKey { public: - explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string, - int from, - int length, - uint32_t seed) - : string_(string), from_(from), length_(length), seed_(seed) { } + explicit SubStringOneByteStringKey(Handle<SeqOneByteString> string, + int from, + int length) + : string_(string), from_(from), length_(length) { } uint32_t Hash() { ASSERT(length_ >= 0); ASSERT(from_ + length_ <= string_->length()); - StringHasher hasher(length_, string_->GetHeap()->HashSeed()); - - // Very long strings have a trivial hash that doesn't inspect the - // string contents. - if (hasher.has_trivial_hash()) { - hash_field_ = hasher.GetHashField(); - } else { - int i = 0; - // Do the iterative array index computation as long as there is a - // chance this is an array index. - while (i < length_ && hasher.is_array_index()) { - hasher.AddCharacter(static_cast<uc32>( - string_->SeqAsciiStringGet(i + from_))); - i++; - } - - // Process the remaining characters without updating the array - // index. - while (i < length_) { - hasher.AddCharacterNoIndex(static_cast<uc32>( - string_->SeqAsciiStringGet(i + from_))); - i++; - } - hash_field_ = hasher.GetHashField(); - } - + uint8_t* chars = string_->GetChars() + from_; + hash_field_ = StringHasher::HashSequentialString( + chars, length_, string_->GetHeap()->HashSeed()); uint32_t result = hash_field_ >> String::kHashShift; ASSERT(result != 0); // Ensure that the hash value of 0 is never computed. 
return result; @@ -11275,45 +12031,44 @@ class SubStringAsciiSymbolKey : public HashTableKey { } bool IsMatch(Object* string) { - Vector<const char> chars(string_->GetChars() + from_, length_); - return String::cast(string)->IsAsciiEqualTo(chars); + Vector<const uint8_t> chars(string_->GetChars() + from_, length_); + return String::cast(string)->IsOneByteEqualTo(chars); } - MaybeObject* AsObject() { + MaybeObject* AsObject(Heap* heap) { if (hash_field_ == 0) Hash(); - Vector<const char> chars(string_->GetChars() + from_, length_); - return HEAP->AllocateAsciiSymbol(chars, hash_field_); + Vector<const uint8_t> chars(string_->GetChars() + from_, length_); + return heap->AllocateOneByteInternalizedString(chars, hash_field_); } private: - Handle<SeqAsciiString> string_; + Handle<SeqOneByteString> string_; int from_; int length_; uint32_t hash_field_; - uint32_t seed_; }; -class TwoByteSymbolKey : public SequentialSymbolKey<uc16> { +class TwoByteStringKey : public SequentialStringKey<uc16> { public: - explicit TwoByteSymbolKey(Vector<const uc16> str, uint32_t seed) - : SequentialSymbolKey<uc16>(str, seed) { } + explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed) + : SequentialStringKey<uc16>(str, seed) { } bool IsMatch(Object* string) { return String::cast(string)->IsTwoByteEqualTo(string_); } - MaybeObject* AsObject() { + MaybeObject* AsObject(Heap* heap) { if (hash_field_ == 0) Hash(); - return HEAP->AllocateTwoByteSymbol(string_, hash_field_); + return heap->AllocateTwoByteInternalizedString(string_, hash_field_); } }; -// SymbolKey carries a string/symbol object as key. -class SymbolKey : public HashTableKey { +// InternalizedStringKey carries a string/internalized-string object as key. +class InternalizedStringKey : public HashTableKey { public: - explicit SymbolKey(String* string) + explicit InternalizedStringKey(String* string) : string_(string) { } bool IsMatch(Object* string) { @@ -11326,23 +12081,20 @@ class SymbolKey : public HashTableKey { return String::cast(other)->Hash(); } - MaybeObject* AsObject() { - // Attempt to flatten the string, so that symbols will most often - // be flat strings. + MaybeObject* AsObject(Heap* heap) { + // Attempt to flatten the string, so that internalized strings will most + // often be flat strings. string_ = string_->TryFlattenGetString(); - Heap* heap = string_->GetHeap(); - // Transform string to symbol if possible. - Map* map = heap->SymbolMapForString(string_); + // Internalize the string if possible. + Map* map = heap->InternalizedStringMapForString(string_); if (map != NULL) { string_->set_map_no_write_barrier(map); - ASSERT(string_->IsSymbol()); + ASSERT(string_->IsInternalizedString()); return string_; } - // Otherwise allocate a new symbol. - StringInputBuffer buffer(string_); - return heap->AllocateInternalSymbol(&buffer, - string_->length(), - string_->hash_field()); + // Otherwise allocate a new internalized string. 
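// Illustrative sketch (assumed names) of the two internalization paths above:
// if the string's current shape has an internalized counterpart, the object
// is retagged in place; otherwise a fresh internalized copy is allocated.
#include <string>

struct Str {
  std::string chars;
  bool internalized;
  bool retaggable;   // stands in for InternalizedStringMapForString() != NULL
};

Str* Internalize(Str* s) {
  if (s->retaggable) {                        // cheap path: flip in place
    s->internalized = true;
    return s;
  }
  return new Str{s->chars, true, true};       // copy path (ownership elided)
}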
+ return heap->AllocateInternalizedStringImpl( + string_, string_->length(), string_->hash_field()); } static uint32_t StringHash(Object* obj) { @@ -11368,7 +12120,8 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) { template<typename Shape, typename Key> -MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for, +MaybeObject* HashTable<Shape, Key>::Allocate(Heap* heap, + int at_least_space_for, MinimumCapacity capacity_option, PretenureFlag pretenure) { ASSERT(!capacity_option || IS_POWER_OF_TWO(at_least_space_for)); @@ -11376,12 +12129,12 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for, ? at_least_space_for : ComputeCapacity(at_least_space_for); if (capacity > HashTable::kMaxCapacity) { - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x10); } Object* obj; - { MaybeObject* maybe_obj = Isolate::Current()->heap()-> - AllocateHashTable(EntryToIndex(capacity), pretenure); + { MaybeObject* maybe_obj = + heap-> AllocateHashTable(EntryToIndex(capacity), pretenure); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } HashTable::cast(obj)->SetNumberOfElements(0); @@ -11392,19 +12145,19 @@ MaybeObject* HashTable<Shape, Key>::Allocate(int at_least_space_for, // Find entry for key otherwise return kNotFound. -int StringDictionary::FindEntry(String* key) { - if (!key->IsSymbol()) { - return HashTable<StringDictionaryShape, String*>::FindEntry(key); +int NameDictionary::FindEntry(Name* key) { + if (!key->IsUniqueName()) { + return HashTable<NameDictionaryShape, Name*>::FindEntry(key); } - // Optimized for symbol key. Knowledge of the key type allows: - // 1. Move the check if the key is a symbol out of the loop. - // 2. Avoid comparing hash codes in symbol to symbol comparison. - // 3. Detect a case when a dictionary key is not a symbol but the key is. - // In case of positive result the dictionary key may be replaced by - // the symbol with minimal performance penalty. It gives a chance to - // perform further lookups in code stubs (and significant performance boost - // a certain style of code). + // Optimized for unique names. Knowledge of the key type allows: + // 1. Move the check if the key is unique out of the loop. + // 2. Avoid comparing hash codes in unique-to-unique comparison. + // 3. Detect a case when a dictionary key is not unique but the key is. + // In case of positive result the dictionary key may be replaced by the + // internalized string with minimal performance penalty. It gives a chance + // to perform further lookups in code stubs (and significant performance + // boost a certain style of code). // EnsureCapacity will guarantee the hash table is never full. uint32_t capacity = Capacity(); @@ -11416,15 +12169,15 @@ int StringDictionary::FindEntry(String* key) { Object* element = get(index); if (element->IsUndefined()) break; // Empty entry. if (key == element) return entry; - if (!element->IsSymbol() && + if (!element->IsUniqueName() && !element->IsTheHole() && - String::cast(element)->Equals(key)) { - // Replace a non-symbol key by the equivalent symbol for faster further - // lookups. + Name::cast(element)->Equals(key)) { + // Replace a key that is a non-internalized string by the equivalent + // internalized string for faster further lookups. 
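// Illustrative sketch (assumed names) of the probe loop above: the dictionary
// is open-addressed, and when a slot holds a non-unique key whose characters
// match the unique key being looked up, the slot is rewritten with the unique
// key so that later lookups can succeed on a pointer comparison alone.
#include <array>
#include <cstring>

struct Key { const char* chars; bool unique; };

int FindAndCanonicalize(std::array<Key*, 8>& slots, Key* unique_key,
                        int first_probe) {
  int probe = first_probe & 7;
  for (int i = 0; i < 8; ++i, probe = (probe + 1) & 7) {
    Key* k = slots[probe];
    if (k == nullptr) return -1;                        // empty slot: not found
    if (k == unique_key) return probe;                  // identity hit
    if (!k->unique && std::strcmp(k->chars, unique_key->chars) == 0) {
      slots[probe] = unique_key;                        // canonicalize the slot
      return probe;
    }
  }
  return -1;
}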
set(index, key); return entry; } - ASSERT(element->IsTheHole() || !String::cast(element)->Equals(key)); + ASSERT(element->IsTheHole() || !Name::cast(element)->Equals(key)); entry = NextProbe(entry, count++, capacity); } return kNotFound; @@ -11483,7 +12236,8 @@ MaybeObject* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) { (capacity > kMinCapacityForPretenure) && !GetHeap()->InNewSpace(this); Object* obj; { MaybeObject* maybe_obj = - Allocate(nof * 2, + Allocate(GetHeap(), + nof * 2, USE_DEFAULT_MINIMUM_CAPACITY, pretenure ? TENURED : NOT_TENURED); if (!maybe_obj->ToObject(&obj)) return maybe_obj; @@ -11514,7 +12268,8 @@ MaybeObject* HashTable<Shape, Key>::Shrink(Key key) { !GetHeap()->InNewSpace(this); Object* obj; { MaybeObject* maybe_obj = - Allocate(at_least_room_for, + Allocate(GetHeap(), + at_least_room_for, USE_DEFAULT_MINIMUM_CAPACITY, pretenure ? TENURED : NOT_TENURED); if (!maybe_obj->ToObject(&obj)) return maybe_obj; @@ -11541,7 +12296,7 @@ uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) { // Force instantiation of template instances class. // Please note this list is compiler dependent. -template class HashTable<SymbolTableShape, HashTableKey*>; +template class HashTable<StringTableShape, HashTableKey*>; template class HashTable<CompilationCacheShape, HashTableKey*>; @@ -11551,20 +12306,20 @@ template class HashTable<ObjectHashTableShape<1>, Object*>; template class HashTable<ObjectHashTableShape<2>, Object*>; -template class Dictionary<StringDictionaryShape, String*>; +template class Dictionary<NameDictionaryShape, Name*>; template class Dictionary<SeededNumberDictionaryShape, uint32_t>; template class Dictionary<UnseededNumberDictionaryShape, uint32_t>; template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>:: - Allocate(int at_least_space_for); + Allocate(Heap* heap, int at_least_space_for); template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: - Allocate(int at_least_space_for); + Allocate(Heap* heap, int at_least_space_for); -template MaybeObject* Dictionary<StringDictionaryShape, String*>::Allocate( - int); +template MaybeObject* Dictionary<NameDictionaryShape, Name*>:: + Allocate(Heap* heap, int n); template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::AtPut( uint32_t, Object*); @@ -11578,7 +12333,7 @@ template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>:: template Object* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: SlowReverseLookup(Object* value); -template Object* Dictionary<StringDictionaryShape, String*>::SlowReverseLookup( +template Object* Dictionary<NameDictionaryShape, Name*>::SlowReverseLookup( Object*); template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo( @@ -11586,32 +12341,31 @@ template void Dictionary<SeededNumberDictionaryShape, uint32_t>::CopyKeysTo( PropertyAttributes, Dictionary<SeededNumberDictionaryShape, uint32_t>::SortMode); -template Object* Dictionary<StringDictionaryShape, String*>::DeleteProperty( +template Object* Dictionary<NameDictionaryShape, Name*>::DeleteProperty( int, JSObject::DeleteMode); template Object* Dictionary<SeededNumberDictionaryShape, uint32_t>:: DeleteProperty(int, JSObject::DeleteMode); -template MaybeObject* Dictionary<StringDictionaryShape, String*>::Shrink( - String*); +template MaybeObject* Dictionary<NameDictionaryShape, Name*>::Shrink(Name* n); template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>::Shrink( uint32_t); -template void Dictionary<StringDictionaryShape, 
String*>::CopyKeysTo( +template void Dictionary<NameDictionaryShape, Name*>::CopyKeysTo( FixedArray*, int, - Dictionary<StringDictionaryShape, String*>::SortMode); + Dictionary<NameDictionaryShape, Name*>::SortMode); template int -Dictionary<StringDictionaryShape, String*>::NumberOfElementsFilterAttributes( +Dictionary<NameDictionaryShape, Name*>::NumberOfElementsFilterAttributes( PropertyAttributes); -template MaybeObject* Dictionary<StringDictionaryShape, String*>::Add( - String*, Object*, PropertyDetails); +template MaybeObject* Dictionary<NameDictionaryShape, Name*>::Add( + Name*, Object*, PropertyDetails); template MaybeObject* -Dictionary<StringDictionaryShape, String*>::GenerateNewEnumerationIndices(); +Dictionary<NameDictionaryShape, Name*>::GenerateNewEnumerationIndices(); template int Dictionary<SeededNumberDictionaryShape, uint32_t>:: @@ -11629,8 +12383,8 @@ template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>:: template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: EnsureCapacity(int, uint32_t); -template MaybeObject* Dictionary<StringDictionaryShape, String*>:: - EnsureCapacity(int, String*); +template MaybeObject* Dictionary<NameDictionaryShape, Name*>:: + EnsureCapacity(int, Name*); template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>:: AddEntry(uint32_t, Object*, PropertyDetails, uint32_t); @@ -11638,14 +12392,14 @@ template MaybeObject* Dictionary<SeededNumberDictionaryShape, uint32_t>:: template MaybeObject* Dictionary<UnseededNumberDictionaryShape, uint32_t>:: AddEntry(uint32_t, Object*, PropertyDetails, uint32_t); -template MaybeObject* Dictionary<StringDictionaryShape, String*>::AddEntry( - String*, Object*, PropertyDetails, uint32_t); +template MaybeObject* Dictionary<NameDictionaryShape, Name*>::AddEntry( + Name*, Object*, PropertyDetails, uint32_t); template int Dictionary<SeededNumberDictionaryShape, uint32_t>::NumberOfEnumElements(); template -int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements(); +int Dictionary<NameDictionaryShape, Name*>::NumberOfEnumElements(); template int HashTable<SeededNumberDictionaryShape, uint32_t>::FindEntry(uint32_t); @@ -11671,7 +12425,7 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) { Object* obj; { MaybeObject* maybe_obj = - SeededNumberDictionary::Allocate(dict->NumberOfElements()); + SeededNumberDictionary::Allocate(GetHeap(), dict->NumberOfElements()); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } SeededNumberDictionary* new_dict = SeededNumberDictionary::cast(obj); @@ -11690,8 +12444,9 @@ MaybeObject* JSObject::PrepareSlowElementsForSort(uint32_t limit) { ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32); Object* value = dict->ValueAt(i); PropertyDetails details = dict->DetailsAt(i); - if (details.type() == CALLBACKS) { + if (details.type() == CALLBACKS || details.IsReadOnly()) { // Bail out and do the sorting of undefineds and array holes in JS. + // Also bail out if the element is not supposed to be moved. 
return Smi::FromInt(-1); } uint32_t key = NumberToUint32(k); @@ -12062,7 +12817,7 @@ JSGlobalPropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) { Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell( Handle<GlobalObject> global, - Handle<String> name) { + Handle<Name> name) { Isolate* isolate = global->GetIsolate(); CALL_HEAP_FUNCTION(isolate, global->EnsurePropertyCell(*name), @@ -12070,10 +12825,10 @@ Handle<JSGlobalPropertyCell> GlobalObject::EnsurePropertyCell( } -MaybeObject* GlobalObject::EnsurePropertyCell(String* name) { +MaybeObject* GlobalObject::EnsurePropertyCell(Name* name) { ASSERT(!HasFastProperties()); int entry = property_dictionary()->FindEntry(name); - if (entry == StringDictionary::kNotFound) { + if (entry == NameDictionary::kNotFound) { Heap* heap = GetHeap(); Object* cell; { MaybeObject* maybe_cell = @@ -12087,7 +12842,7 @@ MaybeObject* GlobalObject::EnsurePropertyCell(String* name) { property_dictionary()->Add(name, cell, details); if (!maybe_dictionary->ToObject(&dictionary)) return maybe_dictionary; } - set_properties(StringDictionary::cast(dictionary)); + set_properties(NameDictionary::cast(dictionary)); return cell; } else { Object* value = property_dictionary()->ValueAt(entry); @@ -12097,20 +12852,20 @@ MaybeObject* GlobalObject::EnsurePropertyCell(String* name) { } -MaybeObject* SymbolTable::LookupString(String* string, Object** s) { - SymbolKey key(string); +MaybeObject* StringTable::LookupString(String* string, Object** s) { + InternalizedStringKey key(string); return LookupKey(&key, s); } -// This class is used for looking up two character strings in the symbol table. +// This class is used for looking up two character strings in the string table. // If we don't have a hit we don't want to waste much time so we unroll the // string hash calculation loop here for speed. Doesn't work if the two // characters form a decimal integer, since such strings have a different hash // algorithm. class TwoCharHashTableKey : public HashTableKey { public: - TwoCharHashTableKey(uint32_t c1, uint32_t c2, uint32_t seed) + TwoCharHashTableKey(uint16_t c1, uint16_t c2, uint32_t seed) : c1_(c1), c2_(c2) { // Char 1. uint32_t hash = seed; @@ -12126,17 +12881,17 @@ class TwoCharHashTableKey : public HashTableKey { hash ^= hash >> 11; hash += hash << 15; if ((hash & String::kHashBitMask) == 0) hash = StringHasher::kZeroHash; + hash_ = hash; #ifdef DEBUG - StringHasher hasher(2, seed); - hasher.AddCharacter(c1); - hasher.AddCharacter(c2); // If this assert fails then we failed to reproduce the two-character // version of the string hashing algorithm above. One reason could be // that we were passed two digits as characters, since the hash // algorithm is different in that case. - ASSERT_EQ(static_cast<int>(hasher.GetHash()), static_cast<int>(hash)); + uint16_t chars[2] = {c1, c2}; + uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed); + hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask; + ASSERT_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash)); #endif - hash_ = hash; } bool IsMatch(Object* o) { @@ -12153,110 +12908,109 @@ class TwoCharHashTableKey : public HashTableKey { return String::cast(key)->Hash(); } - Object* AsObject() { - // The TwoCharHashTableKey is only used for looking in the symbol + Object* AsObject(Heap* heap) { + // The TwoCharHashTableKey is only used for looking in the string // table, not for adding to it. 
UNREACHABLE(); return NULL; } private: - uint32_t c1_; - uint32_t c2_; + uint16_t c1_; + uint16_t c2_; uint32_t hash_; }; -bool SymbolTable::LookupSymbolIfExists(String* string, String** symbol) { - SymbolKey key(string); +bool StringTable::LookupStringIfExists(String* string, String** result) { + InternalizedStringKey key(string); int entry = FindEntry(&key); if (entry == kNotFound) { return false; } else { - String* result = String::cast(KeyAt(entry)); - ASSERT(StringShape(result).IsSymbol()); - *symbol = result; + *result = String::cast(KeyAt(entry)); + ASSERT(StringShape(*result).IsInternalized()); return true; } } -bool SymbolTable::LookupTwoCharsSymbolIfExists(uint32_t c1, - uint32_t c2, - String** symbol) { +bool StringTable::LookupTwoCharsStringIfExists(uint16_t c1, + uint16_t c2, + String** result) { TwoCharHashTableKey key(c1, c2, GetHeap()->HashSeed()); int entry = FindEntry(&key); if (entry == kNotFound) { return false; } else { - String* result = String::cast(KeyAt(entry)); - ASSERT(StringShape(result).IsSymbol()); - *symbol = result; + *result = String::cast(KeyAt(entry)); + ASSERT(StringShape(*result).IsInternalized()); return true; } } -MaybeObject* SymbolTable::LookupSymbol(Vector<const char> str, - Object** s) { - Utf8SymbolKey key(str, GetHeap()->HashSeed()); +MaybeObject* StringTable::LookupUtf8String(Vector<const char> str, + Object** s) { + Utf8StringKey key(str, GetHeap()->HashSeed()); return LookupKey(&key, s); } -MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str, - Object** s) { - AsciiSymbolKey key(str, GetHeap()->HashSeed()); +MaybeObject* StringTable::LookupOneByteString(Vector<const uint8_t> str, + Object** s) { + OneByteStringKey key(str, GetHeap()->HashSeed()); return LookupKey(&key, s); } -MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str, - int from, - int length, - Object** s) { - SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed()); +MaybeObject* StringTable::LookupSubStringOneByteString( + Handle<SeqOneByteString> str, + int from, + int length, + Object** s) { + SubStringOneByteStringKey key(str, from, length); return LookupKey(&key, s); } -MaybeObject* SymbolTable::LookupTwoByteSymbol(Vector<const uc16> str, +MaybeObject* StringTable::LookupTwoByteString(Vector<const uc16> str, Object** s) { - TwoByteSymbolKey key(str, GetHeap()->HashSeed()); + TwoByteStringKey key(str, GetHeap()->HashSeed()); return LookupKey(&key, s); } -MaybeObject* SymbolTable::LookupKey(HashTableKey* key, Object** s) { +MaybeObject* StringTable::LookupKey(HashTableKey* key, Object** s) { int entry = FindEntry(key); - // Symbol already in table. + // String already in table. if (entry != kNotFound) { *s = KeyAt(entry); return this; } - // Adding new symbol. Grow table if needed. + // Adding new string. Grow table if needed. Object* obj; { MaybeObject* maybe_obj = EnsureCapacity(1, key); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } - // Create symbol object. - Object* symbol; - { MaybeObject* maybe_symbol = key->AsObject(); - if (!maybe_symbol->ToObject(&symbol)) return maybe_symbol; + // Create string object. + Object* string; + { MaybeObject* maybe_string = key->AsObject(GetHeap()); + if (!maybe_string->ToObject(&string)) return maybe_string; } - // If the symbol table grew as part of EnsureCapacity, obj is not - // the current symbol table and therefore we cannot use - // SymbolTable::cast here. 
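// Illustrative sketch (assumed types) of the find-or-insert contract of
// LookupKey above: looking a string up may grow the table, so the caller must
// continue with the table object that is returned rather than the one it
// started with, and the out-parameter receives the canonical entry.
#include <string>
#include <unordered_set>

struct InternTable { std::unordered_set<std::string> entries; };

InternTable* LookupOrInsert(InternTable* table, const std::string& key,
                            const std::string** canonical) {
  auto it = table->entries.find(key);
  if (it == table->entries.end()) it = table->entries.insert(key).first;
  *canonical = &*it;     // pointer-stable canonical entry
  return table;          // in the real code this may be a grown replacement
}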
- SymbolTable* table = reinterpret_cast<SymbolTable*>(obj); + // If the string table grew as part of EnsureCapacity, obj is not + // the current string table and therefore we cannot use + // StringTable::cast here. + StringTable* table = reinterpret_cast<StringTable*>(obj); - // Add the new symbol and return it along with the symbol table. + // Add the new string and return it along with the string table. entry = table->FindInsertionEntry(key->Hash()); - table->set(EntryToIndex(entry), symbol); + table->set(EntryToIndex(entry), string); table->ElementAdded(); - *s = symbol; + *s = string; return table; } @@ -12320,7 +13074,7 @@ MaybeObject* CompilationCacheTable::Put(String* src, if (!maybe_cache->To(&cache)) return maybe_cache; Object* k; - MaybeObject* maybe_k = key.AsObject(); + MaybeObject* maybe_k = key.AsObject(GetHeap()); if (!maybe_k->To(&k)) return maybe_k; int entry = cache->FindInsertionEntry(key.Hash()); @@ -12349,7 +13103,7 @@ MaybeObject* CompilationCacheTable::PutEval(String* src, int entry = cache->FindInsertionEntry(key.Hash()); Object* k; - { MaybeObject* maybe_k = key.AsObject(); + { MaybeObject* maybe_k = key.AsObject(GetHeap()); if (!maybe_k->ToObject(&k)) return maybe_k; } @@ -12396,42 +13150,42 @@ void CompilationCacheTable::Remove(Object* value) { } -// SymbolsKey used for HashTable where key is array of symbols. -class SymbolsKey : public HashTableKey { +// StringsKey used for HashTable where key is array of internalized strings. +class StringsKey : public HashTableKey { public: - explicit SymbolsKey(FixedArray* symbols) : symbols_(symbols) { } + explicit StringsKey(FixedArray* strings) : strings_(strings) { } - bool IsMatch(Object* symbols) { - FixedArray* o = FixedArray::cast(symbols); - int len = symbols_->length(); + bool IsMatch(Object* strings) { + FixedArray* o = FixedArray::cast(strings); + int len = strings_->length(); if (o->length() != len) return false; for (int i = 0; i < len; i++) { - if (o->get(i) != symbols_->get(i)) return false; + if (o->get(i) != strings_->get(i)) return false; } return true; } - uint32_t Hash() { return HashForObject(symbols_); } + uint32_t Hash() { return HashForObject(strings_); } uint32_t HashForObject(Object* obj) { - FixedArray* symbols = FixedArray::cast(obj); - int len = symbols->length(); + FixedArray* strings = FixedArray::cast(obj); + int len = strings->length(); uint32_t hash = 0; for (int i = 0; i < len; i++) { - hash ^= String::cast(symbols->get(i))->Hash(); + hash ^= String::cast(strings->get(i))->Hash(); } return hash; } - Object* AsObject() { return symbols_; } + Object* AsObject(Heap* heap) { return strings_; } private: - FixedArray* symbols_; + FixedArray* strings_; }; Object* MapCache::Lookup(FixedArray* array) { - SymbolsKey key(array); + StringsKey key(array); int entry = FindEntry(&key); if (entry == kNotFound) return GetHeap()->undefined_value(); return get(EntryToIndex(entry) + 1); @@ -12439,7 +13193,7 @@ Object* MapCache::Lookup(FixedArray* array) { MaybeObject* MapCache::Put(FixedArray* array, Map* value) { - SymbolsKey key(array); + StringsKey key(array); Object* obj; { MaybeObject* maybe_obj = EnsureCapacity(1, &key); if (!maybe_obj->ToObject(&obj)) return maybe_obj; @@ -12455,10 +13209,11 @@ MaybeObject* MapCache::Put(FixedArray* array, Map* value) { template<typename Shape, typename Key> -MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) { +MaybeObject* Dictionary<Shape, Key>::Allocate(Heap* heap, + int at_least_space_for) { Object* obj; { MaybeObject* maybe_obj = - 
HashTable<Shape, Key>::Allocate(at_least_space_for); + HashTable<Shape, Key>::Allocate(heap, at_least_space_for); if (!maybe_obj->ToObject(&obj)) return maybe_obj; } // Initialize the next enumeration index. @@ -12468,8 +13223,8 @@ MaybeObject* Dictionary<Shape, Key>::Allocate(int at_least_space_for) { } -void StringDictionary::DoGenerateNewEnumerationIndices( - Handle<StringDictionary> dictionary) { +void NameDictionary::DoGenerateNewEnumerationIndices( + Handle<NameDictionary> dictionary) { CALL_HEAP_FUNCTION_VOID(dictionary->GetIsolate(), dictionary->GenerateNewEnumerationIndices()); } @@ -12586,7 +13341,7 @@ MaybeObject* Dictionary<Shape, Key>::AtPut(Key key, Object* value) { } Object* k; - { MaybeObject* maybe_k = Shape::AsObject(key); + { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key); if (!maybe_k->ToObject(&k)) return maybe_k; } PropertyDetails details = PropertyDetails(NONE, NORMAL); @@ -12623,7 +13378,7 @@ MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key, uint32_t hash) { // Compute the key object. Object* k; - { MaybeObject* maybe_k = Shape::AsObject(key); + { MaybeObject* maybe_k = Shape::AsObject(this->GetHeap(), key); if (!maybe_k->ToObject(&k)) return maybe_k; } @@ -12639,8 +13394,8 @@ MaybeObject* Dictionary<Shape, Key>::AddEntry(Key key, SetNextEnumerationIndex(index + 1); } SetEntry(entry, k, value, details); - ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber() - || Dictionary<Shape, Key>::KeyAt(entry)->IsString())); + ASSERT((Dictionary<Shape, Key>::KeyAt(entry)->IsNumber() || + Dictionary<Shape, Key>::KeyAt(entry)->IsName())); HashTable<Shape, Key>::ElementAdded(); return this; } @@ -12723,7 +13478,8 @@ MaybeObject* SeededNumberDictionary::Set(uint32_t key, details = PropertyDetails(details.attributes(), details.type(), DetailsAt(entry).dictionary_index()); - MaybeObject* maybe_object_key = SeededNumberDictionaryShape::AsObject(key); + MaybeObject* maybe_object_key = + SeededNumberDictionaryShape::AsObject(GetHeap(), key); Object* object_key; if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key; SetEntry(entry, object_key, value, details); @@ -12735,7 +13491,8 @@ MaybeObject* UnseededNumberDictionary::Set(uint32_t key, Object* value) { int entry = FindEntry(key); if (entry == kNotFound) return AddNumberEntry(key, value); - MaybeObject* maybe_object_key = UnseededNumberDictionaryShape::AsObject(key); + MaybeObject* maybe_object_key = + UnseededNumberDictionaryShape::AsObject(GetHeap(), key); Object* object_key; if (!maybe_object_key->ToObject(&object_key)) return maybe_object_key; SetEntry(entry, object_key, value); @@ -12751,7 +13508,8 @@ int Dictionary<Shape, Key>::NumberOfElementsFilterAttributes( int result = 0; for (int i = 0; i < capacity; i++) { Object* k = HashTable<Shape, Key>::KeyAt(i); - if (HashTable<Shape, Key>::IsKey(k)) { + if (HashTable<Shape, Key>::IsKey(k) && + ((filter & SYMBOLIC) == 0 || !k->IsSymbol())) { PropertyDetails details = DetailsAt(i); if (details.IsDeleted()) continue; PropertyAttributes attr = details.attributes(); @@ -12793,7 +13551,7 @@ void Dictionary<Shape, Key>::CopyKeysTo( } -FixedArray* StringDictionary::CopyEnumKeysTo(FixedArray* storage) { +FixedArray* NameDictionary::CopyEnumKeysTo(FixedArray* storage) { int length = storage->length(); ASSERT(length >= NumberOfEnumElements()); Heap* heap = GetHeap(); @@ -12806,7 +13564,7 @@ FixedArray* StringDictionary::CopyEnumKeysTo(FixedArray* storage) { // that are deleted or not enumerable. 
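// Illustrative sketch (assumed names, simplified) of the copy that follows:
// walk every dictionary slot, skip keys that are symbols, deleted, or not
// enumerable, and order the survivors by a recorded enumeration index so that
// property-creation order is preserved.
#include <algorithm>
#include <string>
#include <vector>

struct Slot {
  std::string key;
  int enum_index;                       // order the property was added in
  bool deleted, dont_enum, symbol_named;
};

std::vector<std::string> CopyEnumKeys(const std::vector<Slot>& slots) {
  std::vector<const Slot*> live;
  for (const Slot& s : slots) {
    if (s.deleted || s.dont_enum || s.symbol_named) continue;
    live.push_back(&s);
  }
  std::sort(live.begin(), live.end(), [](const Slot* a, const Slot* b) {
    return a->enum_index < b->enum_index;
  });
  std::vector<std::string> keys;
  for (const Slot* s : live) keys.push_back(s->key);
  return keys;
}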
for (int i = 0; i < capacity; i++) { Object* k = KeyAt(i); - if (IsKey(k)) { + if (IsKey(k) && !k->IsSymbol()) { PropertyDetails details = DetailsAt(i); if (details.IsDeleted() || details.IsDontEnum()) continue; properties++; @@ -12877,7 +13635,7 @@ Object* Dictionary<Shape, Key>::SlowReverseLookup(Object* value) { } -MaybeObject* StringDictionary::TransformPropertiesToFastFor( +MaybeObject* NameDictionary::TransformPropertiesToFastFor( JSObject* obj, int unused_property_fields) { // Make sure we preserve dictionary representation if there are too many // descriptors. @@ -12903,8 +13661,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor( PropertyType type = DetailsAt(i).type(); ASSERT(type != FIELD); instance_descriptor_length++; - if (type == NORMAL && - (!value->IsJSFunction() || heap->InNewSpace(value))) { + if (type == NORMAL && !value->IsJSFunction()) { number_of_fields += 1; } } @@ -12959,17 +13716,22 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor( Object* k = KeyAt(i); if (IsKey(k)) { Object* value = ValueAt(i); - // Ensure the key is a symbol before writing into the instance descriptor. - String* key; - MaybeObject* maybe_key = heap->LookupSymbol(String::cast(k)); - if (!maybe_key->To(&key)) return maybe_key; + Name* key; + if (k->IsSymbol()) { + key = Symbol::cast(k); + } else { + // Ensure the key is a unique name before writing into the + // instance descriptor. + MaybeObject* maybe_key = heap->InternalizeString(String::cast(k)); + if (!maybe_key->To(&key)) return maybe_key; + } PropertyDetails details = DetailsAt(i); ASSERT(details.descriptor_index() == details.dictionary_index()); int enumeration_index = details.descriptor_index(); PropertyType type = details.type(); - if (value->IsJSFunction() && !heap->InNewSpace(value)) { + if (value->IsJSFunction()) { ConstantFunctionDescriptor d(key, JSFunction::cast(value), details.attributes(), @@ -13141,6 +13903,58 @@ void ObjectHashTable::RemoveEntry(int entry) { } +DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator( + DeclaredAccessorDescriptor* descriptor) + : array_(descriptor->serialized_data()->GetDataStartAddress()), + length_(descriptor->serialized_data()->length()), + offset_(0) { +} + + +const DeclaredAccessorDescriptorData* + DeclaredAccessorDescriptorIterator::Next() { + ASSERT(offset_ < length_); + uint8_t* ptr = &array_[offset_]; + ASSERT(reinterpret_cast<uintptr_t>(ptr) % sizeof(uintptr_t) == 0); + const DeclaredAccessorDescriptorData* data = + reinterpret_cast<const DeclaredAccessorDescriptorData*>(ptr); + offset_ += sizeof(*data); + ASSERT(offset_ <= length_); + return data; +} + + +Handle<DeclaredAccessorDescriptor> DeclaredAccessorDescriptor::Create( + Isolate* isolate, + const DeclaredAccessorDescriptorData& descriptor, + Handle<DeclaredAccessorDescriptor> previous) { + int previous_length = + previous.is_null() ? 0 : previous->serialized_data()->length(); + int length = sizeof(descriptor) + previous_length; + Handle<ByteArray> serialized_descriptor = + isolate->factory()->NewByteArray(length); + Handle<DeclaredAccessorDescriptor> value = + isolate->factory()->NewDeclaredAccessorDescriptor(); + value->set_serialized_data(*serialized_descriptor); + // Copy in the data. 
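// Illustrative sketch (assumed types) of the serialization built above: each
// DeclaredAccessorDescriptor's byte array is a copy of the previous chain
// with one more fixed-size record appended, and the iterator walks it with a
// constant stride. (The real code also asserts pointer-sized alignment.)
#include <cstdint>
#include <cstring>
#include <vector>

struct Record { uint32_t type; uint32_t operand; };       // stand-in payload

std::vector<uint8_t> AppendRecord(const std::vector<uint8_t>& previous,
                                  const Record& r) {
  std::vector<uint8_t> chain(previous);                    // copy existing bytes
  const uint8_t* raw = reinterpret_cast<const uint8_t*>(&r);
  chain.insert(chain.end(), raw, raw + sizeof(r));         // append new record
  return chain;
}

Record RecordAt(const std::vector<uint8_t>& chain, size_t i) {
  Record r;
  std::memcpy(&r, chain.data() + i * sizeof(Record), sizeof(Record));
  return r;                                                // fixed-stride walk
}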
+ { + AssertNoAllocation no_allocation; + uint8_t* array = serialized_descriptor->GetDataStartAddress(); + if (previous_length != 0) { + uint8_t* previous_array = + previous->serialized_data()->GetDataStartAddress(); + memcpy(array, previous_array, previous_length); + array += previous_length; + } + ASSERT(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0); + DeclaredAccessorDescriptorData* data = + reinterpret_cast<DeclaredAccessorDescriptorData*>(array); + *data = descriptor; + } + return value; +} + + #ifdef ENABLE_DEBUGGER_SUPPORT // Check if there is a break point at this code position. bool DebugInfo::HasBreakPoint(int code_position) { @@ -13169,7 +13983,8 @@ Object* DebugInfo::GetBreakPointInfo(int code_position) { void DebugInfo::ClearBreakPoint(Handle<DebugInfo> debug_info, int code_position, Handle<Object> break_point_object) { - Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position)); + Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position), + Isolate::Current()); if (break_point_info->IsUndefined()) return; BreakPointInfo::ClearBreakPoint( Handle<BreakPointInfo>::cast(break_point_info), @@ -13183,7 +13998,8 @@ void DebugInfo::SetBreakPoint(Handle<DebugInfo> debug_info, int statement_position, Handle<Object> break_point_object) { Isolate* isolate = Isolate::Current(); - Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position)); + Handle<Object> break_point_info(debug_info->GetBreakPointInfo(code_position), + isolate); if (!break_point_info->IsUndefined()) { BreakPointInfo::SetBreakPoint( Handle<BreakPointInfo>::cast(break_point_info), diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h index 755dd42d9e..933a07599a 100644 --- a/deps/v8/src/objects.h +++ b/deps/v8/src/objects.h @@ -77,7 +77,7 @@ // - DescriptorArray // - HashTable // - Dictionary -// - SymbolTable +// - StringTable // - CompilationCacheTable // - CodeCacheHashTable // - MapCache @@ -95,15 +95,25 @@ // - ExternalIntArray // - ExternalUnsignedIntArray // - ExternalFloatArray -// - String -// - SeqString -// - SeqAsciiString -// - SeqTwoByteString -// - SlicedString -// - ConsString -// - ExternalString -// - ExternalAsciiString -// - ExternalTwoByteString +// - Name +// - String +// - SeqString +// - SeqOneByteString +// - SeqTwoByteString +// - SlicedString +// - ConsString +// - ExternalString +// - ExternalAsciiString +// - ExternalTwoByteString +// - InternalizedString +// - SeqInternalizedString +// - SeqOneByteInternalizedString +// - SeqTwoByteInternalizedString +// - ConsInternalizedString +// - ExternalInternalizedString +// - ExternalAsciiInternalizedString +// - ExternalTwoByteInternalizedString +// - Symbol // - HeapNumber // - Code // - Map @@ -111,7 +121,10 @@ // - Foreign // - SharedFunctionInfo // - Struct +// - DeclaredAccessorDescriptor // - AccessorInfo +// - DeclaredAccessorInfo +// - ExecutableAccessorInfo // - AccessorPair // - AccessCheckInfo // - InterceptorInfo @@ -139,11 +152,75 @@ enum CompareMapMode { ALLOW_ELEMENT_TRANSITION_MAPS }; -enum KeyedAccessGrowMode { - DO_NOT_ALLOW_JSARRAY_GROWTH, - ALLOW_JSARRAY_GROWTH +enum KeyedAccessStoreMode { + STANDARD_STORE, + STORE_TRANSITION_SMI_TO_OBJECT, + STORE_TRANSITION_SMI_TO_DOUBLE, + STORE_TRANSITION_DOUBLE_TO_OBJECT, + STORE_TRANSITION_HOLEY_SMI_TO_OBJECT, + STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE, + STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT, + STORE_AND_GROW_NO_TRANSITION, + STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT, + STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE, + 
STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT, + STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT, + STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE, + STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT, + STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS, + STORE_NO_TRANSITION_HANDLE_COW }; + +static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION - + STANDARD_STORE; +STATIC_ASSERT(STANDARD_STORE == 0); +STATIC_ASSERT(kGrowICDelta == + STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT - + STORE_TRANSITION_SMI_TO_OBJECT); +STATIC_ASSERT(kGrowICDelta == + STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE - + STORE_TRANSITION_SMI_TO_DOUBLE); +STATIC_ASSERT(kGrowICDelta == + STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT - + STORE_TRANSITION_DOUBLE_TO_OBJECT); + + +static inline KeyedAccessStoreMode GetGrowStoreMode( + KeyedAccessStoreMode store_mode) { + if (store_mode < STORE_AND_GROW_NO_TRANSITION) { + store_mode = static_cast<KeyedAccessStoreMode>( + static_cast<int>(store_mode) + kGrowICDelta); + } + return store_mode; +} + + +static inline bool IsTransitionStoreMode(KeyedAccessStoreMode store_mode) { + return store_mode > STANDARD_STORE && + store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT && + store_mode != STORE_AND_GROW_NO_TRANSITION; +} + + +static inline KeyedAccessStoreMode GetNonTransitioningStoreMode( + KeyedAccessStoreMode store_mode) { + if (store_mode >= STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) { + return store_mode; + } + if (store_mode >= STORE_AND_GROW_NO_TRANSITION) { + return STORE_AND_GROW_NO_TRANSITION; + } + return STANDARD_STORE; +} + + +static inline bool IsGrowStoreMode(KeyedAccessStoreMode store_mode) { + return store_mode >= STORE_AND_GROW_NO_TRANSITION && + store_mode <= STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT; +} + + // Setter that skips the write barrier if mode is SKIP_WRITE_BARRIER. enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER }; @@ -178,6 +255,12 @@ enum TransitionFlag { }; +enum DebugExtraICState { + DEBUG_BREAK, + DEBUG_PREPARE_STEP_IN +}; + + // Indicates whether the transition is simple: the target map of the transition // either extends the current map with a new property, or it modifies the // property that was added last to the current map. @@ -194,6 +277,18 @@ enum DescriptorFlag { OWN_DESCRIPTORS }; +// The GC maintains a bit of information, the MarkingParity, which toggles +// from odd to even and back every time marking is completed. Incremental +// marking can visit an object twice during a marking phase, so algorithms that +// that piggy-back on marking can use the parity to ensure that they only +// perform an operation on an object once per marking phase: they record the +// MarkingParity when they visit an object, and only re-visit the object when it +// is marked again and the MarkingParity changes. +enum MarkingParity { + NO_MARKING_PARITY, + ODD_MARKING_PARITY, + EVEN_MARKING_PARITY +}; // Instance size sentinel for objects of variable size. const int kVariableSizeSentinel = 0; @@ -213,8 +308,8 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; // encoding is mentioned explicitly in the name. Likewise, the default // representation is considered sequential. It is not mentioned in the // name. The other representations (e.g. CONS, EXTERNAL) are explicitly -// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a -// symbol) or a STRING_TYPE (if it is not a symbol). +// mentioned. 
Finally, the string is either a STRING_TYPE (if it is a normal +// string) or a INTERNALIZED_STRING_TYPE (if it is a internalized string). // // NOTE: The following things are some that depend on the string types having // instance_types that are less than those of all other types: @@ -225,29 +320,30 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; // JSObject for GC purposes. The first four entries here have typeof // 'object', whereas JS_FUNCTION_TYPE has typeof 'function'. #define INSTANCE_TYPE_LIST_ALL(V) \ - V(SYMBOL_TYPE) \ - V(ASCII_SYMBOL_TYPE) \ - V(CONS_SYMBOL_TYPE) \ - V(CONS_ASCII_SYMBOL_TYPE) \ - V(EXTERNAL_SYMBOL_TYPE) \ - V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \ - V(EXTERNAL_ASCII_SYMBOL_TYPE) \ - V(SHORT_EXTERNAL_SYMBOL_TYPE) \ - V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE) \ - V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE) \ V(STRING_TYPE) \ V(ASCII_STRING_TYPE) \ V(CONS_STRING_TYPE) \ V(CONS_ASCII_STRING_TYPE) \ V(SLICED_STRING_TYPE) \ V(EXTERNAL_STRING_TYPE) \ - V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \ V(EXTERNAL_ASCII_STRING_TYPE) \ + V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \ V(SHORT_EXTERNAL_STRING_TYPE) \ - V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \ V(SHORT_EXTERNAL_ASCII_STRING_TYPE) \ - V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \ + V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE) \ + \ + V(INTERNALIZED_STRING_TYPE) \ + V(ASCII_INTERNALIZED_STRING_TYPE) \ + V(CONS_INTERNALIZED_STRING_TYPE) \ + V(CONS_ASCII_INTERNALIZED_STRING_TYPE) \ + V(EXTERNAL_INTERNALIZED_STRING_TYPE) \ + V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \ + V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \ + V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE) \ + V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE) \ + V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE) \ \ + V(SYMBOL_TYPE) \ V(MAP_TYPE) \ V(CODE_TYPE) \ V(ODDBALL_TYPE) \ @@ -270,7 +366,9 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; V(EXTERNAL_PIXEL_ARRAY_TYPE) \ V(FILLER_TYPE) \ \ - V(ACCESSOR_INFO_TYPE) \ + V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE) \ + V(DECLARED_ACCESSOR_INFO_TYPE) \ + V(EXECUTABLE_ACCESSOR_INFO_TYPE) \ V(ACCESSOR_PAIR_TYPE) \ V(ACCESS_CHECK_INFO_TYPE) \ V(INTERCEPTOR_INFO_TYPE) \ @@ -279,6 +377,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; V(OBJECT_TEMPLATE_INFO_TYPE) \ V(SIGNATURE_INFO_TYPE) \ V(TYPE_SWITCH_INFO_TYPE) \ + V(ALLOCATION_SITE_INFO_TYPE) \ V(SCRIPT_TYPE) \ V(CODE_CACHE_TYPE) \ V(POLYMORPHIC_CODE_CACHE_TYPE) \ @@ -323,46 +422,6 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; // Since string types are not consecutive, this macro is used to // iterate over them. 
#define STRING_TYPE_LIST(V) \ - V(SYMBOL_TYPE, \ - kVariableSizeSentinel, \ - symbol, \ - Symbol) \ - V(ASCII_SYMBOL_TYPE, \ - kVariableSizeSentinel, \ - ascii_symbol, \ - AsciiSymbol) \ - V(CONS_SYMBOL_TYPE, \ - ConsString::kSize, \ - cons_symbol, \ - ConsSymbol) \ - V(CONS_ASCII_SYMBOL_TYPE, \ - ConsString::kSize, \ - cons_ascii_symbol, \ - ConsAsciiSymbol) \ - V(EXTERNAL_SYMBOL_TYPE, \ - ExternalTwoByteString::kSize, \ - external_symbol, \ - ExternalSymbol) \ - V(EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \ - ExternalTwoByteString::kSize, \ - external_symbol_with_ascii_data, \ - ExternalSymbolWithAsciiData) \ - V(EXTERNAL_ASCII_SYMBOL_TYPE, \ - ExternalAsciiString::kSize, \ - external_ascii_symbol, \ - ExternalAsciiSymbol) \ - V(SHORT_EXTERNAL_SYMBOL_TYPE, \ - ExternalTwoByteString::kShortSize, \ - short_external_symbol, \ - ShortExternalSymbol) \ - V(SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE, \ - ExternalTwoByteString::kShortSize, \ - short_external_symbol_with_ascii_data, \ - ShortExternalSymbolWithAsciiData) \ - V(SHORT_EXTERNAL_ASCII_SYMBOL_TYPE, \ - ExternalAsciiString::kShortSize, \ - short_external_ascii_symbol, \ - ShortExternalAsciiSymbol) \ V(STRING_TYPE, \ kVariableSizeSentinel, \ string, \ @@ -391,26 +450,67 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; ExternalTwoByteString::kSize, \ external_string, \ ExternalString) \ - V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \ - ExternalTwoByteString::kSize, \ - external_string_with_ascii_data, \ - ExternalStringWithAsciiData) \ V(EXTERNAL_ASCII_STRING_TYPE, \ ExternalAsciiString::kSize, \ external_ascii_string, \ ExternalAsciiString) \ + V(EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \ + ExternalTwoByteString::kSize, \ + external_string_with_ascii_data, \ + ExternalStringWithAsciiData) \ V(SHORT_EXTERNAL_STRING_TYPE, \ ExternalTwoByteString::kShortSize, \ short_external_string, \ ShortExternalString) \ + V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \ + ExternalAsciiString::kShortSize, \ + short_external_ascii_string, \ + ShortExternalAsciiString) \ V(SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE, \ ExternalTwoByteString::kShortSize, \ short_external_string_with_ascii_data, \ ShortExternalStringWithAsciiData) \ - V(SHORT_EXTERNAL_ASCII_STRING_TYPE, \ + \ + V(INTERNALIZED_STRING_TYPE, \ + kVariableSizeSentinel, \ + internalized_string, \ + InternalizedString) \ + V(ASCII_INTERNALIZED_STRING_TYPE, \ + kVariableSizeSentinel, \ + ascii_internalized_string, \ + AsciiInternalizedString) \ + V(CONS_INTERNALIZED_STRING_TYPE, \ + ConsString::kSize, \ + cons_internalized_string, \ + ConsInternalizedString) \ + V(CONS_ASCII_INTERNALIZED_STRING_TYPE, \ + ConsString::kSize, \ + cons_ascii_internalized_string, \ + ConsAsciiInternalizedString) \ + V(EXTERNAL_INTERNALIZED_STRING_TYPE, \ + ExternalTwoByteString::kSize, \ + external_internalized_string, \ + ExternalInternalizedString) \ + V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \ + ExternalAsciiString::kSize, \ + external_ascii_internalized_string, \ + ExternalAsciiInternalizedString) \ + V(EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \ + ExternalTwoByteString::kSize, \ + external_internalized_string_with_ascii_data, \ + ExternalInternalizedStringWithAsciiData) \ + V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE, \ + ExternalTwoByteString::kShortSize, \ + short_external_internalized_string, \ + ShortExternalInternalizedString) \ + V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE, \ ExternalAsciiString::kShortSize, \ - short_external_ascii_string, \ - ShortExternalAsciiString) + 
short_external_ascii_internalized_string, \ + ShortExternalAsciiInternalizedString) \ + V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE, \ + ExternalTwoByteString::kShortSize, \ + short_external_internalized_string_with_ascii_data, \ + ShortExternalInternalizedStringWithAsciiData) \ // A struct is a simple object a set of object-valued fields. Including an // object type in this causes the compiler to generate most of the boilerplate @@ -422,7 +522,11 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; // type tags, elements in this list have to be added to the INSTANCE_TYPE_LIST // manually. #define STRUCT_LIST_ALL(V) \ - V(ACCESSOR_INFO, AccessorInfo, accessor_info) \ + V(DECLARED_ACCESSOR_DESCRIPTOR, \ + DeclaredAccessorDescriptor, \ + declared_accessor_descriptor) \ + V(DECLARED_ACCESSOR_INFO, DeclaredAccessorInfo, declared_accessor_info) \ + V(EXECUTABLE_ACCESSOR_INFO, ExecutableAccessorInfo, executable_accessor_info)\ V(ACCESSOR_PAIR, AccessorPair, accessor_pair) \ V(ACCESS_CHECK_INFO, AccessCheckInfo, access_check_info) \ V(INTERCEPTOR_INFO, InterceptorInfo, interceptor_info) \ @@ -432,6 +536,7 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits; V(SIGNATURE_INFO, SignatureInfo, signature_info) \ V(TYPE_SWITCH_INFO, TypeSwitchInfo, type_switch_info) \ V(SCRIPT, Script, script) \ + V(ALLOCATION_SITE_INFO, AllocationSiteInfo, allocation_site_info) \ V(CODE_CACHE, CodeCache, code_cache) \ V(POLYMORPHIC_CODE_CACHE, PolymorphicCodeCache, polymorphic_code_cache) \ V(TYPE_FEEDBACK_INFO, TypeFeedbackInfo, type_feedback_info) \ @@ -456,18 +561,18 @@ const uint32_t kIsNotStringMask = 0x80; const uint32_t kStringTag = 0x0; const uint32_t kNotStringTag = 0x80; -// Bit 6 indicates that the object is a symbol (if set) or not (if cleared). +// Bit 6 indicates that the object is an internalized string (if set) or not. // There are not enough types that the non-string types (with bit 7 set) can // have bit 6 set too. -const uint32_t kIsSymbolMask = 0x40; -const uint32_t kNotSymbolTag = 0x0; -const uint32_t kSymbolTag = 0x40; +const uint32_t kIsInternalizedMask = 0x40; +const uint32_t kNotInternalizedTag = 0x0; +const uint32_t kInternalizedTag = 0x40; // If bit 7 is clear then bit 2 indicates whether the string consists of // two-byte characters or one-byte characters. const uint32_t kStringEncodingMask = 0x4; const uint32_t kTwoByteStringTag = 0x0; -const uint32_t kAsciiStringTag = 0x4; +const uint32_t kOneByteStringTag = 0x4; // If bit 7 is clear, the low-order 2 bits indicate the representation // of the string. @@ -504,57 +609,57 @@ const uint32_t kShortExternalStringTag = 0x10; // A ConsString with an empty string as the right side is a candidate -// for being shortcut by the garbage collector unless it is a -// symbol. It's not common to have non-flat symbols, so we do not -// shortcut them thereby avoiding turning symbols into strings. See -// heap.cc and mark-compact.cc. +// for being shortcut by the garbage collector unless it is internalized. +// It's not common to have non-flat internalized strings, so we do not +// shortcut them thereby avoiding turning internalized strings into strings. +// See heap.cc and mark-compact.cc. const uint32_t kShortcutTypeMask = kIsNotStringMask | - kIsSymbolMask | + kIsInternalizedMask | kStringRepresentationMask; const uint32_t kShortcutTypeTag = kConsStringTag; enum InstanceType { // String types. 
- SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag, - ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag, - CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag, - CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag, - SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | - kExternalStringTag | kShortExternalStringTag, - SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE = - kTwoByteStringTag | kSymbolTag | kExternalStringTag | - kAsciiDataHintTag | kShortExternalStringTag, - SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag | - kSymbolTag | kShortExternalStringTag, - EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag, - EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE = - kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag, - EXTERNAL_ASCII_SYMBOL_TYPE = - kAsciiStringTag | kSymbolTag | kExternalStringTag, STRING_TYPE = kTwoByteStringTag | kSeqStringTag, - ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag, + ASCII_STRING_TYPE = kOneByteStringTag | kSeqStringTag, CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag, - CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag, + CONS_ASCII_STRING_TYPE = kOneByteStringTag | kConsStringTag, SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag, - SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag, - SHORT_EXTERNAL_STRING_TYPE = - kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag, - SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE = - kTwoByteStringTag | kExternalStringTag | - kAsciiDataHintTag | kShortExternalStringTag, - SHORT_EXTERNAL_ASCII_STRING_TYPE = - kAsciiStringTag | kExternalStringTag | kShortExternalStringTag, + SLICED_ASCII_STRING_TYPE = kOneByteStringTag | kSlicedStringTag, EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag, + EXTERNAL_ASCII_STRING_TYPE = kOneByteStringTag | kExternalStringTag, EXTERNAL_STRING_WITH_ASCII_DATA_TYPE = - kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag, - // LAST_STRING_TYPE - EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag, - PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE, + EXTERNAL_STRING_TYPE | kAsciiDataHintTag, + SHORT_EXTERNAL_STRING_TYPE = EXTERNAL_STRING_TYPE | kShortExternalStringTag, + SHORT_EXTERNAL_ASCII_STRING_TYPE = + EXTERNAL_ASCII_STRING_TYPE | kShortExternalStringTag, + SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE = + EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kShortExternalStringTag, + + INTERNALIZED_STRING_TYPE = STRING_TYPE | kInternalizedTag, + ASCII_INTERNALIZED_STRING_TYPE = ASCII_STRING_TYPE | kInternalizedTag, + CONS_INTERNALIZED_STRING_TYPE = CONS_STRING_TYPE | kInternalizedTag, + CONS_ASCII_INTERNALIZED_STRING_TYPE = + CONS_ASCII_STRING_TYPE | kInternalizedTag, + EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_STRING_TYPE | kInternalizedTag, + EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = + EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag, + EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE = + EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag, + SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE = + SHORT_EXTERNAL_STRING_TYPE | kInternalizedTag, + SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = + SHORT_EXTERNAL_ASCII_STRING_TYPE | kInternalizedTag, + SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ASCII_DATA_TYPE = + SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE | kInternalizedTag, + + // Non-string names + SYMBOL_TYPE = kNotStringTag, // LAST_NAME_TYPE, FIRST_NONSTRING_TYPE // Objects allocated in their own 
spaces (never in new space). - MAP_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE + MAP_TYPE, CODE_TYPE, ODDBALL_TYPE, JS_GLOBAL_PROPERTY_CELL_TYPE, @@ -578,7 +683,9 @@ enum InstanceType { FILLER_TYPE, // LAST_DATA_TYPE // Structs. - ACCESSOR_INFO_TYPE, + DECLARED_ACCESSOR_DESCRIPTOR_TYPE, + DECLARED_ACCESSOR_INFO_TYPE, + EXECUTABLE_ACCESSOR_INFO_TYPE, ACCESSOR_PAIR_TYPE, ACCESS_CHECK_INFO_TYPE, INTERCEPTOR_INFO_TYPE, @@ -587,6 +694,7 @@ enum InstanceType { OBJECT_TEMPLATE_INFO_TYPE, SIGNATURE_INFO_TYPE, TYPE_SWITCH_INFO_TYPE, + ALLOCATION_SITE_INFO_TYPE, SCRIPT_TYPE, CODE_CACHE_TYPE, POLYMORPHIC_CODE_CACHE_TYPE, @@ -633,7 +741,11 @@ enum InstanceType { FIRST_TYPE = 0x0, LAST_TYPE = JS_FUNCTION_TYPE, INVALID_TYPE = FIRST_TYPE - 1, - FIRST_NONSTRING_TYPE = MAP_TYPE, + FIRST_NAME_TYPE = FIRST_TYPE, + LAST_NAME_TYPE = SYMBOL_TYPE, + FIRST_UNIQUE_NAME_TYPE = INTERNALIZED_STRING_TYPE, + LAST_UNIQUE_NAME_TYPE = SYMBOL_TYPE, + FIRST_NONSTRING_TYPE = SYMBOL_TYPE, // Boundaries for testing for an external array. FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE, LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_PIXEL_ARRAY_TYPE, @@ -679,7 +791,7 @@ STATIC_CHECK(FOREIGN_TYPE == Internals::kForeignType); V(DICTIONARY_PROPERTIES_SUB_TYPE) \ V(MAP_CODE_CACHE_SUB_TYPE) \ V(SCOPE_INFO_SUB_TYPE) \ - V(SYMBOL_TABLE_SUB_TYPE) \ + V(STRING_TABLE_SUB_TYPE) \ V(DESCRIPTOR_ARRAY_SUB_TYPE) \ V(TRANSITION_ARRAY_SUB_TYPE) @@ -737,6 +849,12 @@ template <class C> static inline bool Is(Object* obj); #define DECLARE_VERIFIER(Name) #endif +#ifdef OBJECT_PRINT +#define DECLARE_PRINTER(Name) void Name##Print(FILE* out = stdout); +#else +#define DECLARE_PRINTER(Name) +#endif + class MaybeObject BASE_EMBEDDED { public: inline bool IsFailure(); @@ -754,7 +872,9 @@ class MaybeObject BASE_EMBEDDED { return reinterpret_cast<Failure*>(this); } inline Object* ToObjectUnchecked() { - ASSERT(!IsFailure()); + // TODO(jkummerow): Turn this back into an ASSERT when we can be certain + // that it never fires in Release mode in the wild. + CHECK(!IsFailure()); return reinterpret_cast<Object*>(this); } inline Object* ToObjectChecked() { @@ -769,6 +889,13 @@ class MaybeObject BASE_EMBEDDED { return true; } + template<typename T> + inline bool ToHandle(Handle<T>* obj, Isolate* isolate) { + if (IsFailure()) return false; + *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate); + return true; + } + #ifdef OBJECT_PRINT // Prints this object with details. 
inline void Print() { @@ -794,8 +921,9 @@ class MaybeObject BASE_EMBEDDED { #define HEAP_OBJECT_TYPE_LIST(V) \ V(HeapNumber) \ + V(Name) \ + V(UniqueName) \ V(String) \ - V(Symbol) \ V(SeqString) \ V(ExternalString) \ V(ConsString) \ @@ -803,7 +931,9 @@ class MaybeObject BASE_EMBEDDED { V(ExternalTwoByteString) \ V(ExternalAsciiString) \ V(SeqTwoByteString) \ - V(SeqAsciiString) \ + V(SeqOneByteString) \ + V(InternalizedString) \ + V(Symbol) \ \ V(ExternalArray) \ V(ExternalByteArray) \ @@ -826,6 +956,7 @@ class MaybeObject BASE_EMBEDDED { V(TransitionArray) \ V(DeoptimizationInputData) \ V(DeoptimizationOutputData) \ + V(DependentCode) \ V(TypeFeedbackCells) \ V(FixedArray) \ V(FixedDoubleArray) \ @@ -851,7 +982,7 @@ class MaybeObject BASE_EMBEDDED { V(JSRegExp) \ V(HashTable) \ V(Dictionary) \ - V(SymbolTable) \ + V(StringTable) \ V(JSFunctionResultCache) \ V(NormalizedMapCache) \ V(CompilationCacheTable) \ @@ -866,10 +997,9 @@ class MaybeObject BASE_EMBEDDED { V(UndetectableObject) \ V(AccessCheckNeeded) \ V(JSGlobalPropertyCell) \ + V(ObjectHashTable) \ -class JSReceiver; - // Object is the abstract superclass for all classes in the // object hierarchy. // Object does not use any virtual functions to avoid the @@ -887,6 +1017,8 @@ class Object : public MaybeObject { #undef IS_TYPE_FUNCTION_DECL inline bool IsFixedArrayBase(); + inline bool IsExternal(); + inline bool IsAccessorInfo(); // Returns true if this object is an instance of the specified // function template. @@ -923,7 +1055,7 @@ class Object : public MaybeObject { inline bool HasSpecificClassOf(String* name); MUST_USE_RESULT MaybeObject* ToObject(); // ECMA-262 9.9. - Object* ToBoolean(); // ECMA-262 9.2. + bool BooleanValue(); // ECMA-262 9.2. // Convert to a JSObject if needed. // native_context is used when creating wrapper object. @@ -933,27 +1065,28 @@ class Object : public MaybeObject { // Failure is returned otherwise. MUST_USE_RESULT inline MaybeObject* ToSmi(); - void Lookup(String* name, LookupResult* result); + void Lookup(Name* name, LookupResult* result); // Property access. - MUST_USE_RESULT inline MaybeObject* GetProperty(String* key); + MUST_USE_RESULT inline MaybeObject* GetProperty(Name* key); MUST_USE_RESULT inline MaybeObject* GetProperty( - String* key, + Name* key, PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetPropertyWithReceiver( Object* receiver, - String* key, + Name* key, PropertyAttributes* attributes); + static Handle<Object> GetProperty(Handle<Object> object, Handle<Name> key); static Handle<Object> GetProperty(Handle<Object> object, Handle<Object> receiver, LookupResult* result, - Handle<String> key, + Handle<Name> key, PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetProperty(Object* receiver, LookupResult* result, - String* key, + Name* key, PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetPropertyWithDefinedGetter(Object* receiver, @@ -967,7 +1100,10 @@ class Object : public MaybeObject { uint32_t index); // Return the object's prototype (might be Heap::null_value()). - Object* GetPrototype(); + Object* GetPrototype(Isolate* isolate); + + // Return the prototype, or the method holder for a value-like object. + Object* GetDelegate(Isolate* isolate); // Returns the permanent hash code associated with this object depending on // the actual object type. 
Might return a failure in case no hash was @@ -1104,7 +1240,9 @@ class Failure: public MaybeObject { static inline Failure* RetryAfterGC(); // NEW_SPACE static inline Failure* Exception(); static inline Failure* InternalError(); - static inline Failure* OutOfMemoryException(); + // TODO(jkummerow): The value is temporary instrumentation. Remove it + // when it has served its purpose. + static inline Failure* OutOfMemoryException(intptr_t value); // Casting. static inline Failure* cast(MaybeObject* object); @@ -1327,7 +1465,8 @@ class HeapNumber: public HeapObject { static inline HeapNumber* cast(Object* obj); // Dispatched behavior. - Object* HeapNumberToBoolean(); + bool HeapNumberBooleanValue(); + inline void HeapNumberPrint() { HeapNumberPrint(stdout); } @@ -1415,20 +1554,20 @@ class JSReceiver: public HeapObject { static inline JSReceiver* cast(Object* obj); static Handle<Object> SetProperty(Handle<JSReceiver> object, - Handle<String> key, + Handle<Name> key, Handle<Object> value, PropertyAttributes attributes, StrictModeFlag strict_mode); // Can cause GC. MUST_USE_RESULT MaybeObject* SetProperty( - String* key, + Name* key, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, StoreFromKeyed store_from_keyed = MAY_BE_STORE_FROM_KEYED); MUST_USE_RESULT MaybeObject* SetProperty( LookupResult* result, - String* key, + Name* key, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -1436,7 +1575,7 @@ class JSReceiver: public HeapObject { MUST_USE_RESULT MaybeObject* SetPropertyWithDefinedSetter(JSReceiver* setter, Object* value); - MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode); + MUST_USE_RESULT MaybeObject* DeleteProperty(Name* name, DeleteMode mode); MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode); // Set the index'th array element. @@ -1457,15 +1596,19 @@ class JSReceiver: public HeapObject { // function that was used to instantiate the object). String* constructor_name(); - inline PropertyAttributes GetPropertyAttribute(String* name); + inline PropertyAttributes GetPropertyAttribute(Name* name); PropertyAttributes GetPropertyAttributeWithReceiver(JSReceiver* receiver, - String* name); - PropertyAttributes GetLocalPropertyAttribute(String* name); + Name* name); + PropertyAttributes GetLocalPropertyAttribute(Name* name); + + inline PropertyAttributes GetElementAttribute(uint32_t index); + inline PropertyAttributes GetLocalElementAttribute(uint32_t index); // Can cause a GC. - inline bool HasProperty(String* name); - inline bool HasLocalProperty(String* name); + inline bool HasProperty(Name* name); + inline bool HasLocalProperty(Name* name); inline bool HasElement(uint32_t index); + inline bool HasLocalElement(uint32_t index); // Return the object's prototype (might be Heap::null_value()). inline Object* GetPrototype(); @@ -1483,17 +1626,18 @@ class JSReceiver: public HeapObject { // Lookup a property. If found, the result is valid and has // detailed information. 
- void LocalLookup(String* name, LookupResult* result); - void Lookup(String* name, LookupResult* result); + void LocalLookup(Name* name, LookupResult* result, + bool search_hidden_prototypes = false); + void Lookup(Name* name, LookupResult* result); protected: Smi* GenerateIdentityHash(); private: - PropertyAttributes GetPropertyAttribute(JSReceiver* receiver, - LookupResult* result, - String* name, - bool continue_search); + PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver, + LookupResult* result, + Name* name, + bool continue_search); DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver); }; @@ -1510,7 +1654,7 @@ class JSObject: public JSReceiver { DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties. inline void initialize_properties(); inline bool HasFastProperties(); - inline StringDictionary* property_dictionary(); // Gets slow properties. + inline NameDictionary* property_dictionary(); // Gets slow properties. // [elements]: The elements (properties with names that are integers). // @@ -1542,6 +1686,8 @@ class JSObject: public JSReceiver { // Returns true if an object has elements of FAST_ELEMENTS or // FAST_SMI_ONLY_ELEMENTS. inline bool HasFastSmiOrObjectElements(); + // Returns true if an object has any of the fast elements kinds. + inline bool HasFastElements(); // Returns true if an object has elements of FAST_DOUBLE_ELEMENTS // ElementsKind. inline bool HasFastDoubleElements(); @@ -1582,34 +1728,34 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* GetPropertyWithCallback(Object* receiver, Object* structure, - String* name); + Name* name); // Can cause GC. MUST_USE_RESULT MaybeObject* SetPropertyForResult(LookupResult* result, - String* key, + Name* key, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, StoreFromKeyed store_mode); MUST_USE_RESULT MaybeObject* SetPropertyWithFailedAccessCheck( LookupResult* result, - String* name, + Name* name, Object* value, bool check_prototype, StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetPropertyWithCallback( Object* structure, - String* name, + Name* name, Object* value, JSObject* holder, StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetPropertyWithInterceptor( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode); MUST_USE_RESULT MaybeObject* SetPropertyPostInterceptor( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -1617,14 +1763,14 @@ class JSObject: public JSReceiver { static Handle<Object> SetLocalPropertyIgnoreAttributes( Handle<JSObject> object, - Handle<String> key, + Handle<Name> key, Handle<Object> value, PropertyAttributes attributes); // Try to follow an existing transition to a field with attributes NONE. The // return value indicates whether the transition was successful. static inline bool TryTransitionToField(Handle<JSObject> object, - Handle<String> key); + Handle<Name> key); inline int LastAddedFieldIndex(); @@ -1635,7 +1781,7 @@ class JSObject: public JSReceiver { // Can cause GC. MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes( - String* key, + Name* key, Object* value, PropertyAttributes attributes); @@ -1650,16 +1796,16 @@ class JSObject: public JSReceiver { // Sets the property value in a normalized object given (key, value, details). // Handles the special representation of JS global objects. 
static Handle<Object> SetNormalizedProperty(Handle<JSObject> object, - Handle<String> key, + Handle<Name> key, Handle<Object> value, PropertyDetails details); - MUST_USE_RESULT MaybeObject* SetNormalizedProperty(String* name, + MUST_USE_RESULT MaybeObject* SetNormalizedProperty(Name* name, Object* value, PropertyDetails details); // Deletes the named property in a normalized object. - MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(String* name, + MUST_USE_RESULT MaybeObject* DeleteNormalizedProperty(Name* name, DeleteMode mode); MUST_USE_RESULT MaybeObject* OptimizeAsPrototype(); @@ -1670,23 +1816,27 @@ class JSObject: public JSReceiver { // Used from JSReceiver. PropertyAttributes GetPropertyAttributePostInterceptor(JSObject* receiver, - String* name, + Name* name, bool continue_search); PropertyAttributes GetPropertyAttributeWithInterceptor(JSObject* receiver, - String* name, + Name* name, bool continue_search); PropertyAttributes GetPropertyAttributeWithFailedAccessCheck( Object* receiver, LookupResult* result, - String* name, + Name* name, bool continue_search); + PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver, + uint32_t index, + bool continue_search); static void DefineAccessor(Handle<JSObject> object, - Handle<String> name, + Handle<Name> name, Handle<Object> getter, Handle<Object> setter, PropertyAttributes attributes); - MUST_USE_RESULT MaybeObject* DefineAccessor(String* name, + // Can cause GC. + MUST_USE_RESULT MaybeObject* DefineAccessor(Name* name, Object* getter, Object* setter, PropertyAttributes attributes); @@ -1694,11 +1844,11 @@ class JSObject: public JSReceiver { // Returns a JavaScript null if this was not possible and we have to use the // slow case. Note that we can fail due to allocations, too. MUST_USE_RESULT MaybeObject* DefineFastAccessor( - String* name, + Name* name, AccessorComponent component, Object* accessor, PropertyAttributes attributes); - Object* LookupAccessor(String* name, AccessorComponent component); + Object* LookupAccessor(Name* name, AccessorComponent component); MUST_USE_RESULT MaybeObject* DefineAccessor(AccessorInfo* info); @@ -1706,19 +1856,19 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* GetPropertyWithFailedAccessCheck( Object* receiver, LookupResult* result, - String* name, + Name* name, PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetPropertyWithInterceptor( Object* receiver, - String* name, + Name* name, PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetPropertyPostInterceptor( Object* receiver, - String* name, + Name* name, PropertyAttributes* attributes); MUST_USE_RESULT MaybeObject* GetLocalPropertyPostInterceptor( Object* receiver, - String* name, + Name* name, PropertyAttributes* attributes); // Returns true if this is an instance of an api function and has @@ -1733,7 +1883,7 @@ class JSObject: public JSReceiver { // // Hidden properties are not local properties of the object itself. // Instead they are stored in an auxiliary structure kept as a local - // property with a special name Heap::hidden_symbol(). But if the + // property with a special name Heap::hidden_string(). But if the // receiver is a JSGlobalProxy then the auxiliary object is a property // of its prototype, and if it's a detached proxy, then you can't have // hidden properties. @@ -1741,18 +1891,18 @@ class JSObject: public JSReceiver { // Sets a hidden property on this object. Returns this object if successful, // undefined if called on a detached proxy. 
static Handle<Object> SetHiddenProperty(Handle<JSObject> obj, - Handle<String> key, + Handle<Name> key, Handle<Object> value); // Returns a failure if a GC is required. - MUST_USE_RESULT MaybeObject* SetHiddenProperty(String* key, Object* value); + MUST_USE_RESULT MaybeObject* SetHiddenProperty(Name* key, Object* value); // Gets the value of a hidden property with the given key. Returns undefined // if the property doesn't exist (or if called on a detached proxy), // otherwise returns the value set for the key. - Object* GetHiddenProperty(String* key); + Object* GetHiddenProperty(Name* key); // Deletes a hidden property. Deleting a non-existing property is // considered successful. - void DeleteHiddenProperty(String* key); - // Returns true if the object has a property with the hidden symbol as name. + void DeleteHiddenProperty(Name* key); + // Returns true if the object has a property with the hidden string as name. bool HasHiddenProperties(); static int GetIdentityHash(Handle<JSObject> obj); @@ -1760,8 +1910,9 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* SetIdentityHash(Smi* hash, CreationFlag flag); static Handle<Object> DeleteProperty(Handle<JSObject> obj, - Handle<String> name); - MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode); + Handle<Name> name); + // Can cause GC. + MUST_USE_RESULT MaybeObject* DeleteProperty(Name* name, DeleteMode mode); static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index); MUST_USE_RESULT MaybeObject* DeleteElement(uint32_t index, DeleteMode mode); @@ -1799,36 +1950,18 @@ class JSObject: public JSReceiver { // be represented as a double and not a Smi. bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements); - // Tells whether the index'th element is present. - bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index); - // Computes the new capacity when expanding the elements of a JSObject. static int NewElementsCapacity(int old_capacity) { // (old_capacity + 50%) + 16 return old_capacity + (old_capacity >> 1) + 16; } - // Tells whether the index'th element is present and how it is stored. - enum LocalElementType { - // There is no element with given index. - UNDEFINED_ELEMENT, - - // Element with given index is handled by interceptor. - INTERCEPTED_ELEMENT, - - // Element with given index is character in string. - STRING_CHARACTER_ELEMENT, - - // Element with given index is stored in fast backing store. - FAST_ELEMENT, + PropertyType GetLocalPropertyType(Name* name); + PropertyType GetLocalElementType(uint32_t index); - // Element with given index is stored in slow backing store. - DICTIONARY_ELEMENT - }; - - LocalElementType HasLocalElement(uint32_t index); - - bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index); + // These methods do not perform access checks! + AccessorPair* GetLocalPropertyAccessorPair(Name* name); + AccessorPair* GetLocalElementAccessorPair(uint32_t index); MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index, Object* value, @@ -1855,7 +1988,7 @@ class JSObject: public JSReceiver { StrictModeFlag strict_mode); // Empty handle is returned if the element cannot be set to the given value. - static MUST_USE_RESULT Handle<Object> SetElement( + static Handle<Object> SetElement( Handle<JSObject> object, uint32_t index, Handle<Object> value, @@ -1900,9 +2033,9 @@ class JSObject: public JSReceiver { inline bool HasIndexedInterceptor(); // Support functions for v8 api (needed for correct interceptor behavior). 
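Editor's note: the growth rule quoted in the hunk above — NewElementsCapacity returns (old_capacity + 50%) + 16 — is easy to sanity-check. A throwaway standalone copy for illustration only (NewElementsCapacityDemo is not part of the header):

    // new_capacity = old_capacity + old_capacity/2 + 16
    #include <cassert>

    static int NewElementsCapacityDemo(int old_capacity) {
      return old_capacity + (old_capacity >> 1) + 16;
    }

    int main() {
      assert(NewElementsCapacityDemo(0) == 16);     // empty backing store grows straight to 16
      assert(NewElementsCapacityDemo(16) == 40);    // 16 + 8 + 16
      assert(NewElementsCapacityDemo(100) == 166);  // 100 + 50 + 16
      return 0;
    }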
- bool HasRealNamedProperty(String* key); + bool HasRealNamedProperty(Name* key); bool HasRealElementProperty(uint32_t index); - bool HasRealNamedCallbackProperty(String* key); + bool HasRealNamedCallbackProperty(Name* key); // Get the header size for a JSObject. Used to compute the index of // internal fields as well as the number of internal fields. @@ -1915,12 +2048,12 @@ class JSObject: public JSReceiver { inline void SetInternalField(int index, Smi* value); // The following lookup functions skip interceptors. - void LocalLookupRealNamedProperty(String* name, LookupResult* result); - void LookupRealNamedProperty(String* name, LookupResult* result); - void LookupRealNamedPropertyInPrototypes(String* name, LookupResult* result); + void LocalLookupRealNamedProperty(Name* name, LookupResult* result); + void LookupRealNamedProperty(Name* name, LookupResult* result); + void LookupRealNamedPropertyInPrototypes(Name* name, LookupResult* result); MUST_USE_RESULT MaybeObject* SetElementWithCallbackSetterInPrototypes( uint32_t index, Object* value, bool* found, StrictModeFlag strict_mode); - void LookupCallbackProperty(String* name, LookupResult* result); + void LookupCallbackProperty(Name* name, LookupResult* result); // Returns the number of properties on this object filtering out properties // with the specified attributes (ignoring interceptors). @@ -1947,7 +2080,7 @@ class JSObject: public JSReceiver { // Add a property to a fast-case object using a map transition to // new_map. MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* new_map, - String* name, + Name* name, Object* value, int field_index); @@ -1958,12 +2091,12 @@ class JSObject: public JSReceiver { // This avoids the creation of many maps with the same constant // function, all orphaned. MUST_USE_RESULT MaybeObject* AddConstantFunctionProperty( - String* name, + Name* name, JSFunction* function, PropertyAttributes attributes); MUST_USE_RESULT MaybeObject* ReplaceSlowProperty( - String* name, + Name* name, Object* value, PropertyAttributes attributes); @@ -1981,36 +2114,38 @@ class JSObject: public JSReceiver { ElementsKind to_kind); MUST_USE_RESULT MaybeObject* TransitionElementsKind(ElementsKind to_kind); + MUST_USE_RESULT MaybeObject* UpdateAllocationSiteInfo( + ElementsKind to_kind); // Replaces an existing transition with a transition to a map with a FIELD. MUST_USE_RESULT MaybeObject* ConvertTransitionToMapTransition( int transition_index, - String* name, + Name* name, Object* new_value, PropertyAttributes attributes); // Converts a descriptor of any other type to a real field, backed by the // properties array. MUST_USE_RESULT MaybeObject* ConvertDescriptorToField( - String* name, + Name* name, Object* new_value, PropertyAttributes attributes); // Add a property to a fast-case object. MUST_USE_RESULT MaybeObject* AddFastProperty( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED); // Add a property to a slow-case object. - MUST_USE_RESULT MaybeObject* AddSlowProperty(String* name, + MUST_USE_RESULT MaybeObject* AddSlowProperty(Name* name, Object* value, PropertyAttributes attributes); - // Add a property to an object. + // Add a property to an object. May cause GC. 
MUST_USE_RESULT MaybeObject* AddProperty( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, @@ -2037,10 +2172,10 @@ class JSObject: public JSReceiver { MUST_USE_RESULT MaybeObject* NormalizeElements(); static void UpdateMapCodeCache(Handle<JSObject> object, - Handle<String> name, + Handle<Name> name, Handle<Code> code); - MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(String* name, Code* code); + MUST_USE_RESULT MaybeObject* UpdateMapCodeCache(Name* name, Code* code); // Transform slow named properties to fast variants. // Returns failure if allocation failed. @@ -2084,12 +2219,7 @@ class JSObject: public JSReceiver { // Dispatched behavior. void JSObjectShortPrint(StringStream* accumulator); -#ifdef OBJECT_PRINT - inline void JSObjectPrint() { - JSObjectPrint(stdout); - } - void JSObjectPrint(FILE* out); -#endif + DECLARE_PRINTER(JSObject) DECLARE_VERIFIER(JSObject) #ifdef OBJECT_PRINT inline void PrintProperties() { @@ -2179,6 +2309,15 @@ class JSObject: public JSReceiver { static inline int SizeOf(Map* map, HeapObject* object); }; + // Enqueue change record for Object.observe. May cause GC. + static void EnqueueChangeRecord(Handle<JSObject> object, + const char* type, + Handle<Name> name, + Handle<Object> old_value); + + // Deliver change records to observers. May cause GC. + static void DeliverChangeRecords(Isolate* isolate); + private: friend class DictionaryElementsAccessor; @@ -2186,6 +2325,14 @@ class JSObject: public JSReceiver { Object* structure, uint32_t index, Object* holder); + MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor( + JSReceiver* receiver, + uint32_t index, + bool continue_search); + MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor( + JSReceiver* receiver, + uint32_t index, + bool continue_search); MUST_USE_RESULT MaybeObject* SetElementWithCallback( Object* structure, uint32_t index, @@ -2212,15 +2359,15 @@ class JSObject: public JSReceiver { // read-only, reject and set '*done' to true. Otherwise, set '*done' to // false. Can cause GC and can return a failure result with '*done==true'. MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypes( - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, bool* done); - MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(String* name, + MUST_USE_RESULT MaybeObject* DeletePropertyPostInterceptor(Name* name, DeleteMode mode); - MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(String* name); + MUST_USE_RESULT MaybeObject* DeletePropertyWithInterceptor(Name* name); MUST_USE_RESULT MaybeObject* DeleteElementWithInterceptor(uint32_t index); @@ -2238,13 +2385,13 @@ class JSObject: public JSReceiver { // Gets the current elements capacity and the number of used elements. 
void GetElementsCapacityAndUsage(int* capacity, int* used); - bool CanSetCallback(String* name); + bool CanSetCallback(Name* name); MUST_USE_RESULT MaybeObject* SetElementCallback( uint32_t index, Object* structure, PropertyAttributes attributes); MUST_USE_RESULT MaybeObject* SetPropertyCallback( - String* name, + Name* name, Object* structure, PropertyAttributes attributes); MUST_USE_RESULT MaybeObject* DefineElementAccessor( @@ -2252,9 +2399,9 @@ class JSObject: public JSReceiver { Object* getter, Object* setter, PropertyAttributes attributes); - MUST_USE_RESULT MaybeObject* CreateAccessorPairFor(String* name); + MUST_USE_RESULT MaybeObject* CreateAccessorPairFor(Name* name); MUST_USE_RESULT MaybeObject* DefinePropertyAccessor( - String* name, + Name* name, Object* getter, Object* setter, PropertyAttributes attributes); @@ -2330,12 +2477,12 @@ class FixedArray: public FixedArrayBase { inline void set_unchecked(Heap* heap, int index, Object* value, WriteBarrierMode mode); - // Gives access to raw memory which stores the array's data. - inline Object** data_start(); - inline Object** GetFirstElementAddress(); inline bool ContainsOnlySmisOrHoles(); + // Gives access to raw memory which stores the array's data. + inline Object** data_start(); + // Copy operations. MUST_USE_RESULT inline MaybeObject* Copy(); MUST_USE_RESULT MaybeObject* CopySize(int new_length); @@ -2366,12 +2513,7 @@ class FixedArray: public FixedArrayBase { static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize; // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void FixedArrayPrint() { - FixedArrayPrint(stdout); - } - void FixedArrayPrint(FILE* out); -#endif + DECLARE_PRINTER(FixedArray) DECLARE_VERIFIER(FixedArray) #ifdef DEBUG // Checks if two FixedArrays have identical contents. @@ -2410,6 +2552,8 @@ class FixedArray: public FixedArrayBase { Object* value); private: + STATIC_CHECK(kHeaderSize == Internals::kFixedArrayHeaderSize); + DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray); }; @@ -2435,6 +2579,9 @@ class FixedDoubleArray: public FixedArrayBase { return kHeaderSize + length * kDoubleSize; } + // Gives access to raw memory which stores the array's data. + inline double* data_start(); + // Code Generation support. static int OffsetOfElementAt(int index) { return SizeFor(index); } @@ -2453,12 +2600,7 @@ class FixedDoubleArray: public FixedArrayBase { static const int kMaxLength = (kMaxSize - kHeaderSize) / kDoubleSize; // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void FixedDoubleArrayPrint() { - FixedDoubleArrayPrint(stdout); - } - void FixedDoubleArrayPrint(FILE* out); -#endif + DECLARE_PRINTER(FixedDoubleArray) DECLARE_VERIFIER(FixedDoubleArray) private: @@ -2560,10 +2702,12 @@ class DescriptorArray: public FixedArray { Object* new_index_cache); // Accessors for fetching instance descriptor at descriptor number. 
- inline String* GetKey(int descriptor_number); + inline Name* GetKey(int descriptor_number); inline Object** GetKeySlot(int descriptor_number); inline Object* GetValue(int descriptor_number); inline Object** GetValueSlot(int descriptor_number); + inline Object** GetDescriptorStartSlot(int descriptor_number); + inline Object** GetDescriptorEndSlot(int descriptor_number); inline PropertyDetails GetDetails(int descriptor_number); inline PropertyType GetType(int descriptor_number); inline int GetFieldIndex(int descriptor_number); @@ -2571,7 +2715,7 @@ class DescriptorArray: public FixedArray { inline Object* GetCallbacksObject(int descriptor_number); inline AccessorDescriptor* GetCallbacks(int descriptor_number); - inline String* GetSortedKey(int descriptor_number); + inline Name* GetSortedKey(int descriptor_number); inline int GetSortedKeyIndex(int descriptor_number); inline void SetSortedKey(int pointer, int descriptor_number); @@ -2601,11 +2745,11 @@ class DescriptorArray: public FixedArray { void Sort(); // Search the instance descriptors for given name. - INLINE(int Search(String* name, int number_of_own_descriptors)); + INLINE(int Search(Name* name, int number_of_own_descriptors)); // As the above, but uses DescriptorLookupCache and updates it when // necessary. - INLINE(int SearchWithCache(String* name, Map* map)); + INLINE(int SearchWithCache(Name* name, Map* map)); // Allocates a DescriptorArray, but returns the singleton // empty descriptor array object if number_of_descriptors is 0. @@ -2714,11 +2858,11 @@ class DescriptorArray: public FixedArray { enum SearchMode { ALL_ENTRIES, VALID_ENTRIES }; template<SearchMode search_mode, typename T> -inline int LinearSearch(T* array, String* name, int len, int valid_entries); +inline int LinearSearch(T* array, Name* name, int len, int valid_entries); template<SearchMode search_mode, typename T> -inline int Search(T* array, String* name, int valid_entries = 0); +inline int Search(T* array, Name* name, int valid_entries = 0); // HashTable is a subclass of FixedArray that implements a hash table @@ -2743,7 +2887,7 @@ inline int Search(T* array, String* name, int valid_entries = 0); // // Returns the hash value for object. // static uint32_t HashForObject(Key key, Object* object); // // Convert key to an object. -// static inline Object* AsObject(Key key); +// static inline Object* AsObject(Heap* heap, Key key); // // The prefix size indicates number of elements in the beginning // // of the backing storage. // static const int kPrefixSize = ..; @@ -2829,6 +2973,7 @@ class HashTable: public FixedArray { // Returns a new HashTable object. Might return Failure. MUST_USE_RESULT static MaybeObject* Allocate( + Heap* heap, int at_least_space_for, MinimumCapacity capacity_option = USE_DEFAULT_MINIMUM_CAPACITY, PretenureFlag pretenure = NOT_TENURED); @@ -2951,13 +3096,13 @@ class HashTableKey { virtual uint32_t HashForObject(Object* key) = 0; // Returns the key object for storing into the hash table. // If allocations fails a failure object is returned. - MUST_USE_RESULT virtual MaybeObject* AsObject() = 0; + MUST_USE_RESULT virtual MaybeObject* AsObject(Heap* heap) = 0; // Required. 
virtual ~HashTableKey() {} }; -class SymbolTableShape : public BaseShape<HashTableKey*> { +class StringTableShape : public BaseShape<HashTableKey*> { public: static inline bool IsMatch(HashTableKey* key, Object* value) { return key->IsMatch(value); @@ -2968,53 +3113,58 @@ class SymbolTableShape : public BaseShape<HashTableKey*> { static inline uint32_t HashForObject(HashTableKey* key, Object* object) { return key->HashForObject(object); } - MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) { - return key->AsObject(); + MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, + HashTableKey* key) { + return key->AsObject(heap); } static const int kPrefixSize = 0; static const int kEntrySize = 1; }; -class SeqAsciiString; +class SeqOneByteString; -// SymbolTable. +// StringTable. // // No special elements in the prefix and the element size is 1 -// because only the symbol itself (the key) needs to be stored. -class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> { +// because only the string itself (the key) needs to be stored. +class StringTable: public HashTable<StringTableShape, HashTableKey*> { public: - // Find symbol in the symbol table. If it is not there yet, it is - // added. The return value is the symbol table which might have - // been enlarged. If the return value is not a failure, the symbol - // pointer *s is set to the symbol found. - MUST_USE_RESULT MaybeObject* LookupSymbol(Vector<const char> str, Object** s); - MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str, - Object** s); - MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol( - Handle<SeqAsciiString> str, + // Find string in the string table. If it is not there yet, it is + // added. The return value is the string table which might have + // been enlarged. If the return value is not a failure, the string + // pointer *s is set to the string found. + MUST_USE_RESULT MaybeObject* LookupUtf8String( + Vector<const char> str, + Object** s); + MUST_USE_RESULT MaybeObject* LookupOneByteString( + Vector<const uint8_t> str, + Object** s); + MUST_USE_RESULT MaybeObject* LookupSubStringOneByteString( + Handle<SeqOneByteString> str, int from, int length, Object** s); - MUST_USE_RESULT MaybeObject* LookupTwoByteSymbol(Vector<const uc16> str, - Object** s); + MUST_USE_RESULT MaybeObject* LookupTwoByteString( + Vector<const uc16> str, + Object** s); MUST_USE_RESULT MaybeObject* LookupString(String* key, Object** s); - // Looks up a symbol that is equal to the given string and returns - // true if it is found, assigning the symbol to the given output + // Looks up a string that is equal to the given string and returns + // true if it is found, assigning the string to the given output // parameter. - bool LookupSymbolIfExists(String* str, String** symbol); - bool LookupTwoCharsSymbolIfExists(uint32_t c1, uint32_t c2, String** symbol); + bool LookupStringIfExists(String* str, String** result); + bool LookupTwoCharsStringIfExists(uint16_t c1, uint16_t c2, String** result); // Casting. 
- static inline SymbolTable* cast(Object* obj); + static inline StringTable* cast(Object* obj); private: MUST_USE_RESULT MaybeObject* LookupKey(HashTableKey* key, Object** s); template <bool seq_ascii> friend class JsonParser; - DISALLOW_IMPLICIT_CONSTRUCTORS(SymbolTable); + DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable); }; @@ -3031,8 +3181,9 @@ class MapCacheShape : public BaseShape<HashTableKey*> { return key->HashForObject(object); } - MUST_USE_RESULT static inline MaybeObject* AsObject(HashTableKey* key) { - return key->AsObject(); + MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, + HashTableKey* key) { + return key->AsObject(heap); } static const int kPrefixSize = 0; @@ -3042,11 +3193,11 @@ class MapCacheShape : public BaseShape<HashTableKey*> { // MapCache. // -// Maps keys that are a fixed array of symbols to a map. +// Maps keys that are a fixed array of unique names to a map. // Used for canonicalize maps for object literals. class MapCache: public HashTable<MapCacheShape, HashTableKey*> { public: - // Find cached value for a string key, otherwise return null. + // Find cached value for a name key, otherwise return null. Object* Lookup(FixedArray* key); MUST_USE_RESULT MaybeObject* Put(FixedArray* key, Map* value); static inline MapCache* cast(Object* obj); @@ -3120,7 +3271,8 @@ class Dictionary: public HashTable<Shape, Key> { } // Returns a new array for dictionary usage. Might return Failure. - MUST_USE_RESULT static MaybeObject* Allocate(int at_least_space_for); + MUST_USE_RESULT static MaybeObject* Allocate(Heap* heap, + int at_least_space_for); // Ensure enough space for n additional elements. MUST_USE_RESULT MaybeObject* EnsureCapacity(int n, Key key); @@ -3165,29 +3317,30 @@ class Dictionary: public HashTable<Shape, Key> { }; -class StringDictionaryShape : public BaseShape<String*> { +class NameDictionaryShape : public BaseShape<Name*> { public: - static inline bool IsMatch(String* key, Object* other); - static inline uint32_t Hash(String* key); - static inline uint32_t HashForObject(String* key, Object* object); - MUST_USE_RESULT static inline MaybeObject* AsObject(String* key); + static inline bool IsMatch(Name* key, Object* other); + static inline uint32_t Hash(Name* key); + static inline uint32_t HashForObject(Name* key, Object* object); + MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, + Name* key); static const int kPrefixSize = 2; static const int kEntrySize = 3; static const bool kIsEnumerable = true; }; -class StringDictionary: public Dictionary<StringDictionaryShape, String*> { +class NameDictionary: public Dictionary<NameDictionaryShape, Name*> { public: - static inline StringDictionary* cast(Object* obj) { + static inline NameDictionary* cast(Object* obj) { ASSERT(obj->IsDictionary()); - return reinterpret_cast<StringDictionary*>(obj); + return reinterpret_cast<NameDictionary*>(obj); } // Copies enumerable keys to preallocated fixed array. FixedArray* CopyEnumKeysTo(FixedArray* storage); static void DoGenerateNewEnumerationIndices( - Handle<StringDictionary> dictionary); + Handle<NameDictionary> dictionary); // For transforming properties of a JSObject. MUST_USE_RESULT MaybeObject* TransformPropertiesToFastFor( @@ -3196,14 +3349,15 @@ class StringDictionary: public Dictionary<StringDictionaryShape, String*> { // Find entry for key, otherwise return kNotFound. Optimized version of // HashTable::FindEntry. 
- int FindEntry(String* key); + int FindEntry(Name* key); }; class NumberDictionaryShape : public BaseShape<uint32_t> { public: static inline bool IsMatch(uint32_t key, Object* other); - MUST_USE_RESULT static inline MaybeObject* AsObject(uint32_t key); + MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, + uint32_t key); static const int kEntrySize = 3; static const bool kIsEnumerable = false; }; @@ -3307,7 +3461,8 @@ class ObjectHashTableShape : public BaseShape<Object*> { static inline bool IsMatch(Object* key, Object* other); static inline uint32_t Hash(Object* key); static inline uint32_t HashForObject(Object* key, Object* object); - MUST_USE_RESULT static inline MaybeObject* AsObject(Object* key); + MUST_USE_RESULT static inline MaybeObject* AsObject(Heap* heap, + Object* key); static const int kPrefixSize = 0; static const int kEntrySize = entrysize; }; @@ -3475,13 +3630,13 @@ class ScopeInfo : public FixedArray { // Lookup support for serialized scope info. Returns the // the stack slot index for a given slot name if the slot is - // present; otherwise returns a value < 0. The name must be a symbol - // (canonicalized). + // present; otherwise returns a value < 0. The name must be an internalized + // string. int StackSlotIndex(String* name); // Lookup support for serialized scope info. Returns the // context slot index for a given slot name if the slot is present; otherwise - // returns a value < 0. The name must be a symbol (canonicalized). + // returns a value < 0. The name must be an internalized string. // If the slot is present and mode != NULL, sets *mode to the corresponding // mode for that variable. int ContextSlotIndex(String* name, @@ -3490,19 +3645,26 @@ class ScopeInfo : public FixedArray { // Lookup support for serialized scope info. Returns the // parameter index for a given parameter name if the parameter is present; - // otherwise returns a value < 0. The name must be a symbol (canonicalized). + // otherwise returns a value < 0. The name must be an internalized string. int ParameterIndex(String* name); // Lookup support for serialized scope info. Returns the function context // slot index if the function name is present and context-allocated (named // function expressions, only), otherwise returns a value < 0. The name - // must be a symbol (canonicalized). + // must be an internalized string. int FunctionContextSlotIndex(String* name, VariableMode* mode); + + // Copies all the context locals into an object used to materialize a scope. + bool CopyContextLocalsToScopeObject(Isolate* isolate, + Handle<Context> context, + Handle<JSObject> scope_object); + + static Handle<ScopeInfo> Create(Scope* scope, Zone* zone); // Serializes empty scope info. - static ScopeInfo* Empty(); + static ScopeInfo* Empty(Isolate* isolate); #ifdef DEBUG void Print(); @@ -3658,12 +3820,7 @@ class ByteArray: public FixedArrayBase { inline int ByteArraySize() { return SizeFor(this->length()); } -#ifdef OBJECT_PRINT - inline void ByteArrayPrint() { - ByteArrayPrint(stdout); - } - void ByteArrayPrint(FILE* out); -#endif + DECLARE_PRINTER(ByteArray) DECLARE_VERIFIER(ByteArray) // Layout description. @@ -3692,12 +3849,8 @@ class FreeSpace: public HeapObject { // Casting. static inline FreeSpace* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void FreeSpacePrint() { - FreeSpacePrint(stdout); - } - void FreeSpacePrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(FreeSpace) DECLARE_VERIFIER(FreeSpace) // Layout description. 
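Editor's note: the *Shape classes touched above (StringTableShape, MapCacheShape, NameDictionaryShape, NumberDictionaryShape, ObjectHashTableShape) all follow the policy-class protocol documented on HashTable: a stateless struct supplying IsMatch, Hash, HashForObject, AsObject (now taking the Heap*), and the kPrefixSize/kEntrySize constants, passed to the table as a template parameter. Below is a self-contained analogue of that idea with toy types standing in for V8's; ToyHashTable and CStringShape are illustrative only, and there is no Heap*, GC, or resizing here:

    // Toy analogue of the HashTable<Shape, Key> policy-class pattern.
    // CStringShape plays the role of StringTableShape; std::string slots
    // stand in for heap objects.
    #include <cstdint>
    #include <string>
    #include <vector>

    struct CStringShape {
      typedef const char* Key;
      static bool IsMatch(Key key, const std::string& stored) {
        return stored == key;
      }
      static uint32_t Hash(Key key) {  // FNV-1a; a stand-in for V8's string hash
        uint32_t hash = 2166136261u;
        for (const char* p = key; *p != '\0'; ++p) {
          hash = (hash ^ static_cast<uint8_t>(*p)) * 16777619u;
        }
        return hash;
      }
      static const int kEntrySize = 1;  // one slot per entry, as in StringTableShape
    };

    template <class Shape>
    class ToyHashTable {
     public:
      explicit ToyHashTable(size_t capacity) : slots_(capacity) {}

      // Returns true if the key was newly inserted, false if it was already
      // present -- roughly the "find or add" behavior of StringTable.
      bool LookupOrInsert(typename Shape::Key key) {
        size_t index = Shape::Hash(key) % slots_.size();
        while (!slots_[index].empty()) {
          if (Shape::IsMatch(key, slots_[index])) return false;
          index = (index + 1) % slots_.size();  // linear probing
        }
        slots_[index] = key;
        return true;
      }

     private:
      std::vector<std::string> slots_;
    };

    int main() {
      ToyHashTable<CStringShape> table(8);
      bool first = table.LookupOrInsert("length");
      bool again = table.LookupOrInsert("length");
      return (first && !again) ? 0 : 1;
    }

The same separation of "table mechanics" from "key semantics" is what lets this patch rename StringDictionary to NameDictionary by swapping only the shape's key type.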
@@ -3772,12 +3925,8 @@ class ExternalPixelArray: public ExternalArray { // Casting. static inline ExternalPixelArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalPixelArrayPrint() { - ExternalPixelArrayPrint(stdout); - } - void ExternalPixelArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalPixelArray) DECLARE_VERIFIER(ExternalPixelArray) private: @@ -3799,12 +3948,8 @@ class ExternalByteArray: public ExternalArray { // Casting. static inline ExternalByteArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalByteArrayPrint() { - ExternalByteArrayPrint(stdout); - } - void ExternalByteArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalByteArray) DECLARE_VERIFIER(ExternalByteArray) private: @@ -3826,12 +3971,8 @@ class ExternalUnsignedByteArray: public ExternalArray { // Casting. static inline ExternalUnsignedByteArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalUnsignedByteArrayPrint() { - ExternalUnsignedByteArrayPrint(stdout); - } - void ExternalUnsignedByteArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalUnsignedByteArray) DECLARE_VERIFIER(ExternalUnsignedByteArray) private: @@ -3853,12 +3994,8 @@ class ExternalShortArray: public ExternalArray { // Casting. static inline ExternalShortArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalShortArrayPrint() { - ExternalShortArrayPrint(stdout); - } - void ExternalShortArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalShortArray) DECLARE_VERIFIER(ExternalShortArray) private: @@ -3880,12 +4017,8 @@ class ExternalUnsignedShortArray: public ExternalArray { // Casting. static inline ExternalUnsignedShortArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalUnsignedShortArrayPrint() { - ExternalUnsignedShortArrayPrint(stdout); - } - void ExternalUnsignedShortArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalUnsignedShortArray) DECLARE_VERIFIER(ExternalUnsignedShortArray) private: @@ -3907,12 +4040,8 @@ class ExternalIntArray: public ExternalArray { // Casting. static inline ExternalIntArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalIntArrayPrint() { - ExternalIntArrayPrint(stdout); - } - void ExternalIntArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalIntArray) DECLARE_VERIFIER(ExternalIntArray) private: @@ -3934,12 +4063,8 @@ class ExternalUnsignedIntArray: public ExternalArray { // Casting. static inline ExternalUnsignedIntArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalUnsignedIntArrayPrint() { - ExternalUnsignedIntArrayPrint(stdout); - } - void ExternalUnsignedIntArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalUnsignedIntArray) DECLARE_VERIFIER(ExternalUnsignedIntArray) private: @@ -3961,12 +4086,8 @@ class ExternalFloatArray: public ExternalArray { // Casting. static inline ExternalFloatArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalFloatArrayPrint() { - ExternalFloatArrayPrint(stdout); - } - void ExternalFloatArrayPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(ExternalFloatArray) DECLARE_VERIFIER(ExternalFloatArray) private: @@ -3988,12 +4109,8 @@ class ExternalDoubleArray: public ExternalArray { // Casting. 
static inline ExternalDoubleArray* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ExternalDoubleArrayPrint() { - ExternalDoubleArrayPrint(stdout); - } - void ExternalDoubleArrayPrint(FILE* out); -#endif // OBJECT_PRINT + // Dispatched behavior. + DECLARE_PRINTER(ExternalDoubleArray) DECLARE_VERIFIER(ExternalDoubleArray) private: @@ -4156,6 +4273,11 @@ class TypeFeedbackCells: public FixedArray { // The object that indicates a megamorphic state. static inline Handle<Object> MegamorphicSentinel(Isolate* isolate); + // The object that indicates a monomorphic state of Array with + // ElementsKind + static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate, + ElementsKind elements_kind); + // A raw version of the uninitialized sentinel that's safe to read during // garbage collection (e.g., for patching the cache). static inline Object* RawUninitializedSentinel(Heap* heap); @@ -4177,17 +4299,13 @@ class Code: public HeapObject { public: // Opaque data type for encapsulating code flags like kind, inline // cache state, and arguments count. - // FLAGS_MIN_VALUE and FLAGS_MAX_VALUE are specified to ensure that - // enumeration type has correct value range (see Issue 830 for more details). - enum Flags { - FLAGS_MIN_VALUE = kMinInt, - FLAGS_MAX_VALUE = kMaxInt - }; + typedef uint32_t Flags; #define CODE_KIND_LIST(V) \ V(FUNCTION) \ V(OPTIMIZED_FUNCTION) \ V(STUB) \ + V(COMPILED_STUB) \ V(BUILTIN) \ V(LOAD_IC) \ V(KEYED_LOAD_IC) \ @@ -4216,6 +4334,8 @@ class Code: public HeapObject { // Flags. STATIC_ASSERT(LAST_CODE_KIND < 16); + static const char* Kind2String(Kind kind); + // Types of stubs. enum StubType { NORMAL, @@ -4237,7 +4357,6 @@ class Code: public HeapObject { #ifdef ENABLE_DISASSEMBLER // Printing - static const char* Kind2String(Kind kind); static const char* ICState2String(InlineCacheState state); static const char* StubType2String(StubType type); static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra); @@ -4261,9 +4380,18 @@ class Code: public HeapObject { // [deoptimization_data]: Array containing data for deopt. DECL_ACCESSORS(deoptimization_data, FixedArray) - // [type_feedback_info]: Struct containing type feedback information. - // Will contain either a TypeFeedbackInfo object, or undefined. + // [type_feedback_info]: Struct containing type feedback information for + // unoptimized code. Optimized code can temporarily store the head of + // the list of the dependent optimized functions during deoptimization. + // STUBs can use this slot to store arbitrary information as a Smi. + // Will contain either a TypeFeedbackInfo object, or JSFunction object, + // or undefined, or a Smi. DECL_ACCESSORS(type_feedback_info, Object) + inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value); + inline int stub_info(); + inline void set_stub_info(int info); + inline Object* deoptimizing_functions(); + inline void set_deoptimizing_functions(Object* value); // [gc_metadata]: Field used to hold GC related metadata. The contents of this // field does not have to be traced during garbage collection since @@ -4275,6 +4403,11 @@ class Code: public HeapObject { inline void set_ic_age(int count); inline int ic_age(); + // [prologue_offset]: Offset of the function prologue, used for aging + // FUNCTIONs and OPTIMIZED_FUNCTIONs. + inline int prologue_offset(); + inline void set_prologue_offset(int offset); + // Unchecked accessors to be used during GC. 
inline ByteArray* unchecked_relocation_info(); inline FixedArray* unchecked_deoptimization_data(); @@ -4294,6 +4427,7 @@ class Code: public HeapObject { // Testers for IC stub kinds. inline bool is_inline_cache_stub(); + inline bool is_debug_break(); inline bool is_load_stub() { return kind() == LOAD_IC; } inline bool is_keyed_load_stub() { return kind() == KEYED_LOAD_IC; } inline bool is_store_stub() { return kind() == STORE_IC; } @@ -4360,6 +4494,9 @@ class Code: public HeapObject { inline unsigned stack_check_table_offset(); inline void set_stack_check_table_offset(unsigned offset); + inline bool stack_check_patched_for_osr(); + inline void set_stack_check_patched_for_osr(bool value); + // [check type]: For kind CALL_IC, tells how to check if the // receiver is valid for the given call. inline CheckType check_type(); @@ -4369,21 +4506,6 @@ class Code: public HeapObject { inline byte unary_op_type(); inline void set_unary_op_type(byte value); - // [type-recording binary op type]: For kind BINARY_OP_IC. - inline byte binary_op_type(); - inline void set_binary_op_type(byte value); - inline byte binary_op_result_type(); - inline void set_binary_op_result_type(byte value); - - // [compare state]: For kind COMPARE_IC, tells what state the stub is in. - inline byte compare_state(); - inline void set_compare_state(byte value); - - // [compare_operation]: For kind COMPARE_IC tells what compare operation the - // stub was generated for. - inline byte compare_operation(); - inline void set_compare_operation(byte value); - // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in. inline byte to_boolean_state(); inline void set_to_boolean_state(byte value); @@ -4393,6 +4515,12 @@ class Code: public HeapObject { inline bool has_function_cache(); inline void set_has_function_cache(bool flag); + + // [marked_for_deoptimization]: For kind OPTIMIZED_FUNCTION tells whether + // the code is going to be deoptimized because of dead embedded maps. + inline bool marked_for_deoptimization(); + inline void set_marked_for_deoptimization(bool flag); + bool allowed_in_shared_map_code_cache(); // Get the safepoint entry for the given pc. @@ -4404,26 +4532,29 @@ class Code: public HeapObject { // Find the first map in an IC stub. Map* FindFirstMap(); + void FindAllMaps(MapHandleList* maps); - class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {}; - class ExtraICStateKeyedAccessGrowMode: - public BitField<KeyedAccessGrowMode, 1, 1> {}; // NOLINT + // Find the first code in an IC stub. 
+ Code* FindFirstCode(); + void FindAllCode(CodeHandleList* code_list, int length); - static const int kExtraICStateGrowModeShift = 1; + class ExtraICStateStrictMode: public BitField<StrictModeFlag, 0, 1> {}; + class ExtraICStateKeyedAccessStoreMode: + public BitField<KeyedAccessStoreMode, 1, 4> {}; // NOLINT static inline StrictModeFlag GetStrictMode(ExtraICState extra_ic_state) { return ExtraICStateStrictMode::decode(extra_ic_state); } - static inline KeyedAccessGrowMode GetKeyedAccessGrowMode( + static inline KeyedAccessStoreMode GetKeyedAccessStoreMode( ExtraICState extra_ic_state) { - return ExtraICStateKeyedAccessGrowMode::decode(extra_ic_state); + return ExtraICStateKeyedAccessStoreMode::decode(extra_ic_state); } static inline ExtraICState ComputeExtraICState( - KeyedAccessGrowMode grow_mode, + KeyedAccessStoreMode store_mode, StrictModeFlag strict_mode) { - return ExtraICStateKeyedAccessGrowMode::encode(grow_mode) | + return ExtraICStateKeyedAccessStoreMode::encode(store_mode) | ExtraICStateStrictMode::encode(strict_mode); } @@ -4438,10 +4569,10 @@ class Code: public HeapObject { static inline Flags ComputeMonomorphicFlags( Kind kind, - StubType type, ExtraICState extra_ic_state = kNoExtraICState, - InlineCacheHolderFlag holder = OWN_MAP, - int argc = -1); + StubType type = NORMAL, + int argc = -1, + InlineCacheHolderFlag holder = OWN_MAP); static inline InlineCacheState ExtractICStateFromFlags(Flags flags); static inline StubType ExtractTypeFromFlags(Flags flags); @@ -4511,17 +4642,35 @@ class Code: public HeapObject { template<typename StaticVisitor> inline void CodeIterateBody(Heap* heap); -#ifdef OBJECT_PRINT - inline void CodePrint() { - CodePrint(stdout); - } - void CodePrint(FILE* out); -#endif + + DECLARE_PRINTER(Code) DECLARE_VERIFIER(Code) void ClearInlineCaches(); void ClearTypeFeedbackCells(Heap* heap); +#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge, + enum Age { + kNoAge = 0, + CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM) + kAfterLastCodeAge, + kLastCodeAge = kAfterLastCodeAge - 1, + kCodeAgeCount = kAfterLastCodeAge - 1 + }; +#undef DECLARE_CODE_AGE_ENUM + + // Code aging + static void MakeCodeAgeSequenceYoung(byte* sequence); + void MakeOlder(MarkingParity); + static bool IsYoungSequence(byte* sequence); + bool IsOld(); + + void PrintDeoptLocation(int bailout_id); + +#ifdef VERIFY_HEAP + void VerifyEmbeddedMapsDependency(); +#endif + // Max loop nesting marker used to postpose OSR. We don't take loop // nesting that is deeper than 5 levels into account. static const int kMaxLoopNestingMarker = 6; @@ -4541,8 +4690,10 @@ class Code: public HeapObject { static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize; static const int kKindSpecificFlags2Offset = kKindSpecificFlags1Offset + kIntSize; + // Note: We might be able to squeeze this into the flags above. + static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize; - static const int kHeaderPaddingStart = kKindSpecificFlags2Offset + kIntSize; + static const int kHeaderPaddingStart = kPrologueOffset + kIntSize; // Add padding to align the instruction start following right after // the Code object header. 
@@ -4567,8 +4718,8 @@ class Code: public HeapObject { class TypeField: public BitField<StubType, 3, 3> {}; class CacheHolderField: public BitField<InlineCacheHolderFlag, 6, 1> {}; class KindField: public BitField<Kind, 7, 4> {}; - class ExtraICStateField: public BitField<ExtraICState, 11, 2> {}; - class IsPregeneratedField: public BitField<bool, 13, 1> {}; + class ExtraICStateField: public BitField<ExtraICState, 11, 5> {}; + class IsPregeneratedField: public BitField<bool, 16, 1> {}; // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION) static const int kStackSlotsFirstBit = 0; @@ -4576,51 +4727,34 @@ class Code: public HeapObject { static const int kUnaryOpTypeFirstBit = kStackSlotsFirstBit + kStackSlotsBitCount; static const int kUnaryOpTypeBitCount = 3; - static const int kBinaryOpTypeFirstBit = - kStackSlotsFirstBit + kStackSlotsBitCount; - static const int kBinaryOpTypeBitCount = 3; - static const int kBinaryOpResultTypeFirstBit = - kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount; - static const int kBinaryOpResultTypeBitCount = 3; - static const int kCompareStateFirstBit = - kStackSlotsFirstBit + kStackSlotsBitCount; - static const int kCompareStateBitCount = 3; - static const int kCompareOperationFirstBit = - kCompareStateFirstBit + kCompareStateBitCount; - static const int kCompareOperationBitCount = 4; static const int kToBooleanStateFirstBit = kStackSlotsFirstBit + kStackSlotsBitCount; static const int kToBooleanStateBitCount = 8; static const int kHasFunctionCacheFirstBit = kStackSlotsFirstBit + kStackSlotsBitCount; static const int kHasFunctionCacheBitCount = 1; + static const int kMarkedForDeoptimizationFirstBit = + kStackSlotsFirstBit + kStackSlotsBitCount + 1; + static const int kMarkedForDeoptimizationBitCount = 1; STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32); STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32); - STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32); - STATIC_ASSERT(kBinaryOpResultTypeFirstBit + - kBinaryOpResultTypeBitCount <= 32); - STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32); - STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32); STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32); STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32); + STATIC_ASSERT(kMarkedForDeoptimizationFirstBit + + kMarkedForDeoptimizationBitCount <= 32); class StackSlotsField: public BitField<int, kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT class UnaryOpTypeField: public BitField<int, kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT - class BinaryOpTypeField: public BitField<int, - kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT - class BinaryOpResultTypeField: public BitField<int, - kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT - class CompareStateField: public BitField<int, - kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT - class CompareOperationField: public BitField<int, - kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT class ToBooleanStateField: public BitField<int, kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT class HasFunctionCacheField: public BitField<bool, kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {}; // NOLINT + class MarkedForDeoptimizationField: public BitField<bool, + kMarkedForDeoptimizationFirstBit, + kMarkedForDeoptimizationBitCount> {}; // NOLINT // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION) static 
const int kStubMajorKeyFirstBit = 0; @@ -4640,20 +4774,108 @@ class Code: public HeapObject { // KindSpecificFlags2 layout (FUNCTION) class StackCheckTableOffsetField: public BitField<int, 0, 31> {}; + class StackCheckPatchedForOSRField: public BitField<bool, 31, 1> {}; // Signed field cannot be encoded using the BitField class. - static const int kArgumentsCountShift = 14; + static const int kArgumentsCountShift = 17; static const int kArgumentsCountMask = ~((1 << kArgumentsCountShift) - 1); + static const int kArgumentsBits = + PlatformSmiTagging::kSmiValueSize - Code::kArgumentsCountShift + 1; + static const int kMaxArguments = (1 << kArgumentsBits) - 1; // This constant should be encodable in an ARM instruction. static const int kFlagsNotUsedInLookup = TypeField::kMask | CacheHolderField::kMask; private: + friend class RelocIterator; + + // Code aging + byte* FindCodeAgeSequence(); + static void GetCodeAgeAndParity(Code* code, Age* age, + MarkingParity* parity); + static void GetCodeAgeAndParity(byte* sequence, Age* age, + MarkingParity* parity); + static Code* GetCodeAgeStub(Age age, MarkingParity parity); + + // Code aging -- platform-specific + static void PatchPlatformCodeAge(byte* sequence, Age age, + MarkingParity parity); + DISALLOW_IMPLICIT_CONSTRUCTORS(Code); }; +// This class describes the layout of dependent codes array of a map. The +// array is partitioned into several groups of dependent codes. Each group +// contains codes with the same dependency on the map. The array has the +// following layout for n dependency groups: +// +// +----+----+-----+----+---------+----------+-----+---------+-----------+ +// | C1 | C2 | ... | Cn | group 1 | group 2 | ... | group n | undefined | +// +----+----+-----+----+---------+----------+-----+---------+-----------+ +// +// The first n elements are Smis, each of them specifies the number of codes +// in the corresponding group. The subsequent elements contain grouped code +// objects. The suffix of the array can be filled with the undefined value if +// the number of codes is less than the length of the array. The order of the +// code objects within a group is not preserved. +// +// All code indexes used in the class are counted starting from the first +// code object of the first group. In other words, code index 0 corresponds +// to array index n = kCodesStartIndex. + +class DependentCode: public FixedArray { + public: + enum DependencyGroup { + // Group of code that weakly embed this map and depend on being + // deoptimized when the map is garbage collected. + kWeaklyEmbeddedGroup, + // Group of code that omit run-time prototype checks for prototypes + // described by this map. The group is deoptimized whenever an object + // described by this map changes shape (and transitions to a new map), + // possibly invalidating the assumptions embedded in the code. + kPrototypeCheckGroup, + kGroupCount = kPrototypeCheckGroup + 1 + }; + + // Array for holding the index of the first code object of each group. + // The last element stores the total number of code objects. 
+ class GroupStartIndexes { + public: + explicit GroupStartIndexes(DependentCode* entries); + void Recompute(DependentCode* entries); + int at(int i) { return start_indexes_[i]; } + int number_of_entries() { return start_indexes_[kGroupCount]; } + private: + int start_indexes_[kGroupCount + 1]; + }; + + bool Contains(DependencyGroup group, Code* code); + static Handle<DependentCode> Insert(Handle<DependentCode> entries, + DependencyGroup group, + Handle<Code> value); + void DeoptimizeDependentCodeGroup(Isolate* isolate, + DependentCode::DependencyGroup group); + + // The following low-level accessors should only be used by this class + // and the mark compact collector. + inline int number_of_entries(DependencyGroup group); + inline void set_number_of_entries(DependencyGroup group, int value); + inline Code* code_at(int i); + inline void set_code_at(int i, Code* value); + inline Object** code_slot_at(int i); + inline void clear_code_at(int i); + static inline DependentCode* cast(Object* object); + + private: + // Make a room at the end of the given group by moving out the first + // code objects of the subsequent groups. + inline void ExtendGroup(DependencyGroup group); + static const int kCodesStartIndex = kGroupCount; +}; + + // All heap objects have a Map that describes their structure. // A Map contains information about: // - Size information about the object @@ -4701,6 +4923,7 @@ class Map: public HeapObject { class FunctionWithPrototype: public BitField<bool, 23, 1> {}; class DictionaryMap: public BitField<bool, 24, 1> {}; class OwnsDescriptors: public BitField<bool, 25, 1> {}; + class IsObserved: public BitField<bool, 26, 1> {}; // Tells whether the object in the prototype property will be used // for instances created from this function. If the prototype @@ -4717,7 +4940,7 @@ class Map: public HeapObject { inline bool function_with_prototype(); // Tells whether the instance with this map should be ignored by the - // __proto__ accessor. + // Object.getPrototypeOf() function and the __proto__ accessor. inline void set_is_hidden_prototype() { set_bit_field(bit_field() | (1 << kIsHiddenPrototype)); } @@ -4773,6 +4996,10 @@ class Map: public HeapObject { inline void set_elements_kind(ElementsKind elements_kind) { ASSERT(elements_kind < kElementsKindCount); ASSERT(kElementsKindCount <= (1 << kElementsKindBitCount)); + ASSERT(!is_observed() || + elements_kind == DICTIONARY_ELEMENTS || + elements_kind == NON_STRICT_ARGUMENTS_ELEMENTS || + IsExternalArrayElementsKind(elements_kind)); set_bit_field2((bit_field2() & ~kElementsKindMask) | (elements_kind << kElementsKindShift)); ASSERT(this->elements_kind() == elements_kind); @@ -4801,6 +5028,10 @@ class Map: public HeapObject { return IsFastDoubleElementsKind(elements_kind()); } + inline bool has_fast_elements() { + return IsFastElementsKind(elements_kind()); + } + inline bool has_non_strict_arguments_elements() { return elements_kind() == NON_STRICT_ARGUMENTS_ELEMENTS; } @@ -4828,7 +5059,7 @@ class Map: public HeapObject { Map* transitioned_map); inline void SetTransition(int transition_index, Map* target); inline Map* GetTransition(int transition_index); - MUST_USE_RESULT inline MaybeObject* AddTransition(String* key, + MUST_USE_RESULT inline MaybeObject* AddTransition(Name* key, Map* target, SimpleTransitionFlag flag); DECL_ACCESSORS(transitions, TransitionArray) @@ -4874,6 +5105,9 @@ class Map: public HeapObject { // [stub cache]: contains stubs compiled for this map. 
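A sketch of the index bookkeeping behind the grouped DependentCode layout described above: the first kGroupCount slots hold per-group counts, and GroupStartIndexes presumably prefix-sums them so that code index 0 lands at array index kCodesStartIndex. Everything below is illustrative and uses plain integers instead of Code objects:

#include <cassert>

// kGroupCount = 2 matches the kWeaklyEmbeddedGroup / kPrototypeCheckGroup
// split above; real entries are Code objects, only the counts matter here.
static const int kGroupCount = 2;
static const int kCodesStartIndex = kGroupCount;

struct GroupStartIndexes {
  int start_indexes_[kGroupCount + 1];
  explicit GroupStartIndexes(const int* counts) {
    int current = 0;  // Code indexes are counted from the first group.
    for (int g = 0; g < kGroupCount; g++) {
      start_indexes_[g] = current;
      current += counts[g];
    }
    start_indexes_[kGroupCount] = current;  // Total number of code objects.
  }
  int at(int i) const { return start_indexes_[i]; }
  int number_of_entries() const { return start_indexes_[kGroupCount]; }
};

int main() {
  int counts[kGroupCount] = {3, 2};  // 3 weakly embedded, 2 prototype check.
  GroupStartIndexes starts(counts);
  assert(starts.at(0) == 0);               // Group 0 begins at code index 0,
  assert(starts.at(1) == 3);               // group 1 right after it.
  assert(starts.number_of_entries() == 5);
  // Array index of code i in group g: kCodesStartIndex + starts.at(g) + i.
  assert(kCodesStartIndex + starts.at(1) + 0 == 5);
  return 0;
}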
DECL_ACCESSORS(code_cache, Object) + // [dependent code]: list of optimized codes that have this map embedded. + DECL_ACCESSORS(dependent_code, DependentCode) + // [back pointer]: points back to the parent map from which a transition // leads to this map. The field overlaps with prototype transitions and the // back pointer will be moved into the prototype transitions array if @@ -4923,11 +5157,11 @@ class Map: public HeapObject { // with the given holder if the name is found. The holder may be // NULL when this function is used from the compiler. inline void LookupDescriptor(JSObject* holder, - String* name, + Name* name, LookupResult* result); inline void LookupTransition(JSObject* holder, - String* name, + Name* name, LookupResult* result); // The size of transition arrays are limited so they do not end up in large @@ -4965,16 +5199,18 @@ class Map: public HeapObject { set_bit_field3(EnumLengthBits::update(bit_field3(), length)); } - + inline bool CanTrackAllocationSite(); inline bool owns_descriptors(); inline void set_owns_descriptors(bool is_shared); + inline bool is_observed(); + inline void set_is_observed(bool is_observed); MUST_USE_RESULT MaybeObject* RawCopy(int instance_size); MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors(); MUST_USE_RESULT MaybeObject* CopyDropDescriptors(); MUST_USE_RESULT MaybeObject* CopyReplaceDescriptors( DescriptorArray* descriptors, - String* name, + Name* name, TransitionFlag flag, int descriptor_index); MUST_USE_RESULT MaybeObject* ShareDescriptor(DescriptorArray* descriptors, @@ -5002,7 +5238,7 @@ class Map: public HeapObject { MUST_USE_RESULT MaybeObject* Copy(); // Returns the property index for name (only valid for FAST MODE). - int PropertyIndexFor(String* name); + int PropertyIndexFor(Name* name); // Returns the next free property index (only valid for FAST MODE). int NextFreePropertyIndex(); @@ -5016,7 +5252,7 @@ class Map: public HeapObject { static inline Map* cast(Object* obj); // Locate an accessor in the instance descriptor. - AccessorDescriptor* FindAccessor(String* name); + AccessorDescriptor* FindAccessor(Name* name); // Code cache operations. @@ -5025,9 +5261,9 @@ class Map: public HeapObject { // Update code cache. static void UpdateCodeCache(Handle<Map> map, - Handle<String> name, + Handle<Name> name, Handle<Code> code); - MUST_USE_RESULT MaybeObject* UpdateCodeCache(String* name, Code* code); + MUST_USE_RESULT MaybeObject* UpdateCodeCache(Name* name, Code* code); // Extend the descriptor array of the map with the list of descriptors. // In case of duplicates, the latest descriptor is used. @@ -5037,14 +5273,14 @@ class Map: public HeapObject { static void EnsureDescriptorSlack(Handle<Map> map, int slack); // Returns the found code or undefined if absent. - Object* FindInCodeCache(String* name, Code::Flags flags); + Object* FindInCodeCache(Name* name, Code::Flags flags); // Returns the non-negative index of the code object if it is in the // cache and -1 otherwise. int IndexInCodeCache(Object* name, Code* code); // Removes a code object from the code cache at the given index. - void RemoveFromCodeCache(String* name, Code* code, int index); + void RemoveFromCodeCache(Name* name, Code* code, int index); // Set all map transitions from this map to dead maps to null. Also clear // back pointers in transition targets so that we do not process this map @@ -5081,17 +5317,29 @@ class Map: public HeapObject { void ZapPrototypeTransitions(); void ZapTransitions(); - // Dispatched behavior. 
-#ifdef OBJECT_PRINT - inline void MapPrint() { - MapPrint(stdout); + bool CanTransition() { + // Only JSObject and subtypes have map transitions and back pointers. + STATIC_ASSERT(LAST_TYPE == LAST_JS_OBJECT_TYPE); + return instance_type() >= FIRST_JS_OBJECT_TYPE; } - void MapPrint(FILE* out); -#endif + + // Fires when the layout of an object with a leaf map changes. + // This includes adding transitions to the leaf map or changing + // the descriptor array. + inline void NotifyLeafMapLayoutChange(); + + inline bool CanOmitPrototypeChecks(); + + inline void AddDependentCode(DependentCode::DependencyGroup group, + Handle<Code> code); + + // Dispatched behavior. + DECLARE_PRINTER(Map) DECLARE_VERIFIER(Map) #ifdef VERIFY_HEAP void SharedMapVerify(); + void VerifyOmittedPrototypeChecks(); #endif inline int visitor_id(); @@ -5133,9 +5381,9 @@ class Map: public HeapObject { kConstructorOffset + kPointerSize; static const int kDescriptorsOffset = kTransitionsOrBackPointerOffset + kPointerSize; - static const int kCodeCacheOffset = - kDescriptorsOffset + kPointerSize; - static const int kBitField3Offset = kCodeCacheOffset + kPointerSize; + static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize; + static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize; + static const int kBitField3Offset = kDependentCodeOffset + kPointerSize; static const int kSize = kBitField3Offset + kPointerSize; // Layout of pointer fields. Heap iteration code relies on them @@ -5288,12 +5536,8 @@ class Script: public Struct { // resource is accessible. Otherwise, always return true. inline bool HasValidSource(); -#ifdef OBJECT_PRINT - inline void ScriptPrint() { - ScriptPrint(stdout); - } - void ScriptPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(Script) DECLARE_VERIFIER(Script) static const int kSourceOffset = HeapObject::kHeaderSize; @@ -5378,6 +5622,7 @@ class SharedFunctionInfo: public HeapObject { // [code]: Function code. DECL_ACCESSORS(code, Code) + inline void ReplaceCode(Code* code); // [optimized_code_map]: Map from native context to optimized code // and a shared literals array or Smi 0 if none. @@ -5393,7 +5638,7 @@ class SharedFunctionInfo: public HeapObject { void InstallFromOptimizedCodeMap(JSFunction* function, int index); // Clear optimized code map. - void ClearOptimizedCodeMap(); + inline void ClearOptimizedCodeMap(); // Add a new entry to the optimized code map. static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared, @@ -5768,12 +6013,7 @@ class SharedFunctionInfo: public HeapObject { // Dispatched behavior. // Set max_length to -1 for unlimited length. void SourceCodePrint(StringStream* accumulator, int max_length); -#ifdef OBJECT_PRINT - inline void SharedFunctionInfoPrint() { - SharedFunctionInfoPrint(stdout); - } - void SharedFunctionInfoPrint(FILE* out); -#endif + DECLARE_PRINTER(SharedFunctionInfo) DECLARE_VERIFIER(SharedFunctionInfo) void ResetForNewContext(int new_ic_age); @@ -5901,10 +6141,10 @@ class SharedFunctionInfo: public HeapObject { // Bit positions in start_position_and_type. // The source code start position is in the 30 most significant bits of // the start_position_and_type field. 
- static const int kIsExpressionBit = 0; - static const int kIsTopLevelBit = 1; + static const int kIsExpressionBit = 0; + static const int kIsTopLevelBit = 1; static const int kStartPositionShift = 2; - static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1); + static const int kStartPositionMask = ~((1 << kStartPositionShift) - 1); // Bit positions in compiler_hints. static const int kCodeAgeSize = 3; @@ -6002,12 +6242,7 @@ class JSModule: public JSObject { static inline JSModule* cast(Object* obj); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSModulePrint() { - JSModulePrint(stdout); - } - void JSModulePrint(FILE* out); -#endif + DECLARE_PRINTER(JSModule) DECLARE_VERIFIER(JSModule) // Layout description. @@ -6043,6 +6278,7 @@ class JSFunction: public JSObject { // 8.6.2, page 27. inline Code* code(); inline void set_code(Code* code); + inline void set_code_no_write_barrier(Code* code); inline void ReplaceCode(Code* code); inline Code* unchecked_code(); @@ -6063,6 +6299,8 @@ class JSFunction: public JSObject { // recompiled the next time it is executed. void MarkForLazyRecompilation(); void MarkForParallelRecompilation(); + void MarkForInstallingRecompiledCode(); + void MarkInRecompileQueue(); // Helpers to compile this function. Returns true on success, false on // failure (e.g., stack overflow during compilation). @@ -6078,6 +6316,7 @@ class JSFunction: public JSObject { // recompilation. inline bool IsMarkedForLazyRecompilation(); inline bool IsMarkedForParallelRecompilation(); + inline bool IsMarkedForInstallingRecompiledCode(); // Tells whether or not the function is on the parallel // recompilation queue. @@ -6161,12 +6400,7 @@ class JSFunction: public JSObject { void JSFunctionIterateBody(int object_size, ObjectVisitor* v); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSFunctionPrint() { - JSFunctionPrint(stdout); - } - void JSFunctionPrint(FILE* out); -#endif + DECLARE_PRINTER(JSFunction) DECLARE_VERIFIER(JSFunction) // Returns the number of allocated literals. @@ -6175,6 +6409,18 @@ class JSFunction: public JSObject { // Retrieve the native context from a function's literal array. static Context* NativeContextFromLiterals(FixedArray* literals); +#ifdef DEBUG + bool FunctionsInFunctionListShareSameCode() { + Object* current = this; + while (!current->IsUndefined()) { + JSFunction* function = JSFunction::cast(current); + current = function->next_function_link(); + if (function->code() != this->code()) return false; + } + return true; + } +#endif + // Layout descriptors. The last property (from kNonWeakFieldsEndOffset to // kSize) is weak and has special handling during garbage collection. static const int kCodeEntryOffset = JSObject::kHeaderSize; @@ -6220,12 +6466,7 @@ class JSGlobalProxy : public JSObject { static inline JSGlobalProxy* cast(Object* obj); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSGlobalProxyPrint() { - JSGlobalProxyPrint(stdout); - } - void JSGlobalProxyPrint(FILE* out); -#endif + DECLARE_PRINTER(JSGlobalProxy) DECLARE_VERIFIER(JSGlobalProxy) // Layout description. @@ -6263,7 +6504,7 @@ class GlobalObject: public JSObject { // by throwing an exception. This is for the debug and builtins global // objects, where it is known which properties can be expected to be present // on the object. 
- Object* GetPropertyNoExceptionThrown(String* key) { + Object* GetPropertyNoExceptionThrown(Name* key) { Object* answer = GetProperty(key)->ToObjectUnchecked(); return answer; } @@ -6271,10 +6512,10 @@ class GlobalObject: public JSObject { // Ensure that the global object has a cell for the given property name. static Handle<JSGlobalPropertyCell> EnsurePropertyCell( Handle<GlobalObject> global, - Handle<String> name); + Handle<Name> name); // TODO(kmillikin): This function can be eliminated once the stub cache is - // full handlified (and the static helper can be written directly). - MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name); + // fully handlified (and the static helper can be written directly). + MUST_USE_RESULT MaybeObject* EnsurePropertyCell(Name* name); // Casting. static inline GlobalObject* cast(Object* obj); @@ -6298,12 +6539,7 @@ class JSGlobalObject: public GlobalObject { static inline JSGlobalObject* cast(Object* obj); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSGlobalObjectPrint() { - JSGlobalObjectPrint(stdout); - } - void JSGlobalObjectPrint(FILE* out); -#endif + DECLARE_PRINTER(JSGlobalObject) DECLARE_VERIFIER(JSGlobalObject) // Layout description. @@ -6330,12 +6566,7 @@ class JSBuiltinsObject: public GlobalObject { static inline JSBuiltinsObject* cast(Object* obj); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSBuiltinsObjectPrint() { - JSBuiltinsObjectPrint(stdout); - } - void JSBuiltinsObjectPrint(FILE* out); -#endif + DECLARE_PRINTER(JSBuiltinsObject) DECLARE_VERIFIER(JSBuiltinsObject) // Layout description. The size of the builtins object includes @@ -6371,12 +6602,7 @@ class JSValue: public JSObject { static inline JSValue* cast(Object* obj); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSValuePrint() { - JSValuePrint(stdout); - } - void JSValuePrint(FILE* out); -#endif + DECLARE_PRINTER(JSValue) DECLARE_VERIFIER(JSValue) // Layout description. @@ -6425,12 +6651,7 @@ class JSDate: public JSObject { // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSDatePrint() { - JSDatePrint(stdout); - } - void JSDatePrint(FILE* out); -#endif + DECLARE_PRINTER(JSDate) DECLARE_VERIFIER(JSDate) // The order is important. It must be kept in sync with date macros @@ -6522,12 +6743,7 @@ class JSMessageObject: public JSObject { static inline JSMessageObject* cast(Object* obj); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSMessageObjectPrint() { - JSMessageObjectPrint(stdout); - } - void JSMessageObjectPrint(FILE* out); -#endif + DECLARE_PRINTER(JSMessageObject) DECLARE_VERIFIER(JSMessageObject) // Layout description. @@ -6704,8 +6920,9 @@ class CompilationCacheShape : public BaseShape<HashTableKey*> { return key->HashForObject(object); } - MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) { - return key->AsObject(); + MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap, + HashTableKey* key) { + return key->AsObject(heap); } static const int kPrefixSize = 0; @@ -6750,11 +6967,11 @@ class CodeCache: public Struct { DECL_ACCESSORS(normal_type_cache, Object) // Add the code object to the cache. - MUST_USE_RESULT MaybeObject* Update(String* name, Code* code); + MUST_USE_RESULT MaybeObject* Update(Name* name, Code* code); // Lookup code object in the cache. Returns code object if found and undefined // if not. - Object* Lookup(String* name, Code::Flags flags); + Object* Lookup(Name* name, Code::Flags flags); // Get the internal index of a code object in the cache. 
Returns -1 if the // code object is not in that cache. This index can be used to later call @@ -6767,12 +6984,8 @@ class CodeCache: public Struct { static inline CodeCache* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void CodeCachePrint() { - CodeCachePrint(stdout); - } - void CodeCachePrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(CodeCache) DECLARE_VERIFIER(CodeCache) static const int kDefaultCacheOffset = HeapObject::kHeaderSize; @@ -6781,10 +6994,10 @@ class CodeCache: public Struct { static const int kSize = kNormalTypeCacheOffset + kPointerSize; private: - MUST_USE_RESULT MaybeObject* UpdateDefaultCache(String* name, Code* code); - MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(String* name, Code* code); - Object* LookupDefaultCache(String* name, Code::Flags flags); - Object* LookupNormalTypeCache(String* name, Code::Flags flags); + MUST_USE_RESULT MaybeObject* UpdateDefaultCache(Name* name, Code* code); + MUST_USE_RESULT MaybeObject* UpdateNormalTypeCache(Name* name, Code* code); + Object* LookupDefaultCache(Name* name, Code::Flags flags); + Object* LookupNormalTypeCache(Name* name, Code::Flags flags); // Code cache layout of the default cache. Elements are alternating name and // code objects for non normal load/store/call IC's. @@ -6810,8 +7023,9 @@ class CodeCacheHashTableShape : public BaseShape<HashTableKey*> { return key->HashForObject(object); } - MUST_USE_RESULT static MaybeObject* AsObject(HashTableKey* key) { - return key->AsObject(); + MUST_USE_RESULT static MaybeObject* AsObject(Heap* heap, + HashTableKey* key) { + return key->AsObject(heap); } static const int kPrefixSize = 0; @@ -6822,10 +7036,10 @@ class CodeCacheHashTableShape : public BaseShape<HashTableKey*> { class CodeCacheHashTable: public HashTable<CodeCacheHashTableShape, HashTableKey*> { public: - Object* Lookup(String* name, Code::Flags flags); - MUST_USE_RESULT MaybeObject* Put(String* name, Code* code); + Object* Lookup(Name* name, Code::Flags flags); + MUST_USE_RESULT MaybeObject* Put(Name* name, Code* code); - int GetIndex(String* name, Code::Flags flags); + int GetIndex(Name* name, Code::Flags flags); void RemoveByIndex(int index); static inline CodeCacheHashTable* cast(Object* obj); @@ -6856,12 +7070,8 @@ class PolymorphicCodeCache: public Struct { static inline PolymorphicCodeCache* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void PolymorphicCodeCachePrint() { - PolymorphicCodeCachePrint(stdout); - } - void PolymorphicCodeCachePrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(PolymorphicCodeCache) DECLARE_VERIFIER(PolymorphicCodeCache) static const int kCacheOffset = HeapObject::kHeaderSize; @@ -6909,12 +7119,8 @@ class TypeFeedbackInfo: public Struct { static inline TypeFeedbackInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void TypeFeedbackInfoPrint() { - TypeFeedbackInfoPrint(stdout); - } - void TypeFeedbackInfoPrint(FILE* out); -#endif + // Dispatched behavior. 
+ DECLARE_PRINTER(TypeFeedbackInfo) DECLARE_VERIFIER(TypeFeedbackInfo) static const int kStorage1Offset = HeapObject::kHeaderSize; @@ -6940,6 +7146,38 @@ class TypeFeedbackInfo: public Struct { }; +enum AllocationSiteMode { + DONT_TRACK_ALLOCATION_SITE, + TRACK_ALLOCATION_SITE, + LAST_ALLOCATION_SITE_MODE = TRACK_ALLOCATION_SITE +}; + + +class AllocationSiteInfo: public Struct { + public: + DECL_ACCESSORS(payload, Object) + + static inline AllocationSiteInfo* cast(Object* obj); + + DECLARE_PRINTER(AllocationSiteInfo) + DECLARE_VERIFIER(AllocationSiteInfo) + + // Returns NULL if no AllocationSiteInfo is available for object. + static AllocationSiteInfo* FindForJSObject(JSObject* object); + + static AllocationSiteMode GetMode(ElementsKind boilerplate_elements_kind); + static AllocationSiteMode GetMode(ElementsKind from, ElementsKind to); + + static const int kPayloadOffset = HeapObject::kHeaderSize; + static const int kSize = kPayloadOffset + kPointerSize; + static const uint32_t kMaximumArrayBytesToPretransition = 8 * 1024; + + bool GetElementsKindPayload(ElementsKind* kind); + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationSiteInfo); +}; + + // Representation of a slow alias as part of a non-strict arguments objects. // For fast aliases (if HasNonStrictArgumentsElements()): // - the parameter map contains an index into the context @@ -6955,12 +7193,8 @@ class AliasedArgumentsEntry: public Struct { static inline AliasedArgumentsEntry* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void AliasedArgumentsEntryPrint() { - AliasedArgumentsEntryPrint(stdout); - } - void AliasedArgumentsEntryPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(AliasedArgumentsEntry) DECLARE_VERIFIER(AliasedArgumentsEntry) static const int kAliasedContextSlot = HeapObject::kHeaderSize; @@ -6979,30 +7213,15 @@ class StringHasher { public: explicit inline StringHasher(int length, uint32_t seed); - // Returns true if the hash of this string can be computed without - // looking at the contents. - inline bool has_trivial_hash(); - - // Add a character to the hash and update the array index calculation. - inline void AddCharacter(uint32_t c); + template <typename schar> + static inline uint32_t HashSequentialString(const schar* chars, + int length, + uint32_t seed); - // Adds a character to the hash but does not update the array index - // calculation. This can only be called when it has been verified - // that the input is not an array index. - inline void AddCharacterNoIndex(uint32_t c); - - // Add a character above 0xffff as a surrogate pair. These can get into - // the hasher through the routines that take a UTF-8 string and make a symbol. - void AddSurrogatePair(uc32 c); - void AddSurrogatePairNoIndex(uc32 c); - - // Returns the value to store in the hash field of a string with - // the given length and contents. - uint32_t GetHashField(); - - // Returns true if the characters seen so far make up a legal array - // index. - bool is_array_index() { return is_array_index_; } + // Reads all the data, even for long strings and computes the utf16 length. + static uint32_t ComputeUtf8Hash(Vector<const char> chars, + uint32_t seed, + int* utf16_length_out); // Calculated hash value for a string consisting of 1 to // String::kMaxArrayIndexSize digits with no leading zeros (except "0"). @@ -7014,51 +7233,36 @@ class StringHasher { // use 27 instead. 
static const int kZeroHash = 27; - private: - uint32_t array_index() { - ASSERT(is_array_index()); - return array_index_; - } - - inline uint32_t GetHash(); - // Reusable parts of the hashing algorithm. - INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint32_t c)); + INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c)); INLINE(static uint32_t GetHashCore(uint32_t running_hash)); - int length_; - uint32_t raw_running_hash_; - uint32_t array_index_; - bool is_array_index_; - bool is_first_char_; - friend class TwoCharHashTableKey; - - template <bool seq_ascii> friend class JsonParser; -}; - - -class IncrementalAsciiStringHasher { - public: - explicit inline IncrementalAsciiStringHasher(uint32_t seed, char first_char); - inline void AddCharacter(uc32 c); - inline uint32_t GetHash(); + protected: + // Returns the value to store in the hash field of a string with + // the given length and contents. + uint32_t GetHashField(); + // Returns true if the hash of this string can be computed without + // looking at the contents. + inline bool has_trivial_hash(); + // Adds a block of characters to the hash. + template<typename Char> + inline void AddCharacters(const Char* chars, int len); private: + // Add a character to the hash. + inline void AddCharacter(uint16_t c); + // Update index. Returns true if string is still an index. + inline bool UpdateIndex(uint16_t c); + int length_; uint32_t raw_running_hash_; uint32_t array_index_; bool is_array_index_; - char first_char_; + bool is_first_char_; + DISALLOW_COPY_AND_ASSIGN(StringHasher); }; -// Calculates string hash. -template <typename schar> -inline uint32_t HashSequentialString(const schar* chars, - int length, - uint32_t seed); - - // The characteristics of a string are stored in its map. Retrieving these // few bits of information is moderately expensive, involving two memory // loads where the second is dependent on the first. To improve efficiency @@ -7084,7 +7288,7 @@ class StringShape BASE_EMBEDDED { inline bool IsExternalTwoByte(); inline bool IsSequentialAscii(); inline bool IsSequentialTwoByte(); - inline bool IsSymbol(); + inline bool IsInternalized(); inline StringRepresentationTag representation_tag(); inline uint32_t encoding_tag(); inline uint32_t full_representation_tag(); @@ -7108,6 +7312,112 @@ class StringShape BASE_EMBEDDED { }; +// The Name abstract class captures anything that can be used as a property +// name, i.e., strings and symbols. All names store a hash value. +class Name: public HeapObject { + public: + // Get and set the hash field of the name. + inline uint32_t hash_field(); + inline void set_hash_field(uint32_t value); + + // Tells whether the hash code has been computed. + inline bool HasHashCode(); + + // Returns a hash value used for the property table + inline uint32_t Hash(); + + // Equality operations. + inline bool Equals(Name* other); + + // Conversion. + inline bool AsArrayIndex(uint32_t* index); + + // Casting. + static inline Name* cast(Object* obj); + + DECLARE_PRINTER(Name) + + // Layout description. + static const int kHashFieldOffset = HeapObject::kHeaderSize; + static const int kSize = kHashFieldOffset + kPointerSize; + + // Mask constant for checking if a name has a computed hash code + // and if it is a string that is an array index. The least significant bit + // indicates whether a hash code has been computed. If the hash code has + // been computed the 2nd bit tells whether the string can be used as an + // array index. 
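The refactored StringHasher above funnels all hashing through AddCharacterCore and GetHashCore. A standalone sketch of that split follows; the mixing steps shown are the classic Jenkins one-at-a-time rounds that the pre-existing inline implementation uses, but the exact constants should be treated as an assumption here:

#include <cstdint>
#include <cstdio>

static const int kNofHashBitFields = 2;
static const uint32_t kHashBitMask = 0xffffffffu >> kNofHashBitFields;
static const uint32_t kZeroHash = 27;  // Substitute when the mixed hash masks to 0.

// One character is folded into the running hash per call.
static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c) {
  running_hash += c;
  running_hash += running_hash << 10;
  running_hash ^= running_hash >> 6;
  return running_hash;
}

// Final avalanche; never return a value that masks to zero.
static uint32_t GetHashCore(uint32_t running_hash) {
  running_hash += running_hash << 3;
  running_hash ^= running_hash >> 11;
  running_hash += running_hash << 15;
  return (running_hash & kHashBitMask) == 0 ? kZeroHash : running_hash;
}

int main() {
  const char* chars = "hello";
  uint32_t hash = 0;
  for (const char* p = chars; *p != '\0'; ++p) {
    hash = AddCharacterCore(hash, static_cast<uint16_t>(*p));
  }
  hash = GetHashCore(hash);
  std::printf("raw hash: %u\n", hash & kHashBitMask);
  return 0;
}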
+ static const int kHashNotComputedMask = 1; + static const int kIsNotArrayIndexMask = 1 << 1; + static const int kNofHashBitFields = 2; + + // Shift constant retrieving hash code from hash field. + static const int kHashShift = kNofHashBitFields; + + // Only these bits are relevant in the hash, since the top two are shifted + // out. + static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift; + + // Array index strings this short can keep their index in the hash field. + static const int kMaxCachedArrayIndexLength = 7; + + // For strings which are array indexes the hash value has the string length + // mixed into the hash, mainly to avoid a hash value of zero which would be + // the case for the string '0'. 24 bits are used for the array index value. + static const int kArrayIndexValueBits = 24; + static const int kArrayIndexLengthBits = + kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields; + + STATIC_CHECK((kArrayIndexLengthBits > 0)); + + static const int kArrayIndexHashLengthShift = + kArrayIndexValueBits + kNofHashBitFields; + + static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1; + + static const int kArrayIndexValueMask = + ((1 << kArrayIndexValueBits) - 1) << kHashShift; + + // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we + // could use a mask to test if the length of string is less than or equal to + // kMaxCachedArrayIndexLength. + STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1)); + + static const int kContainsCachedArrayIndexMask = + (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) | + kIsNotArrayIndexMask; + + // Value of empty hash field indicating that the hash is not computed. + static const int kEmptyHashField = + kIsNotArrayIndexMask | kHashNotComputedMask; + + protected: + static inline bool IsHashFieldComputed(uint32_t field); + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(Name); +}; + + +// ES6 symbols. +class Symbol: public Name { + public: + // Casting. + static inline Symbol* cast(Object* obj); + + // Dispatched behavior. + DECLARE_PRINTER(Symbol) + DECLARE_VERIFIER(Symbol) + + // Layout description. + static const int kSize = Name::kSize; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol); +}; + + +class ConsString; + // The String abstract class captures JavaScript string values: // // Ecma-262: @@ -7116,8 +7426,10 @@ class StringShape BASE_EMBEDDED { // ordered sequence of zero or more 16-bit unsigned integer values. // // All string values have a length field. -class String: public HeapObject { +class String: public Name { public: + enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING }; + // Representation of the flat content of a String. // A non-flat string doesn't have flat content. // A flat string has content that's encoded as a sequence of either @@ -7132,11 +7444,11 @@ class String: public HeapObject { // Returns true if the structure contains two-byte content. bool IsTwoByte() { return state_ == TWO_BYTE; } - // Return the ASCII content of the string. Only use if IsAscii() returns + // Return the one byte content of the string. Only use if IsAscii() returns // true. - Vector<const char> ToAsciiVector() { + Vector<const uint8_t> ToOneByteVector() { ASSERT_EQ(ASCII, state_); - return Vector<const char>::cast(buffer_); + return buffer_; } // Return the two-byte content of the string. Only use if IsTwoByte() // returns true. @@ -7149,15 +7461,15 @@ class String: public HeapObject { enum State { NON_FLAT, ASCII, TWO_BYTE }; // Constructors only used by String::GetFlatContent(). 
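Per the constants above, a name that is a short array index caches both the index value and the string length directly in its hash field: two flag bits at the bottom, kArrayIndexValueBits of value above them, and the length in the remaining high bits. A small worked sketch of that packing, standalone and using only the constants shown above:

#include <cassert>
#include <cstdint>

static const int kHashNotComputedMask = 1;
static const int kIsNotArrayIndexMask = 1 << 1;
static const int kNofHashBitFields = 2;
static const int kHashShift = kNofHashBitFields;
static const int kArrayIndexValueBits = 24;
static const int kArrayIndexHashLengthShift =
    kArrayIndexValueBits + kNofHashBitFields;
static const int kArrayIndexValueMask =
    ((1 << kArrayIndexValueBits) - 1) << kHashShift;

// Both flag bits stay clear: the hash is computed and the name is an index.
static uint32_t MakeArrayIndexHashField(uint32_t value, int length) {
  uint32_t field =
      (value << kHashShift) |
      (static_cast<uint32_t>(length) << kArrayIndexHashLengthShift);
  assert((field & kHashNotComputedMask) == 0);
  assert((field & kIsNotArrayIndexMask) == 0);
  return field;
}

int main() {
  uint32_t field = MakeArrayIndexHashField(42, 2);  // The string "42".
  uint32_t index = (field & kArrayIndexValueMask) >> kHashShift;
  uint32_t length = field >> kArrayIndexHashLengthShift;
  assert(index == 42 && length == 2);
  return 0;
}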
- explicit FlatContent(Vector<const char> chars) - : buffer_(Vector<const byte>::cast(chars)), + explicit FlatContent(Vector<const uint8_t> chars) + : buffer_(chars), state_(ASCII) { } explicit FlatContent(Vector<const uc16> chars) : buffer_(Vector<const byte>::cast(chars)), state_(TWO_BYTE) { } FlatContent() : buffer_(), state_(NON_FLAT) { } - Vector<const byte> buffer_; + Vector<const uint8_t> buffer_; State state_; friend class String; @@ -7167,27 +7479,25 @@ class String: public HeapObject { inline int length(); inline void set_length(int value); - // Get and set the hash field of the string. - inline uint32_t hash_field(); - inline void set_hash_field(uint32_t value); - // Returns whether this string has only ASCII chars, i.e. all of them can // be ASCII encoded. This might be the case even if the string is // two-byte. Such strings may appear when the embedder prefers // two-byte external representations even for ASCII data. - inline bool IsAsciiRepresentation(); + inline bool IsOneByteRepresentation(); inline bool IsTwoByteRepresentation(); // Cons and slices have an encoding flag that may not represent the actual // encoding of the underlying string. This is taken into account here. // Requires: this->IsFlat() - inline bool IsAsciiRepresentationUnderneath(); + inline bool IsOneByteRepresentationUnderneath(); inline bool IsTwoByteRepresentationUnderneath(); // NOTE: this should be considered only a hint. False negatives are // possible. inline bool HasOnlyAsciiChars(); + inline bool IsOneByteConvertible(); + // Get and set individual two byte chars in the string. inline void Set(int index, uint16_t value); // Get individual two byte char in the string. Repeated calls @@ -7238,8 +7548,8 @@ class String: public HeapObject { // String equality operations. inline bool Equals(String* other); - bool IsEqualTo(Vector<const char> str); - bool IsAsciiEqualTo(Vector<const char> str); + bool IsUtf8EqualTo(Vector<const char> str); + bool IsOneByteEqualTo(Vector<const uint8_t> str); bool IsTwoByteEqualTo(Vector<const uc16> str); // Return a UTF8 representation of the string. The string is null @@ -7269,19 +7579,7 @@ class String: public HeapObject { SmartArrayPointer<uc16> ToWideCString( RobustnessFlag robustness_flag = FAST_STRING_TRAVERSAL); - // Tells whether the hash code has been computed. - inline bool HasHashCode(); - - // Returns a hash value used for the property table - inline uint32_t Hash(); - - static uint32_t ComputeHashField(unibrow::CharacterStream* buffer, - int length, - uint32_t seed); - - static bool ComputeArrayIndex(unibrow::CharacterStream* buffer, - uint32_t* index, - int length); + bool ComputeArrayIndex(uint32_t* index); // Externalization. bool MakeExternal(v8::String::ExternalStringResource* resource); @@ -7313,69 +7611,18 @@ class String: public HeapObject { inline bool IsFlat(); // Layout description. - static const int kLengthOffset = HeapObject::kHeaderSize; - static const int kHashFieldOffset = kLengthOffset + kPointerSize; - static const int kSize = kHashFieldOffset + kPointerSize; + static const int kLengthOffset = Name::kSize; + static const int kSize = kLengthOffset + kPointerSize; // Maximum number of characters to consider when trying to convert a string // value into an array index. static const int kMaxArrayIndexSize = 10; - - // Max ASCII char code. 
- static const int kMaxAsciiCharCode = unibrow::Utf8::kMaxOneByteChar; - static const unsigned kMaxAsciiCharCodeU = unibrow::Utf8::kMaxOneByteChar; - static const int kMaxUtf16CodeUnit = 0xffff; - - // Mask constant for checking if a string has a computed hash code - // and if it is an array index. The least significant bit indicates - // whether a hash code has been computed. If the hash code has been - // computed the 2nd bit tells whether the string can be used as an - // array index. - static const int kHashNotComputedMask = 1; - static const int kIsNotArrayIndexMask = 1 << 1; - static const int kNofHashBitFields = 2; - - // Shift constant retrieving hash code from hash field. - static const int kHashShift = kNofHashBitFields; - - // Only these bits are relevant in the hash, since the top two are shifted - // out. - static const uint32_t kHashBitMask = 0xffffffffu >> kHashShift; - - // Array index strings this short can keep their index in the hash - // field. - static const int kMaxCachedArrayIndexLength = 7; - - // For strings which are array indexes the hash value has the string length - // mixed into the hash, mainly to avoid a hash value of zero which would be - // the case for the string '0'. 24 bits are used for the array index value. - static const int kArrayIndexValueBits = 24; - static const int kArrayIndexLengthBits = - kBitsPerInt - kArrayIndexValueBits - kNofHashBitFields; - - STATIC_CHECK((kArrayIndexLengthBits > 0)); STATIC_CHECK(kMaxArrayIndexSize < (1 << kArrayIndexLengthBits)); - static const int kArrayIndexHashLengthShift = - kArrayIndexValueBits + kNofHashBitFields; - - static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1; - - static const int kArrayIndexValueMask = - ((1 << kArrayIndexValueBits) - 1) << kHashShift; - - // Check that kMaxCachedArrayIndexLength + 1 is a power of two so we - // could use a mask to test if the length of string is less than or equal to - // kMaxCachedArrayIndexLength. - STATIC_CHECK(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1)); - - static const int kContainsCachedArrayIndexMask = - (~kMaxCachedArrayIndexLength << kArrayIndexHashLengthShift) | - kIsNotArrayIndexMask; - - // Value of empty hash field indicating that the hash is not computed. - static const int kEmptyHashField = - kIsNotArrayIndexMask | kHashNotComputedMask; + // Max char codes. + static const int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar; + static const uint32_t kMaxOneByteCharCodeU = unibrow::Latin1::kMaxChar; + static const int kMaxUtf16CodeUnit = 0xffff; // Value of hash field containing computed hash equal to zero. static const int kEmptyStringHash = kIsNotArrayIndexMask; @@ -7394,18 +7641,6 @@ class String: public HeapObject { const uc16* GetTwoByteData(); const uc16* GetTwoByteData(unsigned start); - // Support for StringInputBuffer - static const unibrow::byte* ReadBlock(String* input, - unibrow::byte* util_buffer, - unsigned capacity, - unsigned* remaining, - unsigned* offset); - static const unibrow::byte* ReadBlock(String** input, - unibrow::byte* util_buffer, - unsigned capacity, - unsigned* remaining, - unsigned* offset); - // Helper function for flattening strings. 
template <typename sinkchar> static void WriteToFlat(String* source, @@ -7420,7 +7655,7 @@ class String: public HeapObject { const char* start = chars; const char* limit = chars + length; #ifdef V8_HOST_CAN_READ_UNALIGNED - ASSERT(kMaxAsciiCharCode == 0x7F); + ASSERT(unibrow::Utf8::kMaxOneByteChar == 0x7F); const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80; while (chars + sizeof(uintptr_t) <= limit) { if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) { @@ -7430,7 +7665,7 @@ class String: public HeapObject { } #endif while (chars < limit) { - if (static_cast<uint8_t>(*chars) > kMaxAsciiCharCodeU) { + if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) { return static_cast<int>(chars - start); } ++chars; @@ -7442,55 +7677,57 @@ class String: public HeapObject { return NonAsciiStart(chars, length) >= length; } - static inline int NonAsciiStart(const uc16* chars, int length) { + static inline bool IsAscii(const uint8_t* chars, int length) { + return + NonAsciiStart(reinterpret_cast<const char*>(chars), length) >= length; + } + + static inline int NonOneByteStart(const uc16* chars, int length) { const uc16* limit = chars + length; const uc16* start = chars; while (chars < limit) { - if (*chars > kMaxAsciiCharCodeU) return static_cast<int>(chars - start); + if (*chars > kMaxOneByteCharCodeU) return static_cast<int>(chars - start); ++chars; } return static_cast<int>(chars - start); } - static inline bool IsAscii(const uc16* chars, int length) { - return NonAsciiStart(chars, length) >= length; + static inline bool IsOneByte(const uc16* chars, int length) { + return NonOneByteStart(chars, length) >= length; } - protected: - class ReadBlockBuffer { - public: - ReadBlockBuffer(unibrow::byte* util_buffer_, - unsigned cursor_, - unsigned capacity_, - unsigned remaining_) : - util_buffer(util_buffer_), - cursor(cursor_), - capacity(capacity_), - remaining(remaining_) { - } - unibrow::byte* util_buffer; - unsigned cursor; - unsigned capacity; - unsigned remaining; - }; + // TODO(dcarney): Replace all instances of this with VisitFlat. + template<class Visitor, class ConsOp> + static inline void Visit(String* string, + unsigned offset, + Visitor& visitor, + ConsOp& cons_op, + int32_t type, + unsigned length); + + template<class Visitor> + static inline ConsString* VisitFlat(Visitor* visitor, + String* string, + int offset, + int length, + int32_t type); - static inline const unibrow::byte* ReadBlock(String* input, - ReadBlockBuffer* buffer, - unsigned* offset, - unsigned max_chars); - static void ReadBlockIntoBuffer(String* input, - ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned max_chars); + template<class Visitor> + static inline ConsString* VisitFlat(Visitor* visitor, + String* string, + int offset = 0) { + int32_t type = string->map()->instance_type(); + return VisitFlat(visitor, string, offset, string->length(), type); + } private: + friend class Name; + // Try to flatten the top level ConsString that is hiding behind this // string. This is a no-op unless the string is a ConsString. Flatten // mutates the ConsString and might return a failure. MUST_USE_RESULT MaybeObject* SlowTryFlatten(PretenureFlag pretenure); - static inline bool IsHashFieldComputed(uint32_t field); - // Slow case of String::Equals. This implementation works on any strings // but it is most efficient on strings that are almost flat. bool SlowEquals(String* other); @@ -7514,6 +7751,11 @@ class SeqString: public String { // Layout description. 
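The Visitor type taken by Visit and VisitFlat above is, judging from StringCharacterStream declared further down, expected to expose VisitOneByteString and VisitTwoByteString callbacks that receive the flat character data. A minimal visitor under that assumption (the struct and its use are hypothetical):

#include <cstddef>
#include <cstdint>

// Hypothetical visitor shape for String::VisitFlat; the callback names match
// the ones StringCharacterStream declares, the rest is illustrative.
struct LengthSummingVisitor {
  size_t units;
  LengthSummingVisitor() : units(0) {}
  void VisitOneByteString(const uint8_t* chars, unsigned length) {
    (void)chars;
    units += length;  // One-byte (Latin-1) segment.
  }
  void VisitTwoByteString(const uint16_t* chars, unsigned length) {
    (void)chars;
    units += length;  // UTF-16 segment.
  }
};
// Usage inside V8 would be along the lines of:
//   LengthSummingVisitor v;
//   String::VisitFlat(&v, string);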
static const int kHeaderSize = String::kSize; + // Truncate the string in-place if possible and return the result. + // In case of new_length == 0, the empty string is returned without + // truncating the original string. + MUST_USE_RESULT String* Truncate(int new_length); + private: DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString); }; @@ -7521,26 +7763,26 @@ class SeqString: public String { // The AsciiString class captures sequential ASCII string objects. // Each character in the AsciiString is an ASCII character. -class SeqAsciiString: public SeqString { +class SeqOneByteString: public SeqString { public: static const bool kHasAsciiEncoding = true; // Dispatched behavior. - inline uint16_t SeqAsciiStringGet(int index); - inline void SeqAsciiStringSet(int index, uint16_t value); + inline uint16_t SeqOneByteStringGet(int index); + inline void SeqOneByteStringSet(int index, uint16_t value); // Get the address of the characters in this string. inline Address GetCharsAddress(); - inline char* GetChars(); + inline uint8_t* GetChars(); // Casting - static inline SeqAsciiString* cast(Object* obj); + static inline SeqOneByteString* cast(Object* obj); // Garbage collection support. This method is called by the // garbage collector to compute the actual size of an AsciiString // instance. - inline int SeqAsciiStringSize(InstanceType instance_type); + inline int SeqOneByteStringSize(InstanceType instance_type); // Computes the size for an AsciiString instance of a given length. static int SizeFor(int length) { @@ -7553,18 +7795,8 @@ class SeqAsciiString: public SeqString { // Q.v. String::kMaxLength which is the maximal size of concatenated strings. static const int kMaxLength = (kMaxSize - kHeaderSize); - // Support for StringInputBuffer. - inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, - unsigned* offset, - unsigned chars); - inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining, - unsigned* offset, - unsigned chars); - - DECLARE_VERIFIER(SeqAsciiString) - private: - DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString); + DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString); }; @@ -7605,11 +7837,6 @@ class SeqTwoByteString: public SeqString { // Q.v. String::kMaxLength which is the maximal size of concatenated strings. static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t); - // Support for StringInputBuffer. - inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); - private: DISALLOW_IMPLICIT_CONSTRUCTORS(SeqTwoByteString); }; @@ -7652,14 +7879,6 @@ class ConsString: public String { static const int kSecondOffset = kFirstOffset + kPointerSize; static const int kSize = kSecondOffset + kPointerSize; - // Support for StringInputBuffer. - inline const unibrow::byte* ConsStringReadBlock(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); - inline void ConsStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); - // Minimum length for a cons string. 
static const int kMinLength = 13; @@ -7704,13 +7923,6 @@ class SlicedString: public String { static const int kOffsetOffset = kParentOffset + kPointerSize; static const int kSize = kOffsetOffset + kPointerSize; - // Support for StringInputBuffer - inline const unibrow::byte* SlicedStringReadBlock(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); - inline void SlicedStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); // Minimum length for a sliced string. static const int kMinLength = 13; @@ -7745,6 +7957,9 @@ class ExternalString: public String { static const int kResourceDataOffset = kResourceOffset + kPointerSize; static const int kSize = kResourceDataOffset + kPointerSize; + static const int kMaxShortLength = + (kShortSize - SeqString::kHeaderSize) / kCharSize; + // Return whether external string is short (data pointer is not cached). inline bool is_short(); @@ -7773,7 +7988,7 @@ class ExternalAsciiString: public ExternalString { // which the pointer cache has to be refreshed. inline void update_data_cache(); - inline const char* GetChars(); + inline const uint8_t* GetChars(); // Dispatched behavior. inline uint16_t ExternalAsciiStringGet(int index); @@ -7787,14 +8002,6 @@ class ExternalAsciiString: public ExternalString { template<typename StaticVisitor> inline void ExternalAsciiStringIterateBody(); - // Support for StringInputBuffer. - const unibrow::byte* ExternalAsciiStringReadBlock(unsigned* remaining, - unsigned* offset, - unsigned chars); - inline void ExternalAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, - unsigned* offset, - unsigned chars); - private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString); }; @@ -7835,12 +8042,6 @@ class ExternalTwoByteString: public ExternalString { template<typename StaticVisitor> inline void ExternalTwoByteStringIterateBody(); - - // Support for StringInputBuffer. - void ExternalTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer, - unsigned* offset_ptr, - unsigned chars); - private: DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalTwoByteString); }; @@ -7887,32 +8088,82 @@ class FlatStringReader : public Relocatable { }; -// Note that StringInputBuffers are not valid across a GC! To fix this -// it would have to store a String Handle instead of a String* and -// AsciiStringReadBlock would have to be modified to use memcpy. -// -// StringInputBuffer is able to traverse any string regardless of how -// deeply nested a sequence of ConsStrings it is made of. However, -// performance will be better if deep strings are flattened before they -// are traversed. Since flattening requires memory allocation this is -// not always desirable, however (esp. in debugging situations). -class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> { +// A ConsStringOp that returns null. +// Useful when the operation to apply on a ConsString +// requires an expensive data structure. 
+class ConsStringNullOp { public: - virtual void Seek(unsigned pos); - inline StringInputBuffer(): unibrow::InputBuffer<String, String*, 1024>() {} - explicit inline StringInputBuffer(String* backing): - unibrow::InputBuffer<String, String*, 1024>(backing) {} + inline ConsStringNullOp() {} + static inline String* Operate(String*, unsigned*, int32_t*, unsigned*); + private: + DISALLOW_COPY_AND_ASSIGN(ConsStringNullOp); }; -class SafeStringInputBuffer - : public unibrow::InputBuffer<String, String**, 256> { +// This maintains an off-stack representation of the stack frames required +// to traverse a ConsString, allowing an entirely iterative and restartable +// traversal of the entire string +// Note: this class is not GC-safe. +class ConsStringIteratorOp { public: - virtual void Seek(unsigned pos); - inline SafeStringInputBuffer() - : unibrow::InputBuffer<String, String**, 256>() {} - explicit inline SafeStringInputBuffer(String** backing) - : unibrow::InputBuffer<String, String**, 256>(backing) {} + inline ConsStringIteratorOp() {} + String* Operate(String* string, + unsigned* offset_out, + int32_t* type_out, + unsigned* length_out); + inline String* ContinueOperation(int32_t* type_out, unsigned* length_out); + inline void Reset(); + inline bool HasMore(); + + private: + // TODO(dcarney): Templatize this out for different stack sizes. + static const unsigned kStackSize = 32; + // Use a mask instead of doing modulo operations for stack wrapping. + static const unsigned kDepthMask = kStackSize-1; + STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize)); + static inline unsigned OffsetForDepth(unsigned depth); + + inline void PushLeft(ConsString* string); + inline void PushRight(ConsString* string); + inline void AdjustMaximumDepth(); + inline void Pop(); + String* NextLeaf(bool* blew_stack, int32_t* type_out, unsigned* length_out); + String* Search(unsigned* offset_out, + int32_t* type_out, + unsigned* length_out); + + unsigned depth_; + unsigned maximum_depth_; + // Stack must always contain only frames for which right traversal + // has not yet been performed. + ConsString* frames_[kStackSize]; + unsigned consumed_; + ConsString* root_; + DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp); +}; + + +// Note: this class is not GC-safe. +class StringCharacterStream { + public: + inline StringCharacterStream(String* string, + ConsStringIteratorOp* op, + unsigned offset = 0); + inline uint16_t GetNext(); + inline bool HasMore(); + inline void Reset(String* string, unsigned offset = 0); + inline void VisitOneByteString(const uint8_t* chars, unsigned length); + inline void VisitTwoByteString(const uint16_t* chars, unsigned length); + + private: + bool is_one_byte_; + union { + const uint8_t* buffer8_; + const uint16_t* buffer16_; + }; + const uint8_t* end_; + ConsStringIteratorOp* op_; + DISALLOW_COPY_AND_ASSIGN(StringCharacterStream); }; @@ -7996,15 +8247,10 @@ class JSGlobalPropertyCell: public HeapObject { return address() + kValueOffset; } + // Dispatched behavior. + DECLARE_PRINTER(JSGlobalPropertyCell) DECLARE_VERIFIER(JSGlobalPropertyCell) -#ifdef OBJECT_PRINT - inline void JSGlobalPropertyCellPrint() { - JSGlobalPropertyCellPrint(stdout); - } - void JSGlobalPropertyCellPrint(FILE* out); -#endif - // Layout description. static const int kValueOffset = HeapObject::kHeaderSize; static const int kSize = kValueOffset + kPointerSize; @@ -8030,19 +8276,19 @@ class JSProxy: public JSReceiver { // Casting. 
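ConsStringIteratorOp above keeps an explicit, fixed-size ring of pending right children so that an arbitrarily deep ConsString can be walked without native recursion, and StringCharacterStream layers a character cursor on top of it. A standalone sketch of the same iterative idea over a toy cons node (the V8 classes are declarations only here; everything below is illustrative):

#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Toy stand-in for ConsString: either a leaf with characters or a pair.
struct Node {
  std::string leaf;   // Non-empty only for leaves.
  const Node* first;  // Left child, or NULL for leaves.
  const Node* second; // Right child, or NULL for leaves.
};

// Iterative left-to-right leaf traversal with an explicit stack of pending
// right children; ConsStringIteratorOp applies the same idea with a
// fixed-size ring buffer instead of a growable vector.
static std::string Flatten(const Node* root) {
  std::string out;
  std::vector<const Node*> pending_right;
  const Node* current = root;
  while (current != NULL) {
    if (current->first != NULL) {                // Interior node: descend left,
      pending_right.push_back(current->second);  // remember the right child.
      current = current->first;
    } else {
      out += current->leaf;                      // Leaf: emit its characters.
      if (pending_right.empty()) break;
      current = pending_right.back();            // Resume with next right child.
      pending_right.pop_back();
    }
  }
  return out;
}

int main() {
  Node a = {"foo", NULL, NULL}, b = {"bar", NULL, NULL}, c = {"baz", NULL, NULL};
  Node ab = {"", &a, &b};
  Node abc = {"", &ab, &c};
  assert(Flatten(&abc) == "foobarbaz");
  return 0;
}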
static inline JSProxy* cast(Object* obj); - bool HasPropertyWithHandler(String* name); + bool HasPropertyWithHandler(Name* name); bool HasElementWithHandler(uint32_t index); MUST_USE_RESULT MaybeObject* GetPropertyWithHandler( Object* receiver, - String* name); + Name* name); MUST_USE_RESULT MaybeObject* GetElementWithHandler( Object* receiver, uint32_t index); MUST_USE_RESULT MaybeObject* SetPropertyWithHandler( JSReceiver* receiver, - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode); @@ -8058,14 +8304,14 @@ class JSProxy: public JSReceiver { // otherwise set it to false. MUST_USE_RESULT MaybeObject* SetPropertyViaPrototypesWithHandler( JSReceiver* receiver, - String* name, + Name* name, Object* value, PropertyAttributes attributes, StrictModeFlag strict_mode, bool* done); MUST_USE_RESULT MaybeObject* DeletePropertyWithHandler( - String* name, + Name* name, DeleteMode mode); MUST_USE_RESULT MaybeObject* DeleteElementWithHandler( uint32_t index, @@ -8073,7 +8319,7 @@ class JSProxy: public JSReceiver { MUST_USE_RESULT PropertyAttributes GetPropertyAttributeWithHandler( JSReceiver* receiver, - String* name); + Name* name); MUST_USE_RESULT PropertyAttributes GetElementAttributeWithHandler( JSReceiver* receiver, uint32_t index); @@ -8094,12 +8340,7 @@ class JSProxy: public JSReceiver { Handle<Object> args[]); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSProxyPrint() { - JSProxyPrint(stdout); - } - void JSProxyPrint(FILE* out); -#endif + DECLARE_PRINTER(JSProxy) DECLARE_VERIFIER(JSProxy) // Layout description. We add padding so that a proxy has the same @@ -8135,12 +8376,7 @@ class JSFunctionProxy: public JSProxy { static inline JSFunctionProxy* cast(Object* obj); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSFunctionProxyPrint() { - JSFunctionProxyPrint(stdout); - } - void JSFunctionProxyPrint(FILE* out); -#endif + DECLARE_PRINTER(JSFunctionProxy) DECLARE_VERIFIER(JSFunctionProxy) // Layout description. @@ -8170,12 +8406,8 @@ class JSSet: public JSObject { // Casting. static inline JSSet* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void JSSetPrint() { - JSSetPrint(stdout); - } - void JSSetPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(JSSet) DECLARE_VERIFIER(JSSet) static const int kTableOffset = JSObject::kHeaderSize; @@ -8195,12 +8427,8 @@ class JSMap: public JSObject { // Casting. static inline JSMap* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void JSMapPrint() { - JSMapPrint(stdout); - } - void JSMapPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(JSMap) DECLARE_VERIFIER(JSMap) static const int kTableOffset = JSObject::kHeaderSize; @@ -8223,12 +8451,8 @@ class JSWeakMap: public JSObject { // Casting. static inline JSWeakMap* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void JSWeakMapPrint() { - JSWeakMapPrint(stdout); - } - void JSWeakMapPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(JSWeakMap) DECLARE_VERIFIER(JSWeakMap) static const int kTableOffset = JSObject::kHeaderSize; @@ -8258,12 +8482,8 @@ class Foreign: public HeapObject { template<typename StaticVisitor> inline void ForeignIterateBody(); -#ifdef OBJECT_PRINT - inline void ForeignPrint() { - ForeignPrint(stdout); - } - void ForeignPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(Foreign) DECLARE_VERIFIER(Foreign) // Layout description. @@ -8298,10 +8518,11 @@ class JSArray: public JSObject { // Initialize the array with the given capacity. 
The function may // fail due to out-of-memory situations, but only if the requested // capacity is non-zero. - MUST_USE_RESULT MaybeObject* Initialize(int capacity); + MUST_USE_RESULT MaybeObject* Initialize(int capacity, int length = 0); // Initializes the array to a certain length. inline bool AllowsSetElementsLength(); + // Can cause GC. MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length); // Set the content of the array to the content of storage. @@ -8315,12 +8536,7 @@ class JSArray: public JSObject { inline void EnsureSize(int minimum_size_of_backing_fixed_array); // Dispatched behavior. -#ifdef OBJECT_PRINT - inline void JSArrayPrint() { - JSArrayPrint(stdout); - } - void JSArrayPrint(FILE* out); -#endif + DECLARE_PRINTER(JSArray) DECLARE_VERIFIER(JSArray) // Number of element slots to pre-allocate for an empty array. @@ -8359,20 +8575,8 @@ class JSRegExpResult: public JSArray { }; -// An accessor must have a getter, but can have no setter. -// -// When setting a property, V8 searches accessors in prototypes. -// If an accessor was found and it does not have a setter, -// the request is ignored. -// -// If the accessor in the prototype has the READ_ONLY property attribute, then -// a new value is added to the local object when the property is set. -// This shadows the accessor in the prototype. class AccessorInfo: public Struct { public: - DECL_ACCESSORS(getter, Object) - DECL_ACCESSORS(setter, Object) - DECL_ACCESSORS(data, Object) DECL_ACCESSORS(name, Object) DECL_ACCESSORS(flag, Smi) DECL_ACCESSORS(expected_receiver_type, Object) @@ -8394,18 +8598,11 @@ class AccessorInfo: public Struct { static inline AccessorInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void AccessorInfoPrint() { - AccessorInfoPrint(stdout); - } - void AccessorInfoPrint(FILE* out); -#endif + // Dispatched behavior. DECLARE_VERIFIER(AccessorInfo) - static const int kGetterOffset = HeapObject::kHeaderSize; - static const int kSetterOffset = kGetterOffset + kPointerSize; - static const int kDataOffset = kSetterOffset + kPointerSize; - static const int kNameOffset = kDataOffset + kPointerSize; + + static const int kNameOffset = HeapObject::kHeaderSize; static const int kFlagOffset = kNameOffset + kPointerSize; static const int kExpectedReceiverTypeOffset = kFlagOffset + kPointerSize; static const int kSize = kExpectedReceiverTypeOffset + kPointerSize; @@ -8421,6 +8618,146 @@ class AccessorInfo: public Struct { }; +enum AccessorDescriptorType { + kDescriptorBitmaskCompare, + kDescriptorPointerCompare, + kDescriptorPrimitiveValue, + kDescriptorObjectDereference, + kDescriptorPointerDereference, + kDescriptorPointerShift, + kDescriptorReturnObject +}; + + +struct BitmaskCompareDescriptor { + uint32_t bitmask; + uint32_t compare_value; + uint8_t size; // Must be in {1,2,4}. +}; + + +struct PointerCompareDescriptor { + void* compare_value; +}; + + +struct PrimitiveValueDescriptor { + v8::DeclaredAccessorDescriptorDataType data_type; + uint8_t bool_offset; // Must be in [0,7], used for kDescriptorBoolType. 
+}; + + +struct ObjectDerefenceDescriptor { + uint8_t internal_field; +}; + + +struct PointerShiftDescriptor { + int16_t byte_offset; +}; + + +struct DeclaredAccessorDescriptorData { + AccessorDescriptorType type; + union { + struct BitmaskCompareDescriptor bitmask_compare_descriptor; + struct PointerCompareDescriptor pointer_compare_descriptor; + struct PrimitiveValueDescriptor primitive_value_descriptor; + struct ObjectDerefenceDescriptor object_dereference_descriptor; + struct PointerShiftDescriptor pointer_shift_descriptor; + }; +}; + + +class DeclaredAccessorDescriptor; + + +class DeclaredAccessorDescriptorIterator { + public: + explicit DeclaredAccessorDescriptorIterator( + DeclaredAccessorDescriptor* descriptor); + const DeclaredAccessorDescriptorData* Next(); + bool Complete() const { return length_ == offset_; } + private: + uint8_t* array_; + const int length_; + int offset_; + DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptorIterator); +}; + + +class DeclaredAccessorDescriptor: public Struct { + public: + DECL_ACCESSORS(serialized_data, ByteArray) + + static inline DeclaredAccessorDescriptor* cast(Object* obj); + + static Handle<DeclaredAccessorDescriptor> Create( + Isolate* isolate, + const DeclaredAccessorDescriptorData& data, + Handle<DeclaredAccessorDescriptor> previous); + + // Dispatched behavior. + DECLARE_PRINTER(DeclaredAccessorDescriptor) + DECLARE_VERIFIER(DeclaredAccessorDescriptor) + + static const int kSerializedDataOffset = HeapObject::kHeaderSize; + static const int kSize = kSerializedDataOffset + kPointerSize; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorDescriptor); +}; + + +class DeclaredAccessorInfo: public AccessorInfo { + public: + DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor) + + static inline DeclaredAccessorInfo* cast(Object* obj); + + // Dispatched behavior. + DECLARE_PRINTER(DeclaredAccessorInfo) + DECLARE_VERIFIER(DeclaredAccessorInfo) + + static const int kDescriptorOffset = AccessorInfo::kSize; + static const int kSize = kDescriptorOffset + kPointerSize; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(DeclaredAccessorInfo); +}; + + +// An accessor must have a getter, but can have no setter. +// +// When setting a property, V8 searches accessors in prototypes. +// If an accessor was found and it does not have a setter, +// the request is ignored. +// +// If the accessor in the prototype has the READ_ONLY property attribute, then +// a new value is added to the local object when the property is set. +// This shadows the accessor in the prototype. +class ExecutableAccessorInfo: public AccessorInfo { + public: + DECL_ACCESSORS(getter, Object) + DECL_ACCESSORS(setter, Object) + DECL_ACCESSORS(data, Object) + + static inline ExecutableAccessorInfo* cast(Object* obj); + + // Dispatched behavior. + DECLARE_PRINTER(ExecutableAccessorInfo) + DECLARE_VERIFIER(ExecutableAccessorInfo) + + static const int kGetterOffset = AccessorInfo::kSize; + static const int kSetterOffset = kGetterOffset + kPointerSize; + static const int kDataOffset = kSetterOffset + kPointerSize; + static const int kSize = kDataOffset + kPointerSize; + + private: + DISALLOW_IMPLICIT_CONSTRUCTORS(ExecutableAccessorInfo); +}; + + // Support for JavaScript accessors: A pair of a getter and a setter. 
Each // accessor can either be // * a pointer to a JavaScript function or proxy: a real accessor @@ -8461,9 +8798,8 @@ class AccessorPair: public Struct { return IsJSAccessor(getter()) || IsJSAccessor(setter()); } -#ifdef OBJECT_PRINT - void AccessorPairPrint(FILE* out = stdout); -#endif + // Dispatched behavior. + DECLARE_PRINTER(AccessorPair) DECLARE_VERIFIER(AccessorPair) static const int kGetterOffset = HeapObject::kHeaderSize; @@ -8492,12 +8828,8 @@ class AccessCheckInfo: public Struct { static inline AccessCheckInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void AccessCheckInfoPrint() { - AccessCheckInfoPrint(stdout); - } - void AccessCheckInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(AccessCheckInfo) DECLARE_VERIFIER(AccessCheckInfo) static const int kNamedCallbackOffset = HeapObject::kHeaderSize; @@ -8521,12 +8853,8 @@ class InterceptorInfo: public Struct { static inline InterceptorInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void InterceptorInfoPrint() { - InterceptorInfoPrint(stdout); - } - void InterceptorInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(InterceptorInfo) DECLARE_VERIFIER(InterceptorInfo) static const int kGetterOffset = HeapObject::kHeaderSize; @@ -8549,12 +8877,8 @@ class CallHandlerInfo: public Struct { static inline CallHandlerInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void CallHandlerInfoPrint() { - CallHandlerInfoPrint(stdout); - } - void CallHandlerInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(CallHandlerInfo) DECLARE_VERIFIER(CallHandlerInfo) static const int kCallbackOffset = HeapObject::kHeaderSize; @@ -8598,6 +8922,9 @@ class FunctionTemplateInfo: public TemplateInfo { DECL_ACCESSORS(access_check_info, Object) DECL_ACCESSORS(flag, Smi) + inline int length(); + inline void set_length(int value); + // Following properties use flag bits. DECL_BOOLEAN_ACCESSORS(hidden_prototype) DECL_BOOLEAN_ACCESSORS(undetectable) @@ -8608,12 +8935,8 @@ class FunctionTemplateInfo: public TemplateInfo { static inline FunctionTemplateInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void FunctionTemplateInfoPrint() { - FunctionTemplateInfoPrint(stdout); - } - void FunctionTemplateInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(FunctionTemplateInfo) DECLARE_VERIFIER(FunctionTemplateInfo) static const int kSerialNumberOffset = TemplateInfo::kHeaderSize; @@ -8635,7 +8958,8 @@ class FunctionTemplateInfo: public TemplateInfo { static const int kAccessCheckInfoOffset = kInstanceCallHandlerOffset + kPointerSize; static const int kFlagOffset = kAccessCheckInfoOffset + kPointerSize; - static const int kSize = kFlagOffset + kPointerSize; + static const int kLengthOffset = kFlagOffset + kPointerSize; + static const int kSize = kLengthOffset + kPointerSize; private: // Bit position in the flag, from least significant bit position. @@ -8655,12 +8979,8 @@ class ObjectTemplateInfo: public TemplateInfo { static inline ObjectTemplateInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void ObjectTemplateInfoPrint() { - ObjectTemplateInfoPrint(stdout); - } - void ObjectTemplateInfoPrint(FILE* out); -#endif + // Dispatched behavior. 
+ DECLARE_PRINTER(ObjectTemplateInfo) DECLARE_VERIFIER(ObjectTemplateInfo) static const int kConstructorOffset = TemplateInfo::kHeaderSize; @@ -8677,12 +8997,8 @@ class SignatureInfo: public Struct { static inline SignatureInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void SignatureInfoPrint() { - SignatureInfoPrint(stdout); - } - void SignatureInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(SignatureInfo) DECLARE_VERIFIER(SignatureInfo) static const int kReceiverOffset = Struct::kHeaderSize; @@ -8700,12 +9016,8 @@ class TypeSwitchInfo: public Struct { static inline TypeSwitchInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void TypeSwitchInfoPrint() { - TypeSwitchInfoPrint(stdout); - } - void TypeSwitchInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(TypeSwitchInfo) DECLARE_VERIFIER(TypeSwitchInfo) static const int kTypesOffset = Struct::kHeaderSize; @@ -8750,12 +9062,8 @@ class DebugInfo: public Struct { static inline DebugInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void DebugInfoPrint() { - DebugInfoPrint(stdout); - } - void DebugInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(DebugInfo) DECLARE_VERIFIER(DebugInfo) static const int kSharedFunctionInfoIndex = Struct::kHeaderSize; @@ -8806,12 +9114,8 @@ class BreakPointInfo: public Struct { static inline BreakPointInfo* cast(Object* obj); -#ifdef OBJECT_PRINT - inline void BreakPointInfoPrint() { - BreakPointInfoPrint(stdout); - } - void BreakPointInfoPrint(FILE* out); -#endif + // Dispatched behavior. + DECLARE_PRINTER(BreakPointInfo) DECLARE_VERIFIER(BreakPointInfo) static const int kCodePositionIndex = Struct::kHeaderSize; @@ -8833,10 +9137,10 @@ class BreakPointInfo: public Struct { #undef DECLARE_VERIFIER #define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \ - V(kSymbolTable, "symbol_table", "(Symbols)") \ + V(kStringTable, "string_table", "(Internalized strings)") \ V(kExternalStringsTable, "external_strings_table", "(External strings)") \ V(kStrongRootList, "strong_root_list", "(Strong roots)") \ - V(kSymbol, "symbol", "(Symbol)") \ + V(kInternalizedString, "internalized_string", "(Internal string)") \ V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \ V(kTop, "top", "(Isolate)") \ V(kRelocatable, "relocatable", "(Relocatable)") \ @@ -8895,6 +9199,10 @@ class ObjectVisitor BASE_EMBEDDED { // Visits a debug call target in the instruction stream. virtual void VisitDebugTarget(RelocInfo* rinfo); + // Visits the byte sequence in a function's prologue that contains information + // about the code's age. + virtual void VisitCodeAgeSequence(RelocInfo* rinfo); + // Handy shorthand for visiting a single pointer. 
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); } diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc index 06018dd1a9..b982b94198 100644 --- a/deps/v8/src/optimizing-compiler-thread.cc +++ b/deps/v8/src/optimizing-compiler-thread.cc @@ -48,6 +48,13 @@ void OptimizingCompilerThread::Run() { while (true) { input_queue_semaphore_->Wait(); + Logger::TimerEventScope timer( + isolate_, Logger::TimerEventScope::v8_recompile_parallel); + + if (FLAG_parallel_recompilation_delay != 0) { + OS::Sleep(FLAG_parallel_recompilation_delay); + } + if (Acquire_Load(&stop_thread_)) { stop_semaphore_->Signal(); if (FLAG_trace_parallel_recompilation) { @@ -59,20 +66,7 @@ void OptimizingCompilerThread::Run() { int64_t compiling_start = 0; if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks(); - Heap::RelocationLock relocation_lock(isolate_->heap()); - OptimizingCompiler* optimizing_compiler = NULL; - input_queue_.Dequeue(&optimizing_compiler); - Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); - - ASSERT(!optimizing_compiler->info()->closure()->IsOptimized()); - - OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); - ASSERT(status != OptimizingCompiler::FAILED); - // Prevent an unused-variable error in release mode. - USE(status); - - output_queue_.Enqueue(optimizing_compiler); - isolate_->stack_guard()->RequestCodeReadyEvent(); + CompileNext(); if (FLAG_trace_parallel_recompilation) { time_spent_compiling_ += OS::Ticks() - compiling_start; @@ -81,11 +75,40 @@ void OptimizingCompilerThread::Run() { } +void OptimizingCompilerThread::CompileNext() { + OptimizingCompiler* optimizing_compiler = NULL; + input_queue_.Dequeue(&optimizing_compiler); + Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1)); + + // The function may have already been optimized by OSR. Simply continue. + OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph(); + USE(status); // Prevent an unused-variable error in release mode. + ASSERT(status != OptimizingCompiler::FAILED); + + // The function may have already been optimized by OSR. Simply continue. + // Mark it for installing before queuing so that we can be sure of the write + // order: marking first and (after being queued) installing code second. + optimizing_compiler->info()->closure()->MarkForInstallingRecompiledCode(); + output_queue_.Enqueue(optimizing_compiler); +} + + void OptimizingCompilerThread::Stop() { + ASSERT(!IsOptimizerThread()); Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); input_queue_semaphore_->Signal(); stop_semaphore_->Wait(); + if (FLAG_parallel_recompilation_delay != 0) { + InstallOptimizedFunctions(); + // Barrier when loading queue length is not necessary since the write + // happens in CompileNext on the same thread. 
+ while (NoBarrier_Load(&queue_length_) > 0) { + CompileNext(); + InstallOptimizedFunctions(); + } + } + if (FLAG_trace_parallel_recompilation) { double compile_time = static_cast<double>(time_spent_compiling_); double total_time = static_cast<double>(time_spent_total_); @@ -96,26 +119,29 @@ void OptimizingCompilerThread::Stop() { void OptimizingCompilerThread::InstallOptimizedFunctions() { + ASSERT(!IsOptimizerThread()); HandleScope handle_scope(isolate_); int functions_installed = 0; while (!output_queue_.IsEmpty()) { - OptimizingCompiler* compiler = NULL; + OptimizingCompiler* compiler; output_queue_.Dequeue(&compiler); Compiler::InstallOptimizedCode(compiler); functions_installed++; } - if (FLAG_trace_parallel_recompilation && functions_installed != 0) { - PrintF(" ** Installed %d function(s).\n", functions_installed); - } } void OptimizingCompilerThread::QueueForOptimization( OptimizingCompiler* optimizing_compiler) { + ASSERT(IsQueueAvailable()); + ASSERT(!IsOptimizerThread()); + Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(1)); + optimizing_compiler->info()->closure()->MarkInRecompileQueue(); input_queue_.Enqueue(optimizing_compiler); input_queue_semaphore_->Signal(); } + #ifdef DEBUG bool OptimizingCompilerThread::IsOptimizerThread() { if (!FLAG_parallel_recompilation) return false; diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h index d5627266d0..8cb5e2dd59 100644 --- a/deps/v8/src/optimizing-compiler-thread.h +++ b/deps/v8/src/optimizing-compiler-thread.h @@ -29,20 +29,24 @@ #define V8_OPTIMIZING_COMPILER_THREAD_H_ #include "atomicops.h" -#include "platform.h" #include "flags.h" +#include "platform.h" #include "unbound-queue.h" namespace v8 { namespace internal { -class HGraphBuilder; +class HOptimizedGraphBuilder; class OptimizingCompiler; +class SharedFunctionInfo; class OptimizingCompilerThread : public Thread { public: explicit OptimizingCompilerThread(Isolate *isolate) : Thread("OptimizingCompilerThread"), +#ifdef DEBUG + thread_id_(0), +#endif isolate_(isolate), stop_semaphore_(OS::CreateSemaphore(0)), input_queue_semaphore_(OS::CreateSemaphore(0)), @@ -54,6 +58,7 @@ class OptimizingCompilerThread : public Thread { void Run(); void Stop(); + void CompileNext(); void QueueForOptimization(OptimizingCompiler* optimizing_compiler); void InstallOptimizedFunctions(); @@ -81,6 +86,10 @@ class OptimizingCompilerThread : public Thread { } private: +#ifdef DEBUG + int thread_id_; +#endif + Isolate* isolate_; Semaphore* stop_semaphore_; Semaphore* input_queue_semaphore_; @@ -90,10 +99,6 @@ class OptimizingCompilerThread : public Thread { volatile Atomic32 queue_length_; int64_t time_spent_compiling_; int64_t time_spent_total_; - -#ifdef DEBUG - int thread_id_; -#endif }; } } // namespace v8::internal diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc index 03e4b039cc..cdc0adb561 100644 --- a/deps/v8/src/parser.cc +++ b/deps/v8/src/parser.cc @@ -52,7 +52,10 @@ namespace internal { class PositionStack { public: explicit PositionStack(bool* ok) : top_(NULL), ok_(ok) {} - ~PositionStack() { ASSERT(!*ok_ || is_empty()); } + ~PositionStack() { + ASSERT(!*ok_ || is_empty()); + USE(ok_); + } class Element { public: @@ -254,10 +257,10 @@ Handle<String> Parser::LookupSymbol(int symbol_id) { if (static_cast<unsigned>(symbol_id) >= static_cast<unsigned>(symbol_cache_.length())) { if (scanner().is_literal_ascii()) { - return isolate()->factory()->LookupAsciiSymbol( - scanner().literal_ascii_string()); + return 
isolate()->factory()->InternalizeOneByteString( + Vector<const uint8_t>::cast(scanner().literal_ascii_string())); } else { - return isolate()->factory()->LookupTwoByteSymbol( + return isolate()->factory()->InternalizeTwoByteString( scanner().literal_utf16_string()); } } @@ -275,10 +278,10 @@ Handle<String> Parser::LookupCachedSymbol(int symbol_id) { Handle<String> result = symbol_cache_.at(symbol_id); if (result.is_null()) { if (scanner().is_literal_ascii()) { - result = isolate()->factory()->LookupAsciiSymbol( - scanner().literal_ascii_string()); + result = isolate()->factory()->InternalizeOneByteString( + Vector<const uint8_t>::cast(scanner().literal_ascii_string())); } else { - result = isolate()->factory()->LookupTwoByteSymbol( + result = isolate()->factory()->InternalizeTwoByteString( scanner().literal_utf16_string()); } symbol_cache_.at(symbol_id) = result; @@ -614,7 +617,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, ASSERT(target_stack_ == NULL); if (pre_data_ != NULL) pre_data_->Initialize(); - Handle<String> no_name = isolate()->factory()->empty_symbol(); + Handle<String> no_name = isolate()->factory()->empty_string(); FunctionLiteral* result = NULL; { Scope* scope = NewScope(top_scope_, GLOBAL_SCOPE); @@ -653,6 +656,16 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, CheckConflictingVarDeclarations(top_scope_, &ok); } + if (ok && info->parse_restriction() == ONLY_SINGLE_FUNCTION_LITERAL) { + if (body->length() != 1 || + !body->at(0)->IsExpressionStatement() || + !body->at(0)->AsExpressionStatement()-> + expression()->IsFunctionLiteral()) { + ReportMessage("unable_to_parse", Vector<const char*>::empty()); + ok = false; + } + } + if (ok) { result = factory()->NewFunctionLiteral( no_name, @@ -917,7 +930,7 @@ class ThisNamedPropertyAssignmentFinder { if (literal != NULL && literal->handle()->IsString() && !String::cast(*(literal->handle()))->Equals( - isolate_->heap()->Proto_symbol()) && + isolate_->heap()->proto_string()) && !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) { Handle<String> key = Handle<String>::cast(literal->handle()); @@ -1054,9 +1067,9 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor, // Check "use strict" directive (ES5 14.1). if (top_scope_->is_classic_mode() && - directive->Equals(isolate()->heap()->use_strict()) && + directive->Equals(isolate()->heap()->use_strict_string()) && token_loc.end_pos - token_loc.beg_pos == - isolate()->heap()->use_strict()->length() + 2) { + isolate()->heap()->use_strict_string()->length() + 2) { // TODO(mstarzinger): Global strict eval calls, need their own scope // as specified in ES5 10.4.2(3). 
The correct fix would be to always // add this scope in DoParseProgram(), but that requires adaptations @@ -1141,7 +1154,7 @@ Statement* Parser::ParseModuleElement(ZoneStringList* labels, if (estmt != NULL && estmt->expression()->AsVariableProxy() != NULL && estmt->expression()->AsVariableProxy()->name()->Equals( - isolate()->heap()->module_symbol()) && + isolate()->heap()->module_string()) && !scanner().literal_contains_escapes()) { return ParseModuleDeclaration(NULL, ok); } @@ -1164,7 +1177,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) { #endif Module* module = ParseModule(CHECK_OK); - VariableProxy* proxy = NewUnresolved(name, LET, module->interface()); + VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface()); Declaration* declaration = factory()->NewModuleDeclaration(proxy, module, top_scope_); Declare(declaration, true, CHECK_OK); @@ -1183,7 +1196,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) { if (module->body() == NULL) return factory()->NewEmptyStatement(); else - return module->body(); + return factory()->NewModuleStatement(proxy, module->body()); } @@ -1332,12 +1345,15 @@ Module* Parser::ParseModuleUrl(bool* ok) { if (FLAG_print_interface_details) PrintF("# Url "); #endif - Module* result = factory()->NewModuleUrl(symbol); - Interface* interface = result->interface(); + // Create an empty literal as long as the feature isn't finished. + USE(symbol); + Scope* scope = NewScope(top_scope_, MODULE_SCOPE); + Block* body = factory()->NewBlock(NULL, 1, false); + body->set_scope(scope); + Interface* interface = scope->interface(); + Module* result = factory()->NewModuleLiteral(body, interface); interface->Freeze(ok); ASSERT(*ok); - // Create dummy scope to avoid errors as long as the feature isn't finished. - Scope* scope = NewScope(top_scope_, MODULE_SCOPE); interface->Unify(scope->interface(), zone(), ok); ASSERT(*ok); return result; @@ -1426,7 +1442,7 @@ Statement* Parser::ParseExportDeclaration(bool* ok) { case Token::IDENTIFIER: { Handle<String> name = ParseIdentifier(CHECK_OK); // Handle 'module' as a context-sensitive keyword. - if (!name->IsEqualTo(CStrVector("module"))) { + if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) { names.Add(name, zone()); while (peek() == Token::COMMA) { Consume(Token::COMMA); @@ -1706,12 +1722,11 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) { *ok = false; return; } - const char* type = - (var->mode() == VAR) ? "var" : var->is_const_mode() ? 
"const" : "let"; Handle<String> type_string = - isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED); + isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"), + TENURED); Expression* expression = - NewThrowTypeError(isolate()->factory()->redeclaration_symbol(), + NewThrowTypeError(isolate()->factory()->redeclaration_string(), type_string, name); declaration_scope->SetIllegalRedeclaration(expression); } @@ -1973,8 +1988,8 @@ Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context, bool Parser::IsEvalOrArguments(Handle<String> string) { - return string.is_identical_to(isolate()->factory()->eval_symbol()) || - string.is_identical_to(isolate()->factory()->arguments_symbol()); + return string.is_identical_to(isolate()->factory()->eval_string()) || + string.is_identical_to(isolate()->factory()->arguments_string()); } @@ -2228,7 +2243,7 @@ Block* Parser::ParseVariableDeclarations( // Note that the function does different things depending on // the number of arguments (1 or 2). initialize = factory()->NewCallRuntime( - isolate()->factory()->InitializeConstGlobal_symbol(), + isolate()->factory()->InitializeConstGlobal_string(), Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments); } else { @@ -2251,7 +2266,7 @@ Block* Parser::ParseVariableDeclarations( // Note that the function does different things depending on // the number of arguments (2 or 3). initialize = factory()->NewCallRuntime( - isolate()->factory()->InitializeVarGlobal_symbol(), + isolate()->factory()->InitializeVarGlobal_string(), Runtime::FunctionForId(Runtime::kInitializeVarGlobal), arguments); } @@ -2363,7 +2378,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels, expr != NULL && expr->AsVariableProxy() != NULL && expr->AsVariableProxy()->name()->Equals( - isolate()->heap()->native_symbol()) && + isolate()->heap()->native_string()) && !scanner().literal_contains_escapes()) { return ParseNativeDeclaration(ok); } @@ -2375,7 +2390,7 @@ Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels, scanner().HasAnyLineTerminatorBeforeNext() || expr->AsVariableProxy() == NULL || !expr->AsVariableProxy()->name()->Equals( - isolate()->heap()->module_symbol()) || + isolate()->heap()->module_string()) || scanner().literal_contains_escapes()) { ExpectSemicolon(CHECK_OK); } @@ -2500,7 +2515,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) { Scope* declaration_scope = top_scope_->DeclarationScope(); if (declaration_scope->is_global_scope() || declaration_scope->is_eval_scope()) { - Handle<String> type = isolate()->factory()->illegal_return_symbol(); + Handle<String> type = isolate()->factory()->illegal_return_string(); Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null()); return factory()->NewExpressionStatement(throw_error); } @@ -2848,8 +2863,8 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { // implementing stack allocated block scoped variables. 
Factory* heap_factory = isolate()->factory(); Handle<String> tempstr = - heap_factory->NewConsString(heap_factory->dot_for_symbol(), name); - Handle<String> tempname = heap_factory->LookupSymbol(tempstr); + heap_factory->NewConsString(heap_factory->dot_for_string(), name); + Handle<String> tempname = heap_factory->InternalizeString(tempstr); Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname); VariableProxy* temp_proxy = factory()->NewVariableProxy(temp); ForInStatement* loop = factory()->NewForInStatement(labels); @@ -2893,7 +2908,7 @@ Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) { // the error at runtime. if (expression == NULL || !expression->IsValidLeftHandSide()) { Handle<String> type = - isolate()->factory()->invalid_lhs_in_for_in_symbol(); + isolate()->factory()->invalid_lhs_in_for_in_string(); expression = NewThrowReferenceError(type); } ForInStatement* loop = factory()->NewForInStatement(labels); @@ -3004,9 +3019,10 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) { // side expression. We could report this as a syntax error here but // for compatibility with JSC we choose to report the error at // runtime. + // TODO(ES5): Should change parsing for spec conformance. if (expression == NULL || !expression->IsValidLeftHandSide()) { Handle<String> type = - isolate()->factory()->invalid_lhs_in_assignment_symbol(); + isolate()->factory()->invalid_lhs_in_assignment_string(); expression = NewThrowReferenceError(type); } @@ -3206,8 +3222,9 @@ Expression* Parser::ParseUnaryExpression(bool* ok) { Handle<Object> literal = expression->AsLiteral()->handle(); if (op == Token::NOT) { // Convert the literal to a boolean condition and negate it. - bool condition = literal->ToBoolean()->IsTrue(); - Handle<Object> result(isolate()->heap()->ToBoolean(!condition)); + bool condition = literal->BooleanValue(); + Handle<Object> result(isolate()->heap()->ToBoolean(!condition), + isolate()); return factory()->NewLiteral(result); } else if (literal->IsNumber()) { // Compute some expressions involving only number literals. @@ -3246,7 +3263,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) { // error at runtime. if (expression == NULL || !expression->IsValidLeftHandSide()) { Handle<String> type = - isolate()->factory()->invalid_lhs_in_prefix_op_symbol(); + isolate()->factory()->invalid_lhs_in_prefix_op_string(); expression = NewThrowReferenceError(type); } @@ -3281,7 +3298,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) { // error at runtime. if (expression == NULL || !expression->IsValidLeftHandSide()) { Handle<String> type = - isolate()->factory()->invalid_lhs_in_postfix_op_symbol(); + isolate()->factory()->invalid_lhs_in_postfix_op_string(); expression = NewThrowReferenceError(type); } @@ -3356,7 +3373,7 @@ Expression* Parser::ParseLeftHandSideExpression(bool* ok) { // they are actually direct calls to eval is determined at run time. 
VariableProxy* callee = result->AsVariableProxy(); if (callee != NULL && - callee->IsVariable(isolate()->factory()->eval_symbol())) { + callee->IsVariable(isolate()->factory()->eval_string())) { top_scope_->DeclarationScope()->RecordEvalCall(); } result = factory()->NewCall(result, args, pos); @@ -3463,7 +3480,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack, fni_->PushLiteralName(index->AsLiteral()->AsPropertyName()); } else { fni_->PushLiteralName( - isolate()->factory()->anonymous_function_symbol()); + isolate()->factory()->anonymous_function_string()); } } Expect(Token::RBRACK, CHECK_OK); @@ -3715,17 +3732,16 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { int literal_index = current_function_state_->NextMaterializedLiteralIndex(); // Allocate a fixed array to hold all the object literals. - Handle<FixedArray> object_literals = - isolate()->factory()->NewFixedArray(values->length(), TENURED); - Handle<FixedDoubleArray> double_literals; - ElementsKind elements_kind = FAST_SMI_ELEMENTS; - bool has_only_undefined_values = true; - bool has_hole_values = false; + Handle<JSArray> array = + isolate()->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS); + isolate()->factory()->SetElementsCapacityAndLength( + array, values->length(), values->length()); // Fill in the literals. Heap* heap = isolate()->heap(); bool is_simple = true; int depth = 1; + bool is_holey = false; for (int i = 0, n = values->length(); i < n; i++) { MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral(); if (m_literal != NULL && m_literal->depth() + 1 > depth) { @@ -3733,91 +3749,33 @@ Expression* Parser::ParseArrayLiteral(bool* ok) { } Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i)); if (boilerplate_value->IsTheHole()) { - has_hole_values = true; - object_literals->set_the_hole(i); - if (elements_kind == FAST_DOUBLE_ELEMENTS) { - double_literals->set_the_hole(i); - } + is_holey = true; } else if (boilerplate_value->IsUndefined()) { is_simple = false; - object_literals->set(i, Smi::FromInt(0)); - if (elements_kind == FAST_DOUBLE_ELEMENTS) { - double_literals->set(i, 0); - } + JSObject::SetOwnElement( + array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode); } else { - // Examine each literal element, and adjust the ElementsKind if the - // literal element is not of a type that can be stored in the current - // ElementsKind. Start with FAST_SMI_ONLY_ELEMENTS, and transition to - // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember - // the tagged value, no matter what the ElementsKind is in case we - // ultimately end up in FAST_ELEMENTS. - has_only_undefined_values = false; - object_literals->set(i, *boilerplate_value); - if (elements_kind == FAST_SMI_ELEMENTS) { - // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or - // FAST_ELEMENTS is required. - if (!boilerplate_value->IsSmi()) { - if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) { - // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to - // avoid over-allocating in TENURED space. - double_literals = isolate()->factory()->NewFixedDoubleArray( - values->length(), TENURED); - // Copy the contents of the FAST_SMI_ONLY_ELEMENT array to the - // FAST_DOUBLE_ELEMENTS array so that they are in sync. 
- for (int j = 0; j < i; ++j) { - Object* smi_value = object_literals->get(j); - if (smi_value->IsTheHole()) { - double_literals->set_the_hole(j); - } else { - double_literals->set(j, Smi::cast(smi_value)->value()); - } - } - double_literals->set(i, boilerplate_value->Number()); - elements_kind = FAST_DOUBLE_ELEMENTS; - } else { - elements_kind = FAST_ELEMENTS; - } - } - } else if (elements_kind == FAST_DOUBLE_ELEMENTS) { - // Continue to store double values in to FAST_DOUBLE_ELEMENTS arrays - // until the first value is seen that can't be stored as a double. - if (boilerplate_value->IsNumber()) { - double_literals->set(i, boilerplate_value->Number()); - } else { - elements_kind = FAST_ELEMENTS; - } - } + JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode); } } - // Very small array literals that don't have a concrete hint about their type - // from a constant value should default to the slow case to avoid lots of - // elements transitions on really small objects. - if (has_only_undefined_values && values->length() <= 2) { - elements_kind = FAST_ELEMENTS; - } + Handle<FixedArrayBase> element_values(array->elements()); // Simple and shallow arrays can be lazily copied, we transform the // elements array to a copy-on-write array. if (is_simple && depth == 1 && values->length() > 0 && - elements_kind != FAST_DOUBLE_ELEMENTS) { - object_literals->set_map(heap->fixed_cow_array_map()); + array->HasFastSmiOrObjectElements()) { + element_values->set_map(heap->fixed_cow_array_map()); } - Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS - ? Handle<FixedArrayBase>(double_literals) - : Handle<FixedArrayBase>(object_literals); - // Remember both the literal's constant values as well as the ElementsKind // in a 2-element FixedArray. - Handle<FixedArray> literals = - isolate()->factory()->NewFixedArray(2, TENURED); + Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(2, TENURED); - if (has_hole_values || !FLAG_packed_arrays) { - elements_kind = GetHoleyElementsKind(elements_kind); - } + ElementsKind kind = array->GetElementsKind(); + kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind); - literals->set(0, Smi::FromInt(elements_kind)); + literals->set(0, Smi::FromInt(kind)); literals->set(1, *element_values); return factory()->NewArrayLiteral( @@ -4044,7 +4002,7 @@ ObjectLiteral::Property* Parser::ParseObjectLiteralGetSet(bool is_getter, next == Token::STRING || is_keyword) { Handle<String> name; if (is_keyword) { - name = isolate_->factory()->LookupAsciiSymbol(Token::String(next)); + name = isolate_->factory()->InternalizeUtf8String(Token::String(next)); } else { name = GetSymbol(CHECK_OK); } @@ -4243,7 +4201,7 @@ ZoneList<Expression*>* Parser::ParseArguments(bool* ok) { while (!done) { Expression* argument = ParseAssignmentExpression(true, CHECK_OK); result->Add(argument, zone()); - if (result->length() > kMaxNumFunctionParameters) { + if (result->length() > Code::kMaxArguments) { ReportMessageAt(scanner().location(), "too_many_arguments", Vector<const char*>::empty()); *ok = false; @@ -4365,7 +4323,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, // We want a non-null handle as the function name. 
if (should_infer_name) { - function_name = isolate()->factory()->empty_symbol(); + function_name = isolate()->factory()->empty_string(); } int num_parameters = 0; @@ -4420,7 +4378,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name, top_scope_->DeclareParameter(param_name, VAR); num_parameters++; - if (num_parameters > kMaxNumFunctionParameters) { + if (num_parameters > Code::kMaxArguments) { ReportMessageAt(scanner().location(), "too_many_parameters", Vector<const char*>::empty()); *ok = false; @@ -4671,7 +4629,7 @@ Expression* Parser::ParseV8Intrinsic(bool* ok) { top_scope_->DeclarationScope()->ForceEagerCompilation(); } - const Runtime::Function* function = Runtime::FunctionForSymbol(name); + const Runtime::Function* function = Runtime::FunctionForName(name); // Check for built-in IS_VAR macro. if (function != NULL && @@ -4766,7 +4724,7 @@ void Parser::ExpectContextualKeyword(const char* keyword, bool* ok) { if (!*ok) return; Handle<String> symbol = GetSymbol(ok); if (!*ok) return; - if (!symbol->IsEqualTo(CStrVector(keyword))) { + if (!symbol->IsUtf8EqualTo(CStrVector(keyword))) { *ok = false; ReportUnexpectedToken(scanner().current_token()); } @@ -4958,7 +4916,7 @@ void Parser::RegisterTargetUse(Label* target, Target* stop) { Expression* Parser::NewThrowReferenceError(Handle<String> type) { - return NewThrowError(isolate()->factory()->MakeReferenceError_symbol(), + return NewThrowError(isolate()->factory()->MakeReferenceError_string(), type, HandleVector<Object>(NULL, 0)); } @@ -4968,7 +4926,7 @@ Expression* Parser::NewThrowSyntaxError(Handle<String> type, int argc = first.is_null() ? 0 : 1; Vector< Handle<Object> > arguments = HandleVector<Object>(&first, argc); return NewThrowError( - isolate()->factory()->MakeSyntaxError_symbol(), type, arguments); + isolate()->factory()->MakeSyntaxError_string(), type, arguments); } @@ -4980,7 +4938,7 @@ Expression* Parser::NewThrowTypeError(Handle<String> type, Vector< Handle<Object> > arguments = HandleVector<Object>(elements, ARRAY_SIZE(elements)); return NewThrowError( - isolate()->factory()->MakeTypeError_symbol(), type, arguments); + isolate()->factory()->MakeTypeError_string(), type, arguments); } diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h index 93fd1b8aa9..6dcf7f129f 100644 --- a/deps/v8/src/parser.h +++ b/deps/v8/src/parser.h @@ -96,7 +96,6 @@ class FunctionEntry BASE_EMBEDDED { private: Vector<unsigned> backing_; - bool owns_data_; }; @@ -449,11 +448,6 @@ class Parser { Vector<Handle<String> > args); private: - // Limit on number of function parameters is chosen arbitrarily. - // Code::Flags uses only the low 17 bits of num-parameters to - // construct a hashable id, so if more than 2^17 are allowed, this - // should be checked. - static const int kMaxNumFunctionParameters = 32766; static const int kMaxNumFunctionLocals = 131071; // 2^17-1 enum Mode { diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc index 089ea38d9a..67c389e794 100644 --- a/deps/v8/src/platform-cygwin.cc +++ b/deps/v8/src/platform-cygwin.cc @@ -177,6 +177,11 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + return sysconf(_SC_NPROCESSORS_ONLN); +} + + void OS::Abort() { // Redirect to std abort to signal abnormal program termination. abort(); @@ -359,6 +364,12 @@ bool VirtualMemory::Guard(void* address) { } +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. 
+ return false; +} + + class Thread::PlatformData : public Malloced { public: PlatformData() : thread_(kNoThread) {} @@ -649,23 +660,12 @@ class SamplerThread : public Thread { SamplerRegistry::State state; while ((state = SamplerRegistry::GetState()) != SamplerRegistry::HAS_NO_SAMPLERS) { - bool cpu_profiling_enabled = - (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); - bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. - if (!cpu_profiling_enabled) { - if (rate_limiter_.SuspendIfNecessary()) continue; - } - if (cpu_profiling_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { - return; - } - } - if (runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { - return; - } + if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) { + SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this); + } else { + if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue; } OS::Sleep(interval_); } @@ -679,11 +679,6 @@ class SamplerThread : public Thread { sampler_thread->SampleContext(sampler); } - static void DoRuntimeProfile(Sampler* sampler, void* ignored) { - if (!sampler->isolate()->IsInitialized()) return; - sampler->isolate()->runtime_profiler()->NotifyTick(); - } - void SampleContext(Sampler* sampler) { HANDLE profiled_thread = sampler->platform_data()->profiled_thread(); if (profiled_thread == NULL) return; @@ -692,13 +687,14 @@ class SamplerThread : public Thread { CONTEXT context; memset(&context, 0, sizeof(context)); + Isolate* isolate = sampler->isolate(); TickSample sample_obj; - TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate()); + TickSample* sample = isolate->cpu_profiler()->TickSampleEvent(); if (sample == NULL) sample = &sample_obj; static const DWORD kSuspendFailed = static_cast<DWORD>(-1); if (SuspendThread(profiled_thread) == kSuspendFailed) return; - sample->state = sampler->isolate()->current_vm_state(); + sample->state = isolate->current_vm_state(); context.ContextFlags = CONTEXT_FULL; if (GetThreadContext(profiled_thread, &context) != 0) { @@ -718,7 +714,6 @@ class SamplerThread : public Thread { } const int interval_; - RuntimeProfilerRateLimiter rate_limiter_; // Protects the process wide state below. static Mutex* mutex_; diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc index 511759c485..14f7171a3c 100644 --- a/deps/v8/src/platform-freebsd.cc +++ b/deps/v8/src/platform-freebsd.cc @@ -181,6 +181,11 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + return sysconf(_SC_NPROCESSORS_ONLN); +} + + void OS::Abort() { // Redirect to std abort to signal abnormal program termination. 
abort(); @@ -198,6 +203,31 @@ void OS::DebugBreak() { } +void OS::DumpBacktrace() { + void* trace[100]; + int size = backtrace(trace, ARRAY_SIZE(trace)); + char** symbols = backtrace_symbols(trace, size); + fprintf(stderr, "\n==== C stack trace ===============================\n\n"); + if (size == 0) { + fprintf(stderr, "(empty)\n"); + } else if (symbols == NULL) { + fprintf(stderr, "(no symbols)\n"); + } else { + for (int i = 1; i < size; ++i) { + fprintf(stderr, "%2d: ", i); + char mangled[201]; + if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT + fprintf(stderr, "%s\n", mangled); + } else { + fprintf(stderr, "??\n"); + } + } + } + fflush(stderr); + free(symbols); +} + + class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -456,6 +486,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) { } +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + + class Thread::PlatformData : public Malloced { public: pthread_t thread_; // Thread handle for pthread. @@ -679,7 +715,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { if (sampler == NULL || !sampler->IsActive()) return; TickSample sample_obj; - TickSample* sample = CpuProfiler::TickSampleEvent(isolate); + TickSample* sample = isolate->cpu_profiler()->TickSampleEvent(); if (sample == NULL) sample = &sample_obj; // Extracting the sample from the context is extremely machine dependent. @@ -706,11 +742,6 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { class SignalSender : public Thread { public: - enum SleepInterval { - HALF_INTERVAL, - FULL_INTERVAL - }; - static const int kSignalSenderStackSize = 64 * KB; explicit SignalSender(int interval) @@ -761,38 +792,14 @@ class SignalSender : public Thread { SamplerRegistry::State state; while ((state = SamplerRegistry::GetState()) != SamplerRegistry::HAS_NO_SAMPLERS) { - bool cpu_profiling_enabled = - (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); - bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. - if (!cpu_profiling_enabled) { - if (rate_limiter_.SuspendIfNecessary()) continue; - } - if (cpu_profiling_enabled && runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { - return; - } - Sleep(HALF_INTERVAL); - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { - return; - } - Sleep(HALF_INTERVAL); + if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) { + SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this); } else { - if (cpu_profiling_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, - this)) { - return; - } - } - if (runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, - NULL)) { - return; - } - } - Sleep(FULL_INTERVAL); + if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue; } + Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough. 
} } @@ -802,21 +809,15 @@ class SignalSender : public Thread { sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); } - static void DoRuntimeProfile(Sampler* sampler, void* ignored) { - if (!sampler->isolate()->IsInitialized()) return; - sampler->isolate()->runtime_profiler()->NotifyTick(); - } - void SendProfilingSignal(pthread_t tid) { if (!signal_handler_installed_) return; pthread_kill(tid, SIGPROF); } - void Sleep(SleepInterval full_or_half) { + void Sleep() { // Convert ms to us and subtract 100 us to compensate delays // occuring during signal delivery. useconds_t interval = interval_ * 1000 - 100; - if (full_or_half == HALF_INTERVAL) interval /= 2; int result = usleep(interval); #ifdef DEBUG if (result != 0 && errno != EINTR) { @@ -831,7 +832,6 @@ class SignalSender : public Thread { } const int interval_; - RuntimeProfilerRateLimiter rate_limiter_; // Protects the process wide state below. static Mutex* mutex_; diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc index beb2ccee29..d21f160b43 100644 --- a/deps/v8/src/platform-linux.cc +++ b/deps/v8/src/platform-linux.cc @@ -38,6 +38,11 @@ #include <sys/types.h> #include <stdlib.h> +#if defined(__GLIBC__) +#include <execinfo.h> +#include <cxxabi.h> +#endif + // Ubuntu Dapper requires memory pages to be marked as // executable. Otherwise, OS raises an exception when executing code // in that page. @@ -151,10 +156,17 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) { case SUDIV: search_string = "idiva"; break; + case VFP32DREGS: + // This case is handled specially below. + break; default: UNREACHABLE(); } + if (feature == VFP32DREGS) { + return ArmCpuHasFeature(VFP3) && !CPUInfoContainsString("d16"); + } + if (CPUInfoContainsString(search_string)) { return true; } @@ -391,6 +403,11 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + return sysconf(_SC_NPROCESSORS_ONLN); +} + + void OS::Abort() { // Redirect to std abort to signal abnormal program termination. if (FLAG_break_on_abort) { @@ -415,6 +432,37 @@ void OS::DebugBreak() { } +void OS::DumpBacktrace() { +#if defined(__GLIBC__) + void* trace[100]; + int size = backtrace(trace, ARRAY_SIZE(trace)); + char** symbols = backtrace_symbols(trace, size); + fprintf(stderr, "\n==== C stack trace ===============================\n\n"); + if (size == 0) { + fprintf(stderr, "(empty)\n"); + } else if (symbols == NULL) { + fprintf(stderr, "(no symbols)\n"); + } else { + for (int i = 1; i < size; ++i) { + fprintf(stderr, "%2d: ", i); + char mangled[201]; + if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) { // NOLINT + int status; + size_t length; + char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status); + fprintf(stderr, "%s\n", demangled ? demangled : mangled); + free(demangled); + } else { + fprintf(stderr, "??\n"); + } + } + } + fflush(stderr); + free(symbols); +#endif +} + + class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -499,19 +547,20 @@ void OS::LogSharedLibraryAddresses() { // the beginning of the filename or the end of the line. do { c = getc(fp); - } while ((c != EOF) && (c != '\n') && (c != '/')); + } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '[')); if (c == EOF) break; // EOF: Was unexpected, just exit. // Process the filename if found. - if (c == '/') { - ungetc(c, fp); // Push the '/' back into the stream to be read below. 
+ if ((c == '/') || (c == '[')) { + // Push the '/' or '[' back into the stream to be read below. + ungetc(c, fp); // Read to the end of the line. Exit if the read fails. if (fgets(lib_name, kLibNameLen, fp) == NULL) break; // Drop the newline character read by fgets. We do not need to check // for a zero-length string because we know that we at least read the - // '/' character. + // '/' or '[' character. lib_name[strlen(lib_name) - 1] = '\0'; } else { // No library name found, just record the raw address range. @@ -722,6 +771,11 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) { } +bool VirtualMemory::HasLazyCommits() { + return true; +} + + class Thread::PlatformData : public Malloced { public: PlatformData() : thread_(kNoThread) {} @@ -1034,7 +1088,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { if (sampler == NULL || !sampler->IsActive()) return; TickSample sample_obj; - TickSample* sample = CpuProfiler::TickSampleEvent(isolate); + TickSample* sample = isolate->cpu_profiler()->TickSampleEvent(); if (sample == NULL) sample = &sample_obj; // Extracting the sample from the context is extremely machine dependent. @@ -1086,11 +1140,6 @@ class Sampler::PlatformData : public Malloced { class SignalSender : public Thread { public: - enum SleepInterval { - HALF_INTERVAL, - FULL_INTERVAL - }; - static const int kSignalSenderStackSize = 64 * KB; explicit SignalSender(int interval) @@ -1146,43 +1195,16 @@ class SignalSender : public Thread { SamplerRegistry::State state; while ((state = SamplerRegistry::GetState()) != SamplerRegistry::HAS_NO_SAMPLERS) { - bool cpu_profiling_enabled = - (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); - bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); - if (cpu_profiling_enabled && !signal_handler_installed_) { - InstallSignalHandler(); - } else if (!cpu_profiling_enabled && signal_handler_installed_) { - RestoreSignalHandler(); - } // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. - if (!cpu_profiling_enabled) { - if (rate_limiter_.SuspendIfNecessary()) continue; - } - if (cpu_profiling_enabled && runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { - return; - } - Sleep(HALF_INTERVAL); - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { - return; - } - Sleep(HALF_INTERVAL); + if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) { + if (!signal_handler_installed_) InstallSignalHandler(); + SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this); } else { - if (cpu_profiling_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, - this)) { - return; - } - } - if (runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, - NULL)) { - return; - } - } - Sleep(FULL_INTERVAL); + if (signal_handler_installed_) RestoreSignalHandler(); + if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue; } + Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough. } } @@ -1192,26 +1214,22 @@ class SignalSender : public Thread { sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); } - static void DoRuntimeProfile(Sampler* sampler, void* ignored) { - if (!sampler->isolate()->IsInitialized()) return; - sampler->isolate()->runtime_profiler()->NotifyTick(); - } - void SendProfilingSignal(int tid) { if (!signal_handler_installed_) return; // Glibc doesn't provide a wrapper for tgkill(2). 
#if defined(ANDROID) syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF); #else - syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF); + int result = syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF); + USE(result); + ASSERT(result == 0); #endif } - void Sleep(SleepInterval full_or_half) { + void Sleep() { // Convert ms to us and subtract 100 us to compensate delays // occuring during signal delivery. useconds_t interval = interval_ * 1000 - 100; - if (full_or_half == HALF_INTERVAL) interval /= 2; #if defined(ANDROID) usleep(interval); #else @@ -1231,7 +1249,6 @@ class SignalSender : public Thread { const int vm_tgid_; const int interval_; - RuntimeProfilerRateLimiter rate_limiter_; // Protects the process wide state below. static Mutex* mutex_; diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc index a216f6e4ca..30e2b890bb 100644 --- a/deps/v8/src/platform-macos.cc +++ b/deps/v8/src/platform-macos.cc @@ -171,6 +171,11 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + return sysconf(_SC_NPROCESSORS_ONLN); +} + + void OS::Abort() { // Redirect to std abort to signal abnormal program termination abort(); @@ -182,6 +187,11 @@ void OS::DebugBreak() { } +void OS::DumpBacktrace() { + // Currently unsupported. +} + + class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -471,6 +481,11 @@ bool VirtualMemory::ReleaseRegion(void* address, size_t size) { } +bool VirtualMemory::HasLazyCommits() { + return false; +} + + class Thread::PlatformData : public Malloced { public: PlatformData() : thread_(kNoThread) {} @@ -782,23 +797,12 @@ class SamplerThread : public Thread { SamplerRegistry::State state; while ((state = SamplerRegistry::GetState()) != SamplerRegistry::HAS_NO_SAMPLERS) { - bool cpu_profiling_enabled = - (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); - bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. 
- if (!cpu_profiling_enabled) { - if (rate_limiter_.SuspendIfNecessary()) continue; - } - if (cpu_profiling_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { - return; - } - } - if (runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { - return; - } + if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) { + SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this); + } else { + if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue; } OS::Sleep(interval_); } @@ -812,15 +816,11 @@ class SamplerThread : public Thread { sampler_thread->SampleContext(sampler); } - static void DoRuntimeProfile(Sampler* sampler, void* ignored) { - if (!sampler->isolate()->IsInitialized()) return; - sampler->isolate()->runtime_profiler()->NotifyTick(); - } - void SampleContext(Sampler* sampler) { thread_act_t profiled_thread = sampler->platform_data()->profiled_thread(); + Isolate* isolate = sampler->isolate(); TickSample sample_obj; - TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate()); + TickSample* sample = isolate->cpu_profiler()->TickSampleEvent(); if (sample == NULL) sample = &sample_obj; if (KERN_SUCCESS != thread_suspend(profiled_thread)) return; @@ -851,7 +851,7 @@ class SamplerThread : public Thread { flavor, reinterpret_cast<natural_t*>(&state), &count) == KERN_SUCCESS) { - sample->state = sampler->isolate()->current_vm_state(); + sample->state = isolate->current_vm_state(); sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip)); sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp)); sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp)); @@ -862,7 +862,6 @@ class SamplerThread : public Thread { } const int interval_; - RuntimeProfilerRateLimiter rate_limiter_; // Protects the process wide state below. static Mutex* mutex_; diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc index 7aaa7b204a..0fef0633e9 100644 --- a/deps/v8/src/platform-nullos.cc +++ b/deps/v8/src/platform-nullos.cc @@ -266,6 +266,12 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + UNIMPLEMENTED(); + return 0; +} + + void OS::Abort() { // Minimalistic implementation for bootstrapping. abort(); @@ -277,6 +283,11 @@ void OS::DebugBreak() { } +void OS::DumpBacktrace() { + // Currently unsupported. +} + + OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) { UNIMPLEMENTED(); return NULL; @@ -340,6 +351,12 @@ bool VirtualMemory::Guard(void* address) { } +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + + class Thread::PlatformData : public Malloced { public: PlatformData() { diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc index 408d4dc0f8..e48d4cb35a 100644 --- a/deps/v8/src/platform-openbsd.cc +++ b/deps/v8/src/platform-openbsd.cc @@ -204,6 +204,11 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + return sysconf(_SC_NPROCESSORS_ONLN); +} + + void OS::Abort() { // Redirect to std abort to signal abnormal program termination. abort(); @@ -215,6 +220,11 @@ void OS::DebugBreak() { } +void OS::DumpBacktrace() { + // Currently unsupported. +} + + class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -504,6 +514,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) { } +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. 
+ return false; +} + + class Thread::PlatformData : public Malloced { public: PlatformData() : thread_(kNoThread) {} @@ -732,7 +748,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { if (sampler == NULL || !sampler->IsActive()) return; TickSample sample_obj; - TickSample* sample = CpuProfiler::TickSampleEvent(isolate); + TickSample* sample = isolate->cpu_profiler()->TickSampleEvent(); if (sample == NULL) sample = &sample_obj; // Extracting the sample from the context is extremely machine dependent. @@ -778,11 +794,6 @@ class Sampler::PlatformData : public Malloced { class SignalSender : public Thread { public: - enum SleepInterval { - HALF_INTERVAL, - FULL_INTERVAL - }; - static const int kSignalSenderStackSize = 64 * KB; explicit SignalSender(int interval) @@ -838,43 +849,16 @@ class SignalSender : public Thread { SamplerRegistry::State state; while ((state = SamplerRegistry::GetState()) != SamplerRegistry::HAS_NO_SAMPLERS) { - bool cpu_profiling_enabled = - (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); - bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); - if (cpu_profiling_enabled && !signal_handler_installed_) { - InstallSignalHandler(); - } else if (!cpu_profiling_enabled && signal_handler_installed_) { - RestoreSignalHandler(); - } // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. - if (!cpu_profiling_enabled) { - if (rate_limiter_.SuspendIfNecessary()) continue; - } - if (cpu_profiling_enabled && runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { - return; - } - Sleep(HALF_INTERVAL); - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { - return; - } - Sleep(HALF_INTERVAL); + if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) { + if (!signal_handler_installed_) InstallSignalHandler(); + SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this); } else { - if (cpu_profiling_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, - this)) { - return; - } - } - if (runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, - NULL)) { - return; - } - } - Sleep(FULL_INTERVAL); + if (signal_handler_installed_) RestoreSignalHandler(); + if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue; } + Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough. } } @@ -884,21 +868,15 @@ class SignalSender : public Thread { sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); } - static void DoRuntimeProfile(Sampler* sampler, void* ignored) { - if (!sampler->isolate()->IsInitialized()) return; - sampler->isolate()->runtime_profiler()->NotifyTick(); - } - void SendProfilingSignal(pthread_t tid) { if (!signal_handler_installed_) return; pthread_kill(tid, SIGPROF); } - void Sleep(SleepInterval full_or_half) { + void Sleep() { // Convert ms to us and subtract 100 us to compensate delays // occuring during signal delivery. useconds_t interval = interval_ * 1000 - 100; - if (full_or_half == HALF_INTERVAL) interval /= 2; int result = usleep(interval); #ifdef DEBUG if (result != 0 && errno != EINTR) { @@ -914,7 +892,6 @@ class SignalSender : public Thread { const int vm_tgid_; const int interval_; - RuntimeProfilerRateLimiter rate_limiter_; // Protects the process wide state below. 
static Mutex* mutex_; diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc index 2b80015161..0016d59d3a 100644 --- a/deps/v8/src/platform-posix.cc +++ b/deps/v8/src/platform-posix.cc @@ -109,20 +109,11 @@ void* OS::GetRandomMmapAddr() { raw_addr &= V8_UINT64_C(0x3ffffffff000); #else uint32_t raw_addr = V8::RandomPrivate(isolate); - - // For our 32-bit mmap() hint, we pick a random address in the bottom - // half of the top half of the address space (that is, the third quarter). - // Because we do not MAP_FIXED, this will be treated only as a hint -- the - // system will not fail to mmap() because something else happens to already - // be mapped at our random address. We deliberately set the hint high enough - // to get well above the system's break (that is, the heap); systems will - // either try the hint and if that fails move higher (MacOS and other BSD - // derivatives) or try the hint and if that fails allocate as if there were - // no hint at all (Linux, Solaris, illumos and derivatives). The high hint - // prevents the break from getting hemmed in at low values, ceding half of - // the address space to the system heap. + // The range 0x20000000 - 0x60000000 is relatively unpopulated across a + // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos + // 10.6 and 10.7. raw_addr &= 0x3ffff000; - raw_addr += 0x80000000; + raw_addr += 0x20000000; #endif return reinterpret_cast<void*>(raw_addr); } @@ -151,11 +142,19 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN)) UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS)) UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN)) UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG)) +UNARY_MATH_FUNCTION(exp, CreateExpFunction()) UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction()) #undef MATH_FUNCTION +void lazily_initialize_fast_exp() { + if (fast_exp_function == NULL) { + init_fast_exp_function(); + } +} + + double OS::nan_value() { // NAN from math.h is defined in C99 and not in POSIX. return NAN; @@ -341,6 +340,7 @@ void POSIXPostSetUp() { init_fast_cos_function(); init_fast_tan_function(); init_fast_log_function(); + // fast_exp is initialized lazily. init_fast_sqrt_function(); } diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc index 4248ea214f..0e616d1ab4 100644 --- a/deps/v8/src/platform-solaris.cc +++ b/deps/v8/src/platform-solaris.cc @@ -125,12 +125,8 @@ const char* OS::LocalTimezone(double time) { double OS::LocalTimeOffset() { - // On Solaris, struct tm does not contain a tm_gmtoff field. - time_t utc = time(NULL); - ASSERT(utc != -1); - struct tm* loc = localtime(&utc); - ASSERT(loc != NULL); - return static_cast<double>((mktime(loc) - utc) * msPerSecond); + tzset(); + return -static_cast<double>(timezone * msPerSecond); } @@ -195,6 +191,11 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + return sysconf(_SC_NPROCESSORS_ONLN); +} + + void OS::Abort() { // Redirect to std abort to signal abnormal program termination. abort(); @@ -206,6 +207,11 @@ void OS::DebugBreak() { } +void OS::DumpBacktrace() { + // Currently unsupported. +} + + class PosixMemoryMappedFile : public OS::MemoryMappedFile { public: PosixMemoryMappedFile(FILE* file, void* memory, int size) @@ -448,6 +454,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) { } +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. 
+ return false; +} + + class Thread::PlatformData : public Malloced { public: PlatformData() : thread_(kNoThread) { } @@ -670,7 +682,7 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) { if (sampler == NULL || !sampler->IsActive()) return; TickSample sample_obj; - TickSample* sample = CpuProfiler::TickSampleEvent(isolate); + TickSample* sample = isolate->cpu_profiler()->TickSampleEvent(); if (sample == NULL) sample = &sample_obj; // Extracting the sample from the context is extremely machine dependent. @@ -699,11 +711,6 @@ class Sampler::PlatformData : public Malloced { class SignalSender : public Thread { public: - enum SleepInterval { - HALF_INTERVAL, - FULL_INTERVAL - }; - static const int kSignalSenderStackSize = 64 * KB; explicit SignalSender(int interval) @@ -758,44 +765,16 @@ class SignalSender : public Thread { SamplerRegistry::State state; while ((state = SamplerRegistry::GetState()) != SamplerRegistry::HAS_NO_SAMPLERS) { - bool cpu_profiling_enabled = - (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); - bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); - if (cpu_profiling_enabled && !signal_handler_installed_) { - InstallSignalHandler(); - } else if (!cpu_profiling_enabled && signal_handler_installed_) { - RestoreSignalHandler(); - } - // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. - if (!cpu_profiling_enabled) { - if (rate_limiter_.SuspendIfNecessary()) continue; - } - if (cpu_profiling_enabled && runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { - return; - } - Sleep(HALF_INTERVAL); - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { - return; - } - Sleep(HALF_INTERVAL); + if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) { + if (!signal_handler_installed_) InstallSignalHandler(); + SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this); } else { - if (cpu_profiling_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, - this)) { - return; - } - } - if (runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, - NULL)) { - return; - } - } - Sleep(FULL_INTERVAL); + if (signal_handler_installed_) RestoreSignalHandler(); + if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue; } + Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough. } } @@ -805,21 +784,15 @@ class SignalSender : public Thread { sender->SendProfilingSignal(sampler->platform_data()->vm_tid()); } - static void DoRuntimeProfile(Sampler* sampler, void* ignored) { - if (!sampler->isolate()->IsInitialized()) return; - sampler->isolate()->runtime_profiler()->NotifyTick(); - } - void SendProfilingSignal(pthread_t tid) { if (!signal_handler_installed_) return; pthread_kill(tid, SIGPROF); } - void Sleep(SleepInterval full_or_half) { + void Sleep() { // Convert ms to us and subtract 100 us to compensate delays // occuring during signal delivery. useconds_t interval = interval_ * 1000 - 100; - if (full_or_half == HALF_INTERVAL) interval /= 2; int result = usleep(interval); #ifdef DEBUG if (result != 0 && errno != EINTR) { @@ -834,7 +807,6 @@ class SignalSender : public Thread { } const int interval_; - RuntimeProfilerRateLimiter rate_limiter_; // Protects the process wide state below. 
static Mutex* mutex_; diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc index 49463be8e0..c1bae9352c 100644 --- a/deps/v8/src/platform-win32.cc +++ b/deps/v8/src/platform-win32.cc @@ -27,6 +27,17 @@ // Platform specific code for Win32. +// Secure API functions are not available using MinGW with msvcrt.dll +// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to +// disable definition of secure API functions in standard headers that +// would conflict with our own implementation. +#ifdef __MINGW32__ +#include <_mingw.h> +#ifdef MINGW_HAS_SECURE_API +#undef MINGW_HAS_SECURE_API +#endif // MINGW_HAS_SECURE_API +#endif // __MINGW32__ + #define V8_WIN32_HEADERS_FULL #include "win32-headers.h" @@ -65,8 +76,6 @@ inline void MemoryBarrier() { #endif // __MINGW64_VERSION_MAJOR -#ifndef MINGW_HAS_SECURE_API - int localtime_s(tm* out_tm, const time_t* time) { tm* posix_local_time_struct = localtime(time); if (posix_local_time_struct == NULL) return 1; @@ -113,8 +122,6 @@ int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) { return 0; } -#endif // MINGW_HAS_SECURE_API - #endif // __MINGW32__ // Generate a pseudo-random number in the range 0-2^31-1. Usually @@ -199,9 +206,17 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN)) UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS)) UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN)) UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG)) +UNARY_MATH_FUNCTION(exp, CreateExpFunction()) UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction()) -#undef MATH_FUNCTION +#undef UNARY_MATH_FUNCTION + + +void lazily_initialize_fast_exp() { + if (fast_exp_function == NULL) { + init_fast_exp_function(); + } +} void MathSetup() { @@ -212,6 +227,7 @@ void MathSetup() { init_fast_cos_function(); init_fast_tan_function(); init_fast_log_function(); + // fast_exp is initialized lazily. init_fast_sqrt_function(); } @@ -803,6 +819,9 @@ void OS::StrNCpy(Vector<char> dest, const char* src, size_t n) { } +#undef _TRUNCATE +#undef STRUNCATE + // We keep the lowest and highest addresses mapped as a quick way of // determining that pointers are outside the heap (used mostly in assertions // and verification). The estimate is conservative, i.e., not all addresses in @@ -959,6 +978,13 @@ void OS::Sleep(int milliseconds) { } +int OS::NumberOfCores() { + SYSTEM_INFO info; + GetSystemInfo(&info); + return info.dwNumberOfProcessors; +} + + void OS::Abort() { if (IsDebuggerPresent() || FLAG_break_on_abort) { DebugBreak(); @@ -978,6 +1004,11 @@ void OS::DebugBreak() { } +void OS::DumpBacktrace() { + // Currently unsupported. +} + + class Win32MemoryMappedFile : public OS::MemoryMappedFile { public: Win32MemoryMappedFile(HANDLE file, @@ -1204,6 +1235,11 @@ TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED) // application is closed. } +#undef DBGHELP_FUNCTION_LIST +#undef TLHELP32_FUNCTION_LIST +#undef DLL_FUNC_VAR +#undef DLL_FUNC_TYPE + // Load the symbols for generating stack traces. static bool LoadSymbols(HANDLE process_handle) { @@ -1551,6 +1587,12 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) { } +bool VirtualMemory::HasLazyCommits() { + // TODO(alph): implement for the platform. + return false; +} + + // ---------------------------------------------------------------------------- // Win32 thread support. 
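
Every port in this section gains VirtualMemory::HasLazyCommits(): the Linux version answers true, while the Mac OS X, OpenBSD, Solaris, Windows and null ports return false for now (several carry a TODO). A minimal, self-contained sketch of how a caller could branch on the new query follows; the InitialCommitSize helper and its policy are hypothetical and not part of this patch:

#include <cstddef>

namespace sketch {

// Stand-in for the new per-platform query added above.
struct VirtualMemory {
  static bool HasLazyCommits() { return true; }  // e.g. the Linux port
};

// Hypothetical consumer: when the OS commits pages lazily, a growable area
// can be committed up front because untouched pages cost no physical memory.
std::size_t InitialCommitSize(std::size_t reserved, std::size_t minimum) {
  return VirtualMemory::HasLazyCommits() ? reserved : minimum;
}

}  // namespace sketch
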
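platform-posix.cc and platform-win32.cc also add fast_exp, generated by CreateExpFunction() and deliberately left out of the eager init_fast_* setup because, per the platform.h comment, its lookup data costs 16KB. A simplified stand-alone sketch of that lazy-initialization shape; PortableExp merely stands in for the generated code, and the assert reflects an assumption about caller discipline rather than the real macro expansion:

#include <cassert>
#include <cmath>
#include <cstddef>

typedef double (*UnaryMathFunction)(double);

static UnaryMathFunction fast_exp_function = NULL;

// Stand-in for the code CreateExpFunction() generates in the real patch.
static double PortableExp(double x) { return std::exp(x); }

void lazily_initialize_fast_exp() {
  if (fast_exp_function == NULL) {
    fast_exp_function = &PortableExp;  // the real version builds ~16KB of lookup data here
  }
}

double fast_exp(double x) {
  assert(fast_exp_function != NULL);  // callers run lazily_initialize_fast_exp() first
  return fast_exp_function(x);
}
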
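The SignalSender/SamplerThread run loops on every port shown here lose the HALF_INTERVAL/FULL_INTERVAL split, DoRuntimeProfile and the RuntimeProfilerRateLimiter: when CPU profiling samplers exist they are iterated every tick, otherwise the thread parks in RuntimeProfiler::WaitForSomeIsolateToEnterJS(). A schematic of the new loop, with trivial stubs in place of SamplerRegistry, RuntimeProfiler and the per-port sleep:

// Schematic only; Stub stands in for the real registry and profiler calls.
enum State { HAS_NO_SAMPLERS, HAS_SAMPLERS, HAS_CPU_PROFILING_SAMPLERS };

struct Stub {
  static State GetState() { return HAS_NO_SAMPLERS; }
  static void IterateActiveSamplers() {}
  static bool WaitForSomeIsolateToEnterJS() { return false; }
  static void Sleep() {}
};

void RunLoop() {
  State state;
  while ((state = Stub::GetState()) != HAS_NO_SAMPLERS) {
    if (state == HAS_CPU_PROFILING_SAMPLERS) {
      Stub::IterateActiveSamplers();            // sample every registered isolate
    } else {
      // Nothing to sample: block until some isolate starts running JS again.
      if (Stub::WaitForSomeIsolateToEnterJS()) continue;
    }
    Stub::Sleep();                              // one full interval; HALF_INTERVAL is gone
  }
}
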
@@ -1995,23 +2037,12 @@ class SamplerThread : public Thread { SamplerRegistry::State state; while ((state = SamplerRegistry::GetState()) != SamplerRegistry::HAS_NO_SAMPLERS) { - bool cpu_profiling_enabled = - (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS); - bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled(); // When CPU profiling is enabled both JavaScript and C++ code is // profiled. We must not suspend. - if (!cpu_profiling_enabled) { - if (rate_limiter_.SuspendIfNecessary()) continue; - } - if (cpu_profiling_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) { - return; - } - } - if (runtime_profiler_enabled) { - if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) { - return; - } + if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) { + SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this); + } else { + if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue; } OS::Sleep(interval_); } @@ -2025,11 +2056,6 @@ class SamplerThread : public Thread { sampler_thread->SampleContext(sampler); } - static void DoRuntimeProfile(Sampler* sampler, void* ignored) { - if (!sampler->isolate()->IsInitialized()) return; - sampler->isolate()->runtime_profiler()->NotifyTick(); - } - void SampleContext(Sampler* sampler) { HANDLE profiled_thread = sampler->platform_data()->profiled_thread(); if (profiled_thread == NULL) return; @@ -2038,13 +2064,14 @@ class SamplerThread : public Thread { CONTEXT context; memset(&context, 0, sizeof(context)); + Isolate* isolate = sampler->isolate(); TickSample sample_obj; - TickSample* sample = CpuProfiler::TickSampleEvent(sampler->isolate()); + TickSample* sample = isolate->cpu_profiler()->TickSampleEvent(); if (sample == NULL) sample = &sample_obj; static const DWORD kSuspendFailed = static_cast<DWORD>(-1); if (SuspendThread(profiled_thread) == kSuspendFailed) return; - sample->state = sampler->isolate()->current_vm_state(); + sample->state = isolate->current_vm_state(); context.ContextFlags = CONTEXT_FULL; if (GetThreadContext(profiled_thread, &context) != 0) { @@ -2064,7 +2091,6 @@ class SamplerThread : public Thread { } const int interval_; - RuntimeProfilerRateLimiter rate_limiter_; // Protects the process wide state below. static Mutex* mutex_; diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h index de896acad5..56ac61dc7b 100644 --- a/deps/v8/src/platform.h +++ b/deps/v8/src/platform.h @@ -119,12 +119,16 @@ class Mutex; double ceiling(double x); double modulo(double x, double y); -// Custom implementation of sin, cos, tan and log. +// Custom implementation of math functions. double fast_sin(double input); double fast_cos(double input); double fast_tan(double input); double fast_log(double input); +double fast_exp(double input); double fast_sqrt(double input); +// The custom exp implementation needs 16KB of lookup data; initialize it +// on demand. +void lazily_initialize_fast_exp(); // Forward declarations. class Socket; @@ -235,12 +239,17 @@ class OS { // Sleep for a number of milliseconds. static void Sleep(const int milliseconds); + static int NumberOfCores(); + // Abort the current process. static void Abort(); // Debug break. static void DebugBreak(); + // Dump C++ current stack trace (only functional on Linux). + static void DumpBacktrace(); + // Walk the stack. static const int kStackWalkError = -1; static const int kStackWalkMaxNameLen = 256; @@ -432,6 +441,11 @@ class VirtualMemory { // and the same size it was reserved with. 
static bool ReleaseRegion(void* base, size_t size); + // Returns true if OS performs lazy commits, i.e. the memory allocation call + // defers actual physical memory allocation till the first memory access. + // Otherwise returns false. + static bool HasLazyCommits(); + private: void* address_; // Start address of the virtual memory. size_t size_; // Size of the virtual memory. diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc index 98c343e79c..d0425b4b22 100644 --- a/deps/v8/src/preparse-data.cc +++ b/deps/v8/src/preparse-data.cc @@ -113,7 +113,7 @@ CompleteParserRecorder::CompleteParserRecorder() literal_chars_(0), symbol_store_(0), symbol_keys_(0), - symbol_table_(vector_compare), + string_table_(vector_compare), symbol_id_(0) { } @@ -123,7 +123,7 @@ void CompleteParserRecorder::LogSymbol(int start, bool is_ascii, Vector<const byte> literal_bytes) { Key key = { is_ascii, literal_bytes }; - HashMap::Entry* entry = symbol_table_.Lookup(&key, hash, true); + HashMap::Entry* entry = string_table_.Lookup(&key, hash, true); int id = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); if (id == 0) { // Copy literal contents for later comparison. diff --git a/deps/v8/src/preparse-data.h b/deps/v8/src/preparse-data.h index f347430208..3a1e99d5d1 100644 --- a/deps/v8/src/preparse-data.h +++ b/deps/v8/src/preparse-data.h @@ -221,7 +221,7 @@ class CompleteParserRecorder: public FunctionLoggingParserRecorder { Collector<byte> literal_chars_; Collector<byte> symbol_store_; Collector<Key> symbol_keys_; - HashMap symbol_table_; + HashMap string_table_; int symbol_id_; }; diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc index 21da4f80d4..c461d8a4bd 100644 --- a/deps/v8/src/preparser.cc +++ b/deps/v8/src/preparser.cc @@ -1110,24 +1110,8 @@ PreParser::Expression PreParser::ParsePrimaryExpression(bool* ok) { break; } - case i::Token::FUTURE_RESERVED_WORD: { - Next(); - i::Scanner::Location location = scanner_->location(); - ReportMessageAt(location.beg_pos, location.end_pos, - "reserved_word", NULL); - *ok = false; - return Expression::Default(); - } - + case i::Token::FUTURE_RESERVED_WORD: case i::Token::FUTURE_STRICT_RESERVED_WORD: - if (!is_classic_mode()) { - Next(); - i::Scanner::Location location = scanner_->location(); - ReportMessageAt(location, "strict_reserved_word", NULL); - *ok = false; - return Expression::Default(); - } - // FALLTHROUGH case i::Token::IDENTIFIER: { Identifier id = ParseIdentifier(CHECK_OK); result = Expression::FromIdentifier(id); diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc index 0d8dadce1a..df6183a366 100644 --- a/deps/v8/src/prettyprinter.cc +++ b/deps/v8/src/prettyprinter.cc @@ -42,6 +42,7 @@ PrettyPrinter::PrettyPrinter() { output_ = NULL; size_ = 0; pos_ = 0; + InitializeAstVisitor(); } @@ -122,6 +123,14 @@ void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) { } +void PrettyPrinter::VisitModuleStatement(ModuleStatement* node) { + Print("module "); + PrintLiteral(node->proxy()->name(), false); + Print(" "); + Visit(node->body()); +} + + void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) { Visit(node->expression()); Print(";"); @@ -353,7 +362,7 @@ void PrettyPrinter::VisitThrow(Throw* node) { void PrettyPrinter::VisitProperty(Property* node) { Expression* key = node->key(); Literal* literal = key->AsLiteral(); - if (literal != NULL && literal->handle()->IsSymbol()) { + if (literal != NULL && literal->handle()->IsInternalizedString()) { Print("("); Visit(node->obj()); 
Print(")."); @@ -822,6 +831,13 @@ void AstPrinter::VisitModuleUrl(ModuleUrl* node) { } +void AstPrinter::VisitModuleStatement(ModuleStatement* node) { + IndentedScope indent(this, "MODULE"); + PrintLiteralIndented("NAME", node->proxy()->name(), true); + PrintStatements(node->body()->statements()); +} + + void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) { Visit(node->expression()); } @@ -1052,7 +1068,7 @@ void AstPrinter::VisitProperty(Property* node) { IndentedScope indent(this, "PROPERTY", node); Visit(node->obj()); Literal* literal = node->key()->AsLiteral(); - if (literal != NULL && literal->handle()->IsSymbol()) { + if (literal != NULL && literal->handle()->IsInternalizedString()) { PrintLiteralIndented("NAME", literal->handle(), false); } else { PrintIndentedVisit("KEY", node->key()); diff --git a/deps/v8/src/prettyprinter.h b/deps/v8/src/prettyprinter.h index 9ac7257640..41175ab2ae 100644 --- a/deps/v8/src/prettyprinter.h +++ b/deps/v8/src/prettyprinter.h @@ -74,6 +74,8 @@ class PrettyPrinter: public AstVisitor { void PrintDeclarations(ZoneList<Declaration*>* declarations); void PrintFunctionLiteral(FunctionLiteral* function); void PrintCaseClause(CaseClause* clause); + + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); }; diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h index 02e146f14a..4e6302c37e 100644 --- a/deps/v8/src/profile-generator-inl.h +++ b/deps/v8/src/profile-generator-inl.h @@ -33,7 +33,7 @@ namespace v8 { namespace internal { -const char* StringsStorage::GetFunctionName(String* name) { +const char* StringsStorage::GetFunctionName(Name* name) { return GetFunctionName(GetName(name)); } @@ -74,7 +74,8 @@ ProfileNode::ProfileNode(ProfileTree* tree, CodeEntry* entry) entry_(entry), total_ticks_(0), self_ticks_(0), - children_(CodeEntriesMatch) { + children_(CodeEntriesMatch), + id_(tree->next_node_id()) { } @@ -84,7 +85,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) { return gc_entry_; case JS: case COMPILER: - case PARALLEL_COMPILER_PROLOGUE: + case PARALLEL_COMPILER: // DOM events handlers are reported as OTHER / EXTERNAL entries. // To avoid confusing people, let's put all these entries into // one bucket. 
@@ -95,55 +96,6 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) { } } - -HeapEntry* HeapGraphEdge::from() const { - return &snapshot()->entries()[from_index_]; -} - - -HeapSnapshot* HeapGraphEdge::snapshot() const { - return to_entry_->snapshot(); -} - - -int HeapEntry::index() const { - return static_cast<int>(this - &snapshot_->entries().first()); -} - - -int HeapEntry::set_children_index(int index) { - children_index_ = index; - int next_index = index + children_count_; - children_count_ = 0; - return next_index; -} - - -HeapGraphEdge** HeapEntry::children_arr() { - ASSERT(children_index_ >= 0); - return &snapshot_->children()[children_index_]; -} - - -SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) { - return kGcRootsFirstSubrootId + delta * kObjectIdStep; -} - - -HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) { - return reinterpret_cast<HeapObject*>( - reinterpret_cast<char*>(kFirstGcSubrootObject) + - delta * HeapObjectsMap::kObjectIdStep); -} - - -int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) { - return static_cast<int>( - (reinterpret_cast<char*>(subroot) - - reinterpret_cast<char*>(kFirstGcSubrootObject)) / - HeapObjectsMap::kObjectIdStep); -} - } } // namespace v8::internal #endif // V8_PROFILE_GENERATOR_INL_H_ diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc index b853f33cb1..ce07213b02 100644 --- a/deps/v8/src/profile-generator.cc +++ b/deps/v8/src/profile-generator.cc @@ -30,7 +30,6 @@ #include "profile-generator-inl.h" #include "global-handles.h" -#include "heap-profiler.h" #include "scopeinfo.h" #include "unicode.h" #include "zone-inl.h" @@ -66,7 +65,9 @@ int TokenEnumerator::GetTokenId(Object* token) { Handle<Object> handle = isolate->global_handles()->Create(token); // handle.location() points to a memory cell holding a pointer // to a token object in the V8's heap. 
- isolate->global_handles()->MakeWeak(handle.location(), this, + isolate->global_handles()->MakeWeak(handle.location(), + this, + NULL, TokenRemovedCallback); token_locations_.Add(handle.location()); token_removed_.Add(false); @@ -74,11 +75,12 @@ int TokenEnumerator::GetTokenId(Object* token) { } -void TokenEnumerator::TokenRemovedCallback(v8::Persistent<v8::Value> handle, +void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate, + v8::Persistent<v8::Value> handle, void* parameter) { reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved( Utils::OpenHandle(*handle).location()); - handle.Dispose(); + handle.Dispose(isolate); } @@ -112,7 +114,7 @@ const char* StringsStorage::GetCopy(const char* src) { OS::StrNCpy(dst, src, len); dst[len] = '\0'; uint32_t hash = - HashSequentialString(dst.start(), len, HEAP->HashSeed()); + StringHasher::HashSequentialString(dst.start(), len, HEAP->HashSeed()); return AddOrDisposeString(dst.start(), hash); } @@ -145,20 +147,23 @@ const char* StringsStorage::GetVFormatted(const char* format, va_list args) { DeleteArray(str.start()); return format; } - uint32_t hash = HashSequentialString( + uint32_t hash = StringHasher::HashSequentialString( str.start(), len, HEAP->HashSeed()); return AddOrDisposeString(str.start(), hash); } -const char* StringsStorage::GetName(String* name) { +const char* StringsStorage::GetName(Name* name) { if (name->IsString()) { - int length = Min(kMaxNameSize, name->length()); + String* str = String::cast(name); + int length = Min(kMaxNameSize, str->length()); SmartArrayPointer<char> data = - name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length); - uint32_t hash = - HashSequentialString(*data, length, name->GetHeap()->HashSeed()); + str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length); + uint32_t hash = StringHasher::HashSequentialString( + *data, length, name->GetHeap()->HashSeed()); return AddOrDisposeString(data.Detach(), hash); + } else if (name->IsSymbol()) { + return "<symbol>"; } return ""; } @@ -291,6 +296,7 @@ ProfileTree::ProfileTree() "", 0, TokenEnumerator::kNoSecurityToken), + next_node_id_(1), root_(new ProfileNode(this, &root_entry_)) { } @@ -301,7 +307,7 @@ ProfileTree::~ProfileTree() { } -void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) { +ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) { ProfileNode* node = root_; for (CodeEntry** entry = path.start() + path.length() - 1; entry != path.start() - 1; @@ -311,6 +317,7 @@ void ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) { } } node->IncrementSelfTicks(); + return node; } @@ -462,28 +469,25 @@ void ProfileTree::ShortPrint() { void CpuProfile::AddPath(const Vector<CodeEntry*>& path) { - top_down_.AddPathFromEnd(path); - bottom_up_.AddPathFromStart(path); + ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path); + if (record_samples_) samples_.Add(top_frame_node); } void CpuProfile::CalculateTotalTicks() { top_down_.CalculateTotalTicks(); - bottom_up_.CalculateTotalTicks(); } void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) { top_down_.SetTickRatePerMs(actual_sampling_rate); - bottom_up_.SetTickRatePerMs(actual_sampling_rate); } CpuProfile* CpuProfile::FilteredClone(int security_token_id) { ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken); - CpuProfile* clone = new CpuProfile(title_, uid_); + CpuProfile* clone = new CpuProfile(title_, uid_, false); clone->top_down_.FilteredClone(&top_down_, security_token_id); - 
clone->bottom_up_.FilteredClone(&bottom_up_, security_token_id); return clone; } @@ -491,16 +495,12 @@ CpuProfile* CpuProfile::FilteredClone(int security_token_id) { void CpuProfile::ShortPrint() { OS::Print("top down "); top_down_.ShortPrint(); - OS::Print("bottom up "); - bottom_up_.ShortPrint(); } void CpuProfile::Print() { OS::Print("[Top down]:\n"); top_down_.Print(); - OS::Print("[Bottom up]:\n"); - bottom_up_.Print(); } @@ -612,7 +612,8 @@ CpuProfilesCollection::~CpuProfilesCollection() { } -bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) { +bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid, + bool record_samples) { ASSERT(uid > 0); current_profiles_semaphore_->Wait(); if (current_profiles_.length() >= kMaxSimultaneousProfiles) { @@ -626,17 +627,12 @@ bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) { return false; } } - current_profiles_.Add(new CpuProfile(title, uid)); + current_profiles_.Add(new CpuProfile(title, uid, record_samples)); current_profiles_semaphore_->Signal(); return true; } -bool CpuProfilesCollection::StartProfiling(String* title, unsigned uid) { - return StartProfiling(GetName(title), uid); -} - - CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id, const char* title, double actual_sampling_rate) { @@ -780,7 +776,7 @@ List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) { CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, - String* name, + Name* name, String* resource_name, int line_number) { CodeEntry* entry = new CodeEntry(tag, @@ -809,7 +805,7 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, const char* name_prefix, - String* name) { + Name* name) { CodeEntry* entry = new CodeEntry(tag, name_prefix, GetName(name), @@ -940,2649 +936,4 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) { } -HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to) - : type_(type), - from_index_(from), - to_index_(to), - name_(name) { - ASSERT(type == kContextVariable - || type == kProperty - || type == kInternal - || type == kShortcut); -} - - -HeapGraphEdge::HeapGraphEdge(Type type, int index, int from, int to) - : type_(type), - from_index_(from), - to_index_(to), - index_(index) { - ASSERT(type == kElement || type == kHidden || type == kWeak); -} - - -void HeapGraphEdge::ReplaceToIndexWithEntry(HeapSnapshot* snapshot) { - to_entry_ = &snapshot->entries()[to_index_]; -} - - -const int HeapEntry::kNoEntry = -1; - -HeapEntry::HeapEntry(HeapSnapshot* snapshot, - Type type, - const char* name, - SnapshotObjectId id, - int self_size) - : type_(type), - children_count_(0), - children_index_(-1), - self_size_(self_size), - id_(id), - snapshot_(snapshot), - name_(name) { } - - -void HeapEntry::SetNamedReference(HeapGraphEdge::Type type, - const char* name, - HeapEntry* entry) { - HeapGraphEdge edge(type, name, this->index(), entry->index()); - snapshot_->edges().Add(edge); - ++children_count_; -} - - -void HeapEntry::SetIndexedReference(HeapGraphEdge::Type type, - int index, - HeapEntry* entry) { - HeapGraphEdge edge(type, index, this->index(), entry->index()); - snapshot_->edges().Add(edge); - ++children_count_; -} - - -Handle<HeapObject> HeapEntry::GetHeapObject() { - return snapshot_->collection()->FindHeapObjectById(id()); -} - - -void HeapEntry::Print( - const char* prefix, const char* 
edge_name, int max_depth, int indent) { - STATIC_CHECK(sizeof(unsigned) == sizeof(id())); - OS::Print("%6d @%6u %*c %s%s: ", - self_size(), id(), indent, ' ', prefix, edge_name); - if (type() != kString) { - OS::Print("%s %.40s\n", TypeAsString(), name_); - } else { - OS::Print("\""); - const char* c = name_; - while (*c && (c - name_) <= 40) { - if (*c != '\n') - OS::Print("%c", *c); - else - OS::Print("\\n"); - ++c; - } - OS::Print("\"\n"); - } - if (--max_depth == 0) return; - Vector<HeapGraphEdge*> ch = children(); - for (int i = 0; i < ch.length(); ++i) { - HeapGraphEdge& edge = *ch[i]; - const char* edge_prefix = ""; - EmbeddedVector<char, 64> index; - const char* edge_name = index.start(); - switch (edge.type()) { - case HeapGraphEdge::kContextVariable: - edge_prefix = "#"; - edge_name = edge.name(); - break; - case HeapGraphEdge::kElement: - OS::SNPrintF(index, "%d", edge.index()); - break; - case HeapGraphEdge::kInternal: - edge_prefix = "$"; - edge_name = edge.name(); - break; - case HeapGraphEdge::kProperty: - edge_name = edge.name(); - break; - case HeapGraphEdge::kHidden: - edge_prefix = "$"; - OS::SNPrintF(index, "%d", edge.index()); - break; - case HeapGraphEdge::kShortcut: - edge_prefix = "^"; - edge_name = edge.name(); - break; - case HeapGraphEdge::kWeak: - edge_prefix = "w"; - OS::SNPrintF(index, "%d", edge.index()); - break; - default: - OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type()); - } - edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2); - } -} - - -const char* HeapEntry::TypeAsString() { - switch (type()) { - case kHidden: return "/hidden/"; - case kObject: return "/object/"; - case kClosure: return "/closure/"; - case kString: return "/string/"; - case kCode: return "/code/"; - case kArray: return "/array/"; - case kRegExp: return "/regexp/"; - case kHeapNumber: return "/number/"; - case kNative: return "/native/"; - case kSynthetic: return "/synthetic/"; - default: return "???"; - } -} - - -// It is very important to keep objects that form a heap snapshot -// as small as possible. -namespace { // Avoid littering the global namespace. 
- -template <size_t ptr_size> struct SnapshotSizeConstants; - -template <> struct SnapshotSizeConstants<4> { - static const int kExpectedHeapGraphEdgeSize = 12; - static const int kExpectedHeapEntrySize = 24; - static const int kExpectedHeapSnapshotsCollectionSize = 96; - static const int kExpectedHeapSnapshotSize = 136; - static const size_t kMaxSerializableSnapshotRawSize = 256 * MB; -}; - -template <> struct SnapshotSizeConstants<8> { - static const int kExpectedHeapGraphEdgeSize = 24; - static const int kExpectedHeapEntrySize = 32; - static const int kExpectedHeapSnapshotsCollectionSize = 144; - static const int kExpectedHeapSnapshotSize = 168; - static const uint64_t kMaxSerializableSnapshotRawSize = - static_cast<uint64_t>(6000) * MB; -}; - -} // namespace - -HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection, - HeapSnapshot::Type type, - const char* title, - unsigned uid) - : collection_(collection), - type_(type), - title_(title), - uid_(uid), - root_index_(HeapEntry::kNoEntry), - gc_roots_index_(HeapEntry::kNoEntry), - natives_root_index_(HeapEntry::kNoEntry), - max_snapshot_js_object_id_(0) { - STATIC_CHECK( - sizeof(HeapGraphEdge) == - SnapshotSizeConstants<kPointerSize>::kExpectedHeapGraphEdgeSize); - STATIC_CHECK( - sizeof(HeapEntry) == - SnapshotSizeConstants<kPointerSize>::kExpectedHeapEntrySize); - for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) { - gc_subroot_indexes_[i] = HeapEntry::kNoEntry; - } -} - - -void HeapSnapshot::Delete() { - collection_->RemoveSnapshot(this); - delete this; -} - - -void HeapSnapshot::RememberLastJSObjectId() { - max_snapshot_js_object_id_ = collection_->last_assigned_id(); -} - - -HeapEntry* HeapSnapshot::AddRootEntry() { - ASSERT(root_index_ == HeapEntry::kNoEntry); - ASSERT(entries_.is_empty()); // Root entry must be the first one. 
- HeapEntry* entry = AddEntry(HeapEntry::kObject, - "", - HeapObjectsMap::kInternalRootObjectId, - 0); - root_index_ = entry->index(); - ASSERT(root_index_ == 0); - return entry; -} - - -HeapEntry* HeapSnapshot::AddGcRootsEntry() { - ASSERT(gc_roots_index_ == HeapEntry::kNoEntry); - HeapEntry* entry = AddEntry(HeapEntry::kObject, - "(GC roots)", - HeapObjectsMap::kGcRootsObjectId, - 0); - gc_roots_index_ = entry->index(); - return entry; -} - - -HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) { - ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry); - ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags); - HeapEntry* entry = AddEntry( - HeapEntry::kObject, - VisitorSynchronization::kTagNames[tag], - HeapObjectsMap::GetNthGcSubrootId(tag), - 0); - gc_subroot_indexes_[tag] = entry->index(); - return entry; -} - - -HeapEntry* HeapSnapshot::AddEntry(HeapEntry::Type type, - const char* name, - SnapshotObjectId id, - int size) { - HeapEntry entry(this, type, name, id, size); - entries_.Add(entry); - return &entries_.last(); -} - - -void HeapSnapshot::FillChildren() { - ASSERT(children().is_empty()); - children().Allocate(edges().length()); - int children_index = 0; - for (int i = 0; i < entries().length(); ++i) { - HeapEntry* entry = &entries()[i]; - children_index = entry->set_children_index(children_index); - } - ASSERT(edges().length() == children_index); - for (int i = 0; i < edges().length(); ++i) { - HeapGraphEdge* edge = &edges()[i]; - edge->ReplaceToIndexWithEntry(this); - edge->from()->add_child(edge); - } -} - - -class FindEntryById { - public: - explicit FindEntryById(SnapshotObjectId id) : id_(id) { } - int operator()(HeapEntry* const* entry) { - if ((*entry)->id() == id_) return 0; - return (*entry)->id() < id_ ? -1 : 1; - } - private: - SnapshotObjectId id_; -}; - - -HeapEntry* HeapSnapshot::GetEntryById(SnapshotObjectId id) { - List<HeapEntry*>* entries_by_id = GetSortedEntriesList(); - // Perform a binary search by id. - int index = SortedListBSearch(*entries_by_id, FindEntryById(id)); - if (index == -1) - return NULL; - return entries_by_id->at(index); -} - - -template<class T> -static int SortByIds(const T* entry1_ptr, - const T* entry2_ptr) { - if ((*entry1_ptr)->id() == (*entry2_ptr)->id()) return 0; - return (*entry1_ptr)->id() < (*entry2_ptr)->id() ? -1 : 1; -} - - -List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() { - if (sorted_entries_.is_empty()) { - sorted_entries_.Allocate(entries_.length()); - for (int i = 0; i < entries_.length(); ++i) { - sorted_entries_[i] = &entries_[i]; - } - sorted_entries_.Sort(SortByIds); - } - return &sorted_entries_; -} - - -void HeapSnapshot::Print(int max_depth) { - root()->Print("", "", max_depth, 0); -} - - -template<typename T, class P> -static size_t GetMemoryUsedByList(const List<T, P>& list) { - return list.length() * sizeof(T) + sizeof(list); -} - - -size_t HeapSnapshot::RawSnapshotSize() const { - STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::kExpectedHeapSnapshotSize == - sizeof(HeapSnapshot)); // NOLINT - return - sizeof(*this) + - GetMemoryUsedByList(entries_) + - GetMemoryUsedByList(edges_) + - GetMemoryUsedByList(children_) + - GetMemoryUsedByList(sorted_entries_); -} - - -// We split IDs on evens for embedder objects (see -// HeapObjectsMap::GenerateId) and odds for native objects. 
-const SnapshotObjectId HeapObjectsMap::kInternalRootObjectId = 1; -const SnapshotObjectId HeapObjectsMap::kGcRootsObjectId = - HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep; -const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId = - HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep; -const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId = - HeapObjectsMap::kGcRootsFirstSubrootId + - VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep; - -HeapObjectsMap::HeapObjectsMap() - : next_id_(kFirstAvailableObjectId), - entries_map_(AddressesMatch) { - // This dummy element solves a problem with entries_map_. - // When we do lookup in HashMap we see no difference between two cases: - // it has an entry with NULL as the value or it has created - // a new entry on the fly with NULL as the default value. - // With such dummy element we have a guaranty that all entries_map_ entries - // will have the value field grater than 0. - // This fact is using in MoveObject method. - entries_.Add(EntryInfo(0, NULL, 0)); -} - - -void HeapObjectsMap::SnapshotGenerationFinished() { - RemoveDeadEntries(); -} - - -void HeapObjectsMap::MoveObject(Address from, Address to) { - ASSERT(to != NULL); - ASSERT(from != NULL); - if (from == to) return; - void* from_value = entries_map_.Remove(from, AddressHash(from)); - if (from_value == NULL) return; - int from_entry_info_index = - static_cast<int>(reinterpret_cast<intptr_t>(from_value)); - entries_.at(from_entry_info_index).addr = to; - HashMap::Entry* to_entry = entries_map_.Lookup(to, AddressHash(to), true); - if (to_entry->value != NULL) { - int to_entry_info_index = - static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value)); - // Without this operation we will have two EntryInfo's with the same - // value in addr field. It is bad because later at RemoveDeadEntries - // one of this entry will be removed with the corresponding entries_map_ - // entry. 
- entries_.at(to_entry_info_index).addr = NULL; - } - to_entry->value = reinterpret_cast<void*>(from_entry_info_index); -} - - -SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) { - HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), false); - if (entry == NULL) return 0; - int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); - EntryInfo& entry_info = entries_.at(entry_index); - ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); - return entry_info.id; -} - - -SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr, - unsigned int size) { - ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); - HashMap::Entry* entry = entries_map_.Lookup(addr, AddressHash(addr), true); - if (entry->value != NULL) { - int entry_index = - static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); - EntryInfo& entry_info = entries_.at(entry_index); - entry_info.accessed = true; - entry_info.size = size; - return entry_info.id; - } - entry->value = reinterpret_cast<void*>(entries_.length()); - SnapshotObjectId id = next_id_; - next_id_ += kObjectIdStep; - entries_.Add(EntryInfo(id, addr, size)); - ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); - return id; -} - - -void HeapObjectsMap::StopHeapObjectsTracking() { - time_intervals_.Clear(); -} - -void HeapObjectsMap::UpdateHeapObjectsMap() { - HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, - "HeapSnapshotsCollection::UpdateHeapObjectsMap"); - HeapIterator iterator; - for (HeapObject* obj = iterator.next(); - obj != NULL; - obj = iterator.next()) { - FindOrAddEntry(obj->address(), obj->Size()); - } - RemoveDeadEntries(); -} - - -SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) { - UpdateHeapObjectsMap(); - time_intervals_.Add(TimeInterval(next_id_)); - int prefered_chunk_size = stream->GetChunkSize(); - List<v8::HeapStatsUpdate> stats_buffer; - ASSERT(!entries_.is_empty()); - EntryInfo* entry_info = &entries_.first(); - EntryInfo* end_entry_info = &entries_.last() + 1; - for (int time_interval_index = 0; - time_interval_index < time_intervals_.length(); - ++time_interval_index) { - TimeInterval& time_interval = time_intervals_[time_interval_index]; - SnapshotObjectId time_interval_id = time_interval.id; - uint32_t entries_size = 0; - EntryInfo* start_entry_info = entry_info; - while (entry_info < end_entry_info && entry_info->id < time_interval_id) { - entries_size += entry_info->size; - ++entry_info; - } - uint32_t entries_count = - static_cast<uint32_t>(entry_info - start_entry_info); - if (time_interval.count != entries_count || - time_interval.size != entries_size) { - stats_buffer.Add(v8::HeapStatsUpdate( - time_interval_index, - time_interval.count = entries_count, - time_interval.size = entries_size)); - if (stats_buffer.length() >= prefered_chunk_size) { - OutputStream::WriteResult result = stream->WriteHeapStatsChunk( - &stats_buffer.first(), stats_buffer.length()); - if (result == OutputStream::kAbort) return last_assigned_id(); - stats_buffer.Clear(); - } - } - } - ASSERT(entry_info == end_entry_info); - if (!stats_buffer.is_empty()) { - OutputStream::WriteResult result = stream->WriteHeapStatsChunk( - &stats_buffer.first(), stats_buffer.length()); - if (result == OutputStream::kAbort) return last_assigned_id(); - } - stream->EndOfStream(); - return last_assigned_id(); -} - - -void HeapObjectsMap::RemoveDeadEntries() { - ASSERT(entries_.length() > 0 && - entries_.at(0).id == 0 && - 
entries_.at(0).addr == NULL); - int first_free_entry = 1; - for (int i = 1; i < entries_.length(); ++i) { - EntryInfo& entry_info = entries_.at(i); - if (entry_info.accessed) { - if (first_free_entry != i) { - entries_.at(first_free_entry) = entry_info; - } - entries_.at(first_free_entry).accessed = false; - HashMap::Entry* entry = entries_map_.Lookup( - entry_info.addr, AddressHash(entry_info.addr), false); - ASSERT(entry); - entry->value = reinterpret_cast<void*>(first_free_entry); - ++first_free_entry; - } else { - if (entry_info.addr) { - entries_map_.Remove(entry_info.addr, AddressHash(entry_info.addr)); - } - } - } - entries_.Rewind(first_free_entry); - ASSERT(static_cast<uint32_t>(entries_.length()) - 1 == - entries_map_.occupancy()); -} - - -SnapshotObjectId HeapObjectsMap::GenerateId(v8::RetainedObjectInfo* info) { - SnapshotObjectId id = static_cast<SnapshotObjectId>(info->GetHash()); - const char* label = info->GetLabel(); - id ^= HashSequentialString(label, - static_cast<int>(strlen(label)), - HEAP->HashSeed()); - intptr_t element_count = info->GetElementCount(); - if (element_count != -1) - id ^= ComputeIntegerHash(static_cast<uint32_t>(element_count), - v8::internal::kZeroHashSeed); - return id << 1; -} - - -size_t HeapObjectsMap::GetUsedMemorySize() const { - return - sizeof(*this) + - sizeof(HashMap::Entry) * entries_map_.capacity() + - GetMemoryUsedByList(entries_) + - GetMemoryUsedByList(time_intervals_); -} - - -HeapSnapshotsCollection::HeapSnapshotsCollection() - : is_tracking_objects_(false), - snapshots_uids_(HeapSnapshotsMatch), - token_enumerator_(new TokenEnumerator()) { -} - - -static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) { - delete *snapshot_ptr; -} - - -HeapSnapshotsCollection::~HeapSnapshotsCollection() { - delete token_enumerator_; - snapshots_.Iterate(DeleteHeapSnapshot); -} - - -HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(HeapSnapshot::Type type, - const char* name, - unsigned uid) { - is_tracking_objects_ = true; // Start watching for heap objects moves. - return new HeapSnapshot(this, type, name, uid); -} - - -void HeapSnapshotsCollection::SnapshotGenerationFinished( - HeapSnapshot* snapshot) { - ids_.SnapshotGenerationFinished(); - if (snapshot != NULL) { - snapshots_.Add(snapshot); - HashMap::Entry* entry = - snapshots_uids_.Lookup(reinterpret_cast<void*>(snapshot->uid()), - static_cast<uint32_t>(snapshot->uid()), - true); - ASSERT(entry->value == NULL); - entry->value = snapshot; - } -} - - -HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) { - HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid), - static_cast<uint32_t>(uid), - false); - return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL; -} - - -void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) { - snapshots_.RemoveElement(snapshot); - unsigned uid = snapshot->uid(); - snapshots_uids_.Remove(reinterpret_cast<void*>(uid), - static_cast<uint32_t>(uid)); -} - - -Handle<HeapObject> HeapSnapshotsCollection::FindHeapObjectById( - SnapshotObjectId id) { - // First perform a full GC in order to avoid dead objects. - HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, - "HeapSnapshotsCollection::FindHeapObjectById"); - AssertNoAllocation no_allocation; - HeapObject* object = NULL; - HeapIterator iterator(HeapIterator::kFilterUnreachable); - // Make sure that object with the given id is still reachable. 
- for (HeapObject* obj = iterator.next(); - obj != NULL; - obj = iterator.next()) { - if (ids_.FindEntry(obj->address()) == id) { - ASSERT(object == NULL); - object = obj; - // Can't break -- kFilterUnreachable requires full heap traversal. - } - } - return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>(); -} - - -size_t HeapSnapshotsCollection::GetUsedMemorySize() const { - STATIC_CHECK(SnapshotSizeConstants<kPointerSize>:: - kExpectedHeapSnapshotsCollectionSize == - sizeof(HeapSnapshotsCollection)); // NOLINT - size_t size = sizeof(*this); - size += names_.GetUsedMemorySize(); - size += ids_.GetUsedMemorySize(); - size += sizeof(HashMap::Entry) * snapshots_uids_.capacity(); - size += GetMemoryUsedByList(snapshots_); - for (int i = 0; i < snapshots_.length(); ++i) { - size += snapshots_[i]->RawSnapshotSize(); - } - return size; -} - - -HeapEntriesMap::HeapEntriesMap() - : entries_(HeapThingsMatch) { -} - - -int HeapEntriesMap::Map(HeapThing thing) { - HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), false); - if (cache_entry == NULL) return HeapEntry::kNoEntry; - return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value)); -} - - -void HeapEntriesMap::Pair(HeapThing thing, int entry) { - HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true); - ASSERT(cache_entry->value == NULL); - cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry)); -} - - -HeapObjectsSet::HeapObjectsSet() - : entries_(HeapEntriesMap::HeapThingsMatch) { -} - - -void HeapObjectsSet::Clear() { - entries_.Clear(); -} - - -bool HeapObjectsSet::Contains(Object* obj) { - if (!obj->IsHeapObject()) return false; - HeapObject* object = HeapObject::cast(obj); - return entries_.Lookup(object, HeapEntriesMap::Hash(object), false) != NULL; -} - - -void HeapObjectsSet::Insert(Object* obj) { - if (!obj->IsHeapObject()) return; - HeapObject* object = HeapObject::cast(obj); - entries_.Lookup(object, HeapEntriesMap::Hash(object), true); -} - - -const char* HeapObjectsSet::GetTag(Object* obj) { - HeapObject* object = HeapObject::cast(obj); - HashMap::Entry* cache_entry = - entries_.Lookup(object, HeapEntriesMap::Hash(object), false); - return cache_entry != NULL - ? 
reinterpret_cast<const char*>(cache_entry->value) - : NULL; -} - - -void HeapObjectsSet::SetTag(Object* obj, const char* tag) { - if (!obj->IsHeapObject()) return; - HeapObject* object = HeapObject::cast(obj); - HashMap::Entry* cache_entry = - entries_.Lookup(object, HeapEntriesMap::Hash(object), true); - cache_entry->value = const_cast<char*>(tag); -} - - -HeapObject* const V8HeapExplorer::kInternalRootObject = - reinterpret_cast<HeapObject*>( - static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId)); -HeapObject* const V8HeapExplorer::kGcRootsObject = - reinterpret_cast<HeapObject*>( - static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId)); -HeapObject* const V8HeapExplorer::kFirstGcSubrootObject = - reinterpret_cast<HeapObject*>( - static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId)); -HeapObject* const V8HeapExplorer::kLastGcSubrootObject = - reinterpret_cast<HeapObject*>( - static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId)); - - -V8HeapExplorer::V8HeapExplorer( - HeapSnapshot* snapshot, - SnapshottingProgressReportingInterface* progress) - : heap_(Isolate::Current()->heap()), - snapshot_(snapshot), - collection_(snapshot_->collection()), - progress_(progress), - filler_(NULL) { -} - - -V8HeapExplorer::~V8HeapExplorer() { -} - - -HeapEntry* V8HeapExplorer::AllocateEntry(HeapThing ptr) { - return AddEntry(reinterpret_cast<HeapObject*>(ptr)); -} - - -HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) { - if (object == kInternalRootObject) { - snapshot_->AddRootEntry(); - return snapshot_->root(); - } else if (object == kGcRootsObject) { - HeapEntry* entry = snapshot_->AddGcRootsEntry(); - return entry; - } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) { - HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object)); - return entry; - } else if (object->IsJSFunction()) { - JSFunction* func = JSFunction::cast(object); - SharedFunctionInfo* shared = func->shared(); - const char* name = shared->bound() ? "native_bind" : - collection_->names()->GetName(String::cast(shared->name())); - return AddEntry(object, HeapEntry::kClosure, name); - } else if (object->IsJSRegExp()) { - JSRegExp* re = JSRegExp::cast(object); - return AddEntry(object, - HeapEntry::kRegExp, - collection_->names()->GetName(re->Pattern())); - } else if (object->IsJSObject()) { - const char* name = collection_->names()->GetName( - GetConstructorName(JSObject::cast(object))); - if (object->IsJSGlobalObject()) { - const char* tag = objects_tags_.GetTag(object); - if (tag != NULL) { - name = collection_->names()->GetFormatted("%s / %s", name, tag); - } - } - return AddEntry(object, HeapEntry::kObject, name); - } else if (object->IsString()) { - return AddEntry(object, - HeapEntry::kString, - collection_->names()->GetName(String::cast(object))); - } else if (object->IsCode()) { - return AddEntry(object, HeapEntry::kCode, ""); - } else if (object->IsSharedFunctionInfo()) { - String* name = String::cast(SharedFunctionInfo::cast(object)->name()); - return AddEntry(object, - HeapEntry::kCode, - collection_->names()->GetName(name)); - } else if (object->IsScript()) { - Object* name = Script::cast(object)->name(); - return AddEntry(object, - HeapEntry::kCode, - name->IsString() - ? 
collection_->names()->GetName(String::cast(name)) - : ""); - } else if (object->IsNativeContext()) { - return AddEntry(object, HeapEntry::kHidden, "system / NativeContext"); - } else if (object->IsContext()) { - return AddEntry(object, HeapEntry::kHidden, "system / Context"); - } else if (object->IsFixedArray() || - object->IsFixedDoubleArray() || - object->IsByteArray() || - object->IsExternalArray()) { - return AddEntry(object, HeapEntry::kArray, ""); - } else if (object->IsHeapNumber()) { - return AddEntry(object, HeapEntry::kHeapNumber, "number"); - } - return AddEntry(object, HeapEntry::kHidden, GetSystemEntryName(object)); -} - - -HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object, - HeapEntry::Type type, - const char* name) { - int object_size = object->Size(); - SnapshotObjectId object_id = - collection_->GetObjectId(object->address(), object_size); - return snapshot_->AddEntry(type, name, object_id, object_size); -} - - -class GcSubrootsEnumerator : public ObjectVisitor { - public: - GcSubrootsEnumerator( - SnapshotFillerInterface* filler, V8HeapExplorer* explorer) - : filler_(filler), - explorer_(explorer), - previous_object_count_(0), - object_count_(0) { - } - void VisitPointers(Object** start, Object** end) { - object_count_ += end - start; - } - void Synchronize(VisitorSynchronization::SyncTag tag) { - // Skip empty subroots. - if (previous_object_count_ != object_count_) { - previous_object_count_ = object_count_; - filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_); - } - } - private: - SnapshotFillerInterface* filler_; - V8HeapExplorer* explorer_; - intptr_t previous_object_count_; - intptr_t object_count_; -}; - - -void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) { - filler->AddEntry(kInternalRootObject, this); - filler->AddEntry(kGcRootsObject, this); - GcSubrootsEnumerator enumerator(filler, this); - heap_->IterateRoots(&enumerator, VISIT_ALL); -} - - -const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) { - switch (object->map()->instance_type()) { - case MAP_TYPE: return "system / Map"; - case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell"; - case FOREIGN_TYPE: return "system / Foreign"; - case ODDBALL_TYPE: return "system / Oddball"; -#define MAKE_STRUCT_CASE(NAME, Name, name) \ - case NAME##_TYPE: return "system / "#Name; - STRUCT_LIST(MAKE_STRUCT_CASE) -#undef MAKE_STRUCT_CASE - default: return "system"; - } -} - - -int V8HeapExplorer::EstimateObjectsCount(HeapIterator* iterator) { - int objects_count = 0; - for (HeapObject* obj = iterator->next(); - obj != NULL; - obj = iterator->next()) { - objects_count++; - } - return objects_count; -} - - -class IndexedReferencesExtractor : public ObjectVisitor { - public: - IndexedReferencesExtractor(V8HeapExplorer* generator, - HeapObject* parent_obj, - int parent) - : generator_(generator), - parent_obj_(parent_obj), - parent_(parent), - next_index_(1) { - } - void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) { - if (CheckVisitedAndUnmark(p)) continue; - generator_->SetHiddenReference(parent_obj_, parent_, next_index_++, *p); - } - } - static void MarkVisitedField(HeapObject* obj, int offset) { - if (offset < 0) return; - Address field = obj->address() + offset; - ASSERT(!Memory::Object_at(field)->IsFailure()); - ASSERT(Memory::Object_at(field)->IsHeapObject()); - *field |= kFailureTag; - } - - private: - bool CheckVisitedAndUnmark(Object** field) { - if ((*field)->IsFailure()) { - intptr_t untagged = 
reinterpret_cast<intptr_t>(*field) & ~kFailureTagMask; - *field = reinterpret_cast<Object*>(untagged | kHeapObjectTag); - ASSERT((*field)->IsHeapObject()); - return true; - } - return false; - } - V8HeapExplorer* generator_; - HeapObject* parent_obj_; - int parent_; - int next_index_; -}; - - -void V8HeapExplorer::ExtractReferences(HeapObject* obj) { - HeapEntry* heap_entry = GetEntry(obj); - if (heap_entry == NULL) return; // No interest in this object. - int entry = heap_entry->index(); - - bool extract_indexed_refs = true; - if (obj->IsJSGlobalProxy()) { - ExtractJSGlobalProxyReferences(JSGlobalProxy::cast(obj)); - } else if (obj->IsJSObject()) { - ExtractJSObjectReferences(entry, JSObject::cast(obj)); - } else if (obj->IsString()) { - ExtractStringReferences(entry, String::cast(obj)); - extract_indexed_refs = false; - } else if (obj->IsContext()) { - ExtractContextReferences(entry, Context::cast(obj)); - } else if (obj->IsMap()) { - ExtractMapReferences(entry, Map::cast(obj)); - } else if (obj->IsSharedFunctionInfo()) { - ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj)); - } else if (obj->IsScript()) { - ExtractScriptReferences(entry, Script::cast(obj)); - } else if (obj->IsCodeCache()) { - ExtractCodeCacheReferences(entry, CodeCache::cast(obj)); - } else if (obj->IsCode()) { - ExtractCodeReferences(entry, Code::cast(obj)); - } else if (obj->IsJSGlobalPropertyCell()) { - ExtractJSGlobalPropertyCellReferences( - entry, JSGlobalPropertyCell::cast(obj)); - extract_indexed_refs = false; - } - if (extract_indexed_refs) { - SetInternalReference(obj, entry, "map", obj->map(), HeapObject::kMapOffset); - IndexedReferencesExtractor refs_extractor(this, obj, entry); - obj->Iterate(&refs_extractor); - } -} - - -void V8HeapExplorer::ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy) { - // We need to reference JS global objects from snapshot's root. - // We use JSGlobalProxy because this is what embedder (e.g. browser) - // uses for the global object. - Object* object = proxy->map()->prototype(); - bool is_debug_object = false; -#ifdef ENABLE_DEBUGGER_SUPPORT - is_debug_object = object->IsGlobalObject() && - Isolate::Current()->debug()->IsDebugGlobal(GlobalObject::cast(object)); -#endif - if (!is_debug_object) { - SetUserGlobalReference(object); - } -} - - -void V8HeapExplorer::ExtractJSObjectReferences( - int entry, JSObject* js_obj) { - HeapObject* obj = js_obj; - ExtractClosureReferences(js_obj, entry); - ExtractPropertyReferences(js_obj, entry); - ExtractElementReferences(js_obj, entry); - ExtractInternalReferences(js_obj, entry); - SetPropertyReference( - obj, entry, heap_->Proto_symbol(), js_obj->GetPrototype()); - if (obj->IsJSFunction()) { - JSFunction* js_fun = JSFunction::cast(js_obj); - Object* proto_or_map = js_fun->prototype_or_initial_map(); - if (!proto_or_map->IsTheHole()) { - if (!proto_or_map->IsMap()) { - SetPropertyReference( - obj, entry, - heap_->prototype_symbol(), proto_or_map, - NULL, - JSFunction::kPrototypeOrInitialMapOffset); - } else { - SetPropertyReference( - obj, entry, - heap_->prototype_symbol(), js_fun->prototype()); - } - } - SharedFunctionInfo* shared_info = js_fun->shared(); - // JSFunction has either bindings or literals and never both. - bool bound = shared_info->bound(); - TagObject(js_fun->literals_or_bindings(), - bound ? "(function bindings)" : "(function literals)"); - SetInternalReference(js_fun, entry, - bound ? 
"bindings" : "literals", - js_fun->literals_or_bindings(), - JSFunction::kLiteralsOffset); - TagObject(shared_info, "(shared function info)"); - SetInternalReference(js_fun, entry, - "shared", shared_info, - JSFunction::kSharedFunctionInfoOffset); - TagObject(js_fun->unchecked_context(), "(context)"); - SetInternalReference(js_fun, entry, - "context", js_fun->unchecked_context(), - JSFunction::kContextOffset); - for (int i = JSFunction::kNonWeakFieldsEndOffset; - i < JSFunction::kSize; - i += kPointerSize) { - SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i); - } - } else if (obj->IsGlobalObject()) { - GlobalObject* global_obj = GlobalObject::cast(obj); - SetInternalReference(global_obj, entry, - "builtins", global_obj->builtins(), - GlobalObject::kBuiltinsOffset); - SetInternalReference(global_obj, entry, - "native_context", global_obj->native_context(), - GlobalObject::kNativeContextOffset); - SetInternalReference(global_obj, entry, - "global_receiver", global_obj->global_receiver(), - GlobalObject::kGlobalReceiverOffset); - } - TagObject(js_obj->properties(), "(object properties)"); - SetInternalReference(obj, entry, - "properties", js_obj->properties(), - JSObject::kPropertiesOffset); - TagObject(js_obj->elements(), "(object elements)"); - SetInternalReference(obj, entry, - "elements", js_obj->elements(), - JSObject::kElementsOffset); -} - - -void V8HeapExplorer::ExtractStringReferences(int entry, String* string) { - if (string->IsConsString()) { - ConsString* cs = ConsString::cast(string); - SetInternalReference(cs, entry, "first", cs->first()); - SetInternalReference(cs, entry, "second", cs->second()); - } else if (string->IsSlicedString()) { - SlicedString* ss = SlicedString::cast(string); - SetInternalReference(ss, entry, "parent", ss->parent()); - } -} - - -void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) { -#define EXTRACT_CONTEXT_FIELD(index, type, name) \ - SetInternalReference(context, entry, #name, context->get(Context::index), \ - FixedArray::OffsetOfElementAt(Context::index)); - EXTRACT_CONTEXT_FIELD(CLOSURE_INDEX, JSFunction, closure); - EXTRACT_CONTEXT_FIELD(PREVIOUS_INDEX, Context, previous); - EXTRACT_CONTEXT_FIELD(EXTENSION_INDEX, Object, extension); - EXTRACT_CONTEXT_FIELD(GLOBAL_OBJECT_INDEX, GlobalObject, global); - if (context->IsNativeContext()) { - TagObject(context->jsfunction_result_caches(), - "(context func. result caches)"); - TagObject(context->normalized_map_cache(), "(context norm. 
map cache)"); - TagObject(context->runtime_context(), "(runtime context)"); - TagObject(context->data(), "(context data)"); - NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD); -#undef EXTRACT_CONTEXT_FIELD - for (int i = Context::FIRST_WEAK_SLOT; - i < Context::NATIVE_CONTEXT_SLOTS; - ++i) { - SetWeakReference(context, entry, i, context->get(i), - FixedArray::OffsetOfElementAt(i)); - } - } -} - - -void V8HeapExplorer::ExtractMapReferences(int entry, Map* map) { - SetInternalReference(map, entry, - "prototype", map->prototype(), Map::kPrototypeOffset); - SetInternalReference(map, entry, - "constructor", map->constructor(), - Map::kConstructorOffset); - if (map->HasTransitionArray()) { - TransitionArray* transitions = map->transitions(); - - Object* back_pointer = transitions->back_pointer_storage(); - TagObject(transitions->back_pointer_storage(), "(back pointer)"); - SetInternalReference(transitions, entry, - "backpointer", back_pointer, - TransitionArray::kBackPointerStorageOffset); - IndexedReferencesExtractor transitions_refs(this, transitions, entry); - transitions->Iterate(&transitions_refs); - - TagObject(transitions, "(transition array)"); - SetInternalReference(map, entry, - "transitions", transitions, - Map::kTransitionsOrBackPointerOffset); - } else { - Object* back_pointer = map->GetBackPointer(); - TagObject(back_pointer, "(back pointer)"); - SetInternalReference(map, entry, - "backpointer", back_pointer, - Map::kTransitionsOrBackPointerOffset); - } - DescriptorArray* descriptors = map->instance_descriptors(); - TagObject(descriptors, "(map descriptors)"); - SetInternalReference(map, entry, - "descriptors", descriptors, - Map::kDescriptorsOffset); - - SetInternalReference(map, entry, - "code_cache", map->code_cache(), - Map::kCodeCacheOffset); -} - - -void V8HeapExplorer::ExtractSharedFunctionInfoReferences( - int entry, SharedFunctionInfo* shared) { - HeapObject* obj = shared; - SetInternalReference(obj, entry, - "name", shared->name(), - SharedFunctionInfo::kNameOffset); - TagObject(shared->code(), "(code)"); - SetInternalReference(obj, entry, - "code", shared->code(), - SharedFunctionInfo::kCodeOffset); - TagObject(shared->scope_info(), "(function scope info)"); - SetInternalReference(obj, entry, - "scope_info", shared->scope_info(), - SharedFunctionInfo::kScopeInfoOffset); - SetInternalReference(obj, entry, - "instance_class_name", shared->instance_class_name(), - SharedFunctionInfo::kInstanceClassNameOffset); - SetInternalReference(obj, entry, - "script", shared->script(), - SharedFunctionInfo::kScriptOffset); - TagObject(shared->construct_stub(), "(code)"); - SetInternalReference(obj, entry, - "construct_stub", shared->construct_stub(), - SharedFunctionInfo::kConstructStubOffset); - SetInternalReference(obj, entry, - "function_data", shared->function_data(), - SharedFunctionInfo::kFunctionDataOffset); - SetInternalReference(obj, entry, - "debug_info", shared->debug_info(), - SharedFunctionInfo::kDebugInfoOffset); - SetInternalReference(obj, entry, - "inferred_name", shared->inferred_name(), - SharedFunctionInfo::kInferredNameOffset); - SetInternalReference(obj, entry, - "this_property_assignments", - shared->this_property_assignments(), - SharedFunctionInfo::kThisPropertyAssignmentsOffset); - SetWeakReference(obj, entry, - 1, shared->initial_map(), - SharedFunctionInfo::kInitialMapOffset); -} - - -void V8HeapExplorer::ExtractScriptReferences(int entry, Script* script) { - HeapObject* obj = script; - SetInternalReference(obj, entry, - "source", script->source(), - 
Script::kSourceOffset); - SetInternalReference(obj, entry, - "name", script->name(), - Script::kNameOffset); - SetInternalReference(obj, entry, - "data", script->data(), - Script::kDataOffset); - SetInternalReference(obj, entry, - "context_data", script->context_data(), - Script::kContextOffset); - TagObject(script->line_ends(), "(script line ends)"); - SetInternalReference(obj, entry, - "line_ends", script->line_ends(), - Script::kLineEndsOffset); -} - - -void V8HeapExplorer::ExtractCodeCacheReferences( - int entry, CodeCache* code_cache) { - TagObject(code_cache->default_cache(), "(default code cache)"); - SetInternalReference(code_cache, entry, - "default_cache", code_cache->default_cache(), - CodeCache::kDefaultCacheOffset); - TagObject(code_cache->normal_type_cache(), "(code type cache)"); - SetInternalReference(code_cache, entry, - "type_cache", code_cache->normal_type_cache(), - CodeCache::kNormalTypeCacheOffset); -} - - -void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) { - TagObject(code->relocation_info(), "(code relocation info)"); - SetInternalReference(code, entry, - "relocation_info", code->relocation_info(), - Code::kRelocationInfoOffset); - SetInternalReference(code, entry, - "handler_table", code->handler_table(), - Code::kHandlerTableOffset); - TagObject(code->deoptimization_data(), "(code deopt data)"); - SetInternalReference(code, entry, - "deoptimization_data", code->deoptimization_data(), - Code::kDeoptimizationDataOffset); - SetInternalReference(code, entry, - "type_feedback_info", code->type_feedback_info(), - Code::kTypeFeedbackInfoOffset); - SetInternalReference(code, entry, - "gc_metadata", code->gc_metadata(), - Code::kGCMetadataOffset); -} - - -void V8HeapExplorer::ExtractJSGlobalPropertyCellReferences( - int entry, JSGlobalPropertyCell* cell) { - SetInternalReference(cell, entry, "value", cell->value()); -} - - -void V8HeapExplorer::ExtractClosureReferences(JSObject* js_obj, int entry) { - if (!js_obj->IsJSFunction()) return; - - JSFunction* func = JSFunction::cast(js_obj); - if (func->shared()->bound()) { - FixedArray* bindings = func->function_bindings(); - SetNativeBindReference(js_obj, entry, "bound_this", - bindings->get(JSFunction::kBoundThisIndex)); - SetNativeBindReference(js_obj, entry, "bound_function", - bindings->get(JSFunction::kBoundFunctionIndex)); - for (int i = JSFunction::kBoundArgumentsStartIndex; - i < bindings->length(); i++) { - const char* reference_name = collection_->names()->GetFormatted( - "bound_argument_%d", - i - JSFunction::kBoundArgumentsStartIndex); - SetNativeBindReference(js_obj, entry, reference_name, - bindings->get(i)); - } - } else { - Context* context = func->context()->declaration_context(); - ScopeInfo* scope_info = context->closure()->shared()->scope_info(); - // Add context allocated locals. - int context_locals = scope_info->ContextLocalCount(); - for (int i = 0; i < context_locals; ++i) { - String* local_name = scope_info->ContextLocalName(i); - int idx = Context::MIN_CONTEXT_SLOTS + i; - SetClosureReference(js_obj, entry, local_name, context->get(idx)); - } - - // Add function variable. 
- if (scope_info->HasFunctionName()) { - String* name = scope_info->FunctionName(); - VariableMode mode; - int idx = scope_info->FunctionContextSlotIndex(name, &mode); - if (idx >= 0) { - SetClosureReference(js_obj, entry, name, context->get(idx)); - } - } - } -} - - -void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) { - if (js_obj->HasFastProperties()) { - DescriptorArray* descs = js_obj->map()->instance_descriptors(); - int real_size = js_obj->map()->NumberOfOwnDescriptors(); - for (int i = 0; i < descs->number_of_descriptors(); i++) { - if (descs->GetDetails(i).descriptor_index() > real_size) continue; - switch (descs->GetType(i)) { - case FIELD: { - int index = descs->GetFieldIndex(i); - - String* k = descs->GetKey(i); - if (index < js_obj->map()->inobject_properties()) { - Object* value = js_obj->InObjectPropertyAt(index); - if (k != heap_->hidden_symbol()) { - SetPropertyReference( - js_obj, entry, - k, value, - NULL, - js_obj->GetInObjectPropertyOffset(index)); - } else { - TagObject(value, "(hidden properties)"); - SetInternalReference( - js_obj, entry, - "hidden_properties", value, - js_obj->GetInObjectPropertyOffset(index)); - } - } else { - Object* value = js_obj->FastPropertyAt(index); - if (k != heap_->hidden_symbol()) { - SetPropertyReference(js_obj, entry, k, value); - } else { - TagObject(value, "(hidden properties)"); - SetInternalReference(js_obj, entry, "hidden_properties", value); - } - } - break; - } - case CONSTANT_FUNCTION: - SetPropertyReference( - js_obj, entry, - descs->GetKey(i), descs->GetConstantFunction(i)); - break; - case CALLBACKS: { - Object* callback_obj = descs->GetValue(i); - if (callback_obj->IsAccessorPair()) { - AccessorPair* accessors = AccessorPair::cast(callback_obj); - if (Object* getter = accessors->getter()) { - SetPropertyReference(js_obj, entry, descs->GetKey(i), - getter, "get-%s"); - } - if (Object* setter = accessors->setter()) { - SetPropertyReference(js_obj, entry, descs->GetKey(i), - setter, "set-%s"); - } - } - break; - } - case NORMAL: // only in slow mode - case HANDLER: // only in lookup results, not in descriptors - case INTERCEPTOR: // only in lookup results, not in descriptors - break; - case TRANSITION: - case NONEXISTENT: - UNREACHABLE(); - break; - } - } - } else { - StringDictionary* dictionary = js_obj->property_dictionary(); - int length = dictionary->Capacity(); - for (int i = 0; i < length; ++i) { - Object* k = dictionary->KeyAt(i); - if (dictionary->IsKey(k)) { - Object* target = dictionary->ValueAt(i); - // We assume that global objects can only have slow properties. - Object* value = target->IsJSGlobalPropertyCell() - ? JSGlobalPropertyCell::cast(target)->value() - : target; - if (k != heap_->hidden_symbol()) { - SetPropertyReference(js_obj, entry, String::cast(k), value); - } else { - TagObject(value, "(hidden properties)"); - SetInternalReference(js_obj, entry, "hidden_properties", value); - } - } - } - } -} - - -void V8HeapExplorer::ExtractElementReferences(JSObject* js_obj, int entry) { - if (js_obj->HasFastObjectElements()) { - FixedArray* elements = FixedArray::cast(js_obj->elements()); - int length = js_obj->IsJSArray() ? 
- Smi::cast(JSArray::cast(js_obj)->length())->value() : - elements->length(); - for (int i = 0; i < length; ++i) { - if (!elements->get(i)->IsTheHole()) { - SetElementReference(js_obj, entry, i, elements->get(i)); - } - } - } else if (js_obj->HasDictionaryElements()) { - SeededNumberDictionary* dictionary = js_obj->element_dictionary(); - int length = dictionary->Capacity(); - for (int i = 0; i < length; ++i) { - Object* k = dictionary->KeyAt(i); - if (dictionary->IsKey(k)) { - ASSERT(k->IsNumber()); - uint32_t index = static_cast<uint32_t>(k->Number()); - SetElementReference(js_obj, entry, index, dictionary->ValueAt(i)); - } - } - } -} - - -void V8HeapExplorer::ExtractInternalReferences(JSObject* js_obj, int entry) { - int length = js_obj->GetInternalFieldCount(); - for (int i = 0; i < length; ++i) { - Object* o = js_obj->GetInternalField(i); - SetInternalReference( - js_obj, entry, i, o, js_obj->GetInternalFieldOffset(i)); - } -} - - -String* V8HeapExplorer::GetConstructorName(JSObject* object) { - Heap* heap = object->GetHeap(); - if (object->IsJSFunction()) return heap->closure_symbol(); - String* constructor_name = object->constructor_name(); - if (constructor_name == heap->Object_symbol()) { - // Look up an immediate "constructor" property, if it is a function, - // return its name. This is for instances of binding objects, which - // have prototype constructor type "Object". - Object* constructor_prop = NULL; - LookupResult result(heap->isolate()); - object->LocalLookupRealNamedProperty(heap->constructor_symbol(), &result); - if (!result.IsFound()) return object->constructor_name(); - - constructor_prop = result.GetLazyValue(); - if (constructor_prop->IsJSFunction()) { - Object* maybe_name = - JSFunction::cast(constructor_prop)->shared()->name(); - if (maybe_name->IsString()) { - String* name = String::cast(maybe_name); - if (name->length() > 0) return name; - } - } - } - return object->constructor_name(); -} - - -HeapEntry* V8HeapExplorer::GetEntry(Object* obj) { - if (!obj->IsHeapObject()) return NULL; - return filler_->FindOrAddEntry(obj, this); -} - - -class RootsReferencesExtractor : public ObjectVisitor { - private: - struct IndexTag { - IndexTag(int index, VisitorSynchronization::SyncTag tag) - : index(index), tag(tag) { } - int index; - VisitorSynchronization::SyncTag tag; - }; - - public: - RootsReferencesExtractor() - : collecting_all_references_(false), - previous_reference_count_(0) { - } - - void VisitPointers(Object** start, Object** end) { - if (collecting_all_references_) { - for (Object** p = start; p < end; p++) all_references_.Add(*p); - } else { - for (Object** p = start; p < end; p++) strong_references_.Add(*p); - } - } - - void SetCollectingAllReferences() { collecting_all_references_ = true; } - - void FillReferences(V8HeapExplorer* explorer) { - ASSERT(strong_references_.length() <= all_references_.length()); - for (int i = 0; i < reference_tags_.length(); ++i) { - explorer->SetGcRootsReference(reference_tags_[i].tag); - } - int strong_index = 0, all_index = 0, tags_index = 0; - while (all_index < all_references_.length()) { - if (strong_index < strong_references_.length() && - strong_references_[strong_index] == all_references_[all_index]) { - explorer->SetGcSubrootReference(reference_tags_[tags_index].tag, - false, - all_references_[all_index++]); - ++strong_index; - } else { - explorer->SetGcSubrootReference(reference_tags_[tags_index].tag, - true, - all_references_[all_index++]); - } - if (reference_tags_[tags_index].index == all_index) ++tags_index; - } 
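The merge loop above (RootsReferencesExtractor::FillReferences) classifies each GC root reference by checking whether it also showed up during the strong-only root pass; both passes visit the roots in the same order, so the strong list is a subsequence of the full list. A minimal standalone sketch of that classification step, using plain std::vector stand-ins (ClassifyWeak and Ref are illustrative names, not V8's):

#include <cstdio>
#include <vector>

// Any pointer-like handle works as a stand-in for Object*.
typedef const void* Ref;

// Returns one flag per entry of |all|: true if the reference was *not*
// reported by the strong-only pass, i.e. it is only weakly reachable.
// Assumes |strong| is a subsequence of |all| in the same visit order,
// which is what iterating the roots twice (VISIT_ONLY_STRONG, then
// VISIT_ALL) provides for the extractor above.
std::vector<bool> ClassifyWeak(const std::vector<Ref>& all,
                               const std::vector<Ref>& strong) {
  std::vector<bool> is_weak(all.size(), true);
  size_t strong_index = 0;
  for (size_t i = 0; i < all.size(); ++i) {
    if (strong_index < strong.size() && strong[strong_index] == all[i]) {
      is_weak[i] = false;  // Seen in both passes: a strong reference.
      ++strong_index;
    }
  }
  return is_weak;
}

int main() {
  int a = 0, b = 0, c = 0;
  std::vector<Ref> all;    all.push_back(&a); all.push_back(&b); all.push_back(&c);
  std::vector<Ref> strong; strong.push_back(&a); strong.push_back(&c);
  std::vector<bool> weak = ClassifyWeak(all, strong);
  for (size_t i = 0; i < weak.size(); ++i)
    std::printf("root reference %u: %s\n", static_cast<unsigned>(i),
                weak[i] ? "weak" : "strong");
  return 0;
}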
- } - - void Synchronize(VisitorSynchronization::SyncTag tag) { - if (collecting_all_references_ && - previous_reference_count_ != all_references_.length()) { - previous_reference_count_ = all_references_.length(); - reference_tags_.Add(IndexTag(previous_reference_count_, tag)); - } - } - - private: - bool collecting_all_references_; - List<Object*> strong_references_; - List<Object*> all_references_; - int previous_reference_count_; - List<IndexTag> reference_tags_; -}; - - -bool V8HeapExplorer::IterateAndExtractReferences( - SnapshotFillerInterface* filler) { - HeapIterator iterator(HeapIterator::kFilterUnreachable); - - filler_ = filler; - bool interrupted = false; - - // Heap iteration with filtering must be finished in any case. - for (HeapObject* obj = iterator.next(); - obj != NULL; - obj = iterator.next(), progress_->ProgressStep()) { - if (!interrupted) { - ExtractReferences(obj); - if (!progress_->ProgressReport(false)) interrupted = true; - } - } - if (interrupted) { - filler_ = NULL; - return false; - } - - SetRootGcRootsReference(); - RootsReferencesExtractor extractor; - heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG); - extractor.SetCollectingAllReferences(); - heap_->IterateRoots(&extractor, VISIT_ALL); - extractor.FillReferences(this); - filler_ = NULL; - return progress_->ProgressReport(true); -} - - -bool V8HeapExplorer::IsEssentialObject(Object* object) { - // We have to use raw_unchecked_* versions because checked versions - // would fail during iteration over object properties. - return object->IsHeapObject() - && !object->IsOddball() - && object != heap_->raw_unchecked_empty_byte_array() - && object != heap_->raw_unchecked_empty_fixed_array() - && object != heap_->raw_unchecked_empty_descriptor_array() - && object != heap_->raw_unchecked_fixed_array_map() - && object != heap_->raw_unchecked_global_property_cell_map() - && object != heap_->raw_unchecked_shared_function_info_map() - && object != heap_->raw_unchecked_free_space_map() - && object != heap_->raw_unchecked_one_pointer_filler_map() - && object != heap_->raw_unchecked_two_pointer_filler_map(); -} - - -void V8HeapExplorer::SetClosureReference(HeapObject* parent_obj, - int parent_entry, - String* reference_name, - Object* child_obj) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry != NULL) { - filler_->SetNamedReference(HeapGraphEdge::kContextVariable, - parent_entry, - collection_->names()->GetName(reference_name), - child_entry); - } -} - - -void V8HeapExplorer::SetNativeBindReference(HeapObject* parent_obj, - int parent_entry, - const char* reference_name, - Object* child_obj) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry != NULL) { - filler_->SetNamedReference(HeapGraphEdge::kShortcut, - parent_entry, - reference_name, - child_entry); - } -} - - -void V8HeapExplorer::SetElementReference(HeapObject* parent_obj, - int parent_entry, - int index, - Object* child_obj) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry != NULL) { - filler_->SetIndexedReference(HeapGraphEdge::kElement, - parent_entry, - index, - child_entry); - } -} - - -void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj, - int parent_entry, - const char* reference_name, - Object* child_obj, - int field_offset) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry == NULL) return; - if (IsEssentialObject(child_obj)) { - filler_->SetNamedReference(HeapGraphEdge::kInternal, - parent_entry, - reference_name, - child_entry); - } - 
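The field_offset arguments threaded through these reference setters feed IndexedReferencesExtractor::MarkVisitedField, which flags a pointer slot that has already been reported as a named reference so that the later generic pointer scan (CheckVisitedAndUnmark, further above) can skip it and restore the original value. A standalone sketch of that low-bit tagging idea, with kVisitedTag as an illustrative stand-in for V8's failure tag:

#include <cassert>
#include <cstdio>
#include <stdint.h>

static const uintptr_t kVisitedTag = 1;

// Tag an aligned pointer slot so a later scan can tell it was handled.
void MarkVisited(void** slot) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(*slot);
  assert((raw & kVisitedTag) == 0);  // Aligned pointers have the bit free.
  *slot = reinterpret_cast<void*>(raw | kVisitedTag);
}

// Returns true (and restores the slot) if the slot had been marked.
bool CheckVisitedAndUnmark(void** slot) {
  uintptr_t raw = reinterpret_cast<uintptr_t>(*slot);
  if ((raw & kVisitedTag) == 0) return false;
  *slot = reinterpret_cast<void*>(raw & ~kVisitedTag);
  return true;
}

int main() {
  int target = 42;
  void* field = &target;
  MarkVisited(&field);
  std::printf("marked: %d\n", CheckVisitedAndUnmark(&field) ? 1 : 0);  // 1
  std::printf("again:  %d\n", CheckVisitedAndUnmark(&field) ? 1 : 0);  // 0
  std::printf("restored value: %d\n", *static_cast<int*>(field));      // 42
  return 0;
}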
IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); -} - - -void V8HeapExplorer::SetInternalReference(HeapObject* parent_obj, - int parent_entry, - int index, - Object* child_obj, - int field_offset) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry == NULL) return; - if (IsEssentialObject(child_obj)) { - filler_->SetNamedReference(HeapGraphEdge::kInternal, - parent_entry, - collection_->names()->GetName(index), - child_entry); - } - IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); -} - - -void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj, - int parent_entry, - int index, - Object* child_obj) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry != NULL && IsEssentialObject(child_obj)) { - filler_->SetIndexedReference(HeapGraphEdge::kHidden, - parent_entry, - index, - child_entry); - } -} - - -void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj, - int parent_entry, - int index, - Object* child_obj, - int field_offset) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry != NULL) { - filler_->SetIndexedReference(HeapGraphEdge::kWeak, - parent_entry, - index, - child_entry); - IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); - } -} - - -void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj, - int parent_entry, - String* reference_name, - Object* child_obj, - const char* name_format_string, - int field_offset) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry != NULL) { - HeapGraphEdge::Type type = reference_name->length() > 0 ? - HeapGraphEdge::kProperty : HeapGraphEdge::kInternal; - const char* name = name_format_string != NULL ? - collection_->names()->GetFormatted( - name_format_string, - *reference_name->ToCString(DISALLOW_NULLS, - ROBUST_STRING_TRAVERSAL)) : - collection_->names()->GetName(reference_name); - - filler_->SetNamedReference(type, - parent_entry, - name, - child_entry); - IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset); - } -} - - -void V8HeapExplorer::SetRootGcRootsReference() { - filler_->SetIndexedAutoIndexReference( - HeapGraphEdge::kElement, - snapshot_->root()->index(), - snapshot_->gc_roots()); -} - - -void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) { - HeapEntry* child_entry = GetEntry(child_obj); - ASSERT(child_entry != NULL); - filler_->SetNamedAutoIndexReference( - HeapGraphEdge::kShortcut, - snapshot_->root()->index(), - child_entry); -} - - -void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) { - filler_->SetIndexedAutoIndexReference( - HeapGraphEdge::kElement, - snapshot_->gc_roots()->index(), - snapshot_->gc_subroot(tag)); -} - - -void V8HeapExplorer::SetGcSubrootReference( - VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) { - HeapEntry* child_entry = GetEntry(child_obj); - if (child_entry != NULL) { - const char* name = GetStrongGcSubrootName(child_obj); - if (name != NULL) { - filler_->SetNamedReference( - HeapGraphEdge::kInternal, - snapshot_->gc_subroot(tag)->index(), - name, - child_entry); - } else { - filler_->SetIndexedAutoIndexReference( - is_weak ? 
HeapGraphEdge::kWeak : HeapGraphEdge::kElement, - snapshot_->gc_subroot(tag)->index(), - child_entry); - } - } -} - - -const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) { - if (strong_gc_subroot_names_.is_empty()) { -#define NAME_ENTRY(name) strong_gc_subroot_names_.SetTag(heap_->name(), #name); -#define ROOT_NAME(type, name, camel_name) NAME_ENTRY(name) - STRONG_ROOT_LIST(ROOT_NAME) -#undef ROOT_NAME -#define STRUCT_MAP_NAME(NAME, Name, name) NAME_ENTRY(name##_map) - STRUCT_LIST(STRUCT_MAP_NAME) -#undef STRUCT_MAP_NAME -#define SYMBOL_NAME(name, str) NAME_ENTRY(name) - SYMBOL_LIST(SYMBOL_NAME) -#undef SYMBOL_NAME -#undef NAME_ENTRY - CHECK(!strong_gc_subroot_names_.is_empty()); - } - return strong_gc_subroot_names_.GetTag(object); -} - - -void V8HeapExplorer::TagObject(Object* obj, const char* tag) { - if (IsEssentialObject(obj)) { - HeapEntry* entry = GetEntry(obj); - if (entry->name()[0] == '\0') { - entry->set_name(tag); - } - } -} - - -class GlobalObjectsEnumerator : public ObjectVisitor { - public: - virtual void VisitPointers(Object** start, Object** end) { - for (Object** p = start; p < end; p++) { - if ((*p)->IsNativeContext()) { - Context* context = Context::cast(*p); - JSObject* proxy = context->global_proxy(); - if (proxy->IsJSGlobalProxy()) { - Object* global = proxy->map()->prototype(); - if (global->IsJSGlobalObject()) { - objects_.Add(Handle<JSGlobalObject>(JSGlobalObject::cast(global))); - } - } - } - } - } - int count() { return objects_.length(); } - Handle<JSGlobalObject>& at(int i) { return objects_[i]; } - - private: - List<Handle<JSGlobalObject> > objects_; -}; - - -// Modifies heap. Must not be run during heap traversal. -void V8HeapExplorer::TagGlobalObjects() { - HandleScope scope; - Isolate* isolate = Isolate::Current(); - GlobalObjectsEnumerator enumerator; - isolate->global_handles()->IterateAllRoots(&enumerator); - Handle<String> document_string = - isolate->factory()->NewStringFromAscii(CStrVector("document")); - Handle<String> url_string = - isolate->factory()->NewStringFromAscii(CStrVector("URL")); - const char** urls = NewArray<const char*>(enumerator.count()); - for (int i = 0, l = enumerator.count(); i < l; ++i) { - urls[i] = NULL; - HandleScope scope; - Handle<JSGlobalObject> global_obj = enumerator.at(i); - Object* obj_document; - if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) && - obj_document->IsJSObject()) { - // FixMe: Workaround: SharedWorker's current Isolate has NULL context. - // As result GetProperty(*url_string) will crash. 
- if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy()) - continue; - JSObject* document = JSObject::cast(obj_document); - Object* obj_url; - if (document->GetProperty(*url_string)->ToObject(&obj_url) && - obj_url->IsString()) { - urls[i] = collection_->names()->GetName(String::cast(obj_url)); - } - } - } - - AssertNoAllocation no_allocation; - for (int i = 0, l = enumerator.count(); i < l; ++i) { - objects_tags_.SetTag(*enumerator.at(i), urls[i]); - } - - DeleteArray(urls); -} - - -class GlobalHandlesExtractor : public ObjectVisitor { - public: - explicit GlobalHandlesExtractor(NativeObjectsExplorer* explorer) - : explorer_(explorer) {} - virtual ~GlobalHandlesExtractor() {} - virtual void VisitPointers(Object** start, Object** end) { - UNREACHABLE(); - } - virtual void VisitEmbedderReference(Object** p, uint16_t class_id) { - explorer_->VisitSubtreeWrapper(p, class_id); - } - private: - NativeObjectsExplorer* explorer_; -}; - - -class BasicHeapEntriesAllocator : public HeapEntriesAllocator { - public: - BasicHeapEntriesAllocator( - HeapSnapshot* snapshot, - HeapEntry::Type entries_type) - : snapshot_(snapshot), - collection_(snapshot_->collection()), - entries_type_(entries_type) { - } - virtual HeapEntry* AllocateEntry(HeapThing ptr); - private: - HeapSnapshot* snapshot_; - HeapSnapshotsCollection* collection_; - HeapEntry::Type entries_type_; -}; - - -HeapEntry* BasicHeapEntriesAllocator::AllocateEntry(HeapThing ptr) { - v8::RetainedObjectInfo* info = reinterpret_cast<v8::RetainedObjectInfo*>(ptr); - intptr_t elements = info->GetElementCount(); - intptr_t size = info->GetSizeInBytes(); - const char* name = elements != -1 - ? collection_->names()->GetFormatted( - "%s / %" V8_PTR_PREFIX "d entries", info->GetLabel(), elements) - : collection_->names()->GetCopy(info->GetLabel()); - return snapshot_->AddEntry( - entries_type_, - name, - HeapObjectsMap::GenerateId(info), - size != -1 ? static_cast<int>(size) : 0); -} - - -NativeObjectsExplorer::NativeObjectsExplorer( - HeapSnapshot* snapshot, SnapshottingProgressReportingInterface* progress) - : snapshot_(snapshot), - collection_(snapshot_->collection()), - progress_(progress), - embedder_queried_(false), - objects_by_info_(RetainedInfosMatch), - native_groups_(StringsMatch), - filler_(NULL) { - synthetic_entries_allocator_ = - new BasicHeapEntriesAllocator(snapshot, HeapEntry::kSynthetic); - native_entries_allocator_ = - new BasicHeapEntriesAllocator(snapshot, HeapEntry::kNative); -} - - -NativeObjectsExplorer::~NativeObjectsExplorer() { - for (HashMap::Entry* p = objects_by_info_.Start(); - p != NULL; - p = objects_by_info_.Next(p)) { - v8::RetainedObjectInfo* info = - reinterpret_cast<v8::RetainedObjectInfo*>(p->key); - info->Dispose(); - List<HeapObject*>* objects = - reinterpret_cast<List<HeapObject*>* >(p->value); - delete objects; - } - for (HashMap::Entry* p = native_groups_.Start(); - p != NULL; - p = native_groups_.Next(p)) { - v8::RetainedObjectInfo* info = - reinterpret_cast<v8::RetainedObjectInfo*>(p->value); - info->Dispose(); - } - delete synthetic_entries_allocator_; - delete native_entries_allocator_; -} - - -int NativeObjectsExplorer::EstimateObjectsCount() { - FillRetainedObjects(); - return objects_by_info_.occupancy(); -} - - -void NativeObjectsExplorer::FillRetainedObjects() { - if (embedder_queried_) return; - Isolate* isolate = Isolate::Current(); - // Record objects that are joined into ObjectGroups. 
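FillRetainedObjects groups embedder-wrapped objects by their RetainedObjectInfo, and GetListMaybeDisposeInfo (below) disposes an info object immediately when an equivalent one is already registered, because the map keeps exactly one owning info per group. A simplified standalone sketch of that ownership policy, keyed by label rather than by hash plus IsEquivalent (RetainedInfo and RetainedObjectBuckets are illustrative names, not V8's):

#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Simplified stand-in for v8::RetainedObjectInfo: the embedder allocates it,
// the profiler takes ownership and releases it via Dispose().
struct RetainedInfo {
  explicit RetainedInfo(const std::string& label) : label_(label) {}
  const std::string& GetLabel() const { return label_; }
  void Dispose() { delete this; }
 private:
  std::string label_;
};

typedef const void* Wrapped;

// One bucket of wrapped objects per distinct group label. If an equivalent
// info is already registered, the duplicate is disposed right away.
class RetainedObjectBuckets {
 public:
  std::vector<Wrapped>& ListFor(RetainedInfo* info) {
    std::map<std::string, Bucket>::iterator it = buckets_.find(info->GetLabel());
    if (it != buckets_.end()) {
      info->Dispose();         // Already have an owner for this group.
      return it->second.objects;
    }
    Bucket& bucket = buckets_[info->GetLabel()];
    bucket.info = info;        // Take ownership of the first info seen.
    return bucket.objects;
  }
  ~RetainedObjectBuckets() {
    for (std::map<std::string, Bucket>::iterator it = buckets_.begin();
         it != buckets_.end(); ++it) {
      it->second.info->Dispose();
    }
  }
 private:
  struct Bucket {
    Bucket() : info(NULL) {}
    RetainedInfo* info;
    std::vector<Wrapped> objects;
  };
  std::map<std::string, Bucket> buckets_;
};

A wrapper reported twice with equivalent infos thus lands in a single bucket, and only the first info survives to be disposed at teardown.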
- isolate->heap()->CallGlobalGCPrologueCallback(); - List<ObjectGroup*>* groups = isolate->global_handles()->object_groups(); - for (int i = 0; i < groups->length(); ++i) { - ObjectGroup* group = groups->at(i); - if (group->info_ == NULL) continue; - List<HeapObject*>* list = GetListMaybeDisposeInfo(group->info_); - for (size_t j = 0; j < group->length_; ++j) { - HeapObject* obj = HeapObject::cast(*group->objects_[j]); - list->Add(obj); - in_groups_.Insert(obj); - } - group->info_ = NULL; // Acquire info object ownership. - } - isolate->global_handles()->RemoveObjectGroups(); - isolate->heap()->CallGlobalGCEpilogueCallback(); - // Record objects that are not in ObjectGroups, but have class ID. - GlobalHandlesExtractor extractor(this); - isolate->global_handles()->IterateAllRootsWithClassIds(&extractor); - embedder_queried_ = true; -} - -void NativeObjectsExplorer::FillImplicitReferences() { - Isolate* isolate = Isolate::Current(); - List<ImplicitRefGroup*>* groups = - isolate->global_handles()->implicit_ref_groups(); - for (int i = 0; i < groups->length(); ++i) { - ImplicitRefGroup* group = groups->at(i); - HeapObject* parent = *group->parent_; - int parent_entry = - filler_->FindOrAddEntry(parent, native_entries_allocator_)->index(); - ASSERT(parent_entry != HeapEntry::kNoEntry); - Object*** children = group->children_; - for (size_t j = 0; j < group->length_; ++j) { - Object* child = *children[j]; - HeapEntry* child_entry = - filler_->FindOrAddEntry(child, native_entries_allocator_); - filler_->SetNamedReference( - HeapGraphEdge::kInternal, - parent_entry, - "native", - child_entry); - } - } -} - -List<HeapObject*>* NativeObjectsExplorer::GetListMaybeDisposeInfo( - v8::RetainedObjectInfo* info) { - HashMap::Entry* entry = - objects_by_info_.Lookup(info, InfoHash(info), true); - if (entry->value != NULL) { - info->Dispose(); - } else { - entry->value = new List<HeapObject*>(4); - } - return reinterpret_cast<List<HeapObject*>* >(entry->value); -} - - -bool NativeObjectsExplorer::IterateAndExtractReferences( - SnapshotFillerInterface* filler) { - filler_ = filler; - FillRetainedObjects(); - FillImplicitReferences(); - if (EstimateObjectsCount() > 0) { - for (HashMap::Entry* p = objects_by_info_.Start(); - p != NULL; - p = objects_by_info_.Next(p)) { - v8::RetainedObjectInfo* info = - reinterpret_cast<v8::RetainedObjectInfo*>(p->key); - SetNativeRootReference(info); - List<HeapObject*>* objects = - reinterpret_cast<List<HeapObject*>* >(p->value); - for (int i = 0; i < objects->length(); ++i) { - SetWrapperNativeReferences(objects->at(i), info); - } - } - SetRootNativeRootsReference(); - } - filler_ = NULL; - return true; -} - - -class NativeGroupRetainedObjectInfo : public v8::RetainedObjectInfo { - public: - explicit NativeGroupRetainedObjectInfo(const char* label) - : disposed_(false), - hash_(reinterpret_cast<intptr_t>(label)), - label_(label) { - } - - virtual ~NativeGroupRetainedObjectInfo() {} - virtual void Dispose() { - CHECK(!disposed_); - disposed_ = true; - delete this; - } - virtual bool IsEquivalent(RetainedObjectInfo* other) { - return hash_ == other->GetHash() && !strcmp(label_, other->GetLabel()); - } - virtual intptr_t GetHash() { return hash_; } - virtual const char* GetLabel() { return label_; } - - private: - bool disposed_; - intptr_t hash_; - const char* label_; -}; - - -NativeGroupRetainedObjectInfo* NativeObjectsExplorer::FindOrAddGroupInfo( - const char* label) { - const char* label_copy = collection_->names()->GetCopy(label); - uint32_t hash = 
HashSequentialString(label_copy, - static_cast<int>(strlen(label_copy)), - HEAP->HashSeed()); - HashMap::Entry* entry = native_groups_.Lookup(const_cast<char*>(label_copy), - hash, true); - if (entry->value == NULL) { - entry->value = new NativeGroupRetainedObjectInfo(label); - } - return static_cast<NativeGroupRetainedObjectInfo*>(entry->value); -} - - -void NativeObjectsExplorer::SetNativeRootReference( - v8::RetainedObjectInfo* info) { - HeapEntry* child_entry = - filler_->FindOrAddEntry(info, native_entries_allocator_); - ASSERT(child_entry != NULL); - NativeGroupRetainedObjectInfo* group_info = - FindOrAddGroupInfo(info->GetGroupLabel()); - HeapEntry* group_entry = - filler_->FindOrAddEntry(group_info, synthetic_entries_allocator_); - filler_->SetNamedAutoIndexReference( - HeapGraphEdge::kInternal, - group_entry->index(), - child_entry); -} - - -void NativeObjectsExplorer::SetWrapperNativeReferences( - HeapObject* wrapper, v8::RetainedObjectInfo* info) { - HeapEntry* wrapper_entry = filler_->FindEntry(wrapper); - ASSERT(wrapper_entry != NULL); - HeapEntry* info_entry = - filler_->FindOrAddEntry(info, native_entries_allocator_); - ASSERT(info_entry != NULL); - filler_->SetNamedReference(HeapGraphEdge::kInternal, - wrapper_entry->index(), - "native", - info_entry); - filler_->SetIndexedAutoIndexReference(HeapGraphEdge::kElement, - info_entry->index(), - wrapper_entry); -} - - -void NativeObjectsExplorer::SetRootNativeRootsReference() { - for (HashMap::Entry* entry = native_groups_.Start(); - entry; - entry = native_groups_.Next(entry)) { - NativeGroupRetainedObjectInfo* group_info = - static_cast<NativeGroupRetainedObjectInfo*>(entry->value); - HeapEntry* group_entry = - filler_->FindOrAddEntry(group_info, native_entries_allocator_); - ASSERT(group_entry != NULL); - filler_->SetIndexedAutoIndexReference( - HeapGraphEdge::kElement, - snapshot_->root()->index(), - group_entry); - } -} - - -void NativeObjectsExplorer::VisitSubtreeWrapper(Object** p, uint16_t class_id) { - if (in_groups_.Contains(*p)) return; - Isolate* isolate = Isolate::Current(); - v8::RetainedObjectInfo* info = - isolate->heap_profiler()->ExecuteWrapperClassCallback(class_id, p); - if (info == NULL) return; - GetListMaybeDisposeInfo(info)->Add(HeapObject::cast(*p)); -} - - -class SnapshotFiller : public SnapshotFillerInterface { - public: - explicit SnapshotFiller(HeapSnapshot* snapshot, HeapEntriesMap* entries) - : snapshot_(snapshot), - collection_(snapshot->collection()), - entries_(entries) { } - HeapEntry* AddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) { - HeapEntry* entry = allocator->AllocateEntry(ptr); - entries_->Pair(ptr, entry->index()); - return entry; - } - HeapEntry* FindEntry(HeapThing ptr) { - int index = entries_->Map(ptr); - return index != HeapEntry::kNoEntry ? &snapshot_->entries()[index] : NULL; - } - HeapEntry* FindOrAddEntry(HeapThing ptr, HeapEntriesAllocator* allocator) { - HeapEntry* entry = FindEntry(ptr); - return entry != NULL ? 
entry : AddEntry(ptr, allocator); - } - void SetIndexedReference(HeapGraphEdge::Type type, - int parent, - int index, - HeapEntry* child_entry) { - HeapEntry* parent_entry = &snapshot_->entries()[parent]; - parent_entry->SetIndexedReference(type, index, child_entry); - } - void SetIndexedAutoIndexReference(HeapGraphEdge::Type type, - int parent, - HeapEntry* child_entry) { - HeapEntry* parent_entry = &snapshot_->entries()[parent]; - int index = parent_entry->children_count() + 1; - parent_entry->SetIndexedReference(type, index, child_entry); - } - void SetNamedReference(HeapGraphEdge::Type type, - int parent, - const char* reference_name, - HeapEntry* child_entry) { - HeapEntry* parent_entry = &snapshot_->entries()[parent]; - parent_entry->SetNamedReference(type, reference_name, child_entry); - } - void SetNamedAutoIndexReference(HeapGraphEdge::Type type, - int parent, - HeapEntry* child_entry) { - HeapEntry* parent_entry = &snapshot_->entries()[parent]; - int index = parent_entry->children_count() + 1; - parent_entry->SetNamedReference( - type, - collection_->names()->GetName(index), - child_entry); - } - - private: - HeapSnapshot* snapshot_; - HeapSnapshotsCollection* collection_; - HeapEntriesMap* entries_; -}; - - -HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot, - v8::ActivityControl* control) - : snapshot_(snapshot), - control_(control), - v8_heap_explorer_(snapshot_, this), - dom_explorer_(snapshot_, this) { -} - - -bool HeapSnapshotGenerator::GenerateSnapshot() { - v8_heap_explorer_.TagGlobalObjects(); - - // TODO(1562) Profiler assumes that any object that is in the heap after - // full GC is reachable from the root when computing dominators. - // This is not true for weakly reachable objects. - // As a temporary solution we call GC twice. - Isolate::Current()->heap()->CollectAllGarbage( - Heap::kMakeHeapIterableMask, - "HeapSnapshotGenerator::GenerateSnapshot"); - Isolate::Current()->heap()->CollectAllGarbage( - Heap::kMakeHeapIterableMask, - "HeapSnapshotGenerator::GenerateSnapshot"); - -#ifdef VERIFY_HEAP - Heap* debug_heap = Isolate::Current()->heap(); - CHECK(!debug_heap->old_data_space()->was_swept_conservatively()); - CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively()); - CHECK(!debug_heap->code_space()->was_swept_conservatively()); - CHECK(!debug_heap->cell_space()->was_swept_conservatively()); - CHECK(!debug_heap->map_space()->was_swept_conservatively()); -#endif - - // The following code uses heap iterators, so we want the heap to be - // stable. It should follow TagGlobalObjects as that can allocate. - AssertNoAllocation no_alloc; - -#ifdef VERIFY_HEAP - debug_heap->Verify(); -#endif - - SetProgressTotal(1); // 1 pass. 
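SetProgressTotal seeds the counters consumed by ProgressStep and ProgressReport (defined further below): work is counted per heap object visited and reported to the embedder only every few thousand steps, aborting the snapshot if the callback asks to stop. A standalone sketch of that throttled-callback pattern, with Control as a hypothetical stand-in for v8::ActivityControl:

#include <cstddef>

// Hypothetical stand-in for v8::ActivityControl.
class Control {
 public:
  enum Decision { kContinue, kAbort };
  virtual ~Control() {}
  virtual Decision ReportProgressValue(int done, int total) = 0;
};

class ProgressTracker {
 public:
  ProgressTracker(Control* control, int total)
      : control_(control), total_(total), counter_(0) {}
  void Step() { ++counter_; }
  // Reports at a fixed granularity (or when forced) and returns false if
  // the client asked to abort -- the same contract as the ProgressReport
  // method defined further below.
  bool Report(bool force) {
    static const int kGranularity = 10000;
    if (control_ == NULL) return true;
    if (!force && counter_ % kGranularity != 0) return true;
    return control_->ReportProgressValue(counter_, total_) == Control::kContinue;
  }
 private:
  Control* control_;
  int total_;
  int counter_;
};

Typical use is to call Step() once per object visited and to bail out of the traversal as soon as Report(false) returns false.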
- -#ifdef VERIFY_HEAP - debug_heap->Verify(); -#endif - - if (!FillReferences()) return false; - - snapshot_->FillChildren(); - snapshot_->RememberLastJSObjectId(); - - progress_counter_ = progress_total_; - if (!ProgressReport(true)) return false; - return true; -} - - -void HeapSnapshotGenerator::ProgressStep() { - ++progress_counter_; -} - - -bool HeapSnapshotGenerator::ProgressReport(bool force) { - const int kProgressReportGranularity = 10000; - if (control_ != NULL - && (force || progress_counter_ % kProgressReportGranularity == 0)) { - return - control_->ReportProgressValue(progress_counter_, progress_total_) == - v8::ActivityControl::kContinue; - } - return true; -} - - -void HeapSnapshotGenerator::SetProgressTotal(int iterations_count) { - if (control_ == NULL) return; - HeapIterator iterator(HeapIterator::kFilterUnreachable); - progress_total_ = iterations_count * ( - v8_heap_explorer_.EstimateObjectsCount(&iterator) + - dom_explorer_.EstimateObjectsCount()); - progress_counter_ = 0; -} - - -bool HeapSnapshotGenerator::FillReferences() { - SnapshotFiller filler(snapshot_, &entries_); - v8_heap_explorer_.AddRootEntries(&filler); - return v8_heap_explorer_.IterateAndExtractReferences(&filler) - && dom_explorer_.IterateAndExtractReferences(&filler); -} - - -template<int bytes> struct MaxDecimalDigitsIn; -template<> struct MaxDecimalDigitsIn<4> { - static const int kSigned = 11; - static const int kUnsigned = 10; -}; -template<> struct MaxDecimalDigitsIn<8> { - static const int kSigned = 20; - static const int kUnsigned = 20; -}; - - -class OutputStreamWriter { - public: - explicit OutputStreamWriter(v8::OutputStream* stream) - : stream_(stream), - chunk_size_(stream->GetChunkSize()), - chunk_(chunk_size_), - chunk_pos_(0), - aborted_(false) { - ASSERT(chunk_size_ > 0); - } - bool aborted() { return aborted_; } - void AddCharacter(char c) { - ASSERT(c != '\0'); - ASSERT(chunk_pos_ < chunk_size_); - chunk_[chunk_pos_++] = c; - MaybeWriteChunk(); - } - void AddString(const char* s) { - AddSubstring(s, StrLength(s)); - } - void AddSubstring(const char* s, int n) { - if (n <= 0) return; - ASSERT(static_cast<size_t>(n) <= strlen(s)); - const char* s_end = s + n; - while (s < s_end) { - int s_chunk_size = Min( - chunk_size_ - chunk_pos_, static_cast<int>(s_end - s)); - ASSERT(s_chunk_size > 0); - memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size); - s += s_chunk_size; - chunk_pos_ += s_chunk_size; - MaybeWriteChunk(); - } - } - void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); } - void Finalize() { - if (aborted_) return; - ASSERT(chunk_pos_ < chunk_size_); - if (chunk_pos_ != 0) { - WriteChunk(); - } - stream_->EndOfStream(); - } - - private: - template<typename T> - void AddNumberImpl(T n, const char* format) { - // Buffer for the longest value plus trailing \0 - static const int kMaxNumberSize = - MaxDecimalDigitsIn<sizeof(T)>::kUnsigned + 1; - if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) { - int result = OS::SNPrintF( - chunk_.SubVector(chunk_pos_, chunk_size_), format, n); - ASSERT(result != -1); - chunk_pos_ += result; - MaybeWriteChunk(); - } else { - EmbeddedVector<char, kMaxNumberSize> buffer; - int result = OS::SNPrintF(buffer, format, n); - USE(result); - ASSERT(result != -1); - AddString(buffer.start()); - } - } - void MaybeWriteChunk() { - ASSERT(chunk_pos_ <= chunk_size_); - if (chunk_pos_ == chunk_size_) { - WriteChunk(); - } - } - void WriteChunk() { - if (aborted_) return; - if (stream_->WriteAsciiChunk(chunk_.start(), chunk_pos_) == - 
v8::OutputStream::kAbort) aborted_ = true; - chunk_pos_ = 0; - } - - v8::OutputStream* stream_; - int chunk_size_; - ScopedVector<char> chunk_; - int chunk_pos_; - bool aborted_; -}; - - -// type, name|index, to_node. -const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; -// type, name, id, self_size, children_index. -const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; - -void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { - ASSERT(writer_ == NULL); - writer_ = new OutputStreamWriter(stream); - - HeapSnapshot* original_snapshot = NULL; - if (snapshot_->RawSnapshotSize() >= - SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize) { - // The snapshot is too big. Serialize a fake snapshot. - original_snapshot = snapshot_; - snapshot_ = CreateFakeSnapshot(); - } - - SerializeImpl(); - - delete writer_; - writer_ = NULL; - - if (original_snapshot != NULL) { - delete snapshot_; - snapshot_ = original_snapshot; - } -} - - -HeapSnapshot* HeapSnapshotJSONSerializer::CreateFakeSnapshot() { - HeapSnapshot* result = new HeapSnapshot(snapshot_->collection(), - HeapSnapshot::kFull, - snapshot_->title(), - snapshot_->uid()); - result->AddRootEntry(); - const char* text = snapshot_->collection()->names()->GetFormatted( - "The snapshot is too big. " - "Maximum snapshot size is %" V8_PTR_PREFIX "u MB. " - "Actual snapshot size is %" V8_PTR_PREFIX "u MB.", - SnapshotSizeConstants<kPointerSize>::kMaxSerializableSnapshotRawSize / MB, - (snapshot_->RawSnapshotSize() + MB - 1) / MB); - HeapEntry* message = result->AddEntry(HeapEntry::kString, text, 0, 4); - result->root()->SetIndexedReference(HeapGraphEdge::kElement, 1, message); - result->FillChildren(); - return result; -} - - -void HeapSnapshotJSONSerializer::SerializeImpl() { - ASSERT(0 == snapshot_->root()->index()); - writer_->AddCharacter('{'); - writer_->AddString("\"snapshot\":{"); - SerializeSnapshot(); - if (writer_->aborted()) return; - writer_->AddString("},\n"); - writer_->AddString("\"nodes\":["); - SerializeNodes(); - if (writer_->aborted()) return; - writer_->AddString("],\n"); - writer_->AddString("\"edges\":["); - SerializeEdges(); - if (writer_->aborted()) return; - writer_->AddString("],\n"); - writer_->AddString("\"strings\":["); - SerializeStrings(); - if (writer_->aborted()) return; - writer_->AddCharacter(']'); - writer_->AddCharacter('}'); - writer_->Finalize(); -} - - -int HeapSnapshotJSONSerializer::GetStringId(const char* s) { - HashMap::Entry* cache_entry = strings_.Lookup( - const_cast<char*>(s), ObjectHash(s), true); - if (cache_entry->value == NULL) { - cache_entry->value = reinterpret_cast<void*>(next_string_id_++); - } - return static_cast<int>(reinterpret_cast<intptr_t>(cache_entry->value)); -} - - -static int utoa(unsigned value, const Vector<char>& buffer, int buffer_pos) { - int number_of_digits = 0; - unsigned t = value; - do { - ++number_of_digits; - } while (t /= 10); - - buffer_pos += number_of_digits; - int result = buffer_pos; - do { - int last_digit = value % 10; - buffer[--buffer_pos] = '0' + last_digit; - value /= 10; - } while (value); - return result; -} - - -void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge, - bool first_edge) { - // The buffer needs space for 3 unsigned ints, 3 commas, \n and \0 - static const int kBufferSize = - MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned * 3 + 3 + 2; // NOLINT - EmbeddedVector<char, kBufferSize> buffer; - int edge_name_or_index = edge->type() == HeapGraphEdge::kElement - || edge->type() == 
HeapGraphEdge::kHidden - || edge->type() == HeapGraphEdge::kWeak - ? edge->index() : GetStringId(edge->name()); - int buffer_pos = 0; - if (!first_edge) { - buffer[buffer_pos++] = ','; - } - buffer_pos = utoa(edge->type(), buffer, buffer_pos); - buffer[buffer_pos++] = ','; - buffer_pos = utoa(edge_name_or_index, buffer, buffer_pos); - buffer[buffer_pos++] = ','; - buffer_pos = utoa(entry_index(edge->to()), buffer, buffer_pos); - buffer[buffer_pos++] = '\n'; - buffer[buffer_pos++] = '\0'; - writer_->AddString(buffer.start()); -} - - -void HeapSnapshotJSONSerializer::SerializeEdges() { - List<HeapGraphEdge*>& edges = snapshot_->children(); - for (int i = 0; i < edges.length(); ++i) { - ASSERT(i == 0 || - edges[i - 1]->from()->index() <= edges[i]->from()->index()); - SerializeEdge(edges[i], i == 0); - if (writer_->aborted()) return; - } -} - - -void HeapSnapshotJSONSerializer::SerializeNode(HeapEntry* entry) { - // The buffer needs space for 5 unsigned ints, 5 commas, \n and \0 - static const int kBufferSize = - 5 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT - + 5 + 1 + 1; - EmbeddedVector<char, kBufferSize> buffer; - int buffer_pos = 0; - if (entry_index(entry) != 0) { - buffer[buffer_pos++] = ','; - } - buffer_pos = utoa(entry->type(), buffer, buffer_pos); - buffer[buffer_pos++] = ','; - buffer_pos = utoa(GetStringId(entry->name()), buffer, buffer_pos); - buffer[buffer_pos++] = ','; - buffer_pos = utoa(entry->id(), buffer, buffer_pos); - buffer[buffer_pos++] = ','; - buffer_pos = utoa(entry->self_size(), buffer, buffer_pos); - buffer[buffer_pos++] = ','; - buffer_pos = utoa(entry->children_count(), buffer, buffer_pos); - buffer[buffer_pos++] = '\n'; - buffer[buffer_pos++] = '\0'; - writer_->AddString(buffer.start()); -} - - -void HeapSnapshotJSONSerializer::SerializeNodes() { - List<HeapEntry>& entries = snapshot_->entries(); - for (int i = 0; i < entries.length(); ++i) { - SerializeNode(&entries[i]); - if (writer_->aborted()) return; - } -} - - -void HeapSnapshotJSONSerializer::SerializeSnapshot() { - writer_->AddString("\"title\":\""); - writer_->AddString(snapshot_->title()); - writer_->AddString("\""); - writer_->AddString(",\"uid\":"); - writer_->AddNumber(snapshot_->uid()); - writer_->AddString(",\"meta\":"); - // The object describing node serialization layout. - // We use a set of macros to improve readability. 
-#define JSON_A(s) "[" s "]" -#define JSON_O(s) "{" s "}" -#define JSON_S(s) "\"" s "\"" - writer_->AddString(JSON_O( - JSON_S("node_fields") ":" JSON_A( - JSON_S("type") "," - JSON_S("name") "," - JSON_S("id") "," - JSON_S("self_size") "," - JSON_S("edge_count")) "," - JSON_S("node_types") ":" JSON_A( - JSON_A( - JSON_S("hidden") "," - JSON_S("array") "," - JSON_S("string") "," - JSON_S("object") "," - JSON_S("code") "," - JSON_S("closure") "," - JSON_S("regexp") "," - JSON_S("number") "," - JSON_S("native") "," - JSON_S("synthetic")) "," - JSON_S("string") "," - JSON_S("number") "," - JSON_S("number") "," - JSON_S("number") "," - JSON_S("number") "," - JSON_S("number")) "," - JSON_S("edge_fields") ":" JSON_A( - JSON_S("type") "," - JSON_S("name_or_index") "," - JSON_S("to_node")) "," - JSON_S("edge_types") ":" JSON_A( - JSON_A( - JSON_S("context") "," - JSON_S("element") "," - JSON_S("property") "," - JSON_S("internal") "," - JSON_S("hidden") "," - JSON_S("shortcut") "," - JSON_S("weak")) "," - JSON_S("string_or_number") "," - JSON_S("node")))); -#undef JSON_S -#undef JSON_O -#undef JSON_A - writer_->AddString(",\"node_count\":"); - writer_->AddNumber(snapshot_->entries().length()); - writer_->AddString(",\"edge_count\":"); - writer_->AddNumber(snapshot_->edges().length()); -} - - -static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) { - static const char hex_chars[] = "0123456789ABCDEF"; - w->AddString("\\u"); - w->AddCharacter(hex_chars[(u >> 12) & 0xf]); - w->AddCharacter(hex_chars[(u >> 8) & 0xf]); - w->AddCharacter(hex_chars[(u >> 4) & 0xf]); - w->AddCharacter(hex_chars[u & 0xf]); -} - -void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) { - writer_->AddCharacter('\n'); - writer_->AddCharacter('\"'); - for ( ; *s != '\0'; ++s) { - switch (*s) { - case '\b': - writer_->AddString("\\b"); - continue; - case '\f': - writer_->AddString("\\f"); - continue; - case '\n': - writer_->AddString("\\n"); - continue; - case '\r': - writer_->AddString("\\r"); - continue; - case '\t': - writer_->AddString("\\t"); - continue; - case '\"': - case '\\': - writer_->AddCharacter('\\'); - writer_->AddCharacter(*s); - continue; - default: - if (*s > 31 && *s < 128) { - writer_->AddCharacter(*s); - } else if (*s <= 31) { - // Special character with no dedicated literal. - WriteUChar(writer_, *s); - } else { - // Convert UTF-8 into \u UTF-16 literal. 
- unsigned length = 1, cursor = 0; - for ( ; length <= 4 && *(s + length) != '\0'; ++length) { } - unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor); - if (c != unibrow::Utf8::kBadChar) { - WriteUChar(writer_, c); - ASSERT(cursor != 0); - s += cursor - 1; - } else { - writer_->AddCharacter('?'); - } - } - } - } - writer_->AddCharacter('\"'); -} - - -void HeapSnapshotJSONSerializer::SerializeStrings() { - List<HashMap::Entry*> sorted_strings; - SortHashMap(&strings_, &sorted_strings); - writer_->AddString("\"<dummy>\""); - for (int i = 0; i < sorted_strings.length(); ++i) { - writer_->AddCharacter(','); - SerializeString( - reinterpret_cast<const unsigned char*>(sorted_strings[i]->key)); - if (writer_->aborted()) return; - } -} - - -template<typename T> -inline static int SortUsingEntryValue(const T* x, const T* y) { - uintptr_t x_uint = reinterpret_cast<uintptr_t>((*x)->value); - uintptr_t y_uint = reinterpret_cast<uintptr_t>((*y)->value); - if (x_uint > y_uint) { - return 1; - } else if (x_uint == y_uint) { - return 0; - } else { - return -1; - } -} - - -void HeapSnapshotJSONSerializer::SortHashMap( - HashMap* map, List<HashMap::Entry*>* sorted_entries) { - for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) - sorted_entries->Add(p); - sorted_entries->Sort(SortUsingEntryValue); -} - } } // namespace v8::internal diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h index 04f4a1c71d..4ddb75337e 100644 --- a/deps/v8/src/profile-generator.h +++ b/deps/v8/src/profile-generator.h @@ -45,7 +45,8 @@ class TokenEnumerator { static const int kInheritsSecurityToken = -2; private: - static void TokenRemovedCallback(v8::Persistent<v8::Value> handle, + static void TokenRemovedCallback(v8::Isolate* isolate, + v8::Persistent<v8::Value> handle, void* parameter); void TokenRemoved(Object** token_location); @@ -68,9 +69,9 @@ class StringsStorage { const char* GetCopy(const char* src); const char* GetFormatted(const char* format, ...); const char* GetVFormatted(const char* format, va_list args); - const char* GetName(String* name); + const char* GetName(Name* name); const char* GetName(int index); - inline const char* GetFunctionName(String* name); + inline const char* GetFunctionName(Name* name); inline const char* GetFunctionName(const char* name); size_t GetUsedMemorySize() const; @@ -149,6 +150,7 @@ class ProfileNode { INLINE(const List<ProfileNode*>* children() const) { return &children_list_; } double GetSelfMillis() const; double GetTotalMillis() const; + unsigned id() const { return id_; } void Print(int indent); @@ -169,6 +171,7 @@ class ProfileNode { // Mapping from CodeEntry* to ProfileNode* HashMap children_; List<ProfileNode*> children_list_; + unsigned id_; DISALLOW_COPY_AND_ASSIGN(ProfileNode); }; @@ -179,7 +182,7 @@ class ProfileTree { ProfileTree(); ~ProfileTree(); - void AddPathFromEnd(const Vector<CodeEntry*>& path); + ProfileNode* AddPathFromEnd(const Vector<CodeEntry*>& path); void AddPathFromStart(const Vector<CodeEntry*>& path); void CalculateTotalTicks(); void FilteredClone(ProfileTree* src, int security_token_id); @@ -190,6 +193,8 @@ class ProfileTree { ProfileNode* root() const { return root_; } void SetTickRatePerMs(double ticks_per_ms); + unsigned next_node_id() { return next_node_id_++; } + void ShortPrint(); void Print() { root_->Print(0); @@ -200,6 +205,7 @@ class ProfileTree { void TraverseDepthFirst(Callback* callback); CodeEntry root_entry_; + unsigned next_node_id_; ProfileNode* root_; double ms_to_ticks_scale_; 
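The profile-generator.h hunks above and below change ProfileTree::AddPathFromEnd to return the node it lands on, give every ProfileNode a tree-wide id, and let a CpuProfile optionally remember one node per recorded sample. A standalone sketch of that shape, under the assumption that a sample path arrives ordered leaf-to-root (SampleTree, SampleNode and CodeId are illustrative names, not V8's):

#include <cstdio>
#include <map>
#include <string>
#include <vector>

typedef std::string CodeId;  // Stand-in for CodeEntry*.

class SampleNode {
 public:
  SampleNode(unsigned id, const CodeId& code) : id_(id), code_(code), ticks_(0) {}
  ~SampleNode() {
    for (std::map<CodeId, SampleNode*>::iterator it = children_.begin();
         it != children_.end(); ++it) delete it->second;
  }
  SampleNode* FindOrAddChild(unsigned* next_id, const CodeId& code) {
    std::map<CodeId, SampleNode*>::iterator it = children_.find(code);
    if (it != children_.end()) return it->second;
    SampleNode* child = new SampleNode((*next_id)++, code);
    children_[code] = child;
    return child;
  }
  void IncrementTicks() { ++ticks_; }
  unsigned id() const { return id_; }
 private:
  unsigned id_;
  CodeId code_;
  unsigned ticks_;
  std::map<CodeId, SampleNode*> children_;
};

class SampleTree {
 public:
  explicit SampleTree(bool record_samples)
      : next_node_id_(1),
        root_(new SampleNode(next_node_id_++, "(root)")),
        record_samples_(record_samples) {}
  ~SampleTree() { delete root_; }
  // |path| is ordered leaf-to-root (pc -> ... -> main), added from the end.
  SampleNode* AddPathFromEnd(const std::vector<CodeId>& path) {
    SampleNode* node = root_;
    for (std::vector<CodeId>::const_reverse_iterator it = path.rbegin();
         it != path.rend(); ++it) {
      node = node->FindOrAddChild(&next_node_id_, *it);
    }
    node->IncrementTicks();
    if (record_samples_) samples_.push_back(node);  // One entry per tick.
    return node;
  }
 private:
  unsigned next_node_id_;
  SampleNode* root_;
  bool record_samples_;
  std::vector<SampleNode*> samples_;  // Non-owning; nodes live in the tree.
};

int main() {
  SampleTree tree(true);
  std::vector<CodeId> path;
  path.push_back("leaf"); path.push_back("caller"); path.push_back("main");
  SampleNode* hit = tree.AddPathFromEnd(path);
  std::printf("sample landed on node %u\n", hit->id());
  return 0;
}

Returning the leaf node and numbering nodes is what lets a profile keep a per-sample list that can later be mapped back to positions in the serialized call tree.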
@@ -209,8 +215,8 @@ class ProfileTree { class CpuProfile { public: - CpuProfile(const char* title, unsigned uid) - : title_(title), uid_(uid) { } + CpuProfile(const char* title, unsigned uid, bool record_samples) + : title_(title), uid_(uid), record_samples_(record_samples) { } // Add pc -> ... -> main() call path to the profile. void AddPath(const Vector<CodeEntry*>& path); @@ -221,7 +227,9 @@ class CpuProfile { INLINE(const char* title() const) { return title_; } INLINE(unsigned uid() const) { return uid_; } INLINE(const ProfileTree* top_down() const) { return &top_down_; } - INLINE(const ProfileTree* bottom_up() const) { return &bottom_up_; } + + INLINE(int samples_count() const) { return samples_.length(); } + INLINE(ProfileNode* sample(int index) const) { return samples_.at(index); } void UpdateTicksScale(); @@ -231,8 +239,9 @@ class CpuProfile { private: const char* title_; unsigned uid_; + bool record_samples_; + List<ProfileNode*> samples_; ProfileTree top_down_; - ProfileTree bottom_up_; DISALLOW_COPY_AND_ASSIGN(CpuProfile); }; @@ -289,13 +298,12 @@ class CpuProfilesCollection { CpuProfilesCollection(); ~CpuProfilesCollection(); - bool StartProfiling(const char* title, unsigned uid); - bool StartProfiling(String* title, unsigned uid); + bool StartProfiling(const char* title, unsigned uid, bool record_samples); CpuProfile* StopProfiling(int security_token_id, const char* title, double actual_sampling_rate); List<CpuProfile*>* Profiles(int security_token_id); - const char* GetName(String* name) { + const char* GetName(Name* name) { return function_and_resource_names_.GetName(name); } const char* GetName(int args_count) { @@ -307,10 +315,10 @@ class CpuProfilesCollection { bool HasDetachedProfiles() { return detached_profiles_.length() > 0; } CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, - String* name, String* resource_name, int line_number); + Name* name, String* resource_name, int line_number); CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name); CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, - const char* name_prefix, String* name); + const char* name_prefix, Name* name); CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, int args_count); CodeEntry* NewCodeEntry(int security_token_id); @@ -321,7 +329,7 @@ class CpuProfilesCollection { static const int kMaxSimultaneousProfiles = 100; private: - const char* GetFunctionName(String* name) { + const char* GetFunctionName(Name* name) { return function_and_resource_names_.GetFunctionName(name); } const char* GetFunctionName(const char* name) { @@ -394,7 +402,7 @@ class ProfileGenerator { explicit ProfileGenerator(CpuProfilesCollection* profiles); INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, - String* name, + Name* name, String* resource_name, int line_number)) { return profiles_->NewCodeEntry(tag, name, resource_name, line_number); @@ -407,7 +415,7 @@ class ProfileGenerator { INLINE(CodeEntry* NewCodeEntry(Logger::LogEventsAndTags tag, const char* name_prefix, - String* name)) { + Name* name)) { return profiles_->NewCodeEntry(tag, name_prefix, name); } @@ -446,655 +454,6 @@ class ProfileGenerator { }; -class HeapEntry; -class HeapSnapshot; - -class HeapGraphEdge BASE_EMBEDDED { - public: - enum Type { - kContextVariable = v8::HeapGraphEdge::kContextVariable, - kElement = v8::HeapGraphEdge::kElement, - kProperty = v8::HeapGraphEdge::kProperty, - kInternal = v8::HeapGraphEdge::kInternal, - kHidden = v8::HeapGraphEdge::kHidden, - kShortcut = v8::HeapGraphEdge::kShortcut, - kWeak = 
v8::HeapGraphEdge::kWeak - }; - - HeapGraphEdge() { } - HeapGraphEdge(Type type, const char* name, int from, int to); - HeapGraphEdge(Type type, int index, int from, int to); - void ReplaceToIndexWithEntry(HeapSnapshot* snapshot); - - Type type() const { return static_cast<Type>(type_); } - int index() const { - ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak); - return index_; - } - const char* name() const { - ASSERT(type_ == kContextVariable - || type_ == kProperty - || type_ == kInternal - || type_ == kShortcut); - return name_; - } - INLINE(HeapEntry* from() const); - HeapEntry* to() const { return to_entry_; } - - private: - INLINE(HeapSnapshot* snapshot() const); - - unsigned type_ : 3; - int from_index_ : 29; - union { - // During entries population |to_index_| is used for storing the index, - // afterwards it is replaced with a pointer to the entry. - int to_index_; - HeapEntry* to_entry_; - }; - union { - int index_; - const char* name_; - }; -}; - - -// HeapEntry instances represent an entity from the heap (or a special -// virtual node, e.g. root). -class HeapEntry BASE_EMBEDDED { - public: - enum Type { - kHidden = v8::HeapGraphNode::kHidden, - kArray = v8::HeapGraphNode::kArray, - kString = v8::HeapGraphNode::kString, - kObject = v8::HeapGraphNode::kObject, - kCode = v8::HeapGraphNode::kCode, - kClosure = v8::HeapGraphNode::kClosure, - kRegExp = v8::HeapGraphNode::kRegExp, - kHeapNumber = v8::HeapGraphNode::kHeapNumber, - kNative = v8::HeapGraphNode::kNative, - kSynthetic = v8::HeapGraphNode::kSynthetic - }; - static const int kNoEntry; - - HeapEntry() { } - HeapEntry(HeapSnapshot* snapshot, - Type type, - const char* name, - SnapshotObjectId id, - int self_size); - - HeapSnapshot* snapshot() { return snapshot_; } - Type type() { return static_cast<Type>(type_); } - const char* name() { return name_; } - void set_name(const char* name) { name_ = name; } - inline SnapshotObjectId id() { return id_; } - int self_size() { return self_size_; } - INLINE(int index() const); - int children_count() const { return children_count_; } - INLINE(int set_children_index(int index)); - void add_child(HeapGraphEdge* edge) { - children_arr()[children_count_++] = edge; - } - Vector<HeapGraphEdge*> children() { - return Vector<HeapGraphEdge*>(children_arr(), children_count_); } - - void SetIndexedReference( - HeapGraphEdge::Type type, int index, HeapEntry* entry); - void SetNamedReference( - HeapGraphEdge::Type type, const char* name, HeapEntry* entry); - - void Print( - const char* prefix, const char* edge_name, int max_depth, int indent); - - Handle<HeapObject> GetHeapObject(); - - private: - INLINE(HeapGraphEdge** children_arr()); - const char* TypeAsString(); - - unsigned type_: 4; - int children_count_: 28; - int children_index_; - int self_size_; - SnapshotObjectId id_; - HeapSnapshot* snapshot_; - const char* name_; -}; - - -class HeapSnapshotsCollection; - -// HeapSnapshot represents a single heap snapshot. It is stored in -// HeapSnapshotsCollection, which is also a factory for -// HeapSnapshots. All HeapSnapshots share strings copied from JS heap -// to be able to return them even if they were collected. -// HeapSnapshotGenerator fills in a HeapSnapshot. 
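// [Editorial aside -- not part of the diff.] HeapGraphEdge above stores its
// target in a union: an entry *index* while the snapshot is still being
// populated (entries may still be appended and moved), rewritten into a
// direct pointer by ReplaceToIndexWithEntry() once the entry list is final.
// A minimal standalone sketch of that two-phase trick, with mock types:
#include <vector>

struct Entry { int id; };

struct Edge {
  union {
    int to_index;     // valid during population
    Entry* to_entry;  // valid after the fix-up pass below
  };
};

static void ReplaceIndicesWithPointers(std::vector<Edge>* edges,
                                       std::vector<Entry>* entries) {
  for (Edge& e : *edges) {
    e.to_entry = &(*entries)[e.to_index];  // reuse the same storage
  }
}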
-class HeapSnapshot { - public: - enum Type { - kFull = v8::HeapSnapshot::kFull - }; - - HeapSnapshot(HeapSnapshotsCollection* collection, - Type type, - const char* title, - unsigned uid); - void Delete(); - - HeapSnapshotsCollection* collection() { return collection_; } - Type type() { return type_; } - const char* title() { return title_; } - unsigned uid() { return uid_; } - size_t RawSnapshotSize() const; - HeapEntry* root() { return &entries_[root_index_]; } - HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; } - HeapEntry* natives_root() { return &entries_[natives_root_index_]; } - HeapEntry* gc_subroot(int index) { - return &entries_[gc_subroot_indexes_[index]]; - } - List<HeapEntry>& entries() { return entries_; } - List<HeapGraphEdge>& edges() { return edges_; } - List<HeapGraphEdge*>& children() { return children_; } - void RememberLastJSObjectId(); - SnapshotObjectId max_snapshot_js_object_id() const { - return max_snapshot_js_object_id_; - } - - HeapEntry* AddEntry(HeapEntry::Type type, - const char* name, - SnapshotObjectId id, - int size); - HeapEntry* AddRootEntry(); - HeapEntry* AddGcRootsEntry(); - HeapEntry* AddGcSubrootEntry(int tag); - HeapEntry* AddNativesRootEntry(); - HeapEntry* GetEntryById(SnapshotObjectId id); - List<HeapEntry*>* GetSortedEntriesList(); - void FillChildren(); - - void Print(int max_depth); - void PrintEntriesSize(); - - private: - HeapSnapshotsCollection* collection_; - Type type_; - const char* title_; - unsigned uid_; - int root_index_; - int gc_roots_index_; - int natives_root_index_; - int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags]; - List<HeapEntry> entries_; - List<HeapGraphEdge> edges_; - List<HeapGraphEdge*> children_; - List<HeapEntry*> sorted_entries_; - SnapshotObjectId max_snapshot_js_object_id_; - - friend class HeapSnapshotTester; - - DISALLOW_COPY_AND_ASSIGN(HeapSnapshot); -}; - - -class HeapObjectsMap { - public: - HeapObjectsMap(); - - void SnapshotGenerationFinished(); - SnapshotObjectId FindEntry(Address addr); - SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size); - void MoveObject(Address from, Address to); - SnapshotObjectId last_assigned_id() const { - return next_id_ - kObjectIdStep; - } - - void StopHeapObjectsTracking(); - SnapshotObjectId PushHeapObjectsStats(OutputStream* stream); - size_t GetUsedMemorySize() const; - - static SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info); - static inline SnapshotObjectId GetNthGcSubrootId(int delta); - - static const int kObjectIdStep = 2; - static const SnapshotObjectId kInternalRootObjectId; - static const SnapshotObjectId kGcRootsObjectId; - static const SnapshotObjectId kNativesRootObjectId; - static const SnapshotObjectId kGcRootsFirstSubrootId; - static const SnapshotObjectId kFirstAvailableObjectId; - - private: - struct EntryInfo { - EntryInfo(SnapshotObjectId id, Address addr, unsigned int size) - : id(id), addr(addr), size(size), accessed(true) { } - EntryInfo(SnapshotObjectId id, Address addr, unsigned int size, bool accessed) - : id(id), addr(addr), size(size), accessed(accessed) { } - SnapshotObjectId id; - Address addr; - unsigned int size; - bool accessed; - }; - struct TimeInterval { - explicit TimeInterval(SnapshotObjectId id) : id(id), size(0), count(0) { } - SnapshotObjectId id; - uint32_t size; - uint32_t count; - }; - - void UpdateHeapObjectsMap(); - void RemoveDeadEntries(); - - static bool AddressesMatch(void* key1, void* key2) { - return key1 == key2; - } - - static uint32_t AddressHash(Address addr) { - 
return ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(addr)), - v8::internal::kZeroHashSeed); - } - - SnapshotObjectId next_id_; - HashMap entries_map_; - List<EntryInfo> entries_; - List<TimeInterval> time_intervals_; - - DISALLOW_COPY_AND_ASSIGN(HeapObjectsMap); -}; - - -class HeapSnapshotsCollection { - public: - HeapSnapshotsCollection(); - ~HeapSnapshotsCollection(); - - bool is_tracking_objects() { return is_tracking_objects_; } - SnapshotObjectId PushHeapObjectsStats(OutputStream* stream) { - return ids_.PushHeapObjectsStats(stream); - } - void StartHeapObjectsTracking() { is_tracking_objects_ = true; } - void StopHeapObjectsTracking() { ids_.StopHeapObjectsTracking(); } - - HeapSnapshot* NewSnapshot( - HeapSnapshot::Type type, const char* name, unsigned uid); - void SnapshotGenerationFinished(HeapSnapshot* snapshot); - List<HeapSnapshot*>* snapshots() { return &snapshots_; } - HeapSnapshot* GetSnapshot(unsigned uid); - void RemoveSnapshot(HeapSnapshot* snapshot); - - StringsStorage* names() { return &names_; } - TokenEnumerator* token_enumerator() { return token_enumerator_; } - - SnapshotObjectId FindObjectId(Address object_addr) { - return ids_.FindEntry(object_addr); - } - SnapshotObjectId GetObjectId(Address object_addr, int object_size) { - return ids_.FindOrAddEntry(object_addr, object_size); - } - Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id); - void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); } - SnapshotObjectId last_assigned_id() const { - return ids_.last_assigned_id(); - } - size_t GetUsedMemorySize() const; - - private: - INLINE(static bool HeapSnapshotsMatch(void* key1, void* key2)) { - return key1 == key2; - } - - bool is_tracking_objects_; // Whether tracking object moves is needed. - List<HeapSnapshot*> snapshots_; - // Mapping from snapshots' uids to HeapSnapshot* pointers. - HashMap snapshots_uids_; - StringsStorage names_; - TokenEnumerator* token_enumerator_; - // Mapping from HeapObject addresses to objects' uids. - HeapObjectsMap ids_; - - DISALLOW_COPY_AND_ASSIGN(HeapSnapshotsCollection); -}; - - -// A typedef for referencing anything that can be snapshotted living -// in any kind of heap memory. -typedef void* HeapThing; - - -// An interface that creates HeapEntries by HeapThings. -class HeapEntriesAllocator { - public: - virtual ~HeapEntriesAllocator() { } - virtual HeapEntry* AllocateEntry(HeapThing ptr) = 0; -}; - - -// The HeapEntriesMap instance is used to track a mapping between -// real heap objects and their representations in heap snapshots. -class HeapEntriesMap { - public: - HeapEntriesMap(); - - int Map(HeapThing thing); - void Pair(HeapThing thing, int entry); - - private: - static uint32_t Hash(HeapThing thing) { - return ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(thing)), - v8::internal::kZeroHashSeed); - } - static bool HeapThingsMatch(HeapThing key1, HeapThing key2) { - return key1 == key2; - } - - HashMap entries_; - - friend class HeapObjectsSet; - - DISALLOW_COPY_AND_ASSIGN(HeapEntriesMap); -}; - - -class HeapObjectsSet { - public: - HeapObjectsSet(); - void Clear(); - bool Contains(Object* object); - void Insert(Object* obj); - const char* GetTag(Object* obj); - void SetTag(Object* obj, const char* tag); - bool is_empty() const { return entries_.occupancy() == 0; } - - private: - HashMap entries_; - - DISALLOW_COPY_AND_ASSIGN(HeapObjectsSet); -}; - - -// An interface used to populate a snapshot with nodes and edges. 
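// [Editorial aside -- not part of the diff.] A standard-library sketch of the
// service HeapObjectsMap above provides: stable snapshot ids keyed by object
// address, handed out in steps of kObjectIdStep, with MoveObject() carrying
// an id along when the GC relocates the object. The types and starting id
// below are mock choices, not the real constants.
#include <cstdint>
#include <unordered_map>

class ObjectIdMap {
 public:
  typedef uint32_t SnapshotObjectId;
  static const SnapshotObjectId kObjectIdStep = 2;

  SnapshotObjectId FindOrAddEntry(const void* addr) {
    auto it = ids_.find(addr);
    if (it != ids_.end()) return it->second;
    SnapshotObjectId id = next_id_;
    next_id_ += kObjectIdStep;
    ids_[addr] = id;
    return id;
  }

  void MoveObject(const void* from, const void* to) {
    auto it = ids_.find(from);
    if (it == ids_.end()) return;
    SnapshotObjectId id = it->second;
    ids_.erase(it);
    ids_[to] = id;  // the entry keeps its id across the move
  }

 private:
  SnapshotObjectId next_id_ = 2;  // mock first id
  std::unordered_map<const void*, SnapshotObjectId> ids_;
};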
-class SnapshotFillerInterface { - public: - virtual ~SnapshotFillerInterface() { } - virtual HeapEntry* AddEntry(HeapThing ptr, - HeapEntriesAllocator* allocator) = 0; - virtual HeapEntry* FindEntry(HeapThing ptr) = 0; - virtual HeapEntry* FindOrAddEntry(HeapThing ptr, - HeapEntriesAllocator* allocator) = 0; - virtual void SetIndexedReference(HeapGraphEdge::Type type, - int parent_entry, - int index, - HeapEntry* child_entry) = 0; - virtual void SetIndexedAutoIndexReference(HeapGraphEdge::Type type, - int parent_entry, - HeapEntry* child_entry) = 0; - virtual void SetNamedReference(HeapGraphEdge::Type type, - int parent_entry, - const char* reference_name, - HeapEntry* child_entry) = 0; - virtual void SetNamedAutoIndexReference(HeapGraphEdge::Type type, - int parent_entry, - HeapEntry* child_entry) = 0; -}; - - -class SnapshottingProgressReportingInterface { - public: - virtual ~SnapshottingProgressReportingInterface() { } - virtual void ProgressStep() = 0; - virtual bool ProgressReport(bool force) = 0; -}; - - -// An implementation of V8 heap graph extractor. -class V8HeapExplorer : public HeapEntriesAllocator { - public: - V8HeapExplorer(HeapSnapshot* snapshot, - SnapshottingProgressReportingInterface* progress); - virtual ~V8HeapExplorer(); - virtual HeapEntry* AllocateEntry(HeapThing ptr); - void AddRootEntries(SnapshotFillerInterface* filler); - int EstimateObjectsCount(HeapIterator* iterator); - bool IterateAndExtractReferences(SnapshotFillerInterface* filler); - void TagGlobalObjects(); - - static String* GetConstructorName(JSObject* object); - - static HeapObject* const kInternalRootObject; - - private: - HeapEntry* AddEntry(HeapObject* object); - HeapEntry* AddEntry(HeapObject* object, - HeapEntry::Type type, - const char* name); - const char* GetSystemEntryName(HeapObject* object); - - void ExtractReferences(HeapObject* obj); - void ExtractJSGlobalProxyReferences(JSGlobalProxy* proxy); - void ExtractJSObjectReferences(int entry, JSObject* js_obj); - void ExtractStringReferences(int entry, String* obj); - void ExtractContextReferences(int entry, Context* context); - void ExtractMapReferences(int entry, Map* map); - void ExtractSharedFunctionInfoReferences(int entry, - SharedFunctionInfo* shared); - void ExtractScriptReferences(int entry, Script* script); - void ExtractCodeCacheReferences(int entry, CodeCache* code_cache); - void ExtractCodeReferences(int entry, Code* code); - void ExtractJSGlobalPropertyCellReferences(int entry, - JSGlobalPropertyCell* cell); - void ExtractClosureReferences(JSObject* js_obj, int entry); - void ExtractPropertyReferences(JSObject* js_obj, int entry); - void ExtractElementReferences(JSObject* js_obj, int entry); - void ExtractInternalReferences(JSObject* js_obj, int entry); - bool IsEssentialObject(Object* object); - void SetClosureReference(HeapObject* parent_obj, - int parent, - String* reference_name, - Object* child); - void SetNativeBindReference(HeapObject* parent_obj, - int parent, - const char* reference_name, - Object* child); - void SetElementReference(HeapObject* parent_obj, - int parent, - int index, - Object* child); - void SetInternalReference(HeapObject* parent_obj, - int parent, - const char* reference_name, - Object* child, - int field_offset = -1); - void SetInternalReference(HeapObject* parent_obj, - int parent, - int index, - Object* child, - int field_offset = -1); - void SetHiddenReference(HeapObject* parent_obj, - int parent, - int index, - Object* child); - void SetWeakReference(HeapObject* parent_obj, - int parent, - int 
index, - Object* child_obj, - int field_offset); - void SetPropertyReference(HeapObject* parent_obj, - int parent, - String* reference_name, - Object* child, - const char* name_format_string = NULL, - int field_offset = -1); - void SetUserGlobalReference(Object* user_global); - void SetRootGcRootsReference(); - void SetGcRootsReference(VisitorSynchronization::SyncTag tag); - void SetGcSubrootReference( - VisitorSynchronization::SyncTag tag, bool is_weak, Object* child); - const char* GetStrongGcSubrootName(Object* object); - void TagObject(Object* obj, const char* tag); - - HeapEntry* GetEntry(Object* obj); - - static inline HeapObject* GetNthGcSubrootObject(int delta); - static inline int GetGcSubrootOrder(HeapObject* subroot); - - Heap* heap_; - HeapSnapshot* snapshot_; - HeapSnapshotsCollection* collection_; - SnapshottingProgressReportingInterface* progress_; - SnapshotFillerInterface* filler_; - HeapObjectsSet objects_tags_; - HeapObjectsSet strong_gc_subroot_names_; - - static HeapObject* const kGcRootsObject; - static HeapObject* const kFirstGcSubrootObject; - static HeapObject* const kLastGcSubrootObject; - - friend class IndexedReferencesExtractor; - friend class GcSubrootsEnumerator; - friend class RootsReferencesExtractor; - - DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer); -}; - - -class NativeGroupRetainedObjectInfo; - - -// An implementation of retained native objects extractor. -class NativeObjectsExplorer { - public: - NativeObjectsExplorer(HeapSnapshot* snapshot, - SnapshottingProgressReportingInterface* progress); - virtual ~NativeObjectsExplorer(); - void AddRootEntries(SnapshotFillerInterface* filler); - int EstimateObjectsCount(); - bool IterateAndExtractReferences(SnapshotFillerInterface* filler); - - private: - void FillRetainedObjects(); - void FillImplicitReferences(); - List<HeapObject*>* GetListMaybeDisposeInfo(v8::RetainedObjectInfo* info); - void SetNativeRootReference(v8::RetainedObjectInfo* info); - void SetRootNativeRootsReference(); - void SetWrapperNativeReferences(HeapObject* wrapper, - v8::RetainedObjectInfo* info); - void VisitSubtreeWrapper(Object** p, uint16_t class_id); - - static uint32_t InfoHash(v8::RetainedObjectInfo* info) { - return ComputeIntegerHash(static_cast<uint32_t>(info->GetHash()), - v8::internal::kZeroHashSeed); - } - static bool RetainedInfosMatch(void* key1, void* key2) { - return key1 == key2 || - (reinterpret_cast<v8::RetainedObjectInfo*>(key1))->IsEquivalent( - reinterpret_cast<v8::RetainedObjectInfo*>(key2)); - } - INLINE(static bool StringsMatch(void* key1, void* key2)) { - return strcmp(reinterpret_cast<char*>(key1), - reinterpret_cast<char*>(key2)) == 0; - } - - NativeGroupRetainedObjectInfo* FindOrAddGroupInfo(const char* label); - - HeapSnapshot* snapshot_; - HeapSnapshotsCollection* collection_; - SnapshottingProgressReportingInterface* progress_; - bool embedder_queried_; - HeapObjectsSet in_groups_; - // RetainedObjectInfo* -> List<HeapObject*>* - HashMap objects_by_info_; - HashMap native_groups_; - HeapEntriesAllocator* synthetic_entries_allocator_; - HeapEntriesAllocator* native_entries_allocator_; - // Used during references extraction. 
- SnapshotFillerInterface* filler_; - - static HeapThing const kNativesRootObject; - - friend class GlobalHandlesExtractor; - - DISALLOW_COPY_AND_ASSIGN(NativeObjectsExplorer); -}; - - -class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface { - public: - HeapSnapshotGenerator(HeapSnapshot* snapshot, - v8::ActivityControl* control); - bool GenerateSnapshot(); - - private: - bool FillReferences(); - void ProgressStep(); - bool ProgressReport(bool force = false); - void SetProgressTotal(int iterations_count); - - HeapSnapshot* snapshot_; - v8::ActivityControl* control_; - V8HeapExplorer v8_heap_explorer_; - NativeObjectsExplorer dom_explorer_; - // Mapping from HeapThing pointers to HeapEntry* pointers. - HeapEntriesMap entries_; - // Used during snapshot generation. - int progress_counter_; - int progress_total_; - - DISALLOW_COPY_AND_ASSIGN(HeapSnapshotGenerator); -}; - -class OutputStreamWriter; - -class HeapSnapshotJSONSerializer { - public: - explicit HeapSnapshotJSONSerializer(HeapSnapshot* snapshot) - : snapshot_(snapshot), - strings_(ObjectsMatch), - next_node_id_(1), - next_string_id_(1), - writer_(NULL) { - } - void Serialize(v8::OutputStream* stream); - - private: - INLINE(static bool ObjectsMatch(void* key1, void* key2)) { - return key1 == key2; - } - - INLINE(static uint32_t ObjectHash(const void* key)) { - return ComputeIntegerHash( - static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)), - v8::internal::kZeroHashSeed); - } - - HeapSnapshot* CreateFakeSnapshot(); - int GetStringId(const char* s); - int entry_index(HeapEntry* e) { return e->index() * kNodeFieldsCount; } - void SerializeEdge(HeapGraphEdge* edge, bool first_edge); - void SerializeEdges(); - void SerializeImpl(); - void SerializeNode(HeapEntry* entry); - void SerializeNodes(); - void SerializeSnapshot(); - void SerializeString(const unsigned char* s); - void SerializeStrings(); - void SortHashMap(HashMap* map, List<HashMap::Entry*>* sorted_entries); - - static const int kEdgeFieldsCount; - static const int kNodeFieldsCount; - - HeapSnapshot* snapshot_; - HashMap strings_; - int next_node_id_; - int next_string_id_; - OutputStreamWriter* writer_; - - friend class HeapSnapshotJSONSerializerEnumerator; - friend class HeapSnapshotJSONSerializerIterator; - - DISALLOW_COPY_AND_ASSIGN(HeapSnapshotJSONSerializer); -}; - } } // namespace v8::internal #endif // V8_PROFILE_GENERATOR_H_ diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h index 64e320514e..2aa6dcfa95 100644 --- a/deps/v8/src/property-details.h +++ b/deps/v8/src/property-details.h @@ -38,6 +38,12 @@ enum PropertyAttributes { READ_ONLY = v8::ReadOnly, DONT_ENUM = v8::DontEnum, DONT_DELETE = v8::DontDelete, + + SEALED = DONT_ENUM | DONT_DELETE, + FROZEN = SEALED | READ_ONLY, + + SYMBOLIC = 8, // Used to filter symbol names + DONT_SHOW = DONT_ENUM | SYMBOLIC, ABSENT = 16 // Used in runtime to indicate a property is absent. // ABSENT can never be stored in or returned from a descriptor's attributes // bitfield. 
It is only used as a return value meaning the attributes of diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc index 05342eea95..cbf2fc859d 100644 --- a/deps/v8/src/property.cc +++ b/deps/v8/src/property.cc @@ -63,7 +63,7 @@ void LookupResult::Print(FILE* out) { break; case FIELD: PrintF(out, " -type = field\n"); - PrintF(out, " -index = %d", GetFieldIndex()); + PrintF(out, " -index = %d", GetFieldIndex().field_index()); PrintF(out, "\n"); break; case CALLBACKS: diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h index 9eb4194b42..546967ac44 100644 --- a/deps/v8/src/property.h +++ b/deps/v8/src/property.h @@ -48,15 +48,15 @@ class Descriptor BASE_EMBEDDED { return Smi::cast(value)->value(); } - MUST_USE_RESULT MaybeObject* KeyToSymbol() { - if (!StringShape(key_).IsSymbol()) { - MaybeObject* maybe_result = HEAP->LookupSymbol(key_); + MUST_USE_RESULT MaybeObject* KeyToUniqueName() { + if (!key_->IsUniqueName()) { + MaybeObject* maybe_result = HEAP->InternalizeString(String::cast(key_)); if (!maybe_result->To(&key_)) return maybe_result; } return key_; } - String* GetKey() { return key_; } + Name* GetKey() { return key_; } Object* GetValue() { return value_; } PropertyDetails GetDetails() { return details_; } @@ -71,25 +71,25 @@ class Descriptor BASE_EMBEDDED { void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); } private: - String* key_; + Name* key_; Object* value_; PropertyDetails details_; protected: Descriptor() : details_(Smi::FromInt(0)) {} - void Init(String* key, Object* value, PropertyDetails details) { + void Init(Name* key, Object* value, PropertyDetails details) { key_ = key; value_ = value; details_ = details; } - Descriptor(String* key, Object* value, PropertyDetails details) + Descriptor(Name* key, Object* value, PropertyDetails details) : key_(key), value_(value), details_(details) { } - Descriptor(String* key, + Descriptor(Name* key, Object* value, PropertyAttributes attributes, PropertyType type, @@ -104,7 +104,7 @@ class Descriptor BASE_EMBEDDED { class FieldDescriptor: public Descriptor { public: - FieldDescriptor(String* key, + FieldDescriptor(Name* key, int field_index, PropertyAttributes attributes, int index = 0) @@ -114,7 +114,7 @@ class FieldDescriptor: public Descriptor { class ConstantFunctionDescriptor: public Descriptor { public: - ConstantFunctionDescriptor(String* key, + ConstantFunctionDescriptor(Name* key, JSFunction* function, PropertyAttributes attributes, int index) @@ -124,7 +124,7 @@ class ConstantFunctionDescriptor: public Descriptor { class CallbacksDescriptor: public Descriptor { public: - CallbacksDescriptor(String* key, + CallbacksDescriptor(Name* key, Object* foreign, PropertyAttributes attributes, int index = 0) @@ -132,6 +132,56 @@ class CallbacksDescriptor: public Descriptor { }; +// Holds a property index value distinguishing if it is a field index or an +// index inside the object header. 
+class PropertyIndex { + public: + static PropertyIndex NewFieldIndex(int index) { + return PropertyIndex(index, false); + } + static PropertyIndex NewHeaderIndex(int index) { + return PropertyIndex(index, true); + } + + bool is_field_index() { return (index_ & kHeaderIndexBit) == 0; } + bool is_header_index() { return (index_ & kHeaderIndexBit) != 0; } + + int field_index() { + ASSERT(is_field_index()); + return value(); + } + int header_index() { + ASSERT(is_header_index()); + return value(); + } + + bool is_inobject(Handle<JSObject> holder) { + if (is_header_index()) return true; + return field_index() < holder->map()->inobject_properties(); + } + + int translate(Handle<JSObject> holder) { + if (is_header_index()) return header_index(); + int index = field_index() - holder->map()->inobject_properties(); + if (index >= 0) return index; + return index + holder->map()->instance_size() / kPointerSize; + } + + private: + static const int kHeaderIndexBit = 1 << 31; + static const int kIndexMask = ~kHeaderIndexBit; + + int value() { return index_ & kIndexMask; } + + PropertyIndex(int index, bool is_header_based) + : index_(index | (is_header_based ? kHeaderIndexBit : 0)) { + ASSERT(index <= kIndexMask); + } + + int index_; +}; + + class LookupResult BASE_EMBEDDED { public: explicit LookupResult(Isolate* isolate) @@ -145,10 +195,12 @@ class LookupResult BASE_EMBEDDED { } ~LookupResult() { - ASSERT(isolate_->top_lookup_result() == this); - isolate_->SetTopLookupResult(next_); + ASSERT(isolate()->top_lookup_result() == this); + isolate()->SetTopLookupResult(next_); } + Isolate* isolate() const { return isolate_; } + void DescriptorResult(JSObject* holder, PropertyDetails details, int number) { lookup_type_ = DESCRIPTOR_TYPE; holder_ = holder; @@ -163,16 +215,6 @@ class LookupResult BASE_EMBEDDED { number_ = number; } - void ConstantResult(JSObject* holder) { - lookup_type_ = CONSTANT_TYPE; - holder_ = holder; - details_ = - PropertyDetails(static_cast<PropertyAttributes>(DONT_ENUM | - DONT_DELETE), - CALLBACKS); - number_ = -1; - } - void DictionaryResult(JSObject* holder, int entry) { lookup_type_ = DICTIONARY_TYPE; holder_ = holder; @@ -272,13 +314,33 @@ class LookupResult BASE_EMBEDDED { return IsFound() && !IsTransition(); } + bool IsDataProperty() { + switch (type()) { + case FIELD: + case NORMAL: + case CONSTANT_FUNCTION: + return true; + case CALLBACKS: { + Object* callback = GetCallbackObject(); + return callback->IsAccessorInfo() || callback->IsForeign(); + } + case HANDLER: + case INTERCEPTOR: + case TRANSITION: + case NONEXISTENT: + return false; + } + UNREACHABLE(); + return false; + } + bool IsCacheable() { return cacheable_; } void DisallowCaching() { cacheable_ = false; } Object* GetLazyValue() { switch (type()) { case FIELD: - return holder()->FastPropertyAt(GetFieldIndex()); + return holder()->FastPropertyAt(GetFieldIndex().field_index()); case NORMAL: { Object* value; value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry()); @@ -289,9 +351,15 @@ class LookupResult BASE_EMBEDDED { } case CONSTANT_FUNCTION: return GetConstantFunction(); - default: - return Smi::FromInt(0); + case CALLBACKS: + case HANDLER: + case INTERCEPTOR: + case TRANSITION: + case NONEXISTENT: + return isolate()->heap()->the_hole_value(); } + UNREACHABLE(); + return NULL; } Map* GetTransitionTarget() { @@ -334,10 +402,11 @@ class LookupResult BASE_EMBEDDED { return number_; } - int GetFieldIndex() { + PropertyIndex GetFieldIndex() { ASSERT(lookup_type_ == DESCRIPTOR_TYPE); ASSERT(IsField()); - 
return Descriptor::IndexFromValue(GetValue()); + return PropertyIndex::NewFieldIndex( + Descriptor::IndexFromValue(GetValue())); } int GetLocalFieldIndexFromMap(Map* map) { @@ -362,10 +431,7 @@ class LookupResult BASE_EMBEDDED { } Object* GetCallbackObject() { - if (lookup_type_ == CONSTANT_TYPE) { - return HEAP->prototype_accessors(); - } - ASSERT(!IsTransition()); + ASSERT(type() == CALLBACKS && !IsTransition()); return GetValue(); } @@ -401,8 +467,7 @@ class LookupResult BASE_EMBEDDED { TRANSITION_TYPE, DICTIONARY_TYPE, HANDLER_TYPE, - INTERCEPTOR_TYPE, - CONSTANT_TYPE + INTERCEPTOR_TYPE } lookup_type_; JSReceiver* holder_; diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js index 4e86c8892a..b16de016a7 100644 --- a/deps/v8/src/proxy.js +++ b/deps/v8/src/proxy.js @@ -31,7 +31,7 @@ global.Proxy = new $Object(); var $Proxy = global.Proxy -$Proxy.create = function(handler, proto) { +function ProxyCreate(handler, proto) { if (!IS_SPEC_OBJECT(handler)) throw MakeTypeError("handler_non_object", ["create"]) if (IS_UNDEFINED(proto)) @@ -41,7 +41,7 @@ $Proxy.create = function(handler, proto) { return %CreateJSProxy(handler, proto) } -$Proxy.createFunction = function(handler, callTrap, constructTrap) { +function ProxyCreateFunction(handler, callTrap, constructTrap) { if (!IS_SPEC_OBJECT(handler)) throw MakeTypeError("handler_non_object", ["create"]) if (!IS_SPEC_FUNCTION(callTrap)) @@ -62,6 +62,11 @@ $Proxy.createFunction = function(handler, callTrap, constructTrap) { handler, callTrap, constructTrap, $Function.prototype) } +%CheckIsBootstrapping() +InstallFunctions($Proxy, DONT_ENUM, [ + "create", ProxyCreate, + "createFunction", ProxyCreateFunction +]) //////////////////////////////////////////////////////////////////////////////// @@ -72,8 +77,7 @@ function DerivedConstructTrap(callTrap) { return function() { var proto = this.prototype if (!IS_SPEC_OBJECT(proto)) proto = $Object.prototype - var obj = new $Object() - obj.__proto__ = proto + var obj = { __proto__: proto }; var result = %Apply(callTrap, obj, arguments, 0, %_ArgumentsLength()); return IS_SPEC_OBJECT(result) ? 
result : obj } @@ -158,6 +162,7 @@ function DerivedKeysTrap() { var enumerableNames = [] for (var i = 0, count = 0; i < names.length; ++i) { var name = names[i] + if (IS_SYMBOL(name)) continue var desc = this.getOwnPropertyDescriptor(TO_STRING_INLINE(name)) if (!IS_UNDEFINED(desc) && desc.enumerable) { enumerableNames[count++] = names[i] @@ -171,6 +176,7 @@ function DerivedEnumerateTrap() { var enumerableNames = [] for (var i = 0, count = 0; i < names.length; ++i) { var name = names[i] + if (IS_SYMBOL(name)) continue var desc = this.getPropertyDescriptor(TO_STRING_INLINE(name)) if (!IS_UNDEFINED(desc) && desc.enumerable) { enumerableNames[count++] = names[i] @@ -184,6 +190,6 @@ function ProxyEnumerate(proxy) { if (IS_UNDEFINED(handler.enumerate)) { return %Apply(DerivedEnumerateTrap, handler, [], 0, 0) } else { - return ToStringArray(handler.enumerate(), "enumerate") + return ToNameArray(handler.enumerate(), "enumerate", false) } } diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc index 82ba34d5c8..3ebf5a8e00 100644 --- a/deps/v8/src/regexp-macro-assembler.cc +++ b/deps/v8/src/regexp-macro-assembler.cc @@ -77,14 +77,14 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition( ASSERT(subject->IsExternalString() || subject->IsSeqString()); ASSERT(start_index >= 0); ASSERT(start_index <= subject->length()); - if (subject->IsAsciiRepresentation()) { + if (subject->IsOneByteRepresentation()) { const byte* address; if (StringShape(subject).IsExternal()) { - const char* data = ExternalAsciiString::cast(subject)->GetChars(); + const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars(); address = reinterpret_cast<const byte*>(data); } else { - ASSERT(subject->IsSeqAsciiString()); - char* data = SeqAsciiString::cast(subject)->GetChars(); + ASSERT(subject->IsSeqOneByteString()); + const uint8_t* data = SeqOneByteString::cast(subject)->GetChars(); address = reinterpret_cast<const byte*>(data); } return address + start_index; @@ -133,7 +133,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match( slice_offset = slice->offset(); } // Ensure that an underlying string has the same ASCII-ness. - bool is_ascii = subject_ptr->IsAsciiRepresentation(); + bool is_ascii = subject_ptr->IsOneByteRepresentation(); ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString()); // String is now either Sequential or External int char_size_shift = is_ascii ? 
0 : 1; @@ -210,6 +210,26 @@ const byte NativeRegExpMacroAssembler::word_character_map[] = { 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'h' - 'o' 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, 0xffu, // 'p' - 'w' 0xffu, 0xffu, 0xffu, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, // 'x' - 'z' + // Latin-1 range + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, + 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, 0x00u, }; diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h index bcf36735c4..211ab6ba39 100644 --- a/deps/v8/src/regexp-macro-assembler.h +++ b/deps/v8/src/regexp-macro-assembler.h @@ -244,10 +244,10 @@ class NativeRegExpMacroAssembler: public RegExpMacroAssembler { static const byte* StringCharacterPosition(String* subject, int start_index); - // Byte map of ASCII characters with a 0xff if the character is a word + // Byte map of one byte characters with a 0xff if the character is a word // character (digit, letter or underscore) and 0x00 otherwise. // Used by generated RegExp code. 
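// [Editorial aside -- not part of the diff.] The word_character_map declared
// right below is widened from 128 to 256 entries so that any Latin-1
// (one-byte) subject character can be classified with a single indexed load.
// A standalone equivalent built with plain C++ (illustrative values only):
#include <cassert>

static unsigned char word_map[256];

static void InitWordMap() {
  for (int c = 0; c < 256; ++c) {
    bool is_word = (c == '_') ||
                   (c >= '0' && c <= '9') ||
                   (c >= 'A' && c <= 'Z') ||
                   (c >= 'a' && c <= 'z');
    word_map[c] = is_word ? 0xFF : 0x00;  // the Latin-1 range stays 0x00
  }
}

int main() {
  InitWordMap();
  assert(word_map['a'] && word_map['_'] && word_map['7']);
  assert(!word_map[0xE9]);  // Latin-1 e-acute is not a \w character
}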
- static const byte word_character_map[128]; + static const byte word_character_map[256]; static Address word_character_map_address() { return const_cast<Address>(&word_character_map[0]); diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc index ff9547f3a7..325a1496c9 100644 --- a/deps/v8/src/regexp-stack.cc +++ b/deps/v8/src/regexp-stack.cc @@ -51,6 +51,7 @@ RegExpStack::RegExpStack() RegExpStack::~RegExpStack() { + thread_local_.Free(); } diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js index a3675f0332..2349ca7fbc 100644 --- a/deps/v8/src/regexp.js +++ b/deps/v8/src/regexp.js @@ -132,21 +132,13 @@ function BuildResultFromMatchInfo(lastMatchInfo, s) { var start = lastMatchInfo[CAPTURE0]; var end = lastMatchInfo[CAPTURE1]; var result = %_RegExpConstructResult(numResults, start, s); - if (start + 1 == end) { - result[0] = %_StringCharAt(s, start); - } else { - result[0] = %_SubString(s, start, end); - } + result[0] = %_SubString(s, start, end); var j = REGEXP_FIRST_CAPTURE + 2; for (var i = 1; i < numResults; i++) { start = lastMatchInfo[j++]; if (start != -1) { end = lastMatchInfo[j]; - if (start + 1 == end) { - result[i] = %_StringCharAt(s, start); - } else { - result[i] = %_SubString(s, start, end); - } + result[i] = %_SubString(s, start, end); } j++; } @@ -161,6 +153,7 @@ function RegExpExecNoTests(regexp, string, start) { lastMatchInfoOverride = null; return BuildResultFromMatchInfo(matchInfo, string); } + regexp.lastIndex = 0; return null; } @@ -193,7 +186,7 @@ function RegExpExec(string) { var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo); if (matchIndices === null) { - if (global) this.lastIndex = 0; + this.lastIndex = 0; return null; } @@ -256,7 +249,10 @@ function RegExpTest(string) { %_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]); // matchIndices is either null or the lastMatchInfo array. var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo); - if (matchIndices === null) return false; + if (matchIndices === null) { + this.lastIndex = 0; + return false; + } lastMatchInfoOverride = null; return true; } @@ -266,7 +262,7 @@ function TrimRegExp(regexp) { if (!%_ObjectEquals(regexp_key, regexp)) { regexp_key = regexp; regexp_val = - new $RegExp(SubString(regexp.source, 2, regexp.source.length), + new $RegExp(%_SubString(regexp.source, 2, regexp.source.length), (regexp.ignoreCase ? regexp.multiline ? "im" : "i" : regexp.multiline ? 
"m" : "")); } @@ -296,9 +292,9 @@ function RegExpGetLastMatch() { return OVERRIDE_MATCH(lastMatchInfoOverride); } var regExpSubject = LAST_SUBJECT(lastMatchInfo); - return SubString(regExpSubject, - lastMatchInfo[CAPTURE0], - lastMatchInfo[CAPTURE1]); + return %_SubString(regExpSubject, + lastMatchInfo[CAPTURE0], + lastMatchInfo[CAPTURE1]); } @@ -317,7 +313,7 @@ function RegExpGetLastParen() { var start = lastMatchInfo[CAPTURE(length - 2)]; var end = lastMatchInfo[CAPTURE(length - 1)]; if (start != -1 && end != -1) { - return SubString(regExpSubject, start, end); + return %_SubString(regExpSubject, start, end); } return ""; } @@ -334,7 +330,7 @@ function RegExpGetLeftContext() { start_index = OVERRIDE_POS(override); subject = OVERRIDE_SUBJECT(override); } - return SubString(subject, 0, start_index); + return %_SubString(subject, 0, start_index); } @@ -350,7 +346,7 @@ function RegExpGetRightContext() { var match = OVERRIDE_MATCH(override); start_index = OVERRIDE_POS(override) + match.length; } - return SubString(subject, start_index, subject.length); + return %_SubString(subject, start_index, subject.length); } @@ -370,7 +366,7 @@ function RegExpMakeCaptureGetter(n) { var matchStart = lastMatchInfo[CAPTURE(index)]; var matchEnd = lastMatchInfo[CAPTURE(index + 1)]; if (matchStart == -1 || matchEnd == -1) return ''; - return SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd); + return %_SubString(LAST_SUBJECT(lastMatchInfo), matchStart, matchEnd); }; } @@ -381,7 +377,7 @@ function RegExpMakeCaptureGetter(n) { // pairs for the match and all the captured substrings), the invariant is // that there are at least two capture indeces. The array also contains // the subject string for the last successful match. -var lastMatchInfo = new InternalArray( +var lastMatchInfo = new InternalPackedArray( 2, // REGEXP_NUMBER_OF_CAPTURES "", // Last subject. void 0, // Last input - settable with RegExpSetInput. diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc index 6541546cb6..44fe0504e5 100644 --- a/deps/v8/src/rewriter.cc +++ b/deps/v8/src/rewriter.cc @@ -43,7 +43,9 @@ class Processor: public AstVisitor { result_assigned_(false), is_set_(false), in_try_(false), - factory_(isolate(), zone) { } + factory_(Isolate::Current(), zone) { + InitializeAstVisitor(); + } virtual ~Processor() { } @@ -86,6 +88,8 @@ class Processor: public AstVisitor { #undef DEF_VISIT void VisitIterationStatement(IterationStatement* stmt); + + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); }; @@ -109,6 +113,13 @@ void Processor::VisitBlock(Block* node) { } +void Processor::VisitModuleStatement(ModuleStatement* node) { + bool set_after_body = is_set_; + Visit(node->body()); + is_set_ = is_set_ && set_after_body; +} + + void Processor::VisitExpressionStatement(ExpressionStatement* node) { // Rewrite : <x>; -> .result = <x>; if (!is_set_ && !node->expression()->IsThrow()) { @@ -242,7 +253,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) { ZoneList<Statement*>* body = function->body(); if (!body->is_empty()) { Variable* result = scope->NewTemporary( - info->isolate()->factory()->result_symbol()); + info->isolate()->factory()->result_string()); Processor processor(result, info->zone()); processor.Process(body); if (processor.HasStackOverflow()) return false; @@ -257,7 +268,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) { // coincides with the end of the with scope which is the position of '1'. 
int position = function->end_position(); VariableProxy* result_proxy = processor.factory()->NewVariableProxy( - result->name(), false, Interface::NewValue(), position); + result->name(), false, result->interface(), position); result_proxy->BindTo(result); Statement* result_statement = processor.factory()->NewReturnStatement(result_proxy); diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc index 23f41fa7d2..2606f8ab37 100644 --- a/deps/v8/src/runtime-profiler.cc +++ b/deps/v8/src/runtime-profiler.cc @@ -140,6 +140,7 @@ static void GetICCounts(JSFunction* function, void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) { ASSERT(function->IsOptimizable()); + if (FLAG_trace_opt) { PrintF("[marking "); function->PrintName(); @@ -154,6 +155,8 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) { } if (FLAG_parallel_recompilation) { + ASSERT(!function->IsMarkedForInstallingRecompiledCode()); + ASSERT(!function->IsInRecompileQueue()); function->MarkForParallelRecompilation(); } else { // The next call to the function will trigger optimization. @@ -166,7 +169,8 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) { // See AlwaysFullCompiler (in compiler.cc) comment on why we need // Debug::has_break_points(). ASSERT(function->IsMarkedForLazyRecompilation() || - function->IsMarkedForParallelRecompilation()); + function->IsMarkedForParallelRecompilation() || + function->IsOptimized()); if (!FLAG_use_osr || isolate_->DebuggerHasBreakPoints() || function->IsBuiltin()) { @@ -193,16 +197,9 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) { // Get the stack check stub code object to match against. We aren't // prepared to generate it, but we don't expect to have to. - bool found_code = false; Code* stack_check_code = NULL; - if (FLAG_count_based_interrupts) { - InterruptStub interrupt_stub; - found_code = interrupt_stub.FindCodeInCache(&stack_check_code); - } else // NOLINT - { // NOLINT - StackCheckStub check_stub; - found_code = check_stub.FindCodeInCache(&stack_check_code); - } + InterruptStub interrupt_stub; + bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_); if (found_code) { Code* replacement_code = isolate_->builtins()->builtin(Builtins::kOnStackReplacement); @@ -249,6 +246,12 @@ void RuntimeProfiler::AddSample(JSFunction* function, int weight) { void RuntimeProfiler::OptimizeNow() { HandleScope scope(isolate_); + if (FLAG_parallel_recompilation) { + // Take this as opportunity to process the optimizing compiler thread's + // output queue so that it does not unnecessarily keep objects alive. + isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions(); + } + // Run through the JavaScript frames and collect them. If we already // have a sample of the function, we mark it for optimizations // (eagerly or lazily). @@ -284,9 +287,14 @@ void RuntimeProfiler::OptimizeNow() { Code* shared_code = shared->code(); if (shared_code->kind() != Code::FUNCTION) continue; - - if (function->IsMarkedForLazyRecompilation() || - function->IsMarkedForParallelRecompilation()) { + if (function->IsInRecompileQueue()) continue; + + // Attempt OSR if we are still running unoptimized code even though the + // the function has long been marked or even already been optimized. 
+ if (!frame->is_optimized() && + (function->IsMarkedForLazyRecompilation() || + function->IsMarkedForParallelRecompilation() || + function->IsOptimized())) { int nesting = shared_code->allow_osr_at_loop_nesting_level(); if (nesting == 0) AttemptOnStackReplacement(function); int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker); @@ -376,12 +384,6 @@ void RuntimeProfiler::OptimizeNow() { } -void RuntimeProfiler::NotifyTick() { - if (FLAG_count_based_interrupts) return; - isolate_->stack_guard()->RequestRuntimeProfilerTick(); -} - - void RuntimeProfiler::SetUp() { ASSERT(has_been_globally_set_up_); if (!FLAG_watch_ic_patching) { @@ -440,11 +442,6 @@ void RuntimeProfiler::HandleWakeUp(Isolate* isolate) { } -bool RuntimeProfiler::IsSomeIsolateInJS() { - return NoBarrier_Load(&state_) > 0; -} - - bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() { Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1); ASSERT(old_state >= -1); @@ -494,12 +491,4 @@ void RuntimeProfiler::UpdateSamplesAfterCompact(ObjectVisitor* visitor) { } -bool RuntimeProfilerRateLimiter::SuspendIfNecessary() { - if (!RuntimeProfiler::IsSomeIsolateInJS()) { - return RuntimeProfiler::WaitForSomeIsolateToEnterJS(); - } - return false; -} - - } } // namespace v8::internal diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h index ab6cb378ea..62c48c7a49 100644 --- a/deps/v8/src/runtime-profiler.h +++ b/deps/v8/src/runtime-profiler.h @@ -52,8 +52,6 @@ class RuntimeProfiler { void OptimizeNow(); - void NotifyTick(); - void SetUp(); void Reset(); void TearDown(); @@ -73,17 +71,12 @@ class RuntimeProfiler { // Profiler thread interface. // - // IsSomeIsolateInJS(): - // The profiler thread can query whether some isolate is currently - // running JavaScript code. - // // WaitForSomeIsolateToEnterJS(): // When no isolates are running JavaScript code for some time the // profiler thread suspends itself by calling the wait function. The // wait function returns true after it waited or false immediately. // While the function was waiting the profiler may have been // disabled so it *must check* whether it is allowed to continue. - static bool IsSomeIsolateInJS(); static bool WaitForSomeIsolateToEnterJS(); // Stops the runtime profiler thread when profiling support is being @@ -136,24 +129,6 @@ class RuntimeProfiler { }; -// Rate limiter intended to be used in the profiler thread. -class RuntimeProfilerRateLimiter BASE_EMBEDDED { - public: - RuntimeProfilerRateLimiter() {} - - // Suspends the current thread (which must be the profiler thread) - // when not executing JavaScript to minimize CPU usage. Returns - // whether the thread was suspended (and so must check whether - // profiling is still active.) - // - // Does nothing when runtime profiling is not enabled. - bool SuspendIfNecessary(); - - private: - DISALLOW_COPY_AND_ASSIGN(RuntimeProfilerRateLimiter); -}; - - // Implementation of RuntimeProfiler inline functions. 
void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) { diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc index 19d9a3f0be..e220d6beda 100644 --- a/deps/v8/src/runtime.cc +++ b/deps/v8/src/runtime.cc @@ -45,9 +45,10 @@ #include "global-handles.h" #include "isolate-inl.h" #include "jsregexp.h" +#include "jsregexp-inl.h" #include "json-parser.h" +#include "json-stringifier.h" #include "liveedit.h" -#include "liveobjectlist-inl.h" #include "misc-intrinsics.h" #include "parser.h" #include "platform.h" @@ -57,6 +58,7 @@ #include "smart-pointers.h" #include "string-search.h" #include "stub-cache.h" +#include "uri.h" #include "v8threads.h" #include "vm-state-inl.h" @@ -285,40 +287,41 @@ static Handle<Map> ComputeObjectLiteralMap( Isolate* isolate = context->GetIsolate(); int properties_length = constant_properties->length(); int number_of_properties = properties_length / 2; - // Check that there are only symbols and array indices among keys. - int number_of_symbol_keys = 0; + // Check that there are only internal strings and array indices among keys. + int number_of_string_keys = 0; for (int p = 0; p != properties_length; p += 2) { Object* key = constant_properties->get(p); uint32_t element_index = 0; - if (key->IsSymbol()) { - number_of_symbol_keys++; + if (key->IsInternalizedString()) { + number_of_string_keys++; } else if (key->ToArrayIndex(&element_index)) { // An index key does not require space in the property backing store. number_of_properties--; } else { - // Bail out as a non-symbol non-index key makes caching impossible. + // Bail out as a non-internalized-string non-index key makes caching + // impossible. // ASSERT to make sure that the if condition after the loop is false. - ASSERT(number_of_symbol_keys != number_of_properties); + ASSERT(number_of_string_keys != number_of_properties); break; } } - // If we only have symbols and array indices among keys then we can - // use the map cache in the native context. + // If we only have internalized strings and array indices among keys then we + // can use the map cache in the native context. const int kMaxKeys = 10; - if ((number_of_symbol_keys == number_of_properties) && - (number_of_symbol_keys < kMaxKeys)) { + if ((number_of_string_keys == number_of_properties) && + (number_of_string_keys < kMaxKeys)) { // Create the fixed array with the key. Handle<FixedArray> keys = - isolate->factory()->NewFixedArray(number_of_symbol_keys); - if (number_of_symbol_keys > 0) { + isolate->factory()->NewFixedArray(number_of_string_keys); + if (number_of_string_keys > 0) { int index = 0; for (int p = 0; p < properties_length; p += 2) { Object* key = constant_properties->get(p); - if (key->IsSymbol()) { + if (key->IsInternalizedString()) { keys->set(index++, key); } } - ASSERT(index == number_of_symbol_keys); + ASSERT(index == number_of_string_keys); } *is_result_from_cache = true; return isolate->factory()->ObjectLiteralMapFromCache(context, keys); @@ -391,7 +394,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate( } Handle<Object> result; uint32_t element_index = 0; - if (key->IsSymbol()) { + if (key->IsInternalizedString()) { if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) { // Array index as string (uint32). 
result = JSObject::SetOwnElement( @@ -667,11 +670,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) { isolate->heap()->fixed_cow_array_map()) { isolate->counters()->cow_arrays_created_runtime()->Increment(); } - return isolate->heap()->CopyJSObject(JSObject::cast(*boilerplate)); + + JSObject* boilerplate_object = JSObject::cast(*boilerplate); + AllocationSiteMode mode = AllocationSiteInfo::GetMode( + boilerplate_object->GetElementsKind()); + if (mode == TRACK_ALLOCATION_SITE) { + return isolate->heap()->CopyJSObjectWithAllocationSite(boilerplate_object); + } + + return isolate->heap()->CopyJSObject(boilerplate_object); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateSymbol) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 0); + return isolate->heap()->AllocateSymbol(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSReceiver, handler, 0); Object* prototype = args[1]; @@ -682,6 +701,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSProxy) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 4); CONVERT_ARG_CHECKED(JSReceiver, handler, 0); Object* call_trap = args[1]; @@ -696,6 +716,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSFunctionProxy) { RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* obj = args[0]; return isolate->heap()->ToBoolean(obj->IsJSProxy()); @@ -703,6 +724,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSProxy) { RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* obj = args[0]; return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy()); @@ -710,6 +732,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSFunctionProxy) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSProxy, proxy, 0); return proxy->handler(); @@ -717,6 +740,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHandler) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0); return proxy->call_trap(); @@ -724,6 +748,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetCallTrap) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0); return proxy->construct_trap(); @@ -731,6 +756,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructTrap) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSProxy, proxy, 0); proxy->Fix(); @@ -752,7 +778,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetAdd) { HandleScope scope(isolate); ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<Object> key(args[1]); + Handle<Object> key(args[1], isolate); Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); table = ObjectHashSetAdd(table, key); holder->set_table(*table); @@ -764,7 +790,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetHas) { HandleScope scope(isolate); ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<Object> key(args[1]); + Handle<Object> key(args[1], isolate); Handle<ObjectHashSet> 
table(ObjectHashSet::cast(holder->table())); return isolate->heap()->ToBoolean(table->Contains(*key)); } @@ -774,7 +800,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) { HandleScope scope(isolate); ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); - Handle<Object> key(args[1]); + Handle<Object> key(args[1], isolate); Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); table = ObjectHashSetRemove(table, key); holder->set_table(*table); @@ -782,6 +808,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_SetGetSize) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0); + Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table())); + return Smi::FromInt(table->NumberOfElements()); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) { HandleScope scope(isolate); ASSERT(args.length() == 1); @@ -798,7 +833,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGet) { CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); - Handle<Object> lookup(table->Lookup(*key)); + Handle<Object> lookup(table->Lookup(*key), isolate); return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup; } @@ -809,7 +844,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapHas) { CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); - Handle<Object> lookup(table->Lookup(*key)); + Handle<Object> lookup(table->Lookup(*key), isolate); return isolate->heap()->ToBoolean(!lookup->IsTheHole()); } @@ -820,7 +855,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapDelete) { CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); - Handle<Object> lookup(table->Lookup(*key)); + Handle<Object> lookup(table->Lookup(*key), isolate); Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value()); holder->set_table(*new_table); @@ -841,10 +876,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) { } -RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) { +RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) { HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); + CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0); + Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table())); + return Smi::FromInt(table->NumberOfElements()); +} + + +static JSWeakMap* WeakMapInitialize(Isolate* isolate, + Handle<JSWeakMap> weakmap) { ASSERT(weakmap->map()->inobject_properties() == 0); Handle<ObjectHashTable> table = isolate->factory()->NewObjectHashTable(0); weakmap->set_table(*table); @@ -853,13 +895,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); + return WeakMapInitialize(isolate, weakmap); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapGet) { HandleScope scope(isolate); ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); - CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); Handle<ObjectHashTable> 
table(ObjectHashTable::cast(weakmap->table())); - Handle<Object> lookup(table->Lookup(*key)); + Handle<Object> lookup(table->Lookup(*key), isolate); return lookup->IsTheHole() ? isolate->heap()->undefined_value() : *lookup; } @@ -868,9 +918,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapHas) { HandleScope scope(isolate); ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); - CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table())); - Handle<Object> lookup(table->Lookup(*key)); + Handle<Object> lookup(table->Lookup(*key), isolate); return isolate->heap()->ToBoolean(!lookup->IsTheHole()); } @@ -879,9 +929,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapDelete) { HandleScope scope(isolate); ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); - CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table())); - Handle<Object> lookup(table->Lookup(*key)); + Handle<Object> lookup(table->Lookup(*key), isolate); Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, isolate->factory()->the_hole_value()); weakmap->set_table(*new_table); @@ -893,8 +943,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) { HandleScope scope(isolate); ASSERT(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(JSWeakMap, weakmap, 0); - CONVERT_ARG_HANDLE_CHECKED(JSReceiver, key, 1); - Handle<Object> value(args[2]); + CONVERT_ARG_HANDLE_CHECKED(Object, key, 1); + Handle<Object> value(args[2], isolate); Handle<ObjectHashTable> table(ObjectHashTable::cast(weakmap->table())); Handle<ObjectHashTable> new_table = PutIntoObjectHashTable(table, key, value); weakmap->set_table(*new_table); @@ -903,7 +953,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapSet) { RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* obj = args[0]; if (!obj->IsJSObject()) return isolate->heap()->null_value(); @@ -912,7 +962,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0); Object* obj = input_obj; @@ -921,26 +971,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) { do { if (obj->IsAccessCheckNeeded() && !isolate->MayNamedAccess(JSObject::cast(obj), - isolate->heap()->Proto_symbol(), + isolate->heap()->proto_string(), v8::ACCESS_GET)) { isolate->ReportFailedAccessCheck(JSObject::cast(obj), v8::ACCESS_GET); return isolate->heap()->undefined_value(); } - obj = obj->GetPrototype(); + obj = obj->GetPrototype(isolate); } while (obj->IsJSObject() && JSObject::cast(obj)->map()->is_hidden_prototype()); return obj; } +RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 2); + CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0); + CONVERT_ARG_CHECKED(Object, prototype, 1); + return input_obj->SetPrototype(prototype, true); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8). 
Object* O = args[0]; Object* V = args[1]; while (true) { - Object* prototype = V->GetPrototype(); + Object* prototype = V->GetPrototype(isolate); if (prototype->IsNull()) return isolate->heap()->false_value(); if (O == prototype) return isolate->heap()->true_value(); V = prototype; @@ -948,104 +1007,107 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) { } -// Recursively traverses hidden prototypes if property is not found -static void GetOwnPropertyImplementation(JSObject* obj, - String* name, - LookupResult* result) { - obj->LocalLookupRealNamedProperty(name, result); +static bool CheckAccessException(Object* callback, + v8::AccessType access_type) { + if (callback->IsAccessorInfo()) { + AccessorInfo* info = AccessorInfo::cast(callback); + return + (access_type == v8::ACCESS_HAS && + (info->all_can_read() || info->all_can_write())) || + (access_type == v8::ACCESS_GET && info->all_can_read()) || + (access_type == v8::ACCESS_SET && info->all_can_write()); + } + return false; +} - if (result->IsFound()) return; - Object* proto = obj->GetPrototype(); - if (proto->IsJSObject() && - JSObject::cast(proto)->map()->is_hidden_prototype()) - GetOwnPropertyImplementation(JSObject::cast(proto), - name, result); +template<class Key> +static bool CheckGenericAccess( + JSObject* receiver, + JSObject* holder, + Key key, + v8::AccessType access_type, + bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) { + Isolate* isolate = receiver->GetIsolate(); + for (JSObject* current = receiver; + true; + current = JSObject::cast(current->GetPrototype())) { + if (current->IsAccessCheckNeeded() && + !(isolate->*mayAccess)(current, key, access_type)) { + return false; + } + if (current == holder) break; + } + return true; } -static bool CheckAccessException(LookupResult* result, - v8::AccessType access_type) { - if (result->type() == CALLBACKS) { - Object* callback = result->GetCallbackObject(); - if (callback->IsAccessorInfo()) { - AccessorInfo* info = AccessorInfo::cast(callback); - bool can_access = - (access_type == v8::ACCESS_HAS && - (info->all_can_read() || info->all_can_write())) || - (access_type == v8::ACCESS_GET && info->all_can_read()) || - (access_type == v8::ACCESS_SET && info->all_can_write()); - return can_access; - } +enum AccessCheckResult { + ACCESS_FORBIDDEN, + ACCESS_ALLOWED, + ACCESS_ABSENT +}; + + +static AccessCheckResult CheckElementAccess( + JSObject* obj, + uint32_t index, + v8::AccessType access_type) { + // TODO(1095): we should traverse hidden prototype hierachy as well. + if (CheckGenericAccess( + obj, obj, index, access_type, &Isolate::MayIndexedAccess)) { + return ACCESS_ALLOWED; } - return false; + obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type); + return ACCESS_FORBIDDEN; } -static bool CheckAccess(JSObject* obj, - String* name, - LookupResult* result, - v8::AccessType access_type) { - ASSERT(result->IsProperty()); - - JSObject* holder = result->holder(); - JSObject* current = obj; - Isolate* isolate = obj->GetIsolate(); - while (true) { - if (current->IsAccessCheckNeeded() && - !isolate->MayNamedAccess(current, name, access_type)) { - // Access check callback denied the access, but some properties - // can have a special permissions which override callbacks descision - // (currently see v8::AccessControl). 
- break; - } +static AccessCheckResult CheckPropertyAccess( + JSObject* obj, + Name* name, + v8::AccessType access_type) { + uint32_t index; + if (name->AsArrayIndex(&index)) { + return CheckElementAccess(obj, index, access_type); + } - if (current == holder) { - return true; - } + LookupResult lookup(obj->GetIsolate()); + obj->LocalLookup(name, &lookup, true); - current = JSObject::cast(current->GetPrototype()); + if (!lookup.IsProperty()) return ACCESS_ABSENT; + if (CheckGenericAccess<Object*>( + obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) { + return ACCESS_ALLOWED; } + // Access check callback denied the access, but some properties + // can have a special permissions which override callbacks descision + // (currently see v8::AccessControl). // API callbacks can have per callback access exceptions. - switch (result->type()) { - case CALLBACKS: { - if (CheckAccessException(result, access_type)) { - return true; + switch (lookup.type()) { + case CALLBACKS: + if (CheckAccessException(lookup.GetCallbackObject(), access_type)) { + return ACCESS_ALLOWED; } break; - } - case INTERCEPTOR: { + case INTERCEPTOR: // If the object has an interceptor, try real named properties. // Overwrite the result to fetch the correct property later. - holder->LookupRealNamedProperty(name, result); - if (result->IsProperty()) { - if (CheckAccessException(result, access_type)) { - return true; + lookup.holder()->LookupRealNamedProperty(name, &lookup); + if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) { + if (CheckAccessException(lookup.GetCallbackObject(), access_type)) { + return ACCESS_ALLOWED; } } break; - } default: break; } - isolate->ReportFailedAccessCheck(current, access_type); - return false; -} - - -// TODO(1095): we should traverse hidden prototype hierachy as well. -static bool CheckElementAccess(JSObject* obj, - uint32_t index, - v8::AccessType access_type) { - if (obj->IsAccessCheckNeeded() && - !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) { - return false; - } - - return true; + obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type); + return ACCESS_FORBIDDEN; } @@ -1064,143 +1126,46 @@ enum PropertyDescriptorIndices { static MaybeObject* GetOwnProperty(Isolate* isolate, Handle<JSObject> obj, - Handle<String> name) { + Handle<Name> name) { Heap* heap = isolate->heap(); - Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE); - Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms); - LookupResult result(isolate); - // This could be an element. - uint32_t index; - if (name->AsArrayIndex(&index)) { - switch (obj->HasLocalElement(index)) { - case JSObject::UNDEFINED_ELEMENT: - return heap->undefined_value(); - - case JSObject::STRING_CHARACTER_ELEMENT: { - // Special handling of string objects according to ECMAScript 5 - // 15.5.5.2. Note that this might be a string object with elements - // other than the actual string value. This is covered by the - // subsequent cases. 
- Handle<JSValue> js_value = Handle<JSValue>::cast(obj); - Handle<String> str(String::cast(js_value->value())); - Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED); - - elms->set(IS_ACCESSOR_INDEX, heap->false_value()); - elms->set(VALUE_INDEX, *substr); - elms->set(WRITABLE_INDEX, heap->false_value()); - elms->set(ENUMERABLE_INDEX, heap->true_value()); - elms->set(CONFIGURABLE_INDEX, heap->false_value()); - return *desc; - } - - case JSObject::INTERCEPTED_ELEMENT: - case JSObject::FAST_ELEMENT: { - elms->set(IS_ACCESSOR_INDEX, heap->false_value()); - Handle<Object> value = Object::GetElement(obj, index); - RETURN_IF_EMPTY_HANDLE(isolate, value); - elms->set(VALUE_INDEX, *value); - elms->set(WRITABLE_INDEX, heap->true_value()); - elms->set(ENUMERABLE_INDEX, heap->true_value()); - elms->set(CONFIGURABLE_INDEX, heap->true_value()); - return *desc; - } - - case JSObject::DICTIONARY_ELEMENT: { - Handle<JSObject> holder = obj; - if (obj->IsJSGlobalProxy()) { - Object* proto = obj->GetPrototype(); - if (proto->IsNull()) return heap->undefined_value(); - ASSERT(proto->IsJSGlobalObject()); - holder = Handle<JSObject>(JSObject::cast(proto)); - } - FixedArray* elements = FixedArray::cast(holder->elements()); - SeededNumberDictionary* dictionary = NULL; - if (elements->map() == heap->non_strict_arguments_elements_map()) { - dictionary = SeededNumberDictionary::cast(elements->get(1)); - } else { - dictionary = SeededNumberDictionary::cast(elements); - } - int entry = dictionary->FindEntry(index); - ASSERT(entry != SeededNumberDictionary::kNotFound); - PropertyDetails details = dictionary->DetailsAt(entry); - switch (details.type()) { - case CALLBACKS: { - // This is an accessor property with getter and/or setter. - AccessorPair* accessors = - AccessorPair::cast(dictionary->ValueAt(entry)); - elms->set(IS_ACCESSOR_INDEX, heap->true_value()); - if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) { - elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER)); - } - if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) { - elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER)); - } - break; - } - case NORMAL: { - // This is a data property. - elms->set(IS_ACCESSOR_INDEX, heap->false_value()); - Handle<Object> value = Object::GetElement(obj, index); - ASSERT(!value.is_null()); - elms->set(VALUE_INDEX, *value); - elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly())); - break; - } - default: - UNREACHABLE(); - break; - } - elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum())); - elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete())); - return *desc; - } - } - } - - // Use recursive implementation to also traverse hidden prototypes - GetOwnPropertyImplementation(*obj, *name, &result); - - if (!result.IsProperty()) { - return heap->undefined_value(); + // Due to some WebKit tests, we want to make sure that we do not log + // more than one access failure here. 
+ switch (CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS)) { + case ACCESS_FORBIDDEN: return heap->false_value(); + case ACCESS_ALLOWED: break; + case ACCESS_ABSENT: return heap->undefined_value(); } - if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) { - return heap->false_value(); - } + PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name); + if (attrs == ABSENT) return heap->undefined_value(); + AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name); + Handle<AccessorPair> accessors(raw_accessors, isolate); - elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum())); - elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete())); - - bool is_js_accessor = result.IsPropertyCallbacks() && - (result.GetCallbackObject()->IsAccessorPair()); - - if (is_js_accessor) { - // __defineGetter__/__defineSetter__ callback. - elms->set(IS_ACCESSOR_INDEX, heap->true_value()); - - AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject()); + Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE); + elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0)); + elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0)); + elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(raw_accessors != NULL)); + + if (raw_accessors == NULL) { + elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0)); + // GetProperty does access check. + Handle<Object> value = GetProperty(isolate, obj, name); + if (value.is_null()) return Failure::Exception(); + elms->set(VALUE_INDEX, *value); + } else { + // Access checks are performed for both accessors separately. + // When they fail, the respective field is not set in the descriptor. Object* getter = accessors->GetComponent(ACCESSOR_GETTER); - if (!getter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) { + Object* setter = accessors->GetComponent(ACCESSOR_SETTER); + if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) { elms->set(GETTER_INDEX, getter); } - Object* setter = accessors->GetComponent(ACCESSOR_SETTER); - if (!setter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) { + if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) { elms->set(SETTER_INDEX, setter); } - } else { - elms->set(IS_ACCESSOR_INDEX, heap->false_value()); - elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly())); - - PropertyAttributes attrs; - Object* value; - // GetProperty will check access and report any violations. 
- { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs); - if (!maybe_value->ToObject(&value)) return maybe_value; - } - elms->set(VALUE_INDEX, value); } - return *desc; + return *isolate->factory()->NewJSArrayWithElements(elms); } @@ -1212,15 +1177,16 @@ static MaybeObject* GetOwnProperty(Isolate* isolate, // if args[1] is an accessor on args[0] // [true, GetFunction, SetFunction, Enumerable, Configurable] RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOwnProperty) { - ASSERT(args.length() == 2); HandleScope scope(isolate); + ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); - CONVERT_ARG_HANDLE_CHECKED(String, name, 1); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); return GetOwnProperty(isolate, obj, name); } RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, obj, 0); return obj->PreventExtensions(); @@ -1228,6 +1194,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PreventExtensions) { RUNTIME_FUNCTION(MaybeObject*, Runtime_IsExtensible) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, obj, 0); if (obj->IsJSGlobalProxy()) { @@ -1262,6 +1229,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateApiFunction) { RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* arg = args[0]; bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo(); @@ -1270,6 +1238,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsTemplate) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(HeapObject, templ, 0); CONVERT_SMI_ARG_CHECKED(index, 1) @@ -1288,6 +1257,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetTemplateField) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(HeapObject, object, 0); Map* old_map = object->map(); @@ -1306,6 +1276,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DisableAccessChecks) { RUNTIME_FUNCTION(MaybeObject*, Runtime_EnableAccessChecks) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(HeapObject, object, 0); Map* old_map = object->map(); @@ -1336,8 +1307,8 @@ static Failure* ThrowRedeclarationError(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) { - ASSERT(args.length() == 3); HandleScope scope(isolate); + ASSERT(args.length() == 3); Handle<GlobalObject> global = Handle<GlobalObject>( isolate->context()->global_object()); @@ -1358,8 +1329,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) { bool is_var = value->IsUndefined(); bool is_const = value->IsTheHole(); bool is_function = value->IsSharedFunctionInfo(); - bool is_module = value->IsJSModule(); - ASSERT(is_var + is_const + is_function + is_module == 1); + ASSERT(is_var + is_const + is_function == 1); if (is_var || is_const) { // Lookup the property in the global object, and don't set the @@ -1367,13 +1337,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) { // Do the lookup locally only, see ES5 erratum. 
LookupResult lookup(isolate); if (FLAG_es52_globals) { - Object* obj = *global; - do { - JSObject::cast(obj)->LocalLookup(*name, &lookup); - if (lookup.IsFound()) break; - obj = obj->GetPrototype(); - } while (obj->IsJSObject() && - JSObject::cast(obj)->map()->is_hidden_prototype()); + global->LocalLookup(*name, &lookup, true); } else { global->Lookup(*name, &lookup); } @@ -1397,30 +1361,29 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) { } LookupResult lookup(isolate); - global->LocalLookup(*name, &lookup); + global->LocalLookup(*name, &lookup, true); // Compute the property attributes. According to ECMA-262, // the property must be non-configurable except in eval. int attr = NONE; bool is_eval = DeclareGlobalsEvalFlag::decode(flags); - if (!is_eval || is_module) { + if (!is_eval) { attr |= DONT_DELETE; } bool is_native = DeclareGlobalsNativeFlag::decode(flags); - if (is_const || is_module || (is_native && is_function)) { + if (is_const || (is_native && is_function)) { attr |= READ_ONLY; } LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags); - if (!lookup.IsFound() || is_function || is_module) { + if (!lookup.IsFound() || is_function) { // If the local property exists, check that we can reconfigure it // as required for function declarations. if (lookup.IsFound() && lookup.IsDontDelete()) { if (lookup.IsReadOnly() || lookup.IsDontEnum() || lookup.IsPropertyCallbacks()) { - return ThrowRedeclarationError( - isolate, is_function ? "function" : "module", name); + return ThrowRedeclarationError(isolate, "function", name); } // If the existing property is not configurable, keep its attributes. attr = lookup.GetAttributes(); @@ -1546,7 +1509,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareContextSlot) { RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { - NoHandleAllocation nha; + NoHandleAllocation nha(isolate); // args[0] == name // args[1] == language_mode // args[2] == value (optional) @@ -1576,27 +1539,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { // the whole chain of hidden prototypes to do a 'local' lookup. Object* object = global; LookupResult lookup(isolate); - while (object->IsJSObject() && - JSObject::cast(object)->map()->is_hidden_prototype()) { - JSObject* raw_holder = JSObject::cast(object); - raw_holder->LocalLookup(*name, &lookup); - if (lookup.IsInterceptor()) { - HandleScope handle_scope(isolate); - Handle<JSObject> holder(raw_holder); - PropertyAttributes intercepted = holder->GetPropertyAttribute(*name); - // Update the raw pointer in case it's changed due to GC. - raw_holder = *holder; - if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) { - // Found an interceptor that's not read only. - if (assign) { - return raw_holder->SetProperty( - &lookup, *name, args[2], attributes, strict_mode_flag); - } else { - return isolate->heap()->undefined_value(); - } + JSObject::cast(object)->LocalLookup(*name, &lookup, true); + if (lookup.IsInterceptor()) { + HandleScope handle_scope(isolate); + PropertyAttributes intercepted = + lookup.holder()->GetPropertyAttribute(*name); + if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) { + // Found an interceptor that's not read only. + if (assign) { + return lookup.holder()->SetProperty( + &lookup, *name, args[2], attributes, strict_mode_flag); + } else { + return isolate->heap()->undefined_value(); } } - object = raw_holder->GetPrototype(); } // Reload global in case the loop above performed a GC. 
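The two hunks above (Runtime_DeclareGlobals and Runtime_InitializeVarGlobal) drop the hand-written walk over hidden prototypes in favour of the three-argument LocalLookup call that this patch also uses in CheckPropertyAccess (global->LocalLookup(*name, &lookup, true)). Below is a small, self-contained sketch of that pattern. It is a toy model, not V8 code: every type, field, and function in it is an illustrative stand-in, and the meaning of the extra boolean ("also search hidden prototypes") is inferred from the loop the patch removes, not from a documented API.

// Toy model (not V8 code) of the refactor in the two hunks above: callers used
// to walk hidden prototypes themselves; the patch moves that walk behind an
// extra boolean on LocalLookup. All names here are illustrative stand-ins.
#include <cassert>
#include <string>
#include <unordered_map>

struct LookupResult { bool found = false; int value = 0; };

struct JSObject {
  std::unordered_map<std::string, int> own_properties;
  JSObject* prototype = nullptr;
  bool is_hidden_prototype = false;

  // New-style lookup: when search_hidden_prototypes is true, keep going
  // through prototypes that are marked "hidden", mirroring the patched
  // LocalLookup(name, &lookup, true) call sites.
  void LocalLookup(const std::string& name, LookupResult* result,
                   bool search_hidden_prototypes = false) {
    for (JSObject* current = this; current != nullptr;
         current = current->prototype) {
      auto it = current->own_properties.find(name);
      if (it != current->own_properties.end()) {
        result->found = true;
        result->value = it->second;
        return;
      }
      if (!search_hidden_prototypes || current->prototype == nullptr ||
          !current->prototype->is_hidden_prototype) {
        return;  // Stop where the removed hand-written loop also stopped.
      }
    }
  }
};

int main() {
  JSObject hidden_proto;
  hidden_proto.is_hidden_prototype = true;
  hidden_proto.own_properties["x"] = 42;

  JSObject global;
  global.prototype = &hidden_proto;

  LookupResult lookup;
  global.LocalLookup("x", &lookup);        // old behaviour: own properties only
  assert(!lookup.found);
  global.LocalLookup("x", &lookup, true);  // the patched call sites pass true
  assert(lookup.found && lookup.value == 42);
  return 0;
}

The apparent design choice is to centralize the hidden-prototype traversal inside the lookup itself, so the duplicated do/while loops visible in both hunks disappear from the call sites.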
@@ -1609,6 +1565,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) { RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) { + NoHandleAllocation ha(isolate); // All constants are declared with an initial value. The name // of the constant is the first argument and the initial value // is the second. @@ -1660,7 +1617,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) { // Strict mode handling not needed (const is disallowed in strict mode). if (lookup.IsField()) { FixedArray* properties = global->properties(); - int index = lookup.GetFieldIndex(); + int index = lookup.GetFieldIndex().field_index(); if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) { properties->set(index, *value); } @@ -1750,7 +1707,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) { if (lookup.IsField()) { FixedArray* properties = object->properties(); - int index = lookup.GetFieldIndex(); + int index = lookup.GetFieldIndex().field_index(); if (properties->get(index)->IsTheHole()) { properties->set(index, *value); } @@ -1801,7 +1758,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) { // length of a string, i.e. it is always a Smi. We check anyway for security. CONVERT_SMI_ARG_CHECKED(index, 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3); - RUNTIME_ASSERT(last_match_info->HasFastObjectElements()); RUNTIME_ASSERT(index >= 0); RUNTIME_ASSERT(index <= subject->length()); isolate->counters()->regexp_entry_runtime()->Increment(); @@ -1815,6 +1771,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExec) { RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_SMI_ARG_CHECKED(elements_count, 0); if (elements_count < 0 || @@ -1850,13 +1807,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpConstructResult) { RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) { + NoHandleAllocation ha(isolate); AssertNoAllocation no_alloc; ASSERT(args.length() == 5); CONVERT_ARG_CHECKED(JSRegExp, regexp, 0); CONVERT_ARG_CHECKED(String, source, 1); // If source is the empty string we set it to "(?:)" instead as // suggested by ECMA-262, 5th, section 15.10.4.1. - if (source->length() == 0) source = isolate->heap()->query_colon_symbol(); + if (source->length() == 0) source = isolate->heap()->query_colon_string(); Object* global = args[2]; if (!global->IsTrue()) global = isolate->heap()->false_value(); @@ -1881,9 +1839,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) { JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER); regexp->InObjectPropertyAtPut( JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER); - regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex, - Smi::FromInt(0), - SKIP_WRITE_BARRIER); // It's a Smi. + regexp->InObjectPropertyAtPut( + JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER); return regexp; } @@ -1894,28 +1851,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) { static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE); Heap* heap = isolate->heap(); MaybeObject* result; - result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_symbol(), + result = regexp->SetLocalPropertyIgnoreAttributes(heap->source_string(), source, final); - ASSERT(!result->IsFailure()); - result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_symbol(), + // TODO(jkummerow): Turn these back into ASSERTs when we can be certain + // that it never fires in Release mode in the wild. 
+ CHECK(!result->IsFailure()); + result = regexp->SetLocalPropertyIgnoreAttributes(heap->global_string(), global, final); - ASSERT(!result->IsFailure()); + CHECK(!result->IsFailure()); result = - regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_symbol(), + regexp->SetLocalPropertyIgnoreAttributes(heap->ignore_case_string(), ignoreCase, final); - ASSERT(!result->IsFailure()); - result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_symbol(), + CHECK(!result->IsFailure()); + result = regexp->SetLocalPropertyIgnoreAttributes(heap->multiline_string(), multiline, final); - ASSERT(!result->IsFailure()); + CHECK(!result->IsFailure()); result = - regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_symbol(), + regexp->SetLocalPropertyIgnoreAttributes(heap->last_index_string(), Smi::FromInt(0), writable); - ASSERT(!result->IsFailure()); + CHECK(!result->IsFailure()); USE(result); return regexp; } @@ -1936,7 +1895,7 @@ static Handle<JSFunction> InstallBuiltin(Isolate* isolate, Handle<JSObject> holder, const char* name, Builtins::Name builtin_name) { - Handle<String> key = isolate->factory()->LookupAsciiSymbol(name); + Handle<String> key = isolate->factory()->InternalizeUtf8String(name); Handle<Code> code(isolate->builtins()->builtin(builtin_name)); Handle<JSFunction> optimized = isolate->factory()->NewFunction(key, @@ -1968,6 +1927,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSReceiver, callable, 0); @@ -2025,7 +1985,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MaterializeRegExpLiteral) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); @@ -2034,7 +1994,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetName) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, f, 0); @@ -2045,7 +2005,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetName) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); return isolate->heap()->ToBoolean( @@ -2054,7 +2014,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionNameShouldPrintAsAnonymous) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); f->shared()->set_name_should_print_as_anonymous(true); @@ -2063,7 +2023,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionMarkNameShouldPrintAsAnonymous) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionRemovePrototype) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); @@ -2096,7 +2056,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetSourceCode) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, fun, 0); @@ -2106,6 +2066,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetScriptSourcePosition) { RUNTIME_FUNCTION(MaybeObject*, 
Runtime_FunctionGetPositionForOffset) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(Code, code, 0); @@ -2119,7 +2080,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetPositionForOffset) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, fun, 0); @@ -2130,7 +2091,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetInstanceClassName) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, fun, 0); @@ -2141,7 +2102,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetLength) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, fun, 0); @@ -2156,11 +2117,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetPrototype) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, function, 0); - String* name = isolate->heap()->prototype_symbol(); + String* name = isolate->heap()->prototype_string(); if (function->HasFastProperties()) { // Construct a new field descriptor with updated attributes. @@ -2186,7 +2147,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) { } else { // Dictionary properties. // Directly manipulate the property details. int entry = function->property_dictionary()->FindEntry(name); - ASSERT(entry != StringDictionary::kNotFound); + ASSERT(entry != NameDictionary::kNotFound); PropertyDetails details = function->property_dictionary()->DetailsAt(entry); PropertyDetails new_details( static_cast<PropertyAttributes>(details.attributes() | READ_ONLY), @@ -2199,7 +2160,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionSetReadOnlyPrototype) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); @@ -2208,7 +2169,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsAPIFunction) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionIsBuiltin) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); @@ -2238,7 +2199,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) { // target function to undefined. SetCode is only used for built-in // constructors like String, Array, and Object, and some web code // doesn't like seeing source code for constructors. 
- target_shared->set_code(source_shared->code()); + target_shared->ReplaceCode(source_shared->code()); target_shared->set_scope_info(source_shared->scope_info()); target_shared->set_length(source_shared->length()); target_shared->set_formal_parameter_count( @@ -2270,7 +2231,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) { target->set_literals(*literals); if (isolate->logger()->is_logging_code_events() || - CpuProfiler::is_profiling(isolate)) { + isolate->cpu_profiler()->is_profiling()) { isolate->logger()->LogExistingFunction( source_shared, Handle<Code>(source_shared->code())); } @@ -2303,7 +2264,7 @@ MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(String, subject, 0); @@ -2327,7 +2288,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCharCodeAt) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CharFromCode) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); return CharFromCode(isolate, args[0]); } @@ -2442,7 +2403,7 @@ class ReplacementStringBuilder { array_builder_(heap->isolate(), estimated_part_count), subject_(subject), character_count_(0), - is_ascii_(subject->IsAsciiRepresentation()) { + is_ascii_(subject->IsOneByteRepresentation()) { // Require a non-zero initial size. Ensures that doubling the size to // extend the array will work. ASSERT(estimated_part_count > 0); @@ -2482,7 +2443,7 @@ class ReplacementStringBuilder { int length = string->length(); ASSERT(length > 0); AddElement(*string); - if (!string->IsAsciiRepresentation()) { + if (!string->IsOneByteRepresentation()) { is_ascii_ = false; } IncrementCharacterCount(length); @@ -2496,9 +2457,9 @@ class ReplacementStringBuilder { Handle<String> joined_string; if (is_ascii_) { - Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_); + Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_); AssertNoAllocation no_alloc; - char* char_buffer = seq->GetChars(); + uint8_t* char_buffer = seq->GetChars(); StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(), @@ -2527,8 +2488,8 @@ class ReplacementStringBuilder { } private: - Handle<SeqAsciiString> NewRawAsciiString(int length) { - return heap_->isolate()->factory()->NewRawAsciiString(length); + Handle<SeqOneByteString> NewRawOneByteString(int length) { + return heap_->isolate()->factory()->NewRawOneByteString(length); } @@ -2755,7 +2716,7 @@ bool CompiledReplacement::Compile(Handle<String> replacement, bool simple = false; if (content.IsAscii()) { simple = ParseReplacementPattern(&parts_, - content.ToAsciiVector(), + content.ToOneByteVector(), capture_count, subject_length, zone()); @@ -2831,7 +2792,7 @@ void CompiledReplacement::Apply(ReplacementStringBuilder* builder, } -void FindAsciiStringIndices(Vector<const char> subject, +void FindAsciiStringIndices(Vector<const uint8_t> subject, char pattern, ZoneList<int>* indices, unsigned int limit, @@ -2839,11 +2800,11 @@ void FindAsciiStringIndices(Vector<const char> subject, ASSERT(limit > 0); // Collect indices of pattern in subject using memchr. // Stop after finding at most limit values. 
- const char* subject_start = reinterpret_cast<const char*>(subject.start()); - const char* subject_end = subject_start + subject.length(); - const char* pos = subject_start; + const uint8_t* subject_start = subject.start(); + const uint8_t* subject_end = subject_start + subject.length(); + const uint8_t* pos = subject_start; while (limit > 0) { - pos = reinterpret_cast<const char*>( + pos = reinterpret_cast<const uint8_t*>( memchr(pos, pattern, subject_end - pos)); if (pos == NULL) return; indices->Add(static_cast<int>(pos - subject_start), zone); @@ -2853,6 +2814,23 @@ void FindAsciiStringIndices(Vector<const char> subject, } +void FindTwoByteStringIndices(const Vector<const uc16> subject, + uc16 pattern, + ZoneList<int>* indices, + unsigned int limit, + Zone* zone) { + ASSERT(limit > 0); + const uc16* subject_start = subject.start(); + const uc16* subject_end = subject_start + subject.length(); + for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) { + if (*pos == pattern) { + indices->Add(static_cast<int>(pos - subject_start), zone); + limit--; + } + } +} + + template <typename SubjectChar, typename PatternChar> void FindStringIndices(Isolate* isolate, Vector<const SubjectChar> subject, @@ -2889,9 +2867,10 @@ void FindStringIndicesDispatch(Isolate* isolate, ASSERT(subject_content.IsFlat()); ASSERT(pattern_content.IsFlat()); if (subject_content.IsAscii()) { - Vector<const char> subject_vector = subject_content.ToAsciiVector(); + Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector(); if (pattern_content.IsAscii()) { - Vector<const char> pattern_vector = pattern_content.ToAsciiVector(); + Vector<const uint8_t> pattern_vector = + pattern_content.ToOneByteVector(); if (pattern_vector.length() == 1) { FindAsciiStringIndices(subject_vector, pattern_vector[0], @@ -2917,19 +2896,38 @@ void FindStringIndicesDispatch(Isolate* isolate, } else { Vector<const uc16> subject_vector = subject_content.ToUC16Vector(); if (pattern_content.IsAscii()) { - FindStringIndices(isolate, - subject_vector, - pattern_content.ToAsciiVector(), - indices, - limit, - zone); + Vector<const uint8_t> pattern_vector = + pattern_content.ToOneByteVector(); + if (pattern_vector.length() == 1) { + FindTwoByteStringIndices(subject_vector, + pattern_vector[0], + indices, + limit, + zone); + } else { + FindStringIndices(isolate, + subject_vector, + pattern_vector, + indices, + limit, + zone); + } } else { - FindStringIndices(isolate, - subject_vector, - pattern_content.ToUC16Vector(), - indices, - limit, - zone); + Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector(); + if (pattern_vector.length() == 1) { + FindTwoByteStringIndices(subject_vector, + pattern_vector[0], + indices, + limit, + zone); + } else { + FindStringIndices(isolate, + subject_vector, + pattern_vector, + indices, + limit, + zone); + } } } } @@ -2937,7 +2935,7 @@ void FindStringIndicesDispatch(Isolate* isolate, template<typename ResultSeqString> -MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString( +MUST_USE_RESULT static MaybeObject* StringReplaceGlobalAtomRegExpWithString( Isolate* isolate, Handle<String> subject, Handle<JSRegExp> pattern_regexp, @@ -2968,7 +2966,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString( static_cast<int64_t>(pattern_len)) * static_cast<int64_t>(matches) + static_cast<int64_t>(subject_len); - if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(); + if (result_len_64 > INT_MAX) return Failure::OutOfMemoryException(0x11); int 
result_len = static_cast<int>(result_len_64); int subject_pos = 0; @@ -2977,7 +2975,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString( Handle<ResultSeqString> result; if (ResultSeqString::kHasAsciiEncoding) { result = Handle<ResultSeqString>::cast( - isolate->factory()->NewRawAsciiString(result_len)); + isolate->factory()->NewRawOneByteString(result_len)); } else { result = Handle<ResultSeqString>::cast( isolate->factory()->NewRawTwoByteString(result_len)); @@ -3020,7 +3018,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString( } -MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( +MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithString( Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp, @@ -3029,7 +3027,6 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( ASSERT(subject->IsFlat()); ASSERT(replacement->IsFlat()); - bool is_global = regexp->GetFlags().is_global(); int capture_count = regexp->CaptureCount(); int subject_length = subject->length(); @@ -3042,19 +3039,18 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( subject_length); // Shortcut for simple non-regexp global replacements - if (is_global && - regexp->TypeTag() == JSRegExp::ATOM && - simple_replace) { - if (subject->HasOnlyAsciiChars() && replacement->HasOnlyAsciiChars()) { - return StringReplaceAtomRegExpWithString<SeqAsciiString>( + if (regexp->TypeTag() == JSRegExp::ATOM && simple_replace) { + if (subject->IsOneByteConvertible() && + replacement->IsOneByteConvertible()) { + return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>( isolate, subject, regexp, replacement, last_match_info); } else { - return StringReplaceAtomRegExpWithString<SeqTwoByteString>( + return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>( isolate, subject, regexp, replacement, last_match_info); } } - RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate); + RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate); if (global_cache.HasException()) return Failure::Exception(); int32_t* current_match = global_cache.FetchNext(); @@ -3066,8 +3062,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( // Guessing the number of parts that the final result string is built // from. Global regexps can match any number of times, so we guess // conservatively. - int expected_parts = - (compiled_replacement.parts() + 1) * (is_global ? 4 : 1) + 1; + int expected_parts = (compiled_replacement.parts() + 1) * 4 + 1; ReplacementStringBuilder builder(isolate->heap(), subject, expected_parts); @@ -3099,9 +3094,6 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( } prev = end; - // Only continue checking for global regexps. 
- if (!is_global) break; - current_match = global_cache.FetchNext(); } while (current_match != NULL); @@ -3122,37 +3114,26 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString( template <typename ResultSeqString> -MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString( +MUST_USE_RESULT static MaybeObject* StringReplaceGlobalRegExpWithEmptyString( Isolate* isolate, Handle<String> subject, Handle<JSRegExp> regexp, Handle<JSArray> last_match_info) { ASSERT(subject->IsFlat()); - bool is_global = regexp->GetFlags().is_global(); - // Shortcut for simple non-regexp global replacements - if (is_global && - regexp->TypeTag() == JSRegExp::ATOM) { - Handle<String> empty_string(HEAP->empty_string()); - if (subject->HasOnlyAsciiChars()) { - return StringReplaceAtomRegExpWithString<SeqAsciiString>( - isolate, - subject, - regexp, - empty_string, - last_match_info); + if (regexp->TypeTag() == JSRegExp::ATOM) { + Handle<String> empty_string = isolate->factory()->empty_string(); + if (subject->IsOneByteRepresentation()) { + return StringReplaceGlobalAtomRegExpWithString<SeqOneByteString>( + isolate, subject, regexp, empty_string, last_match_info); } else { - return StringReplaceAtomRegExpWithString<SeqTwoByteString>( - isolate, - subject, - regexp, - empty_string, - last_match_info); + return StringReplaceGlobalAtomRegExpWithString<SeqTwoByteString>( + isolate, subject, regexp, empty_string, last_match_info); } } - RegExpImpl::GlobalCache global_cache(regexp, subject, is_global, isolate); + RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate); if (global_cache.HasException()) return Failure::Exception(); int32_t* current_match = global_cache.FetchNext(); @@ -3172,29 +3153,12 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString( Handle<ResultSeqString> answer; if (ResultSeqString::kHasAsciiEncoding) { answer = Handle<ResultSeqString>::cast( - isolate->factory()->NewRawAsciiString(new_length)); + isolate->factory()->NewRawOneByteString(new_length)); } else { answer = Handle<ResultSeqString>::cast( isolate->factory()->NewRawTwoByteString(new_length)); } - if (!is_global) { - RegExpImpl::SetLastMatchInfo( - last_match_info, subject, capture_count, current_match); - if (start == end) { - return *subject; - } else { - if (start > 0) { - String::WriteToFlat(*subject, answer->GetChars(), 0, start); - } - if (end < subject_length) { - String::WriteToFlat( - *subject, answer->GetChars() + start, end, subject_length); - } - return *answer; - } - } - int prev = 0; int position = 0; @@ -3203,8 +3167,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString( end = current_match[1]; if (prev < start) { // Add substring subject[prev;start] to answer string. 
- String::WriteToFlat( - *subject, answer->GetChars() + position, prev, start); + String::WriteToFlat(*subject, answer->GetChars() + position, prev, start); position += start - prev; } prev = end; @@ -3246,33 +3209,32 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString( } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) { - ASSERT(args.length() == 4); - +RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceGlobalRegExpWithString) { HandleScope scope(isolate); + ASSERT(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3); - if (!subject->IsFlat()) subject = FlattenGetString(subject); - - if (!replacement->IsFlat()) replacement = FlattenGetString(replacement); + ASSERT(regexp->GetFlags().is_global()); - ASSERT(last_match_info->HasFastObjectElements()); + if (!subject->IsFlat()) subject = FlattenGetString(subject); if (replacement->length() == 0) { - if (subject->HasOnlyAsciiChars()) { - return StringReplaceRegExpWithEmptyString<SeqAsciiString>( + if (subject->IsOneByteConvertible()) { + return StringReplaceGlobalRegExpWithEmptyString<SeqOneByteString>( isolate, subject, regexp, last_match_info); } else { - return StringReplaceRegExpWithEmptyString<SeqTwoByteString>( + return StringReplaceGlobalRegExpWithEmptyString<SeqTwoByteString>( isolate, subject, regexp, last_match_info); } } - return StringReplaceRegExpWithString( + if (!replacement->IsFlat()) replacement = FlattenGetString(replacement); + + return StringReplaceGlobalRegExpWithString( isolate, subject, regexp, replacement, last_match_info); } @@ -3323,8 +3285,8 @@ Handle<String> StringReplaceOneCharWithString(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceOneCharWithString) { - ASSERT(args.length() == 3); HandleScope scope(isolate); + ASSERT(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(String, search, 1); CONVERT_ARG_HANDLE_CHECKED(String, replace, 2); @@ -3375,10 +3337,10 @@ int Runtime::StringMatch(Isolate* isolate, // dispatch on type of strings if (seq_pat.IsAscii()) { - Vector<const char> pat_vector = seq_pat.ToAsciiVector(); + Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector(); if (seq_sub.IsAscii()) { return SearchString(isolate, - seq_sub.ToAsciiVector(), + seq_sub.ToOneByteVector(), pat_vector, start_index); } @@ -3390,7 +3352,7 @@ int Runtime::StringMatch(Isolate* isolate, Vector<const uc16> pat_vector = seq_pat.ToUC16Vector(); if (seq_sub.IsAscii()) { return SearchString(isolate, - seq_sub.ToAsciiVector(), + seq_sub.ToOneByteVector(), pat_vector, start_index); } @@ -3402,7 +3364,7 @@ int Runtime::StringMatch(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_StringIndexOf) { - HandleScope scope(isolate); // create a new handle scope + HandleScope scope(isolate); ASSERT(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, sub, 0); @@ -3430,7 +3392,7 @@ static int StringMatchBackwards(Vector<const schar> subject, if (sizeof(schar) == 1 && sizeof(pchar) > 1) { for (int i = 0; i < pattern_length; i++) { uc16 c = pattern[i]; - if (c > String::kMaxAsciiCharCode) { + if (c > String::kMaxOneByteCharCode) { return -1; } } @@ -3454,7 +3416,7 @@ static int StringMatchBackwards(Vector<const schar> subject, } RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) { - HandleScope scope(isolate); // create a new handle scope + 
HandleScope scope(isolate); ASSERT(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, sub, 0); @@ -3485,9 +3447,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) { String::FlatContent pat_content = pat->GetFlatContent(); if (pat_content.IsAscii()) { - Vector<const char> pat_vector = pat_content.ToAsciiVector(); + Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector(); if (sub_content.IsAscii()) { - position = StringMatchBackwards(sub_content.ToAsciiVector(), + position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector, start_index); } else { @@ -3498,7 +3460,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) { } else { Vector<const uc16> pat_vector = pat_content.ToUC16Vector(); if (sub_content.IsAscii()) { - position = StringMatchBackwards(sub_content.ToAsciiVector(), + position = StringMatchBackwards(sub_content.ToOneByteVector(), pat_vector, start_index); } else { @@ -3513,7 +3475,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLastIndexOf) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(String, str1, 0); @@ -3542,17 +3504,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) { str1->TryFlatten(); str2->TryFlatten(); - StringInputBuffer& buf1 = - *isolate->runtime_state()->string_locale_compare_buf1(); - StringInputBuffer& buf2 = - *isolate->runtime_state()->string_locale_compare_buf2(); - - buf1.Reset(str1); - buf2.Reset(str2); + ConsStringIteratorOp* op1 = + isolate->runtime_state()->string_locale_compare_it1(); + ConsStringIteratorOp* op2 = + isolate->runtime_state()->string_locale_compare_it2(); + // TODO(dcarney) Can do array compares here more efficiently. 
+ StringCharacterStream stream1(str1, op1); + StringCharacterStream stream2(str2, op2); for (int i = 0; i < end; i++) { - uint16_t char1 = buf1.GetNext(); - uint16_t char2 = buf2.GetNext(); + uint16_t char1 = stream1.GetNext(); + uint16_t char2 = stream2.GetNext(); if (char1 != char2) return Smi::FromInt(char1 - char2); } @@ -3561,7 +3523,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringLocaleCompare) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(String, value, 0); @@ -3583,17 +3545,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SubString) { RUNTIME_ASSERT(start >= 0); RUNTIME_ASSERT(end <= value->length()); isolate->counters()->sub_string_runtime()->Increment(); + if (end - start == 1) { + return isolate->heap()->LookupSingleCharacterStringFromCode( + value->Get(start)); + } return value->SubString(start, end); } RUNTIME_FUNCTION(MaybeObject*, Runtime_StringMatch) { + HandleScope handles(isolate); ASSERT_EQ(3, args.length()); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, regexp_info, 2); - HandleScope handles; RegExpImpl::GlobalCache global_cache(regexp, subject, true, isolate); if (global_cache.HasException()) return Failure::Exception(); @@ -3664,7 +3630,7 @@ static MaybeObject* SearchRegExpMultiple( isolate->heap(), *subject, regexp->data(), - RegExpResultsCache::REGEXP_MULTIPLE_INDICES)); + RegExpResultsCache::REGEXP_MULTIPLE_INDICES), isolate); if (*cached_answer != Smi::FromInt(0)) { Handle<FixedArray> cached_fixed_array = Handle<FixedArray>(FixedArray::cast(*cached_answer)); @@ -3797,8 +3763,8 @@ static MaybeObject* SearchRegExpMultiple( // lastMatchInfoOverride to maintain the last match info, so we don't need to // set any other last match array info. RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) { - ASSERT(args.length() == 4); HandleScope handles(isolate); + ASSERT(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(String, subject, 1); if (!subject->IsFlat()) FlattenString(subject); @@ -3806,7 +3772,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) { CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 2); CONVERT_ARG_HANDLE_CHECKED(JSArray, result_array, 3); - ASSERT(last_match_info->HasFastObjectElements()); ASSERT(regexp->GetFlags().is_global()); if (regexp->CaptureCount() == 0) { @@ -3820,7 +3785,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpExecMultiple) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_SMI_ARG_CHECKED(radix, 1); RUNTIME_ASSERT(2 <= radix && radix <= 36); @@ -3839,92 +3804,65 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) { // Slow case. 
CONVERT_DOUBLE_ARG_CHECKED(value, 0); if (isnan(value)) { - return *isolate->factory()->nan_symbol(); + return *isolate->factory()->nan_string(); } if (isinf(value)) { if (value < 0) { - return *isolate->factory()->minus_infinity_symbol(); + return *isolate->factory()->minus_infinity_string(); } - return *isolate->factory()->infinity_symbol(); + return *isolate->factory()->infinity_string(); } char* str = DoubleToRadixCString(value, radix); MaybeObject* result = - isolate->heap()->AllocateStringFromAscii(CStrVector(str)); + isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); DeleteArray(str); return result; } RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(value, 0); - if (isnan(value)) { - return *isolate->factory()->nan_symbol(); - } - if (isinf(value)) { - if (value < 0) { - return *isolate->factory()->minus_infinity_symbol(); - } - return *isolate->factory()->infinity_symbol(); - } CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2IChecked(f_number); RUNTIME_ASSERT(f >= 0); char* str = DoubleToFixedCString(value, f); MaybeObject* res = - isolate->heap()->AllocateStringFromAscii(CStrVector(str)); + isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); DeleteArray(str); return res; } RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(value, 0); - if (isnan(value)) { - return *isolate->factory()->nan_symbol(); - } - if (isinf(value)) { - if (value < 0) { - return *isolate->factory()->minus_infinity_symbol(); - } - return *isolate->factory()->infinity_symbol(); - } CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2IChecked(f_number); RUNTIME_ASSERT(f >= -1 && f <= 20); char* str = DoubleToExponentialCString(value, f); MaybeObject* res = - isolate->heap()->AllocateStringFromAscii(CStrVector(str)); + isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); DeleteArray(str); return res; } RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(value, 0); - if (isnan(value)) { - return *isolate->factory()->nan_symbol(); - } - if (isinf(value)) { - if (value < 0) { - return *isolate->factory()->minus_infinity_symbol(); - } - return *isolate->factory()->infinity_symbol(); - } CONVERT_DOUBLE_ARG_CHECKED(f_number, 1); int f = FastD2IChecked(f_number); RUNTIME_ASSERT(f >= 1 && f <= 21); char* str = DoubleToPrecisionCString(value, f); MaybeObject* res = - isolate->heap()->AllocateStringFromAscii(CStrVector(str)); + isolate->heap()->AllocateStringFromOneByte(CStrVector(str)); DeleteArray(str); return res; } @@ -3936,6 +3874,7 @@ static Handle<Object> GetCharAt(Handle<String> string, uint32_t index) { if (index < static_cast<uint32_t>(string->length())) { string->TryFlatten(); return LookupSingleCharacterStringFromCode( + string->GetIsolate(), string->Get(index)); } return Execution::CharAt(string, index); @@ -3960,7 +3899,7 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate, } if (object->IsString() || object->IsNumber() || object->IsBoolean()) { - return object->GetPrototype()->GetElement(index); + return object->GetPrototype(isolate)->GetElement(index); } return object->GetElement(index); @@ -3986,16 +3925,16 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate, return 
GetElementOrCharAt(isolate, object, index); } - // Convert the key to a string - possibly by calling back into JavaScript. - Handle<String> name; - if (key->IsString()) { - name = Handle<String>::cast(key); + // Convert the key to a name - possibly by calling back into JavaScript. + Handle<Name> name; + if (key->IsName()) { + name = Handle<Name>::cast(key); } else { bool has_pending_exception = false; Handle<Object> converted = Execution::ToString(key, &has_pending_exception); if (has_pending_exception) return Failure::Exception(); - name = Handle<String>::cast(converted); + name = Handle<Name>::cast(converted); } // Check if the name is trivially convertible to an index and get @@ -4009,7 +3948,7 @@ MaybeObject* Runtime::GetObjectProperty(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); Handle<Object> object = args.at<Object>(0); @@ -4019,9 +3958,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetProperty) { } -// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric. +// KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric. RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); // Fast cases for getting named properties of the receiver JSObject @@ -4038,9 +3977,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { if (args[0]->IsJSObject()) { if (!args[0]->IsJSGlobalProxy() && !args[0]->IsAccessCheckNeeded() && - args[1]->IsString()) { + args[1]->IsName()) { JSObject* receiver = JSObject::cast(args[0]); - String* key = String::cast(args[1]); + Name* key = Name::cast(args[1]); if (receiver->HasFastProperties()) { // Attempt to use lookup cache. Map* receiver_map = receiver->map(); @@ -4057,15 +3996,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { LookupResult result(isolate); receiver->LocalLookup(key, &result); if (result.IsField()) { - int offset = result.GetFieldIndex(); + int offset = result.GetFieldIndex().field_index(); keyed_lookup_cache->Update(receiver_map, key, offset); return receiver->FastPropertyAt(offset); } } else { // Attempt dictionary lookup. - StringDictionary* dictionary = receiver->property_dictionary(); + NameDictionary* dictionary = receiver->property_dictionary(); int entry = dictionary->FindEntry(key); - if ((entry != StringDictionary::kNotFound) && + if ((entry != NameDictionary::kNotFound) && (dictionary->DetailsAt(entry).type() == NORMAL)) { Object* value = dictionary->ValueAt(entry); if (!receiver->IsGlobalObject()) return value; @@ -4075,7 +4014,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { } } } else if (FLAG_smi_only_arrays && args.at<Object>(1)->IsSmi()) { - // JSObject without a string key. If the key is a Smi, check for a + // JSObject without a name key. If the key is a Smi, check for a // definite out-of-bounds access to elements, which is a strong indicator // that subsequent accesses will also call the runtime. Proactively // transition elements to FAST_*_ELEMENTS to avoid excessive boxing of @@ -4083,8 +4022,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { // become FAST_DOUBLE_ELEMENTS. 
Handle<JSObject> js_object(args.at<JSObject>(0)); ElementsKind elements_kind = js_object->GetElementsKind(); - if (IsFastElementsKind(elements_kind) && - !IsFastObjectElementsKind(elements_kind)) { + if (IsFastDoubleElementsKind(elements_kind)) { FixedArrayBase* elements = js_object->elements(); if (args.at<Smi>(1)->value() >= elements->length()) { if (IsFastHoleyElementsKind(elements_kind)) { @@ -4097,6 +4035,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) { isolate); if (maybe_object->IsFailure()) return maybe_object; } + } else { + ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) || + !IsFastElementsKind(elements_kind)); } } } else if (args[0]->IsString() && args[1]->IsSmi()) { @@ -4129,11 +4070,11 @@ static bool IsValidAccessor(Handle<Object> obj) { // Step 12 - update an existing accessor property with an accessor or generic // descriptor. RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) { - ASSERT(args.length() == 5); HandleScope scope(isolate); + ASSERT(args.length() == 5); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); RUNTIME_ASSERT(!obj->IsNull()); - CONVERT_ARG_HANDLE_CHECKED(String, name, 1); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2); RUNTIME_ASSERT(IsValidAccessor(getter)); CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3); @@ -4155,10 +4096,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineAccessorProperty) { // Step 12 - update an existing data property with a data or generic // descriptor. RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) { - ASSERT(args.length() == 4); HandleScope scope(isolate); + ASSERT(args.length() == 4); CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0); - CONVERT_ARG_HANDLE_CHECKED(String, name, 1); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2); CONVERT_SMI_ARG_CHECKED(unchecked, 3); RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0); @@ -4176,6 +4117,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) { if (callback->IsAccessorInfo()) { return isolate->heap()->undefined_value(); } + // TODO(mstarzinger): The __proto__ property should actually be a real + // JavaScript accessor instead of a foreign callback. But for now we just + // avoid changing the writability and configurability attribute of this + // property. + Handle<Name> proto_string = isolate->factory()->proto_string(); + if (callback->IsForeign() && proto_string->Equals(*name)) { + attr = static_cast<PropertyAttributes>(attr & ~(READ_ONLY | DONT_DELETE)); + } // Avoid redefining foreign callback as data property, just use the stored // setter to update the value instead. // TODO(mstarzinger): So far this only works if property attributes don't @@ -4219,6 +4168,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) { } +// Return property without being observable by accessors or interceptors. 
+RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); + CONVERT_ARG_HANDLE_CHECKED(Name, key, 1); + LookupResult lookup(isolate); + object->LookupRealNamedProperty(*key, &lookup); + if (!lookup.IsFound()) return isolate->heap()->undefined_value(); + switch (lookup.type()) { + case NORMAL: + return lookup.holder()->GetNormalizedProperty(&lookup); + case FIELD: + return lookup.holder()->FastPropertyAt( + lookup.GetFieldIndex().field_index()); + case CONSTANT_FUNCTION: + return lookup.GetConstantFunction(); + case CALLBACKS: + case HANDLER: + case INTERCEPTOR: + case TRANSITION: + return isolate->heap()->undefined_value(); + case NONEXISTENT: + UNREACHABLE(); + } + return isolate->heap()->undefined_value(); +} + + MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, Handle<Object> object, Handle<Object> key, @@ -4238,10 +4216,11 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, if (object->IsJSProxy()) { bool has_pending_exception = false; - Handle<Object> name = Execution::ToString(key, &has_pending_exception); + Handle<Object> name = key->IsSymbol() + ? key : Execution::ToString(key, &has_pending_exception); if (has_pending_exception) return Failure::Exception(); return JSProxy::cast(*object)->SetProperty( - String::cast(*name), *value, attr, strict_mode); + Name::cast(*name), *value, attr, strict_mode); } // If the object isn't a JavaScript object, we ignore the store. @@ -4271,16 +4250,16 @@ MaybeObject* Runtime::SetObjectProperty(Isolate* isolate, return *value; } - if (key->IsString()) { + if (key->IsName()) { Handle<Object> result; - if (Handle<String>::cast(key)->AsArrayIndex(&index)) { + Handle<Name> name = Handle<Name>::cast(key); + if (name->AsArrayIndex(&index)) { result = JSObject::SetElement( js_object, index, value, attr, strict_mode, set_mode); } else { - Handle<String> key_string = Handle<String>::cast(key); - key_string->TryFlatten(); + if (name->IsString()) Handle<String>::cast(name)->TryFlatten(); result = JSReceiver::SetProperty( - js_object, key_string, value, attr, strict_mode); + js_object, name, value, attr, strict_mode); } if (result.is_null()) return Failure::Exception(); return *value; @@ -4326,16 +4305,14 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate, index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY); } - if (key->IsString()) { - if (Handle<String>::cast(key)->AsArrayIndex(&index)) { + if (key->IsName()) { + Handle<Name> name = Handle<Name>::cast(key); + if (name->AsArrayIndex(&index)) { return js_object->SetElement( index, *value, attr, kNonStrictMode, false, DEFINE_PROPERTY); } else { - Handle<String> key_string = Handle<String>::cast(key); - key_string->TryFlatten(); - return js_object->SetLocalPropertyIgnoreAttributes(*key_string, - *value, - attr); + if (name->IsString()) Handle<String>::cast(name)->TryFlatten(); + return js_object->SetLocalPropertyIgnoreAttributes(*name, *value, attr); } } @@ -4375,24 +4352,24 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate, return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION); } - Handle<String> key_string; - if (key->IsString()) { - key_string = Handle<String>::cast(key); + Handle<Name> name; + if (key->IsName()) { + name = Handle<Name>::cast(key); } else { // Call-back into JavaScript to convert the key to a string. 
bool has_pending_exception = false; Handle<Object> converted = Execution::ToString(key, &has_pending_exception); if (has_pending_exception) return Failure::Exception(); - key_string = Handle<String>::cast(converted); + name = Handle<String>::cast(converted); } - key_string->TryFlatten(); - return receiver->DeleteProperty(*key_string, JSReceiver::FORCE_DELETION); + if (name->IsString()) Handle<String>::cast(name)->TryFlatten(); + return receiver->DeleteProperty(*name, JSReceiver::FORCE_DELETION); } RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(args.length() == 4 || args.length() == 5); Handle<Object> object = args.at<Object>(0); @@ -4420,12 +4397,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetProperty) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsKind) { + HandleScope scope(isolate); + RUNTIME_ASSERT(args.length() == 2); + CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0); + CONVERT_ARG_HANDLE_CHECKED(Map, map, 1); + JSObject::TransitionElementsKind(array, map->elements_kind()); + return *array; +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(args.length() == 1); Handle<Object> object = args.at<Object>(0); if (object->IsJSObject()) { Handle<JSObject> js_object(Handle<JSObject>::cast(object)); + ASSERT(!js_object->map()->is_observed()); ElementsKind new_kind = js_object->HasFastHoleyElements() ? FAST_HOLEY_DOUBLE_ELEMENTS : FAST_DOUBLE_ELEMENTS; @@ -4437,11 +4425,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsSmiToDouble) { RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(args.length() == 1); Handle<Object> object = args.at<Object>(0); if (object->IsJSObject()) { Handle<JSObject> js_object(Handle<JSObject>::cast(object)); + ASSERT(!js_object->map()->is_observed()); ElementsKind new_kind = js_object->HasFastHoleyElements() ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; @@ -4456,7 +4445,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_TransitionElementsDoubleToObject) { // This is used to decide if we should transform null and undefined // into the global object when doing call and apply. RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(args.length() == 1); Handle<Object> object = args.at<Object>(0); @@ -4470,13 +4459,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetNativeFlag) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) { + HandleScope scope(isolate); RUNTIME_ASSERT(args.length() == 5); CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0); CONVERT_SMI_ARG_CHECKED(store_index, 1); Handle<Object> value = args.at<Object>(2); CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 3); CONVERT_SMI_ARG_CHECKED(literal_index, 4); - HandleScope scope; Object* raw_boilerplate_object = literals->get(literal_index); Handle<JSArray> boilerplate_object(JSArray::cast(raw_boilerplate_object)); @@ -4522,6 +4511,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) { // Check whether debugger and is about to step into the callback that is passed // to a built-in function such as Array.forEach. 
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) { + NoHandleAllocation ha(isolate); #ifdef ENABLE_DEBUGGER_SUPPORT if (!isolate->IsDebuggerActive() || !isolate->debug()->StepInActive()) { return isolate->heap()->false_value(); @@ -4541,6 +4531,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugCallbackSupportsStepping) { // Set one shot breakpoints for the callback function that is passed to a // built-in function such as Array.forEach to enable stepping into the callback. RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) { + NoHandleAllocation ha(isolate); #ifdef ENABLE_DEBUGGER_SUPPORT Debug* debug = isolate->debug(); if (!debug->IsStepping()) return isolate->heap()->undefined_value(); @@ -4559,10 +4550,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrepareStepInIfStepping) { // Set a local property, even if it is READ_ONLY. If the property does not // exist, it will be added with attributes NONE. RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(args.length() == 3 || args.length() == 4); CONVERT_ARG_CHECKED(JSObject, object, 0); - CONVERT_ARG_CHECKED(String, name, 1); + CONVERT_ARG_CHECKED(Name, name, 1); // Compute attributes. PropertyAttributes attributes = NONE; if (args.length() == 4) { @@ -4579,11 +4570,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IgnoreAttributesAndSetProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(JSReceiver, object, 0); - CONVERT_ARG_CHECKED(String, key, 1); + CONVERT_ARG_CHECKED(Name, key, 1); CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2); return object->DeleteProperty(key, (strict_mode == kStrictMode) ? JSReceiver::STRICT_DELETION @@ -4593,12 +4584,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteProperty) { static Object* HasLocalPropertyImplementation(Isolate* isolate, Handle<JSObject> object, - Handle<String> key) { + Handle<Name> key) { if (object->HasLocalProperty(*key)) return isolate->heap()->true_value(); // Handle hidden prototypes. If there's a hidden prototype above this thing // then we have to check it for properties, because they are supposed to // look like they are on this object. - Handle<Object> proto(object->GetPrototype()); + Handle<Object> proto(object->GetPrototype(), isolate); if (proto->IsJSObject() && Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) { return HasLocalPropertyImplementation(isolate, @@ -4610,9 +4601,9 @@ static Object* HasLocalPropertyImplementation(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); - CONVERT_ARG_CHECKED(String, key, 1); + CONVERT_ARG_CHECKED(Name, key, 1); uint32_t index; const bool key_is_array_index = key->AsArrayIndex(&index); @@ -4635,7 +4626,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) { HandleScope scope(isolate); return HasLocalPropertyImplementation(isolate, Handle<JSObject>(object), - Handle<String>(key)); + Handle<Name>(key)); } else if (obj->IsString() && key_is_array_index) { // Well, there is one exception: Handle [] on strings. 
String* string = String::cast(obj); @@ -4648,10 +4639,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) { - NoHandleAllocation na; + NoHandleAllocation na(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSReceiver, receiver, 0); - CONVERT_ARG_CHECKED(String, key, 1); + CONVERT_ARG_CHECKED(Name, key, 1); bool result = receiver->HasProperty(key); if (isolate->has_pending_exception()) return Failure::Exception(); @@ -4660,7 +4651,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) { - NoHandleAllocation na; + NoHandleAllocation na(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSReceiver, receiver, 0); CONVERT_SMI_ARG_CHECKED(index, 1); @@ -4672,46 +4663,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasElement) { RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSObject, object, 0); - CONVERT_ARG_CHECKED(String, key, 1); - - uint32_t index; - if (key->AsArrayIndex(&index)) { - JSObject::LocalElementType type = object->HasLocalElement(index); - switch (type) { - case JSObject::UNDEFINED_ELEMENT: - case JSObject::STRING_CHARACTER_ELEMENT: - return isolate->heap()->false_value(); - case JSObject::INTERCEPTED_ELEMENT: - case JSObject::FAST_ELEMENT: - return isolate->heap()->true_value(); - case JSObject::DICTIONARY_ELEMENT: { - if (object->IsJSGlobalProxy()) { - Object* proto = object->GetPrototype(); - if (proto->IsNull()) { - return isolate->heap()->false_value(); - } - ASSERT(proto->IsJSGlobalObject()); - object = JSObject::cast(proto); - } - FixedArray* elements = FixedArray::cast(object->elements()); - SeededNumberDictionary* dictionary = NULL; - if (elements->map() == - isolate->heap()->non_strict_arguments_elements_map()) { - dictionary = SeededNumberDictionary::cast(elements->get(1)); - } else { - dictionary = SeededNumberDictionary::cast(elements); - } - int entry = dictionary->FindEntry(index); - ASSERT(entry != SeededNumberDictionary::kNotFound); - PropertyDetails details = dictionary->DetailsAt(entry); - return isolate->heap()->ToBoolean(!details.IsDontEnum()); - } - } - } + CONVERT_ARG_CHECKED(Name, key, 1); PropertyAttributes att = object->GetLocalPropertyAttribute(key); return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0); @@ -4735,6 +4691,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNames) { // have none, the map of the object. This is used to speed up // the check for deletions during a for-in. 
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPropertyNamesFast) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0); @@ -4846,7 +4803,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLocalPropertyNames) { int dest_pos = 0; for (int i = 0; i < total_property_count; i++) { Object* name = old_names->get(i); - if (name == isolate->heap()->hidden_symbol()) { + if (name == isolate->heap()->hidden_string()) { continue; } names->set(dest_pos++, name); @@ -4923,9 +4880,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetIndexedInterceptorElementNames) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) { + HandleScope scope(isolate); ASSERT_EQ(args.length(), 1); CONVERT_ARG_CHECKED(JSObject, raw_object, 0); - HandleScope scope(isolate); Handle<JSObject> object(raw_object); if (object->IsJSGlobalProxy()) { @@ -4937,7 +4894,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) { return *isolate->factory()->NewJSArray(0); } - Handle<Object> proto(object->GetPrototype()); + Handle<Object> proto(object->GetPrototype(), isolate); // If proxy is detached we simply return an empty array. if (proto->IsNull()) return *isolate->factory()->NewJSArray(0); object = Handle<JSObject>::cast(proto); @@ -4971,7 +4928,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LocalKeys) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); // Compute the frame holding the arguments. @@ -4989,6 +4946,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) { return frame->GetParameter(index); } + if (args[0]->IsSymbol()) { + // Lookup in the initial Object.prototype object. + return isolate->initial_object_prototype()->GetProperty( + Symbol::cast(args[0])); + } + // Convert the key to a string. HandleScope scope(isolate); bool exception = false; @@ -5007,8 +4970,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) { } // Handle special arguments properties. - if (key->Equals(isolate->heap()->length_symbol())) return Smi::FromInt(n); - if (key->Equals(isolate->heap()->callee_symbol())) { + if (key->Equals(isolate->heap()->length_string())) return Smi::FromInt(n); + if (key->Equals(isolate->heap()->callee_string())) { Object* function = frame->function(); if (function->IsJSFunction() && !JSFunction::cast(function)->shared()->is_classic_mode()) { @@ -5024,6 +4987,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArgumentsProperty) { RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* object = args[0]; return (object->IsJSObject() && !object->IsGlobalObject()) @@ -5033,56 +4997,56 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ToFastProperties) { RUNTIME_FUNCTION(MaybeObject*, Runtime_ToBool) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); - return args[0]->ToBoolean(); + return isolate->heap()->ToBoolean(args[0]->BooleanValue()); } // Returns the type string of a value; see ECMA-262, 11.4.3 (p 47). // Possible optimizations: put the type string into the oddballs. 
RUNTIME_FUNCTION(MaybeObject*, Runtime_Typeof) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); Object* obj = args[0]; - if (obj->IsNumber()) return isolate->heap()->number_symbol(); + if (obj->IsNumber()) return isolate->heap()->number_string(); HeapObject* heap_obj = HeapObject::cast(obj); // typeof an undetectable object is 'undefined' if (heap_obj->map()->is_undetectable()) { - return isolate->heap()->undefined_symbol(); + return isolate->heap()->undefined_string(); } InstanceType instance_type = heap_obj->map()->instance_type(); if (instance_type < FIRST_NONSTRING_TYPE) { - return isolate->heap()->string_symbol(); + return isolate->heap()->string_string(); } switch (instance_type) { case ODDBALL_TYPE: if (heap_obj->IsTrue() || heap_obj->IsFalse()) { - return isolate->heap()->boolean_symbol(); + return isolate->heap()->boolean_string(); } if (heap_obj->IsNull()) { return FLAG_harmony_typeof - ? isolate->heap()->null_symbol() - : isolate->heap()->object_symbol(); + ? isolate->heap()->null_string() + : isolate->heap()->object_string(); } ASSERT(heap_obj->IsUndefined()); - return isolate->heap()->undefined_symbol(); + return isolate->heap()->undefined_string(); case JS_FUNCTION_TYPE: case JS_FUNCTION_PROXY_TYPE: - return isolate->heap()->function_symbol(); + return isolate->heap()->function_string(); default: // For any kind of object not handled above, the spec rule for // host objects gives that it is okay to return "object" - return isolate->heap()->object_symbol(); + return isolate->heap()->object_string(); } } -static bool AreDigits(const char*s, int from, int to) { +static bool AreDigits(const uint8_t*s, int from, int to) { for (int i = from; i < to; i++) { if (s[i] < '0' || s[i] > '9') return false; } @@ -5091,7 +5055,7 @@ } -static int ParseDecimalInteger(const char*s, int from, int to) { +static int ParseDecimalInteger(const uint8_t*s, int from, int to) { ASSERT(to - from < 10); // Overflow is not possible. ASSERT(from < to); int d = s[from] - '0'; @@ -5105,17 +5069,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(String, subject, 0); subject->TryFlatten(); // Fast case: short integer or some sorts of junk values. int len = subject->length(); - if (subject->IsSeqAsciiString()) { + if (subject->IsSeqOneByteString()) { if (len == 0) return Smi::FromInt(0); - char const* data = SeqAsciiString::cast(subject)->GetChars(); + uint8_t const* data = SeqOneByteString::cast(subject)->GetChars(); bool minus = (data[0] == '-'); int start_pos = (minus ? 1 : 0); @@ -5125,8 +5089,8 @@ // Fast check for a junk value. A valid string may start from a // whitespace, a sign ('+' or '-'), the decimal point, a decimal digit or // the 'I' character ('Infinity'). All of that have codes not greater than - // '9' except 'I'. - if (data[start_pos] != 'I') { + // '9' except 'I' and the non-breaking space (0xa0).
+ if (data[start_pos] != 'I' && data[start_pos] != 0xa0) { return isolate->heap()->nan_value(); } } else if (len - start_pos < 10 && AreDigits(data, start_pos, len)) { @@ -5159,244 +5123,51 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) { } -RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) { - NoHandleAllocation ha; - ASSERT(args.length() == 1); - - CONVERT_ARG_CHECKED(JSArray, codes, 0); - int length = Smi::cast(codes->length())->value(); - - // Check if the string can be ASCII. - int i; - for (i = 0; i < length; i++) { - Object* element; - { MaybeObject* maybe_element = codes->GetElement(i); - // We probably can't get an exception here, but just in order to enforce - // the checking of inputs in the runtime calls we check here. - if (!maybe_element->ToObject(&element)) return maybe_element; - } - CONVERT_NUMBER_CHECKED(int, chr, Int32, element); - if ((chr & 0xffff) > String::kMaxAsciiCharCode) - break; - } - - MaybeObject* maybe_object = NULL; - if (i == length) { // The string is ASCII. - maybe_object = isolate->heap()->AllocateRawAsciiString(length); - } else { // The string is not ASCII. - maybe_object = isolate->heap()->AllocateRawTwoByteString(length); - } - - Object* object = NULL; - if (!maybe_object->ToObject(&object)) return maybe_object; - String* result = String::cast(object); - for (int i = 0; i < length; i++) { - Object* element; - { MaybeObject* maybe_element = codes->GetElement(i); - if (!maybe_element->ToObject(&element)) return maybe_element; - } - CONVERT_NUMBER_CHECKED(int, chr, Int32, element); - result->Set(i, chr & 0xffff); +RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) { + NoHandleAllocation ha(isolate); + CONVERT_SMI_ARG_CHECKED(length, 0); + CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1); + if (length == 0) return isolate->heap()->empty_string(); + if (is_one_byte) { + return isolate->heap()->AllocateRawOneByteString(length); + } else { + return isolate->heap()->AllocateRawTwoByteString(length); } - return result; } -// kNotEscaped is generated by the following: -// -// #!/bin/perl -// for (my $i = 0; $i < 256; $i++) { -// print "\n" if $i % 16 == 0; -// my $c = chr($i); -// my $escaped = 1; -// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#; -// print $escaped ? 
"0, " : "1, "; -// } - - -static bool IsNotEscaped(uint16_t character) { - // Only for 8 bit characters, the rest are always escaped (in a different way) - ASSERT(character < 256); - static const char kNotEscaped[256] = { - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - }; - return kNotEscaped[character] != 0; +RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) { + NoHandleAllocation ha(isolate); + CONVERT_ARG_CHECKED(SeqString, string, 0); + CONVERT_SMI_ARG_CHECKED(new_length, 1); + return string->Truncate(new_length); } RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) { - const char hex_chars[] = "0123456789ABCDEF"; - NoHandleAllocation ha; + HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(String, source, 0); - - source->TryFlatten(); - - int escaped_length = 0; - int length = source->length(); - { - Access<StringInputBuffer> buffer( - isolate->runtime_state()->string_input_buffer()); - buffer->Reset(source); - while (buffer->has_more()) { - uint16_t character = buffer->GetNext(); - if (character >= 256) { - escaped_length += 6; - } else if (IsNotEscaped(character)) { - escaped_length++; - } else { - escaped_length += 3; - } - // We don't allow strings that are longer than a maximal length. - ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow. - if (escaped_length > String::kMaxLength) { - isolate->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); - } - } - } - // No length change implies no change. Return original string if no change. 
- if (escaped_length == length) { - return source; - } - Object* o; - { MaybeObject* maybe_o = - isolate->heap()->AllocateRawAsciiString(escaped_length); - if (!maybe_o->ToObject(&o)) return maybe_o; - } - String* destination = String::cast(o); - int dest_position = 0; - - Access<StringInputBuffer> buffer( - isolate->runtime_state()->string_input_buffer()); - buffer->Rewind(); - while (buffer->has_more()) { - uint16_t chr = buffer->GetNext(); - if (chr >= 256) { - destination->Set(dest_position, '%'); - destination->Set(dest_position+1, 'u'); - destination->Set(dest_position+2, hex_chars[chr >> 12]); - destination->Set(dest_position+3, hex_chars[(chr >> 8) & 0xf]); - destination->Set(dest_position+4, hex_chars[(chr >> 4) & 0xf]); - destination->Set(dest_position+5, hex_chars[chr & 0xf]); - dest_position += 6; - } else if (IsNotEscaped(chr)) { - destination->Set(dest_position, chr); - dest_position++; - } else { - destination->Set(dest_position, '%'); - destination->Set(dest_position+1, hex_chars[chr >> 4]); - destination->Set(dest_position+2, hex_chars[chr & 0xf]); - dest_position += 3; - } - } - return destination; -} - - -static inline int TwoDigitHex(uint16_t character1, uint16_t character2) { - static const signed char kHexValue['g'] = { - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, - -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, - -1, 10, 11, 12, 13, 14, 15 }; - - if (character1 > 'f') return -1; - int hi = kHexValue[character1]; - if (hi == -1) return -1; - if (character2 > 'f') return -1; - int lo = kHexValue[character2]; - if (lo == -1) return -1; - return (hi << 4) + lo; -} - - -static inline int Unescape(String* source, - int i, - int length, - int* step) { - uint16_t character = source->Get(i); - int32_t hi = 0; - int32_t lo = 0; - if (character == '%' && - i <= length - 6 && - source->Get(i + 1) == 'u' && - (hi = TwoDigitHex(source->Get(i + 2), - source->Get(i + 3))) != -1 && - (lo = TwoDigitHex(source->Get(i + 4), - source->Get(i + 5))) != -1) { - *step = 6; - return (hi << 8) + lo; - } else if (character == '%' && - i <= length - 3 && - (lo = TwoDigitHex(source->Get(i + 1), - source->Get(i + 2))) != -1) { - *step = 3; - return lo; - } else { - *step = 1; - return character; - } + CONVERT_ARG_HANDLE_CHECKED(String, source, 0); + Handle<String> string = FlattenGetString(source); + String::FlatContent content = string->GetFlatContent(); + ASSERT(content.IsFlat()); + Handle<String> result = + content.IsAscii() ? URIEscape::Escape<uint8_t>(isolate, source) + : URIEscape::Escape<uc16>(isolate, source); + if (result.is_null()) return Failure::OutOfMemoryException(0x12); + return *result; } RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) { - NoHandleAllocation ha; + HandleScope scope(isolate); ASSERT(args.length() == 1); - CONVERT_ARG_CHECKED(String, source, 0); - - source->TryFlatten(); - - bool ascii = true; - int length = source->length(); - - int unescaped_length = 0; - for (int i = 0; i < length; unescaped_length++) { - int step; - if (Unescape(source, i, length, &step) > String::kMaxAsciiCharCode) { - ascii = false; - } - i += step; - } - - // No length change implies no change. Return original string if no change. 
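The hand-written escape()/unescape() paths being removed in this hunk are replaced by the templated URIEscape::Escape and URIUnescape::Unescape helpers (the remainder of the removed unescape body continues below). As a compact standalone restatement of the encoding they implement (hypothetical names, simplified to explicit 16-bit code units, not the library's actual API): code units in [A-Za-z0-9@*_+./-] pass through, other code units below 256 become %XX, and everything else becomes %uXXXX; decoding reads two or four hex digits after '%'.

#include <cstdint>
#include <string>

static bool IsNotEscaped(uint16_t c) {
  if (c >= 256) return false;
  switch (c) {
    case '@': case '*': case '_': case '+': case '-': case '.': case '/':
      return true;
    default:
      return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z') ||
             (c >= '0' && c <= '9');
  }
}

static std::string Escape(const std::u16string& src) {
  static const char kHex[] = "0123456789ABCDEF";
  std::string out;
  for (std::u16string::size_type i = 0; i < src.size(); i++) {
    uint16_t c = src[i];
    if (c >= 256) {                      // %uXXXX for two-byte code units.
      out += "%u";
      out += kHex[(c >> 12) & 0xF];
      out += kHex[(c >> 8) & 0xF];
      out += kHex[(c >> 4) & 0xF];
      out += kHex[c & 0xF];
    } else if (IsNotEscaped(c)) {        // Pass-through set.
      out += static_cast<char>(c);
    } else {                             // %XX for everything else below 256.
      out += '%';
      out += kHex[(c >> 4) & 0xF];
      out += kHex[c & 0xF];
    }
  }
  return out;
}

static int HexValue(char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'A' && c <= 'F') return c - 'A' + 10;
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  return -1;
}

// Decodes one escape starting at s[i]; *step receives the number of input
// characters consumed (6 for %uXXXX, 3 for %XX, 1 for a literal character).
static uint16_t UnescapeAt(const std::string& s, std::string::size_type i,
                           int* step) {
  if (s[i] == '%' && i + 6 <= s.size() && s[i + 1] == 'u' &&
      HexValue(s[i + 2]) >= 0 && HexValue(s[i + 3]) >= 0 &&
      HexValue(s[i + 4]) >= 0 && HexValue(s[i + 5]) >= 0) {
    *step = 6;
    return static_cast<uint16_t>(
        (HexValue(s[i + 2]) << 12) | (HexValue(s[i + 3]) << 8) |
        (HexValue(s[i + 4]) << 4) | HexValue(s[i + 5]));
  }
  if (s[i] == '%' && i + 3 <= s.size() &&
      HexValue(s[i + 1]) >= 0 && HexValue(s[i + 2]) >= 0) {
    *step = 3;
    return static_cast<uint16_t>(
        (HexValue(s[i + 1]) << 4) | HexValue(s[i + 2]));
  }
  *step = 1;
  return static_cast<uint8_t>(s[i]);
}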
- if (unescaped_length == length) - return source; - - Object* o; - { MaybeObject* maybe_o = - ascii ? - isolate->heap()->AllocateRawAsciiString(unescaped_length) : - isolate->heap()->AllocateRawTwoByteString(unescaped_length); - if (!maybe_o->ToObject(&o)) return maybe_o; - } - String* destination = String::cast(o); - - int dest_position = 0; - for (int i = 0; i < length; dest_position++) { - int step; - destination->Set(dest_position, Unescape(source, i, length, &step)); - i += step; - } - return destination; + CONVERT_ARG_HANDLE_CHECKED(String, source, 0); + Handle<String> string = FlattenGetString(source); + String::FlatContent content = string->GetFlatContent(); + ASSERT(content.IsFlat()); + return content.IsAscii() ? *URIUnescape::Unescape<uint8_t>(isolate, source) + : *URIUnescape::Unescape<uc16>(isolate, source); } @@ -5482,8 +5253,8 @@ MaybeObject* AllocateRawString<SeqTwoByteString>(Isolate* isolate, int length) { template <> -MaybeObject* AllocateRawString<SeqAsciiString>(Isolate* isolate, int length) { - return isolate->heap()->AllocateRawAsciiString(length); +MaybeObject* AllocateRawString<SeqOneByteString>(Isolate* isolate, int length) { + return isolate->heap()->AllocateRawOneByteString(length); } @@ -5497,7 +5268,7 @@ static MaybeObject* SlowQuoteJsonString(Isolate* isolate, int quoted_length = kSpaceForQuotes; while (read_cursor < end) { Char c = *(read_cursor++); - if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) { + if (static_cast<unsigned>(c) >= kQuoteTableLength) { quoted_length++; } else { quoted_length += JsonQuoteLengths[static_cast<unsigned>(c)]; @@ -5519,7 +5290,7 @@ static MaybeObject* SlowQuoteJsonString(Isolate* isolate, read_cursor = characters.start(); while (read_cursor < end) { Char c = *(read_cursor++); - if (sizeof(Char) > 1u && static_cast<unsigned>(c) >= kQuoteTableLength) { + if (static_cast<unsigned>(c) >= kQuoteTableLength) { *(write_cursor++) = c; } else { int len = JsonQuoteLengths[static_cast<unsigned>(c)]; @@ -5547,8 +5318,7 @@ static inline SinkChar* WriteQuoteJsonString( *(write_cursor++) = '"'; while (read_cursor < end) { SourceChar c = *(read_cursor++); - if (sizeof(SourceChar) > 1u && - static_cast<unsigned>(c) >= kQuoteTableLength) { + if (static_cast<unsigned>(c) >= kQuoteTableLength) { *(write_cursor++) = static_cast<SinkChar>(c); } else { int len = JsonQuoteLengths[static_cast<unsigned>(c)]; @@ -5617,7 +5387,7 @@ static MaybeObject* QuoteJsonString(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(String, str, 0); if (!str->IsFlat()) { MaybeObject* try_flatten = str->TryFlatten(); @@ -5634,14 +5404,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) { return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate, flat.ToUC16Vector()); } else { - return QuoteJsonString<char, SeqAsciiString, false>(isolate, - flat.ToAsciiVector()); + return QuoteJsonString<uint8_t, SeqOneByteString, false>( + isolate, + flat.ToOneByteVector()); } } RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(String, str, 0); if (!str->IsFlat()) { MaybeObject* try_flatten = str->TryFlatten(); @@ -5657,8 +5428,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) { return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate, flat.ToUC16Vector()); } else { - return QuoteJsonString<char, SeqAsciiString, true>(isolate, - 
flat.ToAsciiVector()); + return QuoteJsonString<uint8_t, SeqOneByteString, true>( + isolate, + flat.ToOneByteVector()); } } @@ -5699,9 +5471,10 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate, write_cursor, content.ToUC16Vector()); } else { - write_cursor = WriteQuoteJsonString<Char, char>(isolate, - write_cursor, - content.ToAsciiVector()); + write_cursor = + WriteQuoteJsonString<Char, uint8_t>(isolate, + write_cursor, + content.ToOneByteVector()); } } *(write_cursor++) = ']'; @@ -5717,7 +5490,7 @@ static MaybeObject* QuoteJsonStringArray(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSArray, array, 0); @@ -5749,7 +5522,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) { } if (ascii) { - return QuoteJsonStringArray<char, SeqAsciiString>(isolate, + return QuoteJsonStringArray<char, SeqOneByteString>(isolate, elements, worst_case_length); } else { @@ -5760,8 +5533,16 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + BasicJsonStringifier stringifier(isolate); + return stringifier.Stringify(Handle<Object>(args[0], isolate)); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(String, s, 0); CONVERT_SMI_ARG_CHECKED(radix, 1); @@ -5775,7 +5556,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseFloat) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(String, str, 0); // ECMA-262 section 15.1.2.3, empty string is NaN @@ -5806,8 +5587,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper( // might break in the future if we implement more context and locale // dependent upper/lower conversions. Object* o; - { MaybeObject* maybe_o = s->IsAsciiRepresentation() - ? isolate->heap()->AllocateRawAsciiString(length) + { MaybeObject* maybe_o = s->IsOneByteRepresentation() + ? isolate->heap()->AllocateRawOneByteString(length) : isolate->heap()->AllocateRawTwoByteString(length); if (!maybe_o->ToObject(&o)) return maybe_o; } @@ -5816,15 +5597,15 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper( // Convert all characters to upper case, assuming that they will fit // in the buffer - Access<StringInputBuffer> buffer( - isolate->runtime_state()->string_input_buffer()); - buffer->Reset(s); + Access<ConsStringIteratorOp> op( + isolate->runtime_state()->string_iterator()); + StringCharacterStream stream(s, op.value()); unibrow::uchar chars[Converter::kMaxWidth]; // We can assume that the string is not empty - uc32 current = buffer->GetNext(); + uc32 current = stream.GetNext(); for (int i = 0; i < length;) { - bool has_next = buffer->has_more(); - uc32 next = has_next ? buffer->GetNext() : 0; + bool has_next = stream.HasMore(); + uc32 next = has_next ? stream.GetNext() : 0; int char_length = mapping->get(current, next, chars); if (char_length == 0) { // The case conversion of this character is the character itself. 
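The QuoteJsonString family above applies standard JSON string escaping; the runtime versions first compute the exact quoted length from a per-character table (JsonQuoteLengths) and then write into a preallocated sequential string, split by template into one-byte and two-byte variants. A simplified single-pass restatement of the escaping rule itself (std::string, hypothetical helper names, not the runtime's code):

#include <cstdio>
#include <string>

// Returns the escape sequence for one byte, or NULL if it passes through.
static const char* JsonEscapeFor(unsigned char c, char* scratch) {
  switch (c) {
    case '"':  return "\\\"";
    case '\\': return "\\\\";
    case '\b': return "\\b";
    case '\f': return "\\f";
    case '\n': return "\\n";
    case '\r': return "\\r";
    case '\t': return "\\t";
    default:
      if (c < 0x20) {  // Remaining control characters become \u00XX.
        std::snprintf(scratch, 7, "\\u%04x", static_cast<unsigned>(c));
        return scratch;
      }
      return NULL;
  }
}

static std::string QuoteJson(const std::string& bytes) {
  std::string out;
  out += '"';
  char scratch[8];
  for (std::string::size_type i = 0; i < bytes.size(); i++) {
    unsigned char c = static_cast<unsigned char>(bytes[i]);
    const char* esc = JsonEscapeFor(c, scratch);
    if (esc != NULL) {
      out += esc;
    } else {
      out += static_cast<char>(c);
    }
  }
  out += '"';
  return out;
}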
@@ -5854,8 +5635,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper( if (next_length == 0) next_length = 1; } int current_length = i + char_length + next_length; - while (buffer->has_more()) { - current = buffer->GetNext(); + while (stream.HasMore()) { + current = stream.GetNext(); // NOTE: we use 0 as the next character here because, while // the next character may affect what a character converts to, // it does not in any case affect the length of what it convert @@ -5865,7 +5646,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper( current_length += char_length; if (current_length > Smi::kMaxValue) { isolate->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x13); } } // Try again with the real length. @@ -5894,7 +5675,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper( namespace { static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF; - +static const uintptr_t kAsciiMask = kOneInEveryByte << 7; // Given a word and two range boundaries returns a word with high bit // set in every byte iff the corresponding input byte was strictly in @@ -5904,11 +5685,9 @@ static const uintptr_t kOneInEveryByte = kUintptrAllBitsSet / 0xFF; // Requires: all bytes in the input word and the boundaries must be // ASCII (less than 0x7F). static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) { - // Every byte in an ASCII string is less than or equal to 0x7F. - ASSERT((w & (kOneInEveryByte * 0x7F)) == w); // Use strict inequalities since in edge cases the function could be // further simplified. - ASSERT(0 < m && m < n && n < 0x7F); + ASSERT(0 < m && m < n); // Has high bit set in every w byte less than n. uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w; // Has high bit set in every w byte greater than m. @@ -5925,7 +5704,7 @@ enum AsciiCaseConversion { template <AsciiCaseConversion dir> struct FastAsciiConverter { - static bool Convert(char* dst, char* src, int length) { + static bool Convert(char* dst, char* src, int length, bool* changed_out) { #ifdef DEBUG char* saved_dst = dst; char* saved_src = src; @@ -5937,12 +5716,14 @@ struct FastAsciiConverter { const char lo = (dir == ASCII_TO_LOWER) ? 'A' - 1 : 'a' - 1; const char hi = (dir == ASCII_TO_LOWER) ? 'Z' + 1 : 'z' + 1; bool changed = false; + uintptr_t or_acc = 0; char* const limit = src + length; #ifdef V8_HOST_CAN_READ_UNALIGNED // Process the prefix of the input that requires no conversion one // (machine) word at a time. while (src <= limit - sizeof(uintptr_t)) { uintptr_t w = *reinterpret_cast<uintptr_t*>(src); + or_acc |= w; if (AsciiRangeMask(w, lo, hi) != 0) { changed = true; break; @@ -5955,6 +5736,7 @@ struct FastAsciiConverter { // required one word at a time. while (src <= limit - sizeof(uintptr_t)) { uintptr_t w = *reinterpret_cast<uintptr_t*>(src); + or_acc |= w; uintptr_t m = AsciiRangeMask(w, lo, hi); // The mask has high (7th) bit set in every byte that needs // conversion and we know that the distance between cases is @@ -5968,6 +5750,7 @@ struct FastAsciiConverter { // unaligned access is not supported). 
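The word-at-a-time fast path in FastAsciiConverter above leans on AsciiRangeMask: in one branch-free step it flags every byte of a machine word that lies strictly between the two case boundaries, and flipping bit 5 of each flagged byte performs the case change. A standalone demo of that trick (uint64_t standing in for uintptr_t; all participating bytes are assumed ASCII, which is what the or_acc accumulator added in this patch checks for):

#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint64_t kOneInEveryByte = ~static_cast<uint64_t>(0) / 0xFF;

// High bit set in every byte b of w with m < b < n; all bytes must be ASCII.
static inline uint64_t AsciiRangeMask(uint64_t w, char m, char n) {
  uint64_t lt_n = kOneInEveryByte * (0x7F + n) - w;  // high bit: byte < n
  uint64_t gt_m = w + kOneInEveryByte * (0x7F - m);  // high bit: byte > m
  return lt_n & gt_m & (kOneInEveryByte * 0x80);
}

int main() {
  const char buf[8] = {'A', 'b', 'Z', 'z', '0', 'q', '.', 'M'};
  uint64_t w;
  std::memcpy(&w, buf, sizeof(w));
  // Flag bytes strictly between 'a' - 1 and 'z' + 1, i.e. lower-case letters.
  uint64_t mask = AsciiRangeMask(w, 'a' - 1, 'z' + 1);
  unsigned char flags[8];
  std::memcpy(flags, &mask, sizeof(mask));
  for (int i = 0; i < 8; i++) {
    // Flipping bit 5 (c ^ (1 << 5)) of each flagged byte gives its upper case.
    std::printf("%c -> %s\n", buf[i], (flags[i] & 0x80) ? "convert" : "keep");
  }
  return 0;
}

The scalar tail loop referred to by the context line above continues just below.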
while (src < limit) { char c = *src; + or_acc |= c; if (lo < c && c < hi) { c ^= (1 << 5); changed = true; @@ -5976,10 +5759,14 @@ struct FastAsciiConverter { ++src; ++dst; } + if ((or_acc & kAsciiMask) != 0) { + return false; + } #ifdef DEBUG CheckConvert(saved_dst, saved_src, length, changed); #endif - return changed; + *changed_out = changed; + return true; } #ifdef DEBUG @@ -6024,7 +5811,7 @@ MUST_USE_RESULT static MaybeObject* ConvertCase( Arguments args, Isolate* isolate, unibrow::Mapping<typename ConvertTraits::UnibrowConverter, 128>* mapping) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(String, s, 0); s = s->TryFlattenGetString(); @@ -6038,15 +5825,22 @@ MUST_USE_RESULT static MaybeObject* ConvertCase( // character is also ASCII. This is currently the case, but it // might break in the future if we implement more context and locale // dependent upper/lower conversions. - if (s->IsSeqAsciiString()) { + if (s->IsSeqOneByteString()) { Object* o; - { MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length); + { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length); if (!maybe_o->ToObject(&o)) return maybe_o; } - SeqAsciiString* result = SeqAsciiString::cast(o); - bool has_changed_character = ConvertTraits::AsciiConverter::Convert( - result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length); - return has_changed_character ? result : s; + SeqOneByteString* result = SeqOneByteString::cast(o); + bool has_changed_character; + bool is_ascii = ConvertTraits::AsciiConverter::Convert( + reinterpret_cast<char*>(result->GetChars()), + reinterpret_cast<char*>(SeqOneByteString::cast(s)->GetChars()), + length, + &has_changed_character); + // If not ASCII, we discard the result and take the 2 byte path. + if (is_ascii) { + return has_changed_character ? result : s; + } } Object* answer; @@ -6084,7 +5878,7 @@ static inline bool IsTrimWhiteSpace(unibrow::uchar c) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(String, s, 0); @@ -6112,8 +5906,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringTrim) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { - ASSERT(args.length() == 3); HandleScope handle_scope(isolate); + ASSERT(args.length() == 3); CONVERT_ARG_HANDLE_CHECKED(String, subject, 0); CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1); CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]); @@ -6123,11 +5917,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { RUNTIME_ASSERT(pattern_length > 0); if (limit == 0xffffffffu) { - Handle<Object> cached_answer(RegExpResultsCache::Lookup( - isolate->heap(), - *subject, - *pattern, - RegExpResultsCache::STRING_SPLIT_SUBSTRINGS)); + Handle<Object> cached_answer( + RegExpResultsCache::Lookup(isolate->heap(), + *subject, + *pattern, + RegExpResultsCache::STRING_SPLIT_SUBSTRINGS), + isolate); if (*cached_answer != Smi::FromInt(0)) { // The cache FixedArray is a COW-array and can therefore be reused. 
Handle<JSArray> result = @@ -6179,7 +5974,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { Handle<FixedArray> elements(FixedArray::cast(result->elements())); int part_start = 0; for (int i = 0; i < part_count; i++) { - HandleScope local_loop_handle; + HandleScope local_loop_handle(isolate); int part_end = indices.at(i); Handle<String> substring = isolate->factory()->NewProperSubString(subject, part_start, part_end); @@ -6206,7 +6001,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) { // not in the cache and fills the remainder with smi zeros. Returns // the length of the successfully copied prefix. static int CopyCachedAsciiCharsToArray(Heap* heap, - const char* chars, + const uint8_t* chars, FixedArray* elements, int length) { AssertNoAllocation no_gc; @@ -6247,7 +6042,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) { Handle<FixedArray> elements; int position = 0; - if (s->IsFlat() && s->IsAsciiRepresentation()) { + if (s->IsFlat() && s->IsOneByteRepresentation()) { // Try using cached chars where possible. Object* obj; { MaybeObject* maybe_obj = @@ -6257,7 +6052,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) { elements = Handle<FixedArray>(FixedArray::cast(obj), isolate); String::FlatContent content = s->GetFlatContent(); if (content.IsAscii()) { - Vector<const char> chars = content.ToAsciiVector(); + Vector<const uint8_t> chars = content.ToOneByteVector(); // Note, this will initialize all elements (not only the prefix) // to prevent GC from seeing partially initialized array. position = CopyCachedAsciiCharsToArray(isolate->heap(), @@ -6273,7 +6068,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) { elements = isolate->factory()->NewFixedArray(length); } for (int i = position; i < length; ++i) { - Handle<Object> str = LookupSingleCharacterStringFromCode(s->Get(i)); + Handle<Object> str = + LookupSingleCharacterStringFromCode(isolate, s->Get(i)); elements->set(i, *str); } @@ -6288,7 +6084,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStringWrapper) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(String, value, 0); return value->ToObject(); @@ -6303,7 +6099,7 @@ bool Runtime::IsUpperCaseChar(RuntimeState* runtime_state, uint16_t ch) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* number = args[0]; @@ -6314,7 +6110,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToString) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* number = args[0]; @@ -6325,7 +6121,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToStringSkipCache) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(number, 0); @@ -6339,7 +6135,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToInteger) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(number, 0); @@ -6358,7 +6154,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToIntegerMapMinusZero) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); 
CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]); @@ -6367,7 +6163,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSUint32) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(number, 0); @@ -6383,7 +6179,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToJSInt32) { // Converts a Number to a Smi, if possible. Returns NaN if the number is not // a small integer. RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* obj = args[0]; @@ -6402,14 +6198,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToSmi) { RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateHeapNumber) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); return isolate->heap()->AllocateHeapNumber(0); } RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6419,7 +6215,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAdd) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6429,7 +6225,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSub) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6439,7 +6235,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMul) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6448,7 +6244,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberUnaryMinus) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); return isolate->heap()->NumberFromDouble(9876543210.0); @@ -6456,7 +6252,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAlloc) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6466,7 +6262,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberDiv) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6479,7 +6275,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberMod) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StringAdd) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(String, str1, 0); CONVERT_ARG_CHECKED(String, str2, 1); @@ -6528,12 +6324,12 @@ static inline void StringBuilderConcatHelper(String* special, RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(JSArray, array, 0); if (!args[1]->IsSmi()) { isolate->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x14); } int array_length = args.smi_at(1); CONVERT_ARG_CHECKED(String, special, 2); @@ -6546,7 +6342,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { int special_length = special->length(); 
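Runtime_StringBuilderConcat, which continues below (and Runtime_StringBuilderJoin after it), follows a two-pass pattern: first sum the lengths of all pieces, failing with Failure::OutOfMemoryException as soon as the running total would exceed String::kMaxLength, then allocate the sequential result exactly once and copy everything into it. A minimal standalone sketch of that pattern (hypothetical JoinParts, std::string instead of V8 strings):

#include <cstddef>
#include <stdexcept>
#include <string>
#include <vector>

static const std::size_t kMaxLength = std::size_t(1) << 28;  // stand-in for String::kMaxLength

static std::string JoinParts(const std::vector<std::string>& parts,
                             const std::string& separator) {
  // Pass 1: compute the final length, rejecting oversized results up front.
  std::size_t length = 0;
  if (parts.size() > 1) {
    if (!separator.empty() &&
        parts.size() - 1 > kMaxLength / separator.size()) {
      throw std::length_error("result too large");
    }
    length = (parts.size() - 1) * separator.size();
  }
  for (std::size_t i = 0; i < parts.size(); i++) {
    if (parts[i].size() > kMaxLength - length) {
      throw std::length_error("result too large");
    }
    length += parts[i].size();
  }
  // Pass 2: allocate once, then copy pieces and separators in order.
  std::string result;
  result.reserve(length);
  for (std::size_t i = 0; i < parts.size(); i++) {
    if (i != 0) result += separator;
    result += parts[i];
  }
  return result;
}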
if (!array->HasFastObjectElements()) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } FixedArray* fixed_array = FixedArray::cast(array->elements()); if (fixed_array->length() < array_length) { @@ -6560,7 +6356,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { if (first->IsString()) return first; } - bool ascii = special->HasOnlyAsciiChars(); + bool one_byte = special->IsOneByteConvertible(); int position = 0; for (int i = 0; i < array_length; i++) { int increment = 0; @@ -6580,37 +6376,37 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { // Get the position and check that it is a positive smi. i++; if (i >= array_length) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } Object* next_smi = fixed_array->get(i); if (!next_smi->IsSmi()) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } pos = Smi::cast(next_smi)->value(); if (pos < 0) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } } ASSERT(pos >= 0); ASSERT(len >= 0); if (pos > special_length || len > special_length - pos) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } increment = len; } else if (elt->IsString()) { String* element = String::cast(elt); int element_length = element->length(); increment = element_length; - if (ascii && !element->HasOnlyAsciiChars()) { - ascii = false; + if (one_byte && !element->IsOneByteConvertible()) { + one_byte = false; } } else { ASSERT(!elt->IsTheHole()); - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } if (increment > String::kMaxLength - position) { isolate->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x15); } position += increment; } @@ -6618,12 +6414,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { int length = position; Object* object; - if (ascii) { + if (one_byte) { { MaybeObject* maybe_object = - isolate->heap()->AllocateRawAsciiString(length); + isolate->heap()->AllocateRawOneByteString(length); if (!maybe_object->ToObject(&object)) return maybe_object; } - SeqAsciiString* answer = SeqAsciiString::cast(object); + SeqOneByteString* answer = SeqOneByteString::cast(object); StringBuilderConcatHelper(special, answer->GetChars(), fixed_array, @@ -6645,18 +6441,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(JSArray, array, 0); if (!args[1]->IsSmi()) { isolate->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x16); } int array_length = args.smi_at(1); CONVERT_ARG_CHECKED(String, separator, 2); if (!array->HasFastObjectElements()) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } FixedArray* fixed_array = FixedArray::cast(array->elements()); if (fixed_array->length() < array_length) { @@ -6675,20 
+6471,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) { (String::kMaxLength + separator_length - 1) / separator_length; if (max_nof_separators < (array_length - 1)) { isolate->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x17); } int length = (array_length - 1) * separator_length; for (int i = 0; i < array_length; i++) { Object* element_obj = fixed_array->get(i); if (!element_obj->IsString()) { // TODO(1161): handle this case. - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } String* element = String::cast(element_obj); int increment = element->length(); if (increment > String::kMaxLength - length) { isolate->context()->mark_out_of_memory(); - return Failure::OutOfMemoryException(); + return Failure::OutOfMemoryException(0x18); } length += increment; } @@ -6723,7 +6519,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderJoin) { } ASSERT(sink == end); - ASSERT(!answer->HasOnlyAsciiChars()); // Use %_FastAsciiArrayJoin instead. + // Use %_FastAsciiArrayJoin instead. + ASSERT(!answer->IsOneByteRepresentation()); return answer; } @@ -6769,7 +6566,7 @@ static void JoinSparseArrayWithSeparator(FixedArray* elements, RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(JSArray, elements_array, 0); RUNTIME_ASSERT(elements_array->HasFastSmiOrObjectElements()); @@ -6782,10 +6579,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { // Find total length of join result. int string_length = 0; - bool is_ascii = separator->IsAsciiRepresentation(); + bool is_ascii = separator->IsOneByteRepresentation(); int max_string_length; if (is_ascii) { - max_string_length = SeqAsciiString::kMaxLength; + max_string_length = SeqOneByteString::kMaxLength; } else { max_string_length = SeqTwoByteString::kMaxLength; } @@ -6799,7 +6596,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { RUNTIME_ASSERT(elements->get(i + 1)->IsString()); String* string = String::cast(elements->get(i + 1)); int length = string->length(); - if (is_ascii && !string->IsAsciiRepresentation()) { + if (is_ascii && !string->IsOneByteRepresentation()) { is_ascii = false; max_string_length = SeqTwoByteString::kMaxLength; } @@ -6835,16 +6632,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { if (is_ascii) { MaybeObject* result_allocation = - isolate->heap()->AllocateRawAsciiString(string_length); + isolate->heap()->AllocateRawOneByteString(string_length); if (result_allocation->IsFailure()) return result_allocation; - SeqAsciiString* result_string = - SeqAsciiString::cast(result_allocation->ToObjectUnchecked()); - JoinSparseArrayWithSeparator<char>(elements, - elements_length, - array_length, - separator, - Vector<char>(result_string->GetChars(), - string_length)); + SeqOneByteString* result_string = + SeqOneByteString::cast(result_allocation->ToObjectUnchecked()); + JoinSparseArrayWithSeparator<uint8_t>(elements, + elements_length, + array_length, + separator, + Vector<uint8_t>( + result_string->GetChars(), + string_length)); return result_string; } else { MaybeObject* result_allocation = @@ -6864,7 +6662,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); 
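JoinSparseArrayWithSeparator above receives only the non-hole elements of the array, as (index, string) pairs, yet every skipped position still contributes one separator to the result. A standalone sketch of that accounting (hypothetical SparseJoin, std::u16string in place of V8 strings, entries assumed sorted by index); the simple bitwise Number runtime functions continue below.

#include <cstddef>
#include <cstdint>
#include <string>
#include <utility>
#include <vector>

static std::u16string SparseJoin(
    const std::vector<std::pair<uint32_t, std::u16string> >& entries,
    uint32_t array_length,
    const std::u16string& separator) {
  std::u16string result;
  uint32_t previous = 0;
  for (std::size_t e = 0; e < entries.size(); e++) {
    // One separator for every array slot crossed since the last element.
    for (uint32_t i = previous; i < entries[e].first; i++) result += separator;
    result += entries[e].second;
    previous = entries[e].first;
  }
  // Trailing holes still separate: an array of length n has n - 1 separators.
  for (uint32_t i = previous; i + 1 < array_length; i++) result += separator;
  return result;
}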
ASSERT(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); @@ -6874,7 +6672,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberOr) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); @@ -6884,7 +6682,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberAnd) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); @@ -6894,7 +6692,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberXor) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); @@ -6903,7 +6701,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberNot) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); @@ -6913,7 +6711,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShl) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]); @@ -6923,7 +6721,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberShr) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]); @@ -6933,7 +6731,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberSar) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6952,7 +6750,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberEquals) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(String, x, 0); @@ -6970,7 +6768,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringEquals) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -6985,7 +6783,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberCompare) { // Compare two Smis as if they were converted to strings and then // compared lexicographically. 
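// Illustrative sketch: a change repeated throughout this file is
// "NoHandleAllocation ha;" becoming "NoHandleAllocation ha(isolate);", i.e. the
// scope guard now receives the isolate explicitly rather than looking it up via
// Isolate::Current(). The guard below is a hypothetical stand-in showing the
// RAII shape of such a scope; it is not V8's actual class.
struct IsolateLike {
  bool allow_handle_allocation = true;  // hypothetical per-isolate flag
};

class ScopedNoHandleAllocation {
 public:
  explicit ScopedNoHandleAllocation(IsolateLike* isolate)
      : isolate_(isolate), previous_(isolate->allow_handle_allocation) {
    isolate_->allow_handle_allocation = false;  // forbidden inside the scope
  }
  ~ScopedNoHandleAllocation() {
    isolate_->allow_handle_allocation = previous_;  // restored on scope exit
  }

 private:
  IsolateLike* isolate_;
  bool previous_;
};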
RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_SMI_ARG_CHECKED(x_value, 0); CONVERT_SMI_ARG_CHECKED(y_value, 1); @@ -7059,23 +6857,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SmiLexicographicCompare) { } -static Object* StringInputBufferCompare(RuntimeState* state, +static Object* StringCharacterStreamCompare(RuntimeState* state, String* x, String* y) { - StringInputBuffer& bufx = *state->string_input_buffer_compare_bufx(); - StringInputBuffer& bufy = *state->string_input_buffer_compare_bufy(); - bufx.Reset(x); - bufy.Reset(y); - while (bufx.has_more() && bufy.has_more()) { - int d = bufx.GetNext() - bufy.GetNext(); + StringCharacterStream stream_x(x, state->string_iterator_compare_x()); + StringCharacterStream stream_y(y, state->string_iterator_compare_y()); + while (stream_x.HasMore() && stream_y.HasMore()) { + int d = stream_x.GetNext() - stream_y.GetNext(); if (d < 0) return Smi::FromInt(LESS); else if (d > 0) return Smi::FromInt(GREATER); } // x is (non-trivial) prefix of y: - if (bufy.has_more()) return Smi::FromInt(LESS); + if (stream_y.HasMore()) return Smi::FromInt(LESS); // y is prefix of x: - return Smi::FromInt(bufx.has_more() ? GREATER : EQUAL); + return Smi::FromInt(stream_x.HasMore() ? GREATER : EQUAL); } @@ -7094,9 +6890,9 @@ static Object* FlatStringCompare(String* x, String* y) { String::FlatContent x_content = x->GetFlatContent(); String::FlatContent y_content = y->GetFlatContent(); if (x_content.IsAscii()) { - Vector<const char> x_chars = x_content.ToAsciiVector(); + Vector<const uint8_t> x_chars = x_content.ToOneByteVector(); if (y_content.IsAscii()) { - Vector<const char> y_chars = y_content.ToAsciiVector(); + Vector<const uint8_t> y_chars = y_content.ToOneByteVector(); r = CompareChars(x_chars.start(), y_chars.start(), prefix_length); } else { Vector<const uc16> y_chars = y_content.ToUC16Vector(); @@ -7105,7 +6901,7 @@ static Object* FlatStringCompare(String* x, String* y) { } else { Vector<const uc16> x_chars = x_content.ToUC16Vector(); if (y_content.IsAscii()) { - Vector<const char> y_chars = y_content.ToAsciiVector(); + Vector<const uint8_t> y_chars = y_content.ToOneByteVector(); r = CompareChars(x_chars.start(), y_chars.start(), prefix_length); } else { Vector<const uc16> y_chars = y_content.ToUC16Vector(); @@ -7119,13 +6915,13 @@ static Object* FlatStringCompare(String* x, String* y) { result = (r < 0) ? Smi::FromInt(LESS) : Smi::FromInt(GREATER); } ASSERT(result == - StringInputBufferCompare(Isolate::Current()->runtime_state(), x, y)); + StringCharacterStreamCompare(Isolate::Current()->runtime_state(), x, y)); return result; } RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(String, x, 0); @@ -7155,12 +6951,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringCompare) { } return (x->IsFlat() && y->IsFlat()) ? 
FlatStringCompare(x, y) - : StringInputBufferCompare(isolate->runtime_state(), x, y); + : StringCharacterStreamCompare(isolate->runtime_state(), x, y); } RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_acos()->Increment(); @@ -7170,7 +6966,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_acos) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_asin()->Increment(); @@ -7180,7 +6976,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_asin) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_atan()->Increment(); @@ -7193,7 +6989,7 @@ static const double kPiDividedBy4 = 0.78539816339744830962; RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); isolate->counters()->math_atan2()->Increment(); @@ -7216,7 +7012,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_atan2) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_ceil()->Increment(); @@ -7226,7 +7022,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_ceil) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_cos()->Increment(); @@ -7236,17 +7032,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_cos) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_exp()->Increment(); CONVERT_DOUBLE_ARG_CHECKED(x, 0); - return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x); + lazily_initialize_fast_exp(); + return isolate->heap()->NumberFromDouble(fast_exp(x)); } RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_floor()->Increment(); @@ -7256,7 +7053,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_floor) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_log()->Increment(); @@ -7267,7 +7064,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) { // Slow version of Math.pow. We check for fast paths for special cases. // Used if SSE2/VFP3 is not available. RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); isolate->counters()->math_pow()->Increment(); @@ -7281,19 +7078,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) { } CONVERT_DOUBLE_ARG_CHECKED(y, 1); - int y_int = static_cast<int>(y); - double result; - if (y == y_int) { - result = power_double_int(x, y_int); // Returns 1 if exponent is 0. - } else if (y == 0.5) { - result = (isinf(x)) ? V8_INFINITY - : fast_sqrt(x + 0.0); // Convert -0 to +0. - } else if (y == -0.5) { - result = (isinf(x)) ? 0 - : 1.0 / fast_sqrt(x + 0.0); // Convert -0 to +0. 
- } else { - result = power_double_double(x, y); - } + double result = power_helper(x, y); if (isnan(result)) return isolate->heap()->nan_value(); return isolate->heap()->AllocateHeapNumber(result); } @@ -7301,7 +7086,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) { // Fast version of Math.pow if we know that y is not an integer and y is not // -0.5 or 0.5. Used as slow case from full codegen. RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); isolate->counters()->math_pow()->Increment(); @@ -7318,7 +7103,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) { RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_round()->Increment(); @@ -7361,7 +7146,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RoundNumber) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_sin()->Increment(); @@ -7371,7 +7156,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sin) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_sqrt()->Increment(); @@ -7381,7 +7166,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_sqrt) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); isolate->counters()->math_tan()->Increment(); @@ -7391,7 +7176,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_tan) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DateMakeDay) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_SMI_ARG_CHECKED(year, 0); @@ -7534,7 +7319,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewArgumentsFast) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); JSFunction* callee = JSFunction::cast(args[0]); @@ -7591,10 +7376,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewClosure) { // into C++ code. Collect these in a newly allocated array of handles (possibly // prefixed by a number of empty handles). static SmartArrayPointer<Handle<Object> > GetCallerArguments( + Isolate* isolate, int prefix_argc, int* total_argc) { // Find frame containing arguments passed to the caller. 
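// Illustrative sketch: the hunk above folds the inlined Math.pow special cases
// into a single power_helper(x, y) call. The removed lines show the dispatch
// that helper is expected to preserve: integer exponents, y == +/-0.5 handled
// via square roots (with -0 normalized to +0 and infinities treated first),
// and the general double/double path otherwise. A minimal standalone
// restatement using <cmath> instead of V8's internal helpers:
#include <cmath>

static double PowerHelperSketch(double x, double y) {
  int y_int = static_cast<int>(y);
  if (y == y_int) {
    return std::pow(x, static_cast<double>(y_int));        // integer exponent
  }
  if (y == 0.5) {
    return std::isinf(x) ? INFINITY : std::sqrt(x + 0.0);  // convert -0 to +0
  }
  if (y == -0.5) {
    return std::isinf(x) ? 0.0 : 1.0 / std::sqrt(x + 0.0);
  }
  return std::pow(x, y);                                    // general case
}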
- JavaScriptFrameIterator it; + JavaScriptFrameIterator it(isolate); JavaScriptFrame* frame = it.frame(); List<JSFunction*> functions(2); frame->GetFunctions(&functions); @@ -7613,7 +7399,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments( SmartArrayPointer<Handle<Object> > param_data( NewArray<Handle<Object> >(*total_argc)); for (int i = 0; i < args_count; i++) { - Handle<Object> val = args_slots[i].GetValue(); + Handle<Object> val = args_slots[i].GetValue(isolate); param_data[prefix_argc + i] = val; } @@ -7629,7 +7415,7 @@ static SmartArrayPointer<Handle<Object> > GetCallerArguments( SmartArrayPointer<Handle<Object> > param_data( NewArray<Handle<Object> >(*total_argc)); for (int i = 0; i < args_count; i++) { - Handle<Object> val = Handle<Object>(frame->GetParameter(i)); + Handle<Object> val = Handle<Object>(frame->GetParameter(i), isolate); param_data[prefix_argc + i] = val; } return param_data; @@ -7648,7 +7434,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) { bound_function->shared()->set_bound(true); // Get all arguments of calling function (Function.prototype.bind). int argc = 0; - SmartArrayPointer<Handle<Object> > arguments = GetCallerArguments(0, &argc); + SmartArrayPointer<Handle<Object> > arguments = + GetCallerArguments(isolate, 0, &argc); // Don't count the this-arg. if (argc > 0) { ASSERT(*arguments[0] == args[2]); @@ -7665,7 +7452,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) { JSFunction::cast(*bindee)->function_bindings()); new_bindings = isolate->factory()->NewFixedArray(old_bindings->length() + argc); - bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex)); + bindee = Handle<Object>(old_bindings->get(JSFunction::kBoundFunctionIndex), + isolate); i = 0; for (int n = old_bindings->length(); i < n; i++) { new_bindings->set(i, old_bindings->get(i)); @@ -7686,11 +7474,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) { bound_function->set_function_bindings(*new_bindings); // Update length. 
- Handle<String> length_symbol = isolate->factory()->length_symbol(); + Handle<String> length_string = isolate->factory()->length_string(); Handle<Object> new_length(args.at<Object>(3)); PropertyAttributes attr = static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY); - ForceSetProperty(bound_function, length_symbol, new_length, attr); + ForceSetProperty(bound_function, length_string, new_length, attr); return *bound_function; } @@ -7724,16 +7512,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewObjectFromBound) { Handle<FixedArray>(FixedArray::cast(function->function_bindings())); int bound_argc = bound_args->length() - JSFunction::kBoundArgumentsStartIndex; Handle<Object> bound_function( - JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex))); + JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)), + isolate); ASSERT(!bound_function->IsJSFunction() || !Handle<JSFunction>::cast(bound_function)->shared()->bound()); int total_argc = 0; SmartArrayPointer<Handle<Object> > param_data = - GetCallerArguments(bound_argc, &total_argc); + GetCallerArguments(isolate, bound_argc, &total_argc); for (int i = 0; i < bound_argc; i++) { param_data[i] = Handle<Object>(bound_args->get( - JSFunction::kBoundArgumentsStartIndex + i)); + JSFunction::kBoundArgumentsStartIndex + i), isolate); } if (!bound_function->IsJSFunction()) { @@ -7885,31 +7674,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) { } -RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) { - HandleScope scope(isolate); - ASSERT(args.length() == 1); - Handle<JSFunction> function = args.at<JSFunction>(0); - +bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) { // If the function is not compiled ignore the lazy // recompilation. This can happen if the debugger is activated and // the function is returned to the not compiled state. - if (!function->shared()->is_compiled()) { - function->ReplaceCode(function->shared()->code()); - return function->code(); - } + if (!function->shared()->is_compiled()) return false; // If the function is not optimizable or debugger is active continue using the // code from the full compiler. if (!FLAG_crankshaft || - !function->shared()->code()->optimizable() || + function->shared()->optimization_disabled() || isolate->DebuggerHasBreakPoints()) { if (FLAG_trace_opt) { PrintF("[failed to optimize "); function->PrintName(); PrintF(": is code optimizable: %s, is debugger enabled: %s]\n", - function->shared()->code()->optimizable() ? "T" : "F", + function->shared()->optimization_disabled() ? "F" : "T", isolate->DebuggerHasBreakPoints() ? 
"T" : "F"); } + return false; + } + return true; +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + Handle<JSFunction> function = args.at<JSFunction>(0); + + if (!AllowOptimization(isolate, function)) { function->ReplaceCode(function->shared()->code()); return function->code(); } @@ -7931,9 +7725,31 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) { RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) { HandleScope handle_scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + if (!AllowOptimization(isolate, function)) { + function->ReplaceCode(function->shared()->code()); + return isolate->heap()->undefined_value(); + } + function->shared()->code()->set_profiler_ticks(0); ASSERT(FLAG_parallel_recompilation); - Compiler::RecompileParallel(args.at<JSFunction>(0)); - return *isolate->factory()->undefined_value(); + Compiler::RecompileParallel(function); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) { + HandleScope handle_scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + ASSERT(V8::UseCrankshaft() && FLAG_parallel_recompilation); + OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread(); + do { + // The function could have been marked for installing, but not queued just + // yet. In this case, retry until installed. + opt_thread->InstallOptimizedFunctions(); + } while (function->IsMarkedForInstallingRecompiledCode()); + return function->code(); } @@ -7962,6 +7778,17 @@ class ActivationsFinder : public ThreadVisitor { }; +RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyStubFailure) { + HandleScope scope(isolate); + ASSERT(args.length() == 0); + Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate); + ASSERT(isolate->heap()->IsAllocationAllowed()); + ASSERT(deoptimizer->compiled_code_kind() == Code::COMPILED_STUB); + delete deoptimizer; + return isolate->heap()->undefined_value(); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) { HandleScope scope(isolate); ASSERT(args.length() == 1); @@ -7970,9 +7797,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) { static_cast<Deoptimizer::BailoutType>(args.smi_at(0)); Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate); ASSERT(isolate->heap()->IsAllocationAllowed()); - JavaScriptFrameIterator it(isolate); + + ASSERT(deoptimizer->compiled_code_kind() != Code::COMPILED_STUB); // Make sure to materialize objects before causing any allocation. 
+ JavaScriptFrameIterator it(isolate); deoptimizer->MaterializeHeapObjects(&it); delete deoptimizer; @@ -8024,6 +7853,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyDeoptimized) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NotifyOSR) { + NoHandleAllocation ha(isolate); Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate); delete deoptimizer; return isolate->heap()->undefined_value(); @@ -8056,6 +7886,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClearFunctionTypeFeedback) { RUNTIME_FUNCTION(MaybeObject*, Runtime_RunningInSimulator) { + NoHandleAllocation ha(isolate); #if defined(USE_SIMULATOR) return isolate->heap()->true_value(); #else @@ -8076,16 +7907,32 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) { if (args.length() == 2 && unoptimized->kind() == Code::FUNCTION) { CONVERT_ARG_HANDLE_CHECKED(String, type, 1); - CHECK(type->IsEqualTo(CStrVector("osr"))); - isolate->runtime_profiler()->AttemptOnStackReplacement(*function); - unoptimized->set_allow_osr_at_loop_nesting_level( - Code::kMaxLoopNestingMarker); + if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) { + isolate->runtime_profiler()->AttemptOnStackReplacement(*function); + unoptimized->set_allow_osr_at_loop_nesting_level( + Code::kMaxLoopNestingMarker); + } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("parallel"))) { + function->MarkForParallelRecompilation(); + } } return isolate->heap()->undefined_value(); } +RUNTIME_FUNCTION(MaybeObject*, Runtime_WaitUntilOptimized) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); + if (FLAG_parallel_recompilation) { + if (V8::UseCrankshaft() && function->IsOptimizable()) { + while (!function->IsOptimized()) OS::Sleep(50); + } + } + return isolate->heap()->undefined_value(); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) { HandleScope scope(isolate); ASSERT(args.length() == 1); @@ -8208,15 +8055,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) { function->PrintName(); PrintF("]\n"); } - Handle<Code> check_code; - if (FLAG_count_based_interrupts) { - InterruptStub interrupt_stub; - check_code = interrupt_stub.GetCode(); - } else // NOLINT - { // NOLINT - StackCheckStub check_stub; - check_code = check_stub.GetCode(); - } + InterruptStub interrupt_stub; + Handle<Code> check_code = interrupt_stub.GetCode(isolate); Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement(); Deoptimizer::RevertStackCheckCode(*unoptimized, *check_code, @@ -8241,12 +8081,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckIsBootstrapping) { + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(isolate->bootstrapper()->IsActive()); return isolate->heap()->undefined_value(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_GetRootNaN) { + NoHandleAllocation ha(isolate); RUNTIME_ASSERT(isolate->bootstrapper()->IsActive()); return isolate->heap()->nan_value(); } @@ -8274,12 +8116,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Call) { MaybeObject* maybe = args[1 + i]; Object* object; if (!maybe->To<Object>(&object)) return maybe; - argv[i] = Handle<Object>(object); + argv[i] = Handle<Object>(object, isolate); } bool threw; Handle<JSReceiver> hfun(fun); - Handle<Object> hreceiver(receiver); + Handle<Object> hreceiver(receiver, isolate); Handle<Object> result = Execution::Call(hfun, hreceiver, argc, argv, &threw, true); @@ -8340,7 +8182,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetConstructorDelegate) { 
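// Illustrative sketch: Runtime_InstallRecompiledCode and
// Runtime_WaitUntilOptimized above both poll until the background
// recompilation result becomes visible (the former by repeatedly asking the
// optimizing compiler thread to install finished functions, the latter by
// sleeping 50 ms between checks). A generic restatement of that poll loop; the
// callable parameters are hypothetical stand-ins:
#include <chrono>
#include <thread>

template <typename StillPending, typename DrainFinishedWork>
void WaitUntilInstalled(StillPending still_pending,
                        DrainFinishedWork drain_finished_work) {
  while (still_pending()) {
    drain_finished_work();  // publish whatever the worker has completed
    std::this_thread::sleep_for(std::chrono::milliseconds(50));
  }
}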
RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSFunction, function, 0); @@ -8360,7 +8202,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewGlobalContext) { RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, function, 0); @@ -8377,7 +8219,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewFunctionContext) { RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); JSObject* extension_object; if (args[0]->IsJSObject()) { @@ -8421,7 +8263,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushWithContext) { RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); String* name = String::cast(args[0]); Object* thrown_object = args[1]; @@ -8447,7 +8289,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushCatchContext) { RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); ScopeInfo* scope_info = ScopeInfo::cast(args[0]); JSFunction* function; @@ -8471,6 +8313,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PushBlockContext) { RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* obj = args[0]; return isolate->heap()->ToBoolean(obj->IsJSModule()); @@ -8478,20 +8321,90 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) { RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) { - NoHandleAllocation ha; - ASSERT(args.length() == 1); - CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 0); + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 2); + CONVERT_SMI_ARG_CHECKED(index, 0); + + if (!args[1]->IsScopeInfo()) { + // Module already initialized. Find hosting context and retrieve context. + Context* host = Context::cast(isolate->context())->global_context(); + Context* context = Context::cast(host->get(index)); + ASSERT(context->previous() == isolate->context()); + isolate->set_context(context); + return context; + } - Context* context = Context::cast(instance->context()); + CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1); + + // Allocate module context. + HandleScope scope(isolate); + Factory* factory = isolate->factory(); + Handle<Context> context = factory->NewModuleContext(scope_info); + Handle<JSModule> module = factory->NewJSModule(context, scope_info); + context->set_module(*module); Context* previous = isolate->context(); - ASSERT(context->IsModuleContext()); - // Initialize the context links. context->set_previous(previous); context->set_closure(previous->closure()); context->set_global_object(previous->global_object()); - isolate->set_context(context); + isolate->set_context(*context); - return context; + // Find hosting scope and initialize internal variable holding module there. 
+ previous->global_context()->set(index, *context); + + return *context; +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0); + Context* host_context = isolate->context(); + + for (int i = 0; i < descriptions->length(); ++i) { + Handle<ModuleInfo> description(ModuleInfo::cast(descriptions->get(i))); + int host_index = description->host_index(); + Handle<Context> context(Context::cast(host_context->get(host_index))); + Handle<JSModule> module(context->module()); + + for (int j = 0; j < description->length(); ++j) { + Handle<String> name(description->name(j)); + VariableMode mode = description->mode(j); + int index = description->index(j); + switch (mode) { + case VAR: + case LET: + case CONST: + case CONST_HARMONY: { + PropertyAttributes attr = + IsImmutableVariableMode(mode) ? FROZEN : SEALED; + Handle<AccessorInfo> info = + Accessors::MakeModuleExport(name, index, attr); + Handle<Object> result = SetAccessor(module, info); + ASSERT(!(result.is_null() || result->IsUndefined())); + USE(result); + break; + } + case MODULE: { + Object* referenced_context = Context::cast(host_context)->get(index); + Handle<JSModule> value(Context::cast(referenced_context)->module()); + JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode); + break; + } + case INTERNAL: + case TEMPORARY: + case DYNAMIC: + case DYNAMIC_GLOBAL: + case DYNAMIC_LOCAL: + UNREACHABLE(); + } + } + + JSObject::PreventExtensions(module); + } + + ASSERT(!isolate->has_pending_exception()); + return isolate->heap()->undefined_value(); } @@ -8653,9 +8566,11 @@ static ObjectPair LoadContextSlotHelper(Arguments args, Handle<JSObject> object = Handle<JSObject>::cast(holder); ASSERT(object->HasProperty(*name)); // GetProperty below can cause GC. - Handle<Object> receiver_handle(object->IsGlobalObject() - ? GlobalObject::cast(*object)->global_receiver() - : ComputeReceiverForNonGlobal(isolate, *object)); + Handle<Object> receiver_handle( + object->IsGlobalObject() + ? GlobalObject::cast(*object)->global_receiver() + : ComputeReceiverForNonGlobal(isolate, *object), + isolate); // No need to unhole the value here. This is taken care of by the // GetProperty function. @@ -8790,6 +8705,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ReThrow) { RUNTIME_FUNCTION(MaybeObject*, Runtime_PromoteScheduledException) { + NoHandleAllocation ha(isolate); ASSERT_EQ(0, args.length()); return isolate->PromoteScheduledException(); } @@ -8817,11 +8733,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ThrowNotDateError) { RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); // First check if this is a real stack overflow. 
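// Illustrative sketch: the rewritten Runtime_PushModuleContext above keys
// module contexts by an index into the hosting global context. On the first
// push it allocates the module context and stores it in that slot; every later
// push just reads the slot back and re-enters it. The class below is a
// hypothetical restatement of that allocate-once-then-reuse-by-index shape:
#include <cstddef>
#include <memory>
#include <vector>

struct ModuleContextLike {};  // stand-in for the real module context

class HostingScope {
 public:
  explicit HostingScope(size_t module_count) : slots_(module_count) {}

  ModuleContextLike* PushModuleContext(size_t index) {
    if (!slots_[index]) {
      slots_[index] = std::make_unique<ModuleContextLike>();  // first entry
    }
    return slots_[index].get();  // already initialized: reuse it
  }

 private:
  std::vector<std::unique_ptr<ModuleContextLike>> slots_;
};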
if (isolate->stack_guard()->IsStackOverflow()) { - NoHandleAllocation na; + NoHandleAllocation na(isolate); return isolate->StackOverflow(); } @@ -8830,22 +8747,23 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StackGuard) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Interrupt) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); return Execution::HandleStackGuardInterrupt(isolate); } -static int StackSize() { +static int StackSize(Isolate* isolate) { int n = 0; - for (JavaScriptFrameIterator it; !it.done(); it.Advance()) n++; + for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) n++; return n; } -static void PrintTransition(Object* result) { +static void PrintTransition(Isolate* isolate, Object* result) { // indentation { const int nmax = 80; - int n = StackSize(); + int n = StackSize(isolate); if (n <= nmax) PrintF("%4d:%*s", n, n, ""); else @@ -8853,7 +8771,7 @@ static void PrintTransition(Object* result) { } if (result == NULL) { - JavaScriptFrame::PrintTop(stdout, true, false); + JavaScriptFrame::PrintTop(isolate, stdout, true, false); PrintF(" {\n"); } else { // function result @@ -8865,22 +8783,22 @@ static void PrintTransition(Object* result) { RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceEnter) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); - NoHandleAllocation ha; - PrintTransition(NULL); + PrintTransition(isolate, NULL); return isolate->heap()->undefined_value(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_TraceExit) { - NoHandleAllocation ha; - PrintTransition(args[0]); + NoHandleAllocation ha(isolate); + PrintTransition(isolate, args[0]); return args[0]; // return TOS } RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); #ifdef DEBUG @@ -8911,15 +8829,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrint) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugTrace) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); - NoHandleAllocation ha; isolate->PrintStack(); return isolate->heap()->undefined_value(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_DateCurrentTime) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); // According to ECMA-262, section 15.9.1, page 117, the precision of @@ -8952,7 +8870,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) { bool result; String::FlatContent str_content = str->GetFlatContent(); if (str_content.IsAscii()) { - result = DateParser::Parse(str_content.ToAsciiVector(), + result = DateParser::Parse(str_content.ToOneByteVector(), output_array, isolate->unicode_cache()); } else { @@ -8971,7 +8889,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -8982,7 +8900,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateLocalTimezone) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_DOUBLE_ARG_CHECKED(x, 0); @@ -8993,6 +8911,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateToUTC) { RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalReceiver) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); Object* global = args[0]; if (!global->IsJSGlobalObject()) return isolate->heap()->null_value(); @@ -9009,7 +8928,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) { source = 
Handle<String>(source->TryFlattenGetString()); // Optimized fast case where we only have ASCII characters. Handle<Object> result; - if (source->IsSeqAsciiString()) { + if (source->IsSeqOneByteString()) { result = JsonParser<true>::Parse(source, zone); } else { result = JsonParser<false>::Parse(source, zone); @@ -9042,8 +8961,9 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate, RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) { HandleScope scope(isolate); - ASSERT_EQ(1, args.length()); + ASSERT_EQ(2, args.length()); CONVERT_ARG_HANDLE_CHECKED(String, source, 0); + CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1); // Extract native context. Handle<Context> context(isolate->context()->native_context()); @@ -9059,8 +8979,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileString) { } // Compile source string in the native context. + ParseRestriction restriction = function_literal_only + ? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION; Handle<SharedFunctionInfo> shared = Compiler::CompileEval( - source, context, true, CLASSIC_MODE, RelocInfo::kNoPosition); + source, context, true, CLASSIC_MODE, restriction, RelocInfo::kNoPosition); if (shared.is_null()) return Failure::Exception(); Handle<JSFunction> fun = isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, @@ -9096,6 +9018,7 @@ static ObjectPair CompileGlobalEval(Isolate* isolate, Handle<Context>(isolate->context()), context->IsNativeContext(), language_mode, + NO_PARSE_RESTRICTION, scope_position); if (shared.is_null()) return MakePair(Failure::Exception(), NULL); Handle<JSFunction> compiled = @@ -9106,9 +9029,9 @@ static ObjectPair CompileGlobalEval(Isolate* isolate, RUNTIME_FUNCTION(ObjectPair, Runtime_ResolvePossiblyDirectEval) { + HandleScope scope(isolate); ASSERT(args.length() == 5); - HandleScope scope(isolate); Handle<Object> callee = args.at<Object>(0); // If "eval" didn't refer to the original GlobalEval, it's not a @@ -9155,6 +9078,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) { // Allocate a block of memory in NewSpace (filled with a filler). // Use as fallback for allocation in generated code when NewSpace // is full. + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0); int size = size_smi->value(); @@ -9173,13 +9097,35 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInOldPointerSpace) { + // Allocate a block of memory in old pointer space (filled with a filler). + // Use as fallback for allocation in generated code when old pointer space + // is full. + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(Smi, size_smi, 0); + int size = size_smi->value(); + RUNTIME_ASSERT(IsAligned(size, kPointerSize)); + RUNTIME_ASSERT(size > 0); + Heap* heap = isolate->heap(); + Object* allocation; + { MaybeObject* maybe_allocation = + heap->old_pointer_space()->AllocateRaw(size); + if (maybe_allocation->ToObject(&allocation)) { + heap->CreateFillerObjectAt(HeapObject::cast(allocation)->address(), size); + } + return maybe_allocation; + } +} + + // Push an object unto an array of objects if it is not already in the // array. Returns true if the element was pushed on the stack and // false otherwise. 
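// Illustrative sketch: the new Runtime_AllocateInOldPointerSpace above (like
// the existing new-space variant) reserves a raw block and immediately stamps
// a filler object over it, so the heap stays walkable until generated code
// writes the real object. The toy arena below shows that reserve-then-fill
// idea with plain bytes; it is not the V8 heap API.
#include <cstddef>
#include <cstring>
#include <vector>

class FillerArena {
 public:
  explicit FillerArena(size_t capacity) : buffer_(capacity), top_(0) {}

  // Returns nullptr when the arena is exhausted; a caller would then fall back
  // to a slower path, much as generated code falls back to the runtime.
  void* AllocateFilled(size_t size) {
    if (size > buffer_.size() - top_) return nullptr;
    void* result = buffer_.data() + top_;
    std::memset(result, 0xCA, size);  // stand-in for CreateFillerObjectAt
    top_ += size;
    return result;
  }

 private:
  std::vector<unsigned char> buffer_;
  size_t top_;
};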
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSArray, array, 0); - CONVERT_ARG_CHECKED(JSObject, element, 1); + CONVERT_ARG_CHECKED(JSReceiver, element, 1); RUNTIME_ASSERT(array->HasFastSmiOrObjectElements()); int length = Smi::cast(array->length())->value(); FixedArray* elements = FixedArray::cast(array->elements()); @@ -9248,7 +9194,7 @@ class ArrayConcatVisitor { clear_storage(); set_storage(*result); } -} + } void increase_index_offset(uint32_t delta) { if (JSObject::kMaxElementCount - index_offset_ < delta) { @@ -9286,8 +9232,8 @@ class ArrayConcatVisitor { current_storage->length())); uint32_t current_length = static_cast<uint32_t>(current_storage->length()); for (uint32_t i = 0; i < current_length; i++) { - HandleScope loop_scope; - Handle<Object> element(current_storage->get(i)); + HandleScope loop_scope(isolate_); + Handle<Object> element(current_storage->get(i), isolate_); if (!element->IsTheHole()) { Handle<SeededNumberDictionary> new_storage = isolate_->factory()->DictionaryAtNumberPut(slow_storage, i, element); @@ -9339,16 +9285,28 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) { break; } case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - // TODO(1810): Decide if it's worthwhile to implement this. - UNREACHABLE(); + case FAST_HOLEY_DOUBLE_ELEMENTS: { + // Fast elements can't have lengths that are not representable by + // a 32-bit signed integer. + ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0); + int fast_length = static_cast<int>(length); + if (array->elements()->IsFixedArray()) { + ASSERT(FixedArray::cast(array->elements())->length() == 0); + break; + } + Handle<FixedDoubleArray> elements( + FixedDoubleArray::cast(array->elements())); + for (int i = 0; i < fast_length; i++) { + if (!elements->is_the_hole(i)) element_count++; + } break; + } case DICTIONARY_ELEMENTS: { Handle<SeededNumberDictionary> dictionary( SeededNumberDictionary::cast(array->elements())); int capacity = dictionary->Capacity(); for (int i = 0; i < capacity; i++) { - Handle<Object> key(dictionary->KeyAt(i)); + Handle<Object> key(dictionary->KeyAt(i), array->GetIsolate()); if (dictionary->IsKey(*key)) { element_count++; } @@ -9389,16 +9347,17 @@ static void IterateExternalArrayElements(Isolate* isolate, if (elements_are_ints) { if (elements_are_guaranteed_smis) { for (uint32_t j = 0; j < len; j++) { - HandleScope loop_scope; - Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j)))); + HandleScope loop_scope(isolate); + Handle<Smi> e(Smi::FromInt(static_cast<int>(array->get_scalar(j))), + isolate); visitor->visit(j, e); } } else { for (uint32_t j = 0; j < len; j++) { - HandleScope loop_scope; + HandleScope loop_scope(isolate); int64_t val = static_cast<int64_t>(array->get_scalar(j)); if (Smi::IsValid(static_cast<intptr_t>(val))) { - Handle<Smi> e(Smi::FromInt(static_cast<int>(val))); + Handle<Smi> e(Smi::FromInt(static_cast<int>(val)), isolate); visitor->visit(j, e); } else { Handle<Object> e = @@ -9428,6 +9387,7 @@ static int compareUInt32(const uint32_t* ap, const uint32_t* bp) { static void CollectElementIndices(Handle<JSObject> object, uint32_t range, List<uint32_t>* indices) { + Isolate* isolate = object->GetIsolate(); ElementsKind kind = object->GetElementsKind(); switch (kind) { case FAST_SMI_ELEMENTS: @@ -9455,8 +9415,8 @@ static void CollectElementIndices(Handle<JSObject> object, SeededNumberDictionary::cast(object->elements())); uint32_t capacity = 
dict->Capacity(); for (uint32_t j = 0; j < capacity; j++) { - HandleScope loop_scope; - Handle<Object> k(dict->KeyAt(j)); + HandleScope loop_scope(isolate); + Handle<Object> k(dict->KeyAt(j), isolate); if (dict->IsKey(*k)) { ASSERT(k->IsNumber()); uint32_t index = static_cast<uint32_t>(k->Number()); @@ -9535,7 +9495,7 @@ static void CollectElementIndices(Handle<JSObject> object, } } - Handle<Object> prototype(object->GetPrototype()); + Handle<Object> prototype(object->GetPrototype(), isolate); if (prototype->IsJSObject()) { // The prototype will usually have no inherited element indices, // but we have to check. @@ -9585,8 +9545,27 @@ static bool IterateElements(Isolate* isolate, } case FAST_HOLEY_DOUBLE_ELEMENTS: case FAST_DOUBLE_ELEMENTS: { - // TODO(1810): Decide if it's worthwhile to implement this. - UNREACHABLE(); + // Run through the elements FixedArray and use HasElement and GetElement + // to check the prototype for missing elements. + Handle<FixedDoubleArray> elements( + FixedDoubleArray::cast(receiver->elements())); + int fast_length = static_cast<int>(length); + ASSERT(fast_length <= elements->length()); + for (int j = 0; j < fast_length; j++) { + HandleScope loop_scope(isolate); + if (!elements->is_the_hole(j)) { + double double_value = elements->get_scalar(j); + Handle<Object> element_value = + isolate->factory()->NewNumber(double_value); + visitor->visit(j, element_value); + } else if (receiver->HasElement(j)) { + // Call GetElement on receiver, not its prototype, or getters won't + // have the correct receiver. + Handle<Object> element_value = Object::GetElement(receiver, j); + RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false); + visitor->visit(j, element_value); + } + } break; } case DICTIONARY_ELEMENTS: { @@ -9599,7 +9578,7 @@ static bool IterateElements(Isolate* isolate, int j = 0; int n = indices.length(); while (j < n) { - HandleScope loop_scope; + HandleScope loop_scope(isolate); uint32_t index = indices[j]; Handle<Object> element = Object::GetElement(receiver, index); RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element, false); @@ -9615,7 +9594,7 @@ static bool IterateElements(Isolate* isolate, Handle<ExternalPixelArray> pixels(ExternalPixelArray::cast( receiver->elements())); for (uint32_t j = 0; j < length; j++) { - Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j))); + Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate); visitor->visit(j, e); } break; @@ -9676,8 +9655,8 @@ static bool IterateElements(Isolate* isolate, * following the ECMAScript 5 specification. */ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { - ASSERT(args.length() == 1); HandleScope handle_scope(isolate); + ASSERT(args.length() == 1); CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0); int argument_count = static_cast<int>(arguments->length()->Number()); @@ -9689,48 +9668,51 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { // that mutate other arguments (but will otherwise be precise). // The number of elements is precise if there are no inherited elements. + ElementsKind kind = FAST_SMI_ELEMENTS; + uint32_t estimate_result_length = 0; uint32_t estimate_nof_elements = 0; - { - for (int i = 0; i < argument_count; i++) { - HandleScope loop_scope; - Handle<Object> obj(elements->get(i)); - uint32_t length_estimate; - uint32_t element_estimate; - if (obj->IsJSArray()) { - Handle<JSArray> array(Handle<JSArray>::cast(obj)); - // TODO(1810): Find out if it's worthwhile to properly support - // arbitrary ElementsKinds. For now, pessimistically transition to - // FAST_*_ELEMENTS. 
- if (array->HasFastDoubleElements()) { - ElementsKind to_kind = FAST_ELEMENTS; - if (array->HasFastHoleyElements()) { - to_kind = FAST_HOLEY_ELEMENTS; - } - array = Handle<JSArray>::cast( - JSObject::TransitionElementsKind(array, to_kind)); + for (int i = 0; i < argument_count; i++) { + HandleScope loop_scope(isolate); + Handle<Object> obj(elements->get(i), isolate); + uint32_t length_estimate; + uint32_t element_estimate; + if (obj->IsJSArray()) { + Handle<JSArray> array(Handle<JSArray>::cast(obj)); + length_estimate = static_cast<uint32_t>(array->length()->Number()); + if (length_estimate != 0) { + ElementsKind array_kind = + GetPackedElementsKind(array->map()->elements_kind()); + if (IsMoreGeneralElementsKindTransition(kind, array_kind)) { + kind = array_kind; } - length_estimate = - static_cast<uint32_t>(array->length()->Number()); - element_estimate = - EstimateElementCount(array); - } else { - length_estimate = 1; - element_estimate = 1; } - // Avoid overflows by capping at kMaxElementCount. - if (JSObject::kMaxElementCount - estimate_result_length < - length_estimate) { - estimate_result_length = JSObject::kMaxElementCount; - } else { - estimate_result_length += length_estimate; - } - if (JSObject::kMaxElementCount - estimate_nof_elements < - element_estimate) { - estimate_nof_elements = JSObject::kMaxElementCount; - } else { - estimate_nof_elements += element_estimate; + element_estimate = EstimateElementCount(array); + } else { + if (obj->IsHeapObject()) { + if (obj->IsNumber()) { + if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) { + kind = FAST_DOUBLE_ELEMENTS; + } + } else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) { + kind = FAST_ELEMENTS; + } } + length_estimate = 1; + element_estimate = 1; + } + // Avoid overflows by capping at kMaxElementCount. + if (JSObject::kMaxElementCount - estimate_result_length < + length_estimate) { + estimate_result_length = JSObject::kMaxElementCount; + } else { + estimate_result_length += length_estimate; + } + if (JSObject::kMaxElementCount - estimate_nof_elements < + element_estimate) { + estimate_nof_elements = JSObject::kMaxElementCount; + } else { + estimate_nof_elements += element_estimate; } } @@ -9741,8 +9723,76 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { Handle<FixedArray> storage; if (fast_case) { - // The backing storage array must have non-existing elements to - // preserve holes across concat operations. + if (kind == FAST_DOUBLE_ELEMENTS) { + Handle<FixedDoubleArray> double_storage = + isolate->factory()->NewFixedDoubleArray(estimate_result_length); + int j = 0; + bool failure = false; + for (int i = 0; i < argument_count; i++) { + Handle<Object> obj(elements->get(i), isolate); + if (obj->IsSmi()) { + double_storage->set(j, Smi::cast(*obj)->value()); + j++; + } else if (obj->IsNumber()) { + double_storage->set(j, obj->Number()); + j++; + } else { + JSArray* array = JSArray::cast(*obj); + uint32_t length = static_cast<uint32_t>(array->length()->Number()); + switch (array->map()->elements_kind()) { + case FAST_HOLEY_DOUBLE_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: { + // Empty fixed array indicates that there are no elements. 
+ if (array->elements()->IsFixedArray()) break; + FixedDoubleArray* elements = + FixedDoubleArray::cast(array->elements()); + for (uint32_t i = 0; i < length; i++) { + if (elements->is_the_hole(i)) { + failure = true; + break; + } + double double_value = elements->get_scalar(i); + double_storage->set(j, double_value); + j++; + } + break; + } + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_SMI_ELEMENTS: { + FixedArray* elements( + FixedArray::cast(array->elements())); + for (uint32_t i = 0; i < length; i++) { + Object* element = elements->get(i); + if (element->IsTheHole()) { + failure = true; + break; + } + int32_t int_value = Smi::cast(element)->value(); + double_storage->set(j, int_value); + j++; + } + break; + } + case FAST_HOLEY_ELEMENTS: + ASSERT_EQ(0, length); + break; + default: + UNREACHABLE(); + } + } + if (failure) break; + } + Handle<JSArray> array = isolate->factory()->NewJSArray(0); + Smi* length = Smi::FromInt(j); + Handle<Map> map; + map = isolate->factory()->GetElementsTransitionMap(array, kind); + array->set_map(*map); + array->set_length(length); + array->set_elements(*double_storage); + return *array; + } + // The backing storage array must have non-existing elements to preserve + // holes across concat operations. storage = isolate->factory()->NewFixedArrayWithHoles( estimate_result_length); } else { @@ -9756,7 +9806,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { ArrayConcatVisitor visitor(isolate, storage, fast_case); for (int i = 0; i < argument_count; i++) { - Handle<Object> obj(elements->get(i)); + Handle<Object> obj(elements->get(i), isolate); if (obj->IsJSArray()) { Handle<JSArray> array = Handle<JSArray>::cast(obj); if (!IterateElements(isolate, array, &visitor)) { @@ -9775,13 +9825,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) { // This will not allocate (flatten the string), but it may run // very slowly for very deeply nested ConsStrings. For debugging use only. RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(String, string, 0); - StringInputBuffer buffer(string); - while (buffer.has_more()) { - uint16_t character = buffer.GetNext(); + ConsStringIteratorOp op; + StringCharacterStream stream(string, &op); + while (stream.HasMore()) { + uint16_t character = stream.GetNext(); PrintF("%c", character); } return string; @@ -9793,6 +9844,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GlobalPrint) { // property. // Returns the number of non-undefined elements collected. RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSObject, object, 0); CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]); @@ -9802,6 +9854,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RemoveArrayHoles) { // Move contents of argument 0 (an array) to argument 1 (an array) RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSArray, from, 0); CONVERT_ARG_CHECKED(JSArray, to, 1); @@ -9827,6 +9880,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MoveArrayContents) { // How many elements does this object/array have? 
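// Illustrative sketch: the new fast path in Runtime_ArrayConcat above copies
// smis and doubles straight into a FixedDoubleArray and abandons the fast path
// as soon as it meets a hole, leaving the generic visitor to handle the rest.
// A simplified restatement over standard containers, with a hole modeled as an
// empty optional:
#include <optional>
#include <vector>

static bool ConcatDoublesFastPath(
    const std::vector<std::vector<std::optional<double>>>& arrays,
    std::vector<double>* out) {
  for (const auto& array : arrays) {
    for (const auto& element : array) {
      if (!element.has_value()) return false;  // hole: fall back to slow path
      out->push_back(*element);
    }
  }
  return true;
}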
RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, object, 0); HeapObject* elements = object->elements(); @@ -9847,8 +9901,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_EstimateNumberOfElements) { // positive (length)) or undefined values. // Intervals can span over some keys that are not in the object. RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) { - ASSERT(args.length() == 2); HandleScope scope(isolate); + ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0); CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]); if (array->elements()->IsDictionary()) { @@ -9888,9 +9942,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetArrayKeys) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); CONVERT_ARG_CHECKED(JSReceiver, receiver, 0); - CONVERT_ARG_CHECKED(String, name, 1); + CONVERT_ARG_CHECKED(Name, name, 1); CONVERT_SMI_ARG_CHECKED(flag, 2); AccessorComponent component = flag == 0 ? ACCESSOR_GETTER : ACCESSOR_SETTER; if (!receiver->IsJSObject()) return isolate->heap()->undefined_value(); @@ -9900,6 +9955,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LookupAccessor) { #ifdef ENABLE_DEBUGGER_SUPPORT RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugBreak) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); return Execution::DebugBreakHelper(); } @@ -9922,6 +9978,7 @@ static StackFrame::Id UnwrapFrameId(int wrapped) { // clearing the event listener function // args[1]: object supplied during callback RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); RUNTIME_ASSERT(args[0]->IsJSFunction() || args[0]->IsUndefined() || @@ -9935,6 +9992,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDebugEventListener) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); isolate->stack_guard()->DebugBreak(); return isolate->heap()->undefined_value(); @@ -9943,7 +10001,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Break) { static MaybeObject* DebugLookupResultValue(Heap* heap, Object* receiver, - String* name, + Name* name, LookupResult* result, bool* caught_exception) { Object* value; @@ -9956,8 +10014,8 @@ static MaybeObject* DebugLookupResultValue(Heap* heap, return value; case FIELD: value = - JSObject::cast( - result->holder())->FastPropertyAt(result->GetFieldIndex()); + JSObject::cast(result->holder())->FastPropertyAt( + result->GetFieldIndex().field_index()); if (value->IsTheHole()) { return heap->undefined_value(); } @@ -10015,7 +10073,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPropertyDetails) { ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); - CONVERT_ARG_HANDLE_CHECKED(String, name, 1); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); // Make sure to set the current context to the context before the debugger was // entered (if the debugger is entered). The reason for switching context here @@ -10113,7 +10171,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) { ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); - CONVERT_ARG_HANDLE_CHECKED(String, name, 1); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); LookupResult result(isolate); obj->Lookup(*name, &result); @@ -10127,6 +10185,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetProperty) { // Return the property type calculated from the property details. 
// args[0]: smi with property details. RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_PROPERTY_DETAILS_CHECKED(details, 0); return Smi::FromInt(static_cast<int>(details.type())); @@ -10136,6 +10195,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyTypeFromDetails) { // Return the property attribute calculated from the property details. // args[0]: smi with property details. RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_PROPERTY_DETAILS_CHECKED(details, 0); return Smi::FromInt(static_cast<int>(details.attributes())); @@ -10145,6 +10205,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyAttributesFromDetails) { // Return the property insertion index calculated from the property details. // args[0]: smi with property details. RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPropertyIndexFromDetails) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_PROPERTY_DETAILS_CHECKED(details, 0); // TODO(verwaest): Depends on the type of details. @@ -10160,7 +10221,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugNamedInterceptorPropertyValue) { ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0); RUNTIME_ASSERT(obj->HasNamedInterceptor()); - CONVERT_ARG_HANDLE_CHECKED(String, name, 1); + CONVERT_ARG_HANDLE_CHECKED(Name, name, 1); PropertyAttributes attributes; return obj->GetPropertyWithInterceptor(*obj, *name, &attributes); @@ -10182,13 +10243,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugIndexedInterceptorElementValue) { RUNTIME_FUNCTION(MaybeObject*, Runtime_CheckExecutionState) { + NoHandleAllocation ha(isolate); ASSERT(args.length() >= 1); CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]); // Check that the break id is valid. if (isolate->debug()->break_id() == 0 || break_id != isolate->debug()->break_id()) { return isolate->Throw( - isolate->heap()->illegal_execution_state_symbol()); + isolate->heap()->illegal_execution_state_string()); } return isolate->heap()->true_value(); @@ -10392,7 +10454,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction())); Handle<SharedFunctionInfo> shared(function->shared()); Handle<ScopeInfo> scope_info(shared->scope_info()); - ASSERT(*scope_info != ScopeInfo::Empty()); + ASSERT(*scope_info != ScopeInfo::Empty(isolate)); // Get the locals names and values into a temporary array. // @@ -10581,33 +10643,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFrameDetails) { } -// Copy all the context locals into an object used to materialize a scope. -static bool CopyContextLocalsToScopeObject( - Isolate* isolate, - Handle<ScopeInfo> scope_info, - Handle<Context> context, - Handle<JSObject> scope_object) { - // Fill all context locals to the context extension. - for (int i = 0; i < scope_info->ContextLocalCount(); i++) { - VariableMode mode; - InitializationFlag init_flag; - int context_index = scope_info->ContextSlotIndex( - scope_info->ContextLocalName(i), &mode, &init_flag); - - RETURN_IF_EMPTY_HANDLE_VALUE( - isolate, - SetProperty(scope_object, - Handle<String>(scope_info->ContextLocalName(i)), - Handle<Object>(context->get(context_index), isolate), - NONE, - kNonStrictMode), - false); - } - - return true; -} - - // Create a plain JSObject which materializes the local scope for the specified // frame. 
static Handle<JSObject> MaterializeLocalScopeWithFrameInspector( @@ -10625,13 +10660,15 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector( // First fill all parameters. for (int i = 0; i < scope_info->ParameterCount(); ++i) { - Handle<Object> value( - i < frame_inspector->GetParametersCount() ? - frame_inspector->GetParameter(i) : isolate->heap()->undefined_value()); + Handle<Object> value(i < frame_inspector->GetParametersCount() + ? frame_inspector->GetParameter(i) + : isolate->heap()->undefined_value(), + isolate); RETURN_IF_EMPTY_HANDLE_VALUE( isolate, - SetProperty(local_scope, + SetProperty(isolate, + local_scope, Handle<String>(scope_info->ParameterName(i)), value, NONE, @@ -10643,9 +10680,10 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector( for (int i = 0; i < scope_info->StackLocalCount(); ++i) { RETURN_IF_EMPTY_HANDLE_VALUE( isolate, - SetProperty(local_scope, + SetProperty(isolate, + local_scope, Handle<String>(scope_info->StackLocalName(i)), - Handle<Object>(frame_inspector->GetExpression(i)), + Handle<Object>(frame_inspector->GetExpression(i), isolate), NONE, kNonStrictMode), Handle<JSObject>()); @@ -10655,8 +10693,8 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector( // Third fill all context locals. Handle<Context> frame_context(Context::cast(frame->context())); Handle<Context> function_context(frame_context->declaration_context()); - if (!CopyContextLocalsToScopeObject( - isolate, scope_info, function_context, local_scope)) { + if (!scope_info->CopyContextLocalsToScopeObject( + isolate, function_context, local_scope)) { return Handle<JSObject>(); } @@ -10677,9 +10715,10 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector( Handle<String> key(String::cast(keys->get(i))); RETURN_IF_EMPTY_HANDLE_VALUE( isolate, - SetProperty(local_scope, + SetProperty(isolate, + local_scope, key, - GetProperty(ext, key), + GetProperty(isolate, ext, key), NONE, kNonStrictMode), Handle<JSObject>()); @@ -10703,6 +10742,95 @@ static Handle<JSObject> MaterializeLocalScope( } +// Set the context local variable value. +static bool SetContextLocalValue(Isolate* isolate, + Handle<ScopeInfo> scope_info, + Handle<Context> context, + Handle<String> variable_name, + Handle<Object> new_value) { + for (int i = 0; i < scope_info->ContextLocalCount(); i++) { + Handle<String> next_name(scope_info->ContextLocalName(i)); + if (variable_name->Equals(*next_name)) { + VariableMode mode; + InitializationFlag init_flag; + int context_index = + scope_info->ContextSlotIndex(*next_name, &mode, &init_flag); + context->set(context_index, *new_value); + return true; + } + } + + return false; +} + + +static bool SetLocalVariableValue(Isolate* isolate, + JavaScriptFrame* frame, + int inlined_jsframe_index, + Handle<String> variable_name, + Handle<Object> new_value) { + if (inlined_jsframe_index != 0 || frame->is_optimized()) { + // Optimized frames are not supported. + return false; + } + + Handle<JSFunction> function(JSFunction::cast(frame->function())); + Handle<SharedFunctionInfo> shared(function->shared()); + Handle<ScopeInfo> scope_info(shared->scope_info()); + + bool default_result = false; + + // Parameters. + for (int i = 0; i < scope_info->ParameterCount(); ++i) { + if (scope_info->ParameterName(i)->Equals(*variable_name)) { + frame->SetParameterValue(i, *new_value); + // Argument might be shadowed in heap context, don't stop here. + default_result = true; + } + } + + // Stack locals. 
+ for (int i = 0; i < scope_info->StackLocalCount(); ++i) { + if (scope_info->StackLocalName(i)->Equals(*variable_name)) { + frame->SetExpression(i, *new_value); + return true; + } + } + + if (scope_info->HasContext()) { + // Context locals. + Handle<Context> frame_context(Context::cast(frame->context())); + Handle<Context> function_context(frame_context->declaration_context()); + if (SetContextLocalValue( + isolate, scope_info, function_context, variable_name, new_value)) { + return true; + } + + // Function context extension. These are variables introduced by eval. + if (function_context->closure() == *function) { + if (function_context->has_extension() && + !function_context->IsNativeContext()) { + Handle<JSObject> ext(JSObject::cast(function_context->extension())); + + if (ext->HasProperty(*variable_name)) { + // We don't expect this to do anything except replacing + // property value. + SetProperty(isolate, + ext, + variable_name, + new_value, + NONE, + kNonStrictMode); + return true; + } + } + } + } + + return default_result; +} + + // Create a plain JSObject which materializes the closure content for the // context. static Handle<JSObject> MaterializeClosure(Isolate* isolate, @@ -10718,8 +10846,8 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate, isolate->factory()->NewJSObject(isolate->object_function()); // Fill all context locals to the context extension. - if (!CopyContextLocalsToScopeObject( - isolate, scope_info, context, closure_scope)) { + if (!scope_info->CopyContextLocalsToScopeObject( + isolate, context, closure_scope)) { return Handle<JSObject>(); } @@ -10738,9 +10866,10 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate, Handle<String> key(String::cast(keys->get(i))); RETURN_IF_EMPTY_HANDLE_VALUE( isolate, - SetProperty(closure_scope, + SetProperty(isolate, + closure_scope, key, - GetProperty(ext, key), + GetProperty(isolate, ext, key), NONE, kNonStrictMode), Handle<JSObject>()); @@ -10751,23 +10880,79 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate, } +// This method copies structure of MaterializeClosure method above. +static bool SetClosureVariableValue(Isolate* isolate, + Handle<Context> context, + Handle<String> variable_name, + Handle<Object> new_value) { + ASSERT(context->IsFunctionContext()); + + Handle<SharedFunctionInfo> shared(context->closure()->shared()); + Handle<ScopeInfo> scope_info(shared->scope_info()); + + // Context locals to the context extension. + if (SetContextLocalValue( + isolate, scope_info, context, variable_name, new_value)) { + return true; + } + + // Properties from the function context extension. This will + // be variables introduced by eval. + if (context->has_extension()) { + Handle<JSObject> ext(JSObject::cast(context->extension())); + if (ext->HasProperty(*variable_name)) { + // We don't expect this to do anything except replacing property value. + SetProperty(isolate, + ext, + variable_name, + new_value, + NONE, + kNonStrictMode); + return true; + } + } + + return false; +} + + // Create a plain JSObject which materializes the scope for the specified // catch context. 
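Before the catch-scope materializer below, it is worth spelling out the lookup order the new SetLocalVariableValue above follows: a matching parameter is updated but the search continues, because the same name may also live in the heap-allocated context and would otherwise keep shadowing the write, while a matching stack local or context local ends the search at once. A small self-contained sketch of that ordering, with std::map standing in for the three storage tiers (all names here are illustrative, not V8's):

    #include <map>
    #include <string>

    // Toy frame with the three tiers SetLocalVariableValue walks, in order.
    struct FrameModel {
      std::map<std::string, int> parameters;
      std::map<std::string, int> stack_locals;
      std::map<std::string, int> context_locals;
    };

    // Writes `value` into every tier that declares `name`.
    static bool SetFrameVariable(FrameModel* frame, const std::string& name, int value) {
      bool default_result = false;
      std::map<std::string, int>::iterator param = frame->parameters.find(name);
      if (param != frame->parameters.end()) {
        param->second = value;
        default_result = true;  // keep going: a context local may shadow the parameter
      }
      std::map<std::string, int>::iterator local = frame->stack_locals.find(name);
      if (local != frame->stack_locals.end()) {
        local->second = value;  // stack slot is authoritative
        return true;
      }
      std::map<std::string, int>::iterator ctx = frame->context_locals.find(name);
      if (ctx != frame->context_locals.end()) {
        ctx->second = value;
        return true;
      }
      return default_result;
    }

Returning default_result rather than false preserves the "a parameter was written, but nothing authoritative was found" case.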
static Handle<JSObject> MaterializeCatchScope(Isolate* isolate, Handle<Context> context) { ASSERT(context->IsCatchContext()); Handle<String> name(String::cast(context->extension())); - Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX)); + Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX), + isolate); Handle<JSObject> catch_scope = isolate->factory()->NewJSObject(isolate->object_function()); RETURN_IF_EMPTY_HANDLE_VALUE( isolate, - SetProperty(catch_scope, name, thrown_object, NONE, kNonStrictMode), + SetProperty(isolate, + catch_scope, + name, + thrown_object, + NONE, + kNonStrictMode), Handle<JSObject>()); return catch_scope; } +static bool SetCatchVariableValue(Isolate* isolate, + Handle<Context> context, + Handle<String> variable_name, + Handle<Object> new_value) { + ASSERT(context->IsCatchContext()); + Handle<String> name(String::cast(context->extension())); + if (!name->Equals(*variable_name)) { + return false; + } + context->set(Context::THROWN_OBJECT_INDEX, *new_value); + return true; +} + + // Create a plain JSObject which materializes the block scope for the specified // block context. static Handle<JSObject> MaterializeBlockScope( @@ -10782,8 +10967,8 @@ static Handle<JSObject> MaterializeBlockScope( isolate->factory()->NewJSObject(isolate->object_function()); // Fill all context locals. - if (!CopyContextLocalsToScopeObject( - isolate, scope_info, context, block_scope)) { + if (!scope_info->CopyContextLocalsToScopeObject( + isolate, context, block_scope)) { return Handle<JSObject>(); } @@ -10805,8 +10990,8 @@ static Handle<JSObject> MaterializeModuleScope( isolate->factory()->NewJSObject(isolate->object_function()); // Fill all context locals. - if (!CopyContextLocalsToScopeObject( - isolate, scope_info, context, module_scope)) { + if (!scope_info->CopyContextLocalsToScopeObject( + isolate, context, module_scope)) { return Handle<JSObject>(); } @@ -11026,6 +11211,33 @@ class ScopeIterator { return Handle<JSObject>(); } + bool SetVariableValue(Handle<String> variable_name, + Handle<Object> new_value) { + ASSERT(!failed_); + switch (Type()) { + case ScopeIterator::ScopeTypeGlobal: + break; + case ScopeIterator::ScopeTypeLocal: + return SetLocalVariableValue(isolate_, frame_, inlined_jsframe_index_, + variable_name, new_value); + case ScopeIterator::ScopeTypeWith: + break; + case ScopeIterator::ScopeTypeCatch: + return SetCatchVariableValue(isolate_, CurrentContext(), + variable_name, new_value); + case ScopeIterator::ScopeTypeClosure: + return SetClosureVariableValue(isolate_, CurrentContext(), + variable_name, new_value); + case ScopeIterator::ScopeTypeBlock: + // TODO(2399): should we implement it? + break; + case ScopeIterator::ScopeTypeModule: + // TODO(2399): should we implement it? 
+ break; + } + return false; + } + Handle<ScopeInfo> CurrentScopeInfo() { ASSERT(!failed_); if (!nested_scope_chain_.is_empty()) { @@ -11068,7 +11280,7 @@ class ScopeIterator { if (!CurrentContext().is_null()) { CurrentContext()->Print(); if (CurrentContext()->has_extension()) { - Handle<Object> extension(CurrentContext()->extension()); + Handle<Object> extension(CurrentContext()->extension(), isolate_); if (extension->IsJSContextExtensionObject()) { extension->Print(); } @@ -11092,7 +11304,7 @@ class ScopeIterator { PrintF("Closure:\n"); CurrentContext()->Print(); if (CurrentContext()->has_extension()) { - Handle<Object> extension(CurrentContext()->extension()); + Handle<Object> extension(CurrentContext()->extension(), isolate_); if (extension->IsJSContextExtensionObject()) { extension->Print(); } @@ -11265,13 +11477,71 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) { } +static bool SetScopeVariableValue(ScopeIterator* it, int index, + Handle<String> variable_name, + Handle<Object> new_value) { + for (int n = 0; !it->Done() && n < index; it->Next()) { + n++; + } + if (it->Done()) { + return false; + } + return it->SetVariableValue(variable_name, new_value); +} + + +// Change variable value in closure or local scope +// args[0]: number or JsFunction: break id or function +// args[1]: number: frame index (when arg[0] is break id) +// args[2]: number: inlined frame index (when arg[0] is break id) +// args[3]: number: scope index +// args[4]: string: variable name +// args[5]: object: new value +// +// Return true if success and false otherwise +RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) { + HandleScope scope(isolate); + ASSERT(args.length() == 6); + + // Check arguments. + CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]); + CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4); + Handle<Object> new_value = args.at<Object>(5); + + bool res; + if (args[0]->IsNumber()) { + Object* check; + { MaybeObject* maybe_check = Runtime_CheckExecutionState( + RUNTIME_ARGUMENTS(isolate, args)); + if (!maybe_check->ToObject(&check)) return maybe_check; + } + CONVERT_SMI_ARG_CHECKED(wrapped_id, 1); + CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); + + // Get the frame where the debugging is performed. + StackFrame::Id id = UnwrapFrameId(wrapped_id); + JavaScriptFrameIterator frame_it(isolate, id); + JavaScriptFrame* frame = frame_it.frame(); + + ScopeIterator it(isolate, frame, inlined_jsframe_index); + res = SetScopeVariableValue(&it, index, variable_name, new_value); + } else { + CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0); + ScopeIterator it(isolate, fun); + res = SetScopeVariableValue(&it, index, variable_name, new_value); + } + + return isolate->heap()->ToBoolean(res); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) { HandleScope scope(isolate); ASSERT(args.length() == 0); #ifdef DEBUG // Print the scopes for the top frame. - StackFrameLocator locator; + StackFrameLocator locator(isolate); JavaScriptFrame* frame = locator.FindJavaScriptFrame(0); for (ScopeIterator it(isolate, frame, 0); !it.Done(); @@ -11503,7 +11773,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) { if (!maybe_check->ToObject(&check)) return maybe_check; } if (!args[1]->IsNumber() || !args[2]->IsNumber()) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } // Get the step action and check validity. 
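The new Runtime_SetScopeVariableValue entry point added above funnels both of its modes (break id plus frame index, or a bare function) into the same helper, which simply advances a scope iterator to the requested index and delegates the write. A compact standalone sketch of that helper, with a toy iterator in place of ScopeIterator (the class and its bookkeeping are invented for illustration):

    #include <string>

    // Toy scope iterator; Done/Next/SetVariableValue mirror the interface the
    // runtime entry point relies on, but the class itself is made up.
    class ScopeWalker {
     public:
      explicit ScopeWalker(int scope_count) : remaining_(scope_count) {}
      bool Done() const { return remaining_ == 0; }
      void Next() { if (remaining_ > 0) --remaining_; }
      bool SetVariableValue(const std::string& /*name*/, int /*value*/) { return true; }
     private:
      int remaining_;
    };

    // Advance the iterator `index` scopes, fail if the chain is shorter than
    // requested, otherwise delegate the write to the scope that was reached.
    static bool SetScopeVariableValue(ScopeWalker* it, int index,
                                      const std::string& name, int value) {
      for (int n = 0; !it->Done() && n < index; it->Next()) {
        n++;
      }
      if (it->Done()) return false;
      return it->SetVariableValue(name, value);
    }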
@@ -11513,13 +11783,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_PrepareStep) { step_action != StepOut && step_action != StepInMin && step_action != StepMin) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } // Get the number of steps. int step_count = NumberToInt32(args[2]); if (step_count < 1) { - return isolate->Throw(isolate->heap()->illegal_argument_symbol()); + return isolate->Throw(isolate->heap()->illegal_argument_string()); } // Clear all current stepping setup. @@ -11573,7 +11843,8 @@ static Handle<Context> CopyNestedScopeContextChain(Isolate* isolate, if (scope_info->Type() == CATCH_SCOPE) { Handle<String> name(String::cast(current->extension())); - Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX)); + Handle<Object> thrown_object(current->get(Context::THROWN_OBJECT_INDEX), + isolate); context = isolate->factory()->NewCatchContext(function, context, @@ -11617,7 +11888,7 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate, // does not support eval) then create an 'arguments' object. int index; if (scope_info->StackLocalCount() > 0) { - index = scope_info->StackSlotIndex(isolate->heap()->arguments_symbol()); + index = scope_info->StackSlotIndex(isolate->heap()->arguments_string()); if (index != -1) { return Handle<Object>(frame->GetExpression(index), isolate); } @@ -11627,7 +11898,7 @@ static Handle<Object> GetArgumentsObject(Isolate* isolate, VariableMode mode; InitializationFlag init_flag; index = scope_info->ContextSlotIndex( - isolate->heap()->arguments_symbol(), &mode, &init_flag); + isolate->heap()->arguments_string(), &mode, &init_flag); if (index != -1) { return Handle<Object>(function_context->get(index), isolate); } @@ -11681,7 +11952,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]); CONVERT_ARG_HANDLE_CHECKED(String, source, 3); CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 4); - Handle<Object> additional_context(args[5]); + Handle<Object> additional_context(args[5], isolate); // Handle the processing of break. DisableBreak disable_break_save(disable_break); @@ -11774,6 +12045,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { context, context->IsNativeContext(), CLASSIC_MODE, + NO_PARSE_RESTRICTION, RelocInfo::kNoPosition); if (shared.is_null()) return Failure::Exception(); Handle<JSFunction> compiled_function = @@ -11819,7 +12091,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluate) { // Skip the global proxy as it has no properties and always delegates to the // real global object. if (result->IsJSGlobalProxy()) { - result = Handle<JSObject>(JSObject::cast(result->GetPrototype())); + result = Handle<JSObject>(JSObject::cast(result->GetPrototype(isolate))); } return *result; @@ -11841,7 +12113,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) { } CONVERT_ARG_HANDLE_CHECKED(String, source, 1); CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 2); - Handle<Object> additional_context(args[3]); + Handle<Object> additional_context(args[3], isolate); // Handle the processing of break. 
DisableBreak disable_break_save(disable_break); @@ -11880,6 +12152,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugEvaluateGlobal) { context, is_global, CLASSIC_MODE, + NO_PARSE_RESTRICTION, RelocInfo::kNoPosition); if (shared.is_null()) return Failure::Exception(); Handle<JSFunction> compiled_function = @@ -11933,7 +12206,8 @@ static int DebugReferencedBy(HeapIterator* iterator, Object* instance_filter, int max_references, FixedArray* instances, int instances_size, JSFunction* arguments_function) { - NoHandleAllocation ha; + Isolate* isolate = target->GetIsolate(); + NoHandleAllocation ha(isolate); AssertNoAllocation no_alloc; // Iterate the heap. @@ -11959,7 +12233,7 @@ static int DebugReferencedBy(HeapIterator* iterator, if (!instance_filter->IsUndefined()) { Object* V = obj; while (true) { - Object* prototype = V->GetPrototype(); + Object* prototype = V->GetPrototype(isolate); if (prototype->IsNull()) { break; } @@ -12002,6 +12276,7 @@ static int DebugReferencedBy(HeapIterator* iterator, // args[1]: constructor function for instances to exclude (Mirror) // args[2]: the the maximum number of objects to return RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 3); // First perform a full GC in order to avoid references from dead objects. @@ -12029,29 +12304,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugReferencedBy) { // Get the number of referencing objects. int count; - HeapIterator heap_iterator; + Heap* heap = isolate->heap(); + HeapIterator heap_iterator(heap); count = DebugReferencedBy(&heap_iterator, target, instance_filter, max_references, NULL, 0, arguments_function); // Allocate an array to hold the result. Object* object; - { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count); + { MaybeObject* maybe_object = heap->AllocateFixedArray(count); if (!maybe_object->ToObject(&object)) return maybe_object; } FixedArray* instances = FixedArray::cast(object); // Fill the referencing objects. // AllocateFixedArray above does not make the heap non-iterable. - ASSERT(HEAP->IsHeapIterable()); - HeapIterator heap_iterator2; + ASSERT(heap->IsHeapIterable()); + HeapIterator heap_iterator2(heap); count = DebugReferencedBy(&heap_iterator2, target, instance_filter, max_references, instances, count, arguments_function); // Return result as JS array. Object* result; - MaybeObject* maybe_result = isolate->heap()->AllocateJSObject( + MaybeObject* maybe_result = heap->AllocateJSObject( isolate->context()->native_context()->array_function()); if (!maybe_result->ToObject(&result)) return maybe_result; return JSArray::cast(result)->SetContent(instances); @@ -12094,11 +12370,12 @@ static int DebugConstructedBy(HeapIterator* iterator, // args[0]: the constructor to find instances of // args[1]: the the maximum number of objects to return RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); // First perform a full GC in order to avoid dead objects. - isolate->heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask, - "%DebugConstructedBy"); + Heap* heap = isolate->heap(); + heap->CollectAllGarbage(Heap::kMakeHeapIterableMask, "%DebugConstructedBy"); // Check parameters. CONVERT_ARG_CHECKED(JSFunction, constructor, 0); @@ -12107,7 +12384,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { // Get the number of referencing objects. 
int count; - HeapIterator heap_iterator; + HeapIterator heap_iterator(heap); count = DebugConstructedBy(&heap_iterator, constructor, max_references, @@ -12116,14 +12393,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { // Allocate an array to hold the result. Object* object; - { MaybeObject* maybe_object = isolate->heap()->AllocateFixedArray(count); + { MaybeObject* maybe_object = heap->AllocateFixedArray(count); if (!maybe_object->ToObject(&object)) return maybe_object; } FixedArray* instances = FixedArray::cast(object); ASSERT(HEAP->IsHeapIterable()); // Fill the referencing objects. - HeapIterator heap_iterator2; + HeapIterator heap_iterator2(heap); count = DebugConstructedBy(&heap_iterator2, constructor, max_references, @@ -12133,7 +12410,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { // Return result as JS array. Object* result; { MaybeObject* maybe_result = isolate->heap()->AllocateJSObject( - isolate->context()->native_context()->array_function()); + isolate->context()->native_context()->array_function()); if (!maybe_result->ToObject(&result)) return maybe_result; } return JSArray::cast(result)->SetContent(instances); @@ -12143,6 +12420,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) { // Find the effective prototype object as returned by __proto__. // args[0]: the object to find the prototype for. RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSObject, obj, 0); @@ -12172,6 +12450,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugSetScriptSource) { RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 0); CPU::DebugBreak(); return isolate->heap()->undefined_value(); @@ -12179,8 +12458,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SystemBreak) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) { -#ifdef DEBUG HandleScope scope(isolate); +#ifdef DEBUG ASSERT(args.length() == 1); // Get the function and make sure it is compiled. CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0); @@ -12194,8 +12473,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleFunction) { RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) { -#ifdef DEBUG HandleScope scope(isolate); +#ifdef DEBUG ASSERT(args.length() == 1); // Get the function and make sure it is compiled. CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0); @@ -12209,7 +12488,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugDisassembleConstructor) { RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionGetInferredName) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); ASSERT(args.length() == 1); CONVERT_ARG_CHECKED(JSFunction, f, 0); @@ -12247,9 +12526,9 @@ static int FindSharedFunctionInfosForScript(HeapIterator* iterator, // in OpaqueReferences. 
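DebugReferencedBy and DebugConstructedBy above, and FindSharedFunctionInfosForScript just below, all use the same two-pass shape: walk the heap once to count matches (capped by the caller's limit), allocate the result array at exactly that size, then walk again to fill it, leaning on the asserts that the allocation kept the heap iterable and that nothing allocated in between. A standalone sketch of that count-then-fill pattern, with plain iterators and std::vector standing in for HeapIterator and FixedArray:

    #include <iterator>
    #include <vector>

    // First pass counts matching elements, second pass fills an exactly-sized
    // result; the predicate and range play the role of the heap filter.
    template <typename It, typename Pred>
    std::vector<typename std::iterator_traits<It>::value_type>
    CollectMatches(It begin, It end, Pred pred, int max_matches) {
      int count = 0;
      for (It i = begin; i != end && count < max_matches; ++i) {
        if (pred(*i)) ++count;
      }
      std::vector<typename std::iterator_traits<It>::value_type> result;
      result.reserve(count);
      int filled = 0;
      for (It i = begin; i != end && filled < max_matches; ++i) {
        if (pred(*i)) {
          result.push_back(*i);
          ++filled;
        }
      }
      return result;
    }

The sketch only holds if the underlying collection cannot change between the two passes, which is what the no-allocation scopes in the runtime functions guarantee.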
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFindSharedFunctionInfosForScript) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 1); - HandleScope scope(isolate); CONVERT_ARG_CHECKED(JSValue, script_value, 0); RUNTIME_ASSERT(script_value->value()->IsScript()); @@ -12260,19 +12539,20 @@ RUNTIME_FUNCTION(MaybeObject*, Handle<FixedArray> array; array = isolate->factory()->NewFixedArray(kBufferSize); int number; + Heap* heap = isolate->heap(); { - isolate->heap()->EnsureHeapIsIterable(); + heap->EnsureHeapIsIterable(); AssertNoAllocation no_allocations; - HeapIterator heap_iterator; + HeapIterator heap_iterator(heap); Script* scr = *script; FixedArray* arr = *array; number = FindSharedFunctionInfosForScript(&heap_iterator, scr, arr); } if (number > kBufferSize) { array = isolate->factory()->NewFixedArray(number); - isolate->heap()->EnsureHeapIsIterable(); + heap->EnsureHeapIsIterable(); AssertNoAllocation no_allocations; - HeapIterator heap_iterator; + HeapIterator heap_iterator(heap); Script* scr = *script; FixedArray* arr = *array; FindSharedFunctionInfosForScript(&heap_iterator, scr, arr); @@ -12294,9 +12574,9 @@ RUNTIME_FUNCTION(MaybeObject*, // each function with all its descendant is always stored in a continues range // with the function itself going first. The root function is a script function. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); - HandleScope scope(isolate); CONVERT_ARG_CHECKED(JSValue, script, 0); CONVERT_ARG_HANDLE_CHECKED(String, source, 1); @@ -12316,9 +12596,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditGatherCompileInfo) { // If old_script_name is provided (i.e. is a String), also creates a copy of // the script with its original source and sends notification to debugger. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 3); - HandleScope scope(isolate); CONVERT_ARG_CHECKED(JSValue, original_script_value, 0); CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1); Handle<Object> old_script_name(args[2], isolate); @@ -12340,9 +12620,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceScript) { RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 1); - HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0); return LiveEdit::FunctionSourceUpdated(shared_info); } @@ -12350,9 +12630,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSourceUpdated) { // Replaces code of SharedFunctionInfo with a new one. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); - HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1); @@ -12361,9 +12641,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceFunctionCode) { // Connects SharedFunctionInfo to another script. 
RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); - HandleScope scope(isolate); Handle<Object> function_object(args[0], isolate); Handle<Object> script_object(args[1], isolate); @@ -12388,9 +12668,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditFunctionSetScript) { // In a code of a parent function replaces original function as embedded object // with a substitution one. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 3); - HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0); CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1); @@ -12409,9 +12689,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditReplaceRefToNestedFunction) { // (change_begin, change_end, change_end_new_position). // Each group describes a change in text; groups are sorted by change_begin. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); - HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0); CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1); @@ -12424,9 +12704,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditPatchFunctionPositions) { // Returns array of the same length with corresponding results of // LiveEdit::FunctionPatchabilityStatus type. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); - HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0); CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1); @@ -12438,9 +12718,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCheckAndDropActivations) { // of JSArray of triplets (pos1, pos1_end, pos2_end) describing list // of diff chunks. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); - HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(String, s1, 0); CONVERT_ARG_HANDLE_CHECKED(String, s2, 1); @@ -12451,8 +12731,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditCompareStrings) { // Restarts a call frame and completely drops all frames above. // Returns true if successful. Otherwise returns undefined or an error message. RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) { - CHECK(isolate->debugger()->live_edit_enabled()); HandleScope scope(isolate); + CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); // Check arguments. @@ -12482,7 +12762,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) { const char* error_message = LiveEdit::RestartFrame(it.frame(), isolate->runtime_zone()); if (error_message) { - return *(isolate->factory()->LookupAsciiSymbol(error_message)); + return *(isolate->factory()->InternalizeUtf8String(error_message)); } return heap->true_value(); } @@ -12491,9 +12771,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LiveEditRestartFrame) { // A testing entry. Returns statement position which is the closest to // source_position. 
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) { + HandleScope scope(isolate); CHECK(isolate->debugger()->live_edit_enabled()); ASSERT(args.length() == 2); - HandleScope scope(isolate); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]); @@ -12529,8 +12809,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionCodePositionFromSource) { // This is used in unit tests to run code as if debugger is entered or simply // to have a stack with C++ frame in the middle. RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) { - ASSERT(args.length() == 2); HandleScope scope(isolate); + ASSERT(args.length() == 2); CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1); @@ -12556,6 +12836,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ExecuteInDebugContext) { // Sets a v8 flag. RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) { + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(String, arg, 0); SmartArrayPointer<char> flags = arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); @@ -12567,6 +12848,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetFlags) { // Performs a GC. // Presently, it only does a full GC. RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) { + NoHandleAllocation ha(isolate); isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage"); return isolate->heap()->undefined_value(); } @@ -12574,6 +12856,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectGarbage) { // Gets the current heap usage. RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) { + NoHandleAllocation ha(isolate); int usage = static_cast<int>(isolate->heap()->SizeOfObjects()); if (!Smi::IsValid(usage)) { return *isolate->factory()->NewNumberFromInt(usage); @@ -12581,218 +12864,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetHeapUsage) { return Smi::FromInt(usage); } - -// Captures a live object list from the present heap. -RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLOLEnabled) { -#ifdef LIVE_OBJECT_LIST - return isolate->heap()->true_value(); -#else - return isolate->heap()->false_value(); -#endif -} - - -// Captures a live object list from the present heap. -RUNTIME_FUNCTION(MaybeObject*, Runtime_CaptureLOL) { -#ifdef LIVE_OBJECT_LIST - return LiveObjectList::Capture(); -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Deletes the specified live object list. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DeleteLOL) { -#ifdef LIVE_OBJECT_LIST - CONVERT_SMI_ARG_CHECKED(id, 0); - bool success = LiveObjectList::Delete(id); - return isolate->heap()->ToBoolean(success); -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Generates the response to a debugger request for a dump of the objects -// contained in the difference between the captured live object lists -// specified by id1 and id2. -// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be -// dumped. -RUNTIME_FUNCTION(MaybeObject*, Runtime_DumpLOL) { -#ifdef LIVE_OBJECT_LIST - HandleScope scope; - CONVERT_SMI_ARG_CHECKED(id1, 0); - CONVERT_SMI_ARG_CHECKED(id2, 1); - CONVERT_SMI_ARG_CHECKED(start, 2); - CONVERT_SMI_ARG_CHECKED(count, 3); - CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 4); - EnterDebugger enter_debugger; - return LiveObjectList::Dump(id1, id2, start, count, filter_obj); -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Gets the specified object as requested by the debugger. 
-// This is only used for obj ids shown in live object lists. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObj) { -#ifdef LIVE_OBJECT_LIST - CONVERT_SMI_ARG_CHECKED(obj_id, 0); - Object* result = LiveObjectList::GetObj(obj_id); - return result; -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Gets the obj id for the specified address if valid. -// This is only used for obj ids shown in live object lists. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjId) { -#ifdef LIVE_OBJECT_LIST - HandleScope scope; - CONVERT_ARG_HANDLE_CHECKED(String, address, 0); - Object* result = LiveObjectList::GetObjId(address); - return result; -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Gets the retainers that references the specified object alive. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLObjRetainers) { -#ifdef LIVE_OBJECT_LIST - HandleScope scope; - CONVERT_SMI_ARG_CHECKED(obj_id, 0); - RUNTIME_ASSERT(args[1]->IsUndefined() || args[1]->IsJSObject()); - RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsBoolean()); - RUNTIME_ASSERT(args[3]->IsUndefined() || args[3]->IsSmi()); - RUNTIME_ASSERT(args[4]->IsUndefined() || args[4]->IsSmi()); - CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 5); - - Handle<JSObject> instance_filter; - if (args[1]->IsJSObject()) { - instance_filter = args.at<JSObject>(1); - } - bool verbose = false; - if (args[2]->IsBoolean()) { - verbose = args[2]->IsTrue(); - } - int start = 0; - if (args[3]->IsSmi()) { - start = args.smi_at(3); - } - int limit = Smi::kMaxValue; - if (args[4]->IsSmi()) { - limit = args.smi_at(4); - } - - return LiveObjectList::GetObjRetainers(obj_id, - instance_filter, - verbose, - start, - limit, - filter_obj); -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Gets the reference path between 2 objects. -RUNTIME_FUNCTION(MaybeObject*, Runtime_GetLOLPath) { -#ifdef LIVE_OBJECT_LIST - HandleScope scope; - CONVERT_SMI_ARG_CHECKED(obj_id1, 0); - CONVERT_SMI_ARG_CHECKED(obj_id2, 1); - RUNTIME_ASSERT(args[2]->IsUndefined() || args[2]->IsJSObject()); - - Handle<JSObject> instance_filter; - if (args[2]->IsJSObject()) { - instance_filter = args.at<JSObject>(2); - } - - Object* result = - LiveObjectList::GetPath(obj_id1, obj_id2, instance_filter); - return result; -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Generates the response to a debugger request for a list of all -// previously captured live object lists. -RUNTIME_FUNCTION(MaybeObject*, Runtime_InfoLOL) { -#ifdef LIVE_OBJECT_LIST - CONVERT_SMI_ARG_CHECKED(start, 0); - CONVERT_SMI_ARG_CHECKED(count, 1); - return LiveObjectList::Info(start, count); -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Gets a dump of the specified object as requested by the debugger. -// This is only used for obj ids shown in live object lists. -RUNTIME_FUNCTION(MaybeObject*, Runtime_PrintLOLObj) { -#ifdef LIVE_OBJECT_LIST - HandleScope scope; - CONVERT_SMI_ARG_CHECKED(obj_id, 0); - Object* result = LiveObjectList::PrintObj(obj_id); - return result; -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Resets and releases all previously captured live object lists. 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ResetLOL) { -#ifdef LIVE_OBJECT_LIST - LiveObjectList::Reset(); - return isolate->heap()->undefined_value(); -#else - return isolate->heap()->undefined_value(); -#endif -} - - -// Generates the response to a debugger request for a summary of the types -// of objects in the difference between the captured live object lists -// specified by id1 and id2. -// If id1 is 0 (i.e. not a valid lol), then the whole of lol id2 will be -// summarized. -RUNTIME_FUNCTION(MaybeObject*, Runtime_SummarizeLOL) { -#ifdef LIVE_OBJECT_LIST - HandleScope scope; - CONVERT_SMI_ARG_CHECKED(id1, 0); - CONVERT_SMI_ARG_CHECKED(id2, 1); - CONVERT_ARG_HANDLE_CHECKED(JSObject, filter_obj, 2); - - EnterDebugger enter_debugger; - return LiveObjectList::Summarize(id1, id2, filter_obj); -#else - return isolate->heap()->undefined_value(); -#endif -} - #endif // ENABLE_DEBUGGER_SUPPORT RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerResume) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); v8::V8::ResumeProfiler(); return isolate->heap()->undefined_value(); } RUNTIME_FUNCTION(MaybeObject*, Runtime_ProfilerPause) { - NoHandleAllocation ha; + NoHandleAllocation ha(isolate); v8::V8::PauseProfiler(); return isolate->heap()->undefined_value(); } @@ -12809,9 +12892,10 @@ static Handle<Object> Runtime_GetScriptFromScriptName( // Scan the heap for Script objects to find the script with the requested // script data. Handle<Script> script; - script_name->GetHeap()->EnsureHeapIsIterable(); + Heap* heap = script_name->GetHeap(); + heap->EnsureHeapIsIterable(); AssertNoAllocation no_allocation_during_heap_iteration; - HeapIterator iterator; + HeapIterator iterator(heap); HeapObject* obj = NULL; while (script.is_null() && ((obj = iterator.next()) != NULL)) { // If a script is found check if it has the script data requested. @@ -12849,125 +12933,80 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) { } -// Determines whether the given stack frame should be displayed in -// a stack trace. The caller is the error constructor that asked -// for the stack trace to be collected. The first time a construct -// call to this function is encountered it is skipped. The seen_caller -// in/out parameter is used to remember if the caller has been seen -// yet. -static bool ShowFrameInStackTrace(StackFrame* raw_frame, - Object* caller, - bool* seen_caller) { - // Only display JS frames. - if (!raw_frame->is_java_script()) { - return false; - } - JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); - Object* raw_fun = frame->function(); - // Not sure when this can happen but skip it just in case. - if (!raw_fun->IsJSFunction()) { - return false; - } - if ((raw_fun == caller) && !(*seen_caller)) { - *seen_caller = true; - return false; - } - // Skip all frames until we've seen the caller. - if (!(*seen_caller)) return false; - // Also, skip non-visible built-in functions and any call with the builtins - // object as receiver, so as to not reveal either the builtins object or - // an internal function. - // The --builtins-in-stack-traces command line flag allows including - // internal call sites in the stack trace for debugging purposes. - if (!FLAG_builtins_in_stack_traces) { - JSFunction* fun = JSFunction::cast(raw_fun); - if (frame->receiver()->IsJSBuiltinsObject() || - (fun->IsBuiltin() && !fun->shared()->native())) { - return false; - } - } - return true; -} - - // Collect the raw data for a stack trace. 
Returns an array of 4 // element segments each containing a receiver, function, code and // native code offset. RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) { + HandleScope scope(isolate); ASSERT_EQ(args.length(), 3); CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0); Handle<Object> caller = args.at<Object>(1); CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]); + // Optionally capture a more detailed stack trace for the message. + isolate->CaptureAndSetDetailedStackTrace(error_object); + // Capture a simple stack trace for the stack property. + return *isolate->CaptureSimpleStackTrace(error_object, caller, limit); +} + + +// Mark a function to recognize when called after GC to format the stack trace. +RUNTIME_FUNCTION(MaybeObject*, Runtime_MarkOneShotGetter) { HandleScope scope(isolate); - Factory* factory = isolate->factory(); + ASSERT_EQ(args.length(), 1); + CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0); + Handle<String> key = isolate->factory()->hidden_stack_trace_string(); + JSObject::SetHiddenProperty(fun, key, key); + return *fun; +} - limit = Max(limit, 0); // Ensure that limit is not negative. - int initial_size = Min(limit, 10); - Handle<FixedArray> elements = - factory->NewFixedArrayWithHoles(initial_size * 4); - StackFrameIterator iter(isolate); - // If the caller parameter is a function we skip frames until we're - // under it before starting to collect. - bool seen_caller = !caller->IsJSFunction(); - int cursor = 0; - int frames_seen = 0; - while (!iter.done() && frames_seen < limit) { - StackFrame* raw_frame = iter.frame(); - if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) { - frames_seen++; - JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame); - // Set initial size to the maximum inlining level + 1 for the outermost - // function. - List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1); - frame->Summarize(&frames); - for (int i = frames.length() - 1; i >= 0; i--) { - if (cursor + 4 > elements->length()) { - int new_capacity = JSObject::NewElementsCapacity(elements->length()); - Handle<FixedArray> new_elements = - factory->NewFixedArrayWithHoles(new_capacity); - for (int i = 0; i < cursor; i++) { - new_elements->set(i, elements->get(i)); - } - elements = new_elements; - } - ASSERT(cursor + 4 <= elements->length()); - - Handle<Object> recv = frames[i].receiver(); - Handle<JSFunction> fun = frames[i].function(); - Handle<Code> code = frames[i].code(); - Handle<Smi> offset(Smi::FromInt(frames[i].offset())); - elements->set(cursor++, *recv); - elements->set(cursor++, *fun); - elements->set(cursor++, *code); - elements->set(cursor++, *offset); - } - } - iter.Advance(); +// Retrieve the stack trace. This could be the raw stack trace collected +// on stack overflow or the already formatted stack trace string. +RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedStackTrace) { + HandleScope scope(isolate); + ASSERT_EQ(args.length(), 1); + CONVERT_ARG_CHECKED(JSObject, error_object, 0); + String* key = isolate->heap()->hidden_stack_trace_string(); + Object* result = error_object->GetHiddenProperty(key); + RUNTIME_ASSERT(result->IsJSArray() || + result->IsString() || + result->IsUndefined()); + return result; +} + + +// Set or clear the stack trace attached to an stack overflow error object. 
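The getter above and the Runtime_SetOverflowedStackTrace entry point just below stash the overflow-time stack trace in a hidden property keyed by hidden_stack_trace_string: passing undefined clears the slot, any other value (required to be a string) replaces it. A minimal sketch of that set-or-clear contract, with a std::map modelling the hidden properties and a null pointer playing the role of undefined; the key name and the ErrorObject type are made up for the example:

    #include <map>
    #include <string>

    // Invented key; mirrors the role of hidden_stack_trace_string.
    static const char kHiddenStackTraceKey[] = "hidden_stack_trace";

    // Invented stand-in for a JS error object and its hidden properties.
    class ErrorObject {
     public:
      // Null (think "undefined") deletes the stashed trace, anything else replaces it.
      void SetOverflowedStackTrace(const std::string* trace) {
        if (trace == NULL) {
          hidden_.erase(kHiddenStackTraceKey);
        } else {
          hidden_[kHiddenStackTraceKey] = *trace;
        }
      }
      // Hands back whatever was stashed, if anything.
      const std::string* GetOverflowedStackTrace() const {
        std::map<std::string, std::string>::const_iterator it =
            hidden_.find(kHiddenStackTraceKey);
        return it == hidden_.end() ? NULL : &it->second;
      }
     private:
      std::map<std::string, std::string> hidden_;
    };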
+RUNTIME_FUNCTION(MaybeObject*, Runtime_SetOverflowedStackTrace) { + HandleScope scope(isolate); + ASSERT_EQ(args.length(), 2); + CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0); + CONVERT_ARG_HANDLE_CHECKED(HeapObject, value, 1); + Handle<String> key = isolate->factory()->hidden_stack_trace_string(); + if (value->IsUndefined()) { + error_object->DeleteHiddenProperty(*key); + } else { + RUNTIME_ASSERT(value->IsString()); + JSObject::SetHiddenProperty(error_object, key, value); } - Handle<JSArray> result = factory->NewJSArrayWithElements(elements); - // Capture and attach a more detailed stack trace if necessary. - isolate->CaptureAndSetCurrentStackTraceFor(error_object); - result->set_length(Smi::FromInt(cursor)); - return *result; + return *error_object; } // Returns V8 version as a string. RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) { + NoHandleAllocation ha(isolate); ASSERT_EQ(args.length(), 0); - NoHandleAllocation ha; - const char* version_string = v8::V8::GetVersion(); - return isolate->heap()->AllocateStringFromAscii(CStrVector(version_string), + return isolate->heap()->AllocateStringFromOneByte(CStrVector(version_string), NOT_TENURED); } RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); OS::PrintError("abort: %s\n", reinterpret_cast<char*>(args[0]) + args.smi_at(1)); @@ -12978,7 +13017,17 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Abort) { } +RUNTIME_FUNCTION(MaybeObject*, Runtime_FlattenString) { + HandleScope scope(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_HANDLE_CHECKED(String, str, 0); + FlattenString(str); + return isolate->heap()->undefined_value(); +} + + RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) { + NoHandleAllocation ha(isolate); // This is only called from codegen, so checks might be more lax. CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0); Object* key = args[1]; @@ -13015,13 +13064,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) { HandleScope scope(isolate); Handle<JSFunctionResultCache> cache_handle(cache); - Handle<Object> key_handle(key); + Handle<Object> key_handle(key, isolate); Handle<Object> value; { Handle<JSFunction> factory(JSFunction::cast( cache_handle->get(JSFunctionResultCache::kFactoryIndex))); // TODO(antonm): consider passing a receiver when constructing a cache. - Handle<Object> receiver(isolate->native_context()->global_object()); + Handle<Object> receiver(isolate->native_context()->global_object(), + isolate); // This handle is nor shared, nor used later, so it's safe. 
Handle<Object> argv[] = { key_handle }; bool pending_exception; @@ -13074,40 +13124,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) { } -RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) { - HandleScope scope(isolate); - CONVERT_ARG_HANDLE_CHECKED(String, type, 0); - CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 1); - return *isolate->factory()->NewJSMessageObject( - type, - arguments, - 0, - 0, - isolate->factory()->undefined_value(), - isolate->factory()->undefined_value(), - isolate->factory()->undefined_value()); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) { - CONVERT_ARG_CHECKED(JSMessageObject, message, 0); - return message->type(); -} - - -RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) { - CONVERT_ARG_CHECKED(JSMessageObject, message, 0); - return message->arguments(); -} - - RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) { + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return Smi::FromInt(message->start_position()); } RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) { + NoHandleAllocation ha(isolate); CONVERT_ARG_CHECKED(JSMessageObject, message, 0); return message->script(); } @@ -13117,8 +13142,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetScript) { // ListNatives is ONLY used by the fuzz-natives.js in debug mode // Exclude the code in release mode. RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) { + HandleScope scope(isolate); ASSERT(args.length() == 0); - HandleScope scope; #define COUNT_ENTRY(Name, argc, ressize) + 1 int entry_count = 0 RUNTIME_FUNCTION_LIST(COUNT_ENTRY) @@ -13131,7 +13156,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) { bool inline_runtime_functions = false; #define ADD_ENTRY(Name, argc, ressize) \ { \ - HandleScope inner; \ + HandleScope inner(isolate); \ Handle<String> name; \ /* Inline runtime functions have an underscore in front of the name. 
*/ \ if (inline_runtime_functions) { \ @@ -13161,13 +13186,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ListNatives) { RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(String, format, 0); CONVERT_ARG_CHECKED(JSArray, elms, 1); String::FlatContent format_content = format->GetFlatContent(); RUNTIME_ASSERT(format_content.IsAscii()); - Vector<const char> chars = format_content.ToAsciiVector(); - LOGGER->LogRuntime(chars, elms); + Vector<const uint8_t> chars = format_content.ToOneByteVector(); + LOGGER->LogRuntime(isolate, Vector<const char>::cast(chars), elms); return isolate->heap()->undefined_value(); } @@ -13207,12 +13233,100 @@ ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION(FastProperties) RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) { + NoHandleAllocation ha(isolate); ASSERT(args.length() == 2); CONVERT_ARG_CHECKED(JSObject, obj1, 0); CONVERT_ARG_CHECKED(JSObject, obj2, 1); return isolate->heap()->ToBoolean(obj1->map() == obj2->map()); } + +RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 1); + CONVERT_ARG_CHECKED(JSReceiver, obj, 0); + if (obj->IsJSGlobalProxy()) { + Object* proto = obj->GetPrototype(); + if (obj->IsNull()) return isolate->heap()->false_value(); + ASSERT(proto->IsJSGlobalObject()); + obj = JSReceiver::cast(proto); + } + return isolate->heap()->ToBoolean(obj->map()->is_observed()); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 2); + CONVERT_ARG_CHECKED(JSReceiver, obj, 0); + CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1); + if (obj->IsJSGlobalProxy()) { + Object* proto = obj->GetPrototype(); + if (obj->IsNull()) return isolate->heap()->undefined_value(); + ASSERT(proto->IsJSGlobalObject()); + obj = JSReceiver::cast(proto); + } + ASSERT(!(obj->map()->is_observed() && obj->IsJSObject() && + JSObject::cast(obj)->HasFastElements())); + if (obj->map()->is_observed() != is_observed) { + if (is_observed && obj->IsJSObject() && + !JSObject::cast(obj)->HasExternalArrayElements()) { + // Go to dictionary mode, so that we don't skip map checks. + MaybeObject* maybe = JSObject::cast(obj)->NormalizeElements(); + if (maybe->IsFailure()) return maybe; + ASSERT(!JSObject::cast(obj)->HasFastElements()); + } + MaybeObject* maybe = obj->map()->Copy(); + Map* map; + if (!maybe->To(&map)) return maybe; + map->set_is_observed(is_observed); + obj->set_map(map); + } + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 0); + isolate->set_observer_delivery_pending(true); + return isolate->heap()->undefined_value(); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 0); + return isolate->heap()->observation_state(); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_ObservationWeakMapCreate) { + HandleScope scope(isolate); + ASSERT(args.length() == 0); + // TODO(adamk): Currently this runtime function is only called three times per + // isolate. If it's called more often, the map should be moved into the + // strong root list. 
+ Handle<Map> map = + isolate->factory()->NewMap(JS_WEAK_MAP_TYPE, JSWeakMap::kSize); + Handle<JSWeakMap> weakmap = + Handle<JSWeakMap>::cast(isolate->factory()->NewJSObjectFromMap(map)); + return WeakMapInitialize(isolate, weakmap); +} + + +RUNTIME_FUNCTION(MaybeObject*, Runtime_UnwrapGlobalProxy) { + NoHandleAllocation ha(isolate); + ASSERT(args.length() == 1); + Object* object = args[0]; + if (object->IsJSGlobalProxy()) { + object = object->GetPrototype(isolate); + if (object->IsNull()) return isolate->heap()->undefined_value(); + } + return object; +} + + // ---------------------------------------------------------------------------- // Implementation of Runtime @@ -13236,16 +13350,16 @@ MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap, Object* dictionary) { ASSERT(Isolate::Current()->heap() == heap); ASSERT(dictionary != NULL); - ASSERT(StringDictionary::cast(dictionary)->NumberOfElements() == 0); + ASSERT(NameDictionary::cast(dictionary)->NumberOfElements() == 0); for (int i = 0; i < kNumFunctions; ++i) { - Object* name_symbol; - { MaybeObject* maybe_name_symbol = - heap->LookupAsciiSymbol(kIntrinsicFunctions[i].name); - if (!maybe_name_symbol->ToObject(&name_symbol)) return maybe_name_symbol; - } - StringDictionary* string_dictionary = StringDictionary::cast(dictionary); - { MaybeObject* maybe_dictionary = string_dictionary->Add( - String::cast(name_symbol), + Object* name_string; + { MaybeObject* maybe_name_string = + heap->InternalizeUtf8String(kIntrinsicFunctions[i].name); + if (!maybe_name_string->ToObject(&name_string)) return maybe_name_string; + } + NameDictionary* name_dictionary = NameDictionary::cast(dictionary); + { MaybeObject* maybe_dictionary = name_dictionary->Add( + String::cast(name_string), Smi::FromInt(i), PropertyDetails(NONE, NORMAL)); if (!maybe_dictionary->ToObject(&dictionary)) { @@ -13259,7 +13373,7 @@ MaybeObject* Runtime::InitializeIntrinsicFunctionNames(Heap* heap, } -const Runtime::Function* Runtime::FunctionForSymbol(Handle<String> name) { +const Runtime::Function* Runtime::FunctionForName(Handle<String> name) { Heap* heap = name->GetHeap(); int entry = heap->intrinsic_function_names()->FindEntry(*name); if (entry != kNotFound) { diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h index c9939d06c8..2959fedc56 100644 --- a/deps/v8/src/runtime.h +++ b/deps/v8/src/runtime.h @@ -67,6 +67,7 @@ namespace internal { F(GetDefaultReceiver, 1, 1) \ \ F(GetPrototype, 1, 1) \ + F(SetPrototype, 2, 1) \ F(IsInPrototypeChain, 2, 1) \ \ F(GetOwnProperty, 2, 1) \ @@ -85,22 +86,27 @@ namespace internal { F(NewStrictArgumentsFast, 3, 1) \ F(LazyCompile, 1, 1) \ F(LazyRecompile, 1, 1) \ - F(ParallelRecompile, 1, 1) \ + F(ParallelRecompile, 1, 1) \ + F(InstallRecompiledCode, 1, 1) \ F(NotifyDeoptimized, 1, 1) \ + F(NotifyStubFailure, 0, 1) \ F(NotifyOSR, 0, 1) \ F(DeoptimizeFunction, 1, 1) \ F(ClearFunctionTypeFeedback, 1, 1) \ F(RunningInSimulator, 0, 1) \ F(OptimizeFunctionOnNextCall, -1, 1) \ + F(WaitUntilOptimized, 1, 1) \ F(GetOptimizationStatus, 1, 1) \ F(GetOptimizationCount, 1, 1) \ F(CompileForOnStackReplacement, 1, 1) \ F(SetNewFunctionAttributes, 1, 1) \ F(AllocateInNewSpace, 1, 1) \ + F(AllocateInOldPointerSpace, 1, 1) \ F(SetNativeFlag, 1, 1) \ F(StoreArrayLiteralElement, 5, 1) \ F(DebugCallbackSupportsStepping, 1, 1) \ F(DebugPrepareStepInIfStepping, 1, 1) \ + F(FlattenString, 1, 1) \ \ /* Array join support */ \ F(PushIfAbsent, 2, 1) \ @@ -111,7 +117,6 @@ namespace internal { F(Typeof, 1, 1) \ \ F(StringToNumber, 1, 1) \ - 
F(StringFromCharCodeArray, 1, 1) \ F(StringParseInt, 2, 1) \ F(StringParseFloat, 1, 1) \ F(StringToLowerCase, 1, 1) \ @@ -120,9 +125,6 @@ namespace internal { F(CharFromCode, 1, 1) \ F(URIEscape, 1, 1) \ F(URIUnescape, 1, 1) \ - F(QuoteJSONString, 1, 1) \ - F(QuoteJSONStringComma, 1, 1) \ - F(QuoteJSONStringArray, 1, 1) \ \ F(NumberToString, 1, 1) \ F(NumberToStringSkipCache, 1, 1) \ @@ -191,6 +193,10 @@ namespace internal { \ /* JSON */ \ F(ParseJson, 1, 1) \ + F(BasicJSONStringify, 1, 1) \ + F(QuoteJSONString, 1, 1) \ + F(QuoteJSONStringComma, 1, 1) \ + F(QuoteJSONStringArray, 1, 1) \ \ /* Strings */ \ F(StringCharCodeAt, 2, 1) \ @@ -198,12 +204,14 @@ namespace internal { F(StringLastIndexOf, 3, 1) \ F(StringLocaleCompare, 2, 1) \ F(SubString, 3, 1) \ - F(StringReplaceRegExpWithString, 4, 1) \ + F(StringReplaceGlobalRegExpWithString, 4, 1) \ F(StringReplaceOneCharWithString, 3, 1) \ F(StringMatch, 3, 1) \ F(StringTrim, 3, 1) \ F(StringToArray, 2, 1) \ F(NewStringWrapper, 1, 1) \ + F(NewString, 2, 1) \ + F(TruncateString, 2, 1) \ \ /* Numbers */ \ F(NumberToRadixString, 2, 1) \ @@ -232,6 +240,9 @@ namespace internal { F(FunctionIsBuiltin, 1, 1) \ F(GetScript, 1, 1) \ F(CollectStackTrace, 3, 1) \ + F(MarkOneShotGetter, 1, 1) \ + F(GetOverflowedStackTrace, 1, 1) \ + F(SetOverflowedStackTrace, 2, 1) \ F(GetV8Version, 0, 1) \ \ F(ClassOf, 1, 1) \ @@ -255,7 +266,7 @@ namespace internal { /* Numbers */ \ \ /* Globals */ \ - F(CompileString, 1, 1) \ + F(CompileString, 2, 1) \ F(GlobalPrint, 1, 1) \ \ /* Eval */ \ @@ -266,6 +277,7 @@ namespace internal { F(DefineOrRedefineDataProperty, 4, 1) \ F(DefineOrRedefineAccessorProperty, 5, 1) \ F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \ + F(GetDataProperty, 2, 1) \ \ /* Arrays */ \ F(RemoveArrayHoles, 2, 1) \ @@ -286,6 +298,9 @@ namespace internal { /* Harmony modules */ \ F(IsJSModule, 1, 1) \ \ + /* Harmony symbols */ \ + F(CreateSymbol, 0, 1) \ + \ /* Harmony proxies */ \ F(CreateJSProxy, 2, 1) \ F(CreateJSFunctionProxy, 4, 1) \ @@ -301,6 +316,7 @@ namespace internal { F(SetAdd, 2, 1) \ F(SetHas, 2, 1) \ F(SetDelete, 2, 1) \ + F(SetGetSize, 1, 1) \ \ /* Harmony maps */ \ F(MapInitialize, 1, 1) \ @@ -308,6 +324,7 @@ namespace internal { F(MapHas, 2, 1) \ F(MapDelete, 2, 1) \ F(MapSet, 3, 1) \ + F(MapGetSize, 1, 1) \ \ /* Harmony weakmaps */ \ F(WeakMapInitialize, 1, 1) \ @@ -316,6 +333,14 @@ namespace internal { F(WeakMapDelete, 2, 1) \ F(WeakMapSet, 3, 1) \ \ + /* Harmony observe */ \ + F(IsObserved, 1, 1) \ + F(SetIsObserved, 2, 1) \ + F(SetObserverDeliveryPending, 0, 1) \ + F(GetObservationState, 0, 1) \ + F(ObservationWeakMapCreate, 0, 1) \ + F(UnwrapGlobalProxy, 1, 1) \ + \ /* Statements */ \ F(NewClosure, 3, 1) \ F(NewObject, 1, 1) \ @@ -335,7 +360,7 @@ namespace internal { F(PushWithContext, 2, 1) \ F(PushCatchContext, 3, 1) \ F(PushBlockContext, 2, 1) \ - F(PushModuleContext, 1, 1) \ + F(PushModuleContext, 2, 1) \ F(DeleteContextSlot, 2, 1) \ F(LoadContextSlot, 2, 2) \ F(LoadContextSlotNoReferenceError, 2, 2) \ @@ -343,6 +368,7 @@ namespace internal { \ /* Declarations and initialization */ \ F(DeclareGlobals, 3, 1) \ + F(DeclareModules, 1, 1) \ F(DeclareContextSlot, 4, 1) \ F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \ F(InitializeConstGlobal, 2, 1) \ @@ -363,9 +389,6 @@ namespace internal { F(GetFromCache, 2, 1) \ \ /* Message objects */ \ - F(NewMessageObject, 2, 1) \ - F(MessageGetType, 1, 1) \ - F(MessageGetArguments, 1, 1) \ F(MessageGetStartPosition, 1, 1) \ F(MessageGetScript, 1, 1) \ \ @@ -390,6 +413,7 @@ namespace internal 
{ F(HasExternalFloatElements, 1, 1) \ F(HasExternalDoubleElements, 1, 1) \ F(HasFastProperties, 1, 1) \ + F(TransitionElementsKind, 2, 1) \ F(TransitionElementsSmiToDouble, 1, 1) \ F(TransitionElementsDoubleToObject, 1, 1) \ F(HaveSameMap, 2, 1) \ @@ -418,6 +442,7 @@ namespace internal { F(GetScopeDetails, 4, 1) \ F(GetFunctionScopeCount, 1, 1) \ F(GetFunctionScopeDetails, 2, 1) \ + F(SetScopeVariableValue, 6, 1) \ F(DebugPrintScopes, 0, 1) \ F(GetThreadCount, 1, 1) \ F(GetThreadDetails, 2, 1) \ @@ -458,20 +483,6 @@ namespace internal { F(SetFlags, 1, 1) \ F(CollectGarbage, 1, 1) \ F(GetHeapUsage, 0, 1) \ - \ - /* LiveObjectList support*/ \ - F(HasLOLEnabled, 0, 1) \ - F(CaptureLOL, 0, 1) \ - F(DeleteLOL, 1, 1) \ - F(DumpLOL, 5, 1) \ - F(GetLOLObj, 1, 1) \ - F(GetLOLObjId, 1, 1) \ - F(GetLOLObjRetainers, 6, 1) \ - F(GetLOLPath, 3, 1) \ - F(InfoLOL, 2, 1) \ - F(PrintLOLObj, 1, 1) \ - F(ResetLOL, 0, 1) \ - F(SummarizeLOL, 3, 1) #else #define RUNTIME_FUNCTION_LIST_DEBUGGER_SUPPORT(F) @@ -503,6 +514,7 @@ namespace internal { #define INLINE_FUNCTION_LIST(F) \ F(IsSmi, 1, 1) \ F(IsNonNegativeSmi, 1, 1) \ + F(IsSymbol, 1, 1) \ F(IsArray, 1, 1) \ F(IsRegExp, 1, 1) \ F(IsConstructCall, 0, 1) \ @@ -514,6 +526,8 @@ namespace internal { F(DateField, 2 /* date object, field index */, 1) \ F(StringCharFromCode, 1, 1) \ F(StringCharAt, 2, 1) \ + F(OneByteSeqStringSetChar, 3, 1) \ + F(TwoByteSeqStringSetChar, 3, 1) \ F(ObjectEquals, 2, 1) \ F(RandomHeapNumber, 0, 1) \ F(IsObject, 1, 1) \ @@ -534,7 +548,7 @@ namespace internal { // ---------------------------------------------------------------------------- -// INLINE_AND_RUNTIME_FUNCTION_LIST defines all inlined functions accessed +// INLINE_RUNTIME_FUNCTION_LIST defines all inlined functions accessed // with a native call of the form %_name from within JS code that also have // a corresponding runtime function, that is called for slow cases. // Entries have the form F(name, number of arguments, number of return values). @@ -556,8 +570,8 @@ namespace internal { class RuntimeState { public: - StaticResource<StringInputBuffer>* string_input_buffer() { - return &string_input_buffer_; + StaticResource<ConsStringIteratorOp>* string_iterator() { + return &string_iterator_; } unibrow::Mapping<unibrow::ToUppercase, 128>* to_upper_mapping() { return &to_upper_mapping_; @@ -565,29 +579,29 @@ class RuntimeState { unibrow::Mapping<unibrow::ToLowercase, 128>* to_lower_mapping() { return &to_lower_mapping_; } - StringInputBuffer* string_input_buffer_compare_bufx() { - return &string_input_buffer_compare_bufx_; + ConsStringIteratorOp* string_iterator_compare_x() { + return &string_iterator_compare_x_; } - StringInputBuffer* string_input_buffer_compare_bufy() { - return &string_input_buffer_compare_bufy_; + ConsStringIteratorOp* string_iterator_compare_y() { + return &string_iterator_compare_y_; } - StringInputBuffer* string_locale_compare_buf1() { - return &string_locale_compare_buf1_; + ConsStringIteratorOp* string_locale_compare_it1() { + return &string_locale_compare_it1_; } - StringInputBuffer* string_locale_compare_buf2() { - return &string_locale_compare_buf2_; + ConsStringIteratorOp* string_locale_compare_it2() { + return &string_locale_compare_it2_; } private: RuntimeState() {} // Non-reentrant string buffer for efficient general use in the runtime. 
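The F(name, nargs, nresults) tables that runtime.h grows and shrinks in this patch are classic X-macro lists: the same list is expanded once to count entries (as COUNT_ENTRY does in Runtime_ListNatives) and again to emit per-entry data (as ADD_ENTRY does). A self-contained miniature of that technique; the three entries and all macro names below are invented for the demonstration:

    #include <cstdio>

    #define DEMO_FUNCTION_LIST(F) \
      F(Add, 2, 1)                \
      F(Typeof, 1, 1)             \
      F(CollectGarbage, 1, 1)

    // One expansion of the list yields the entry count...
    #define COUNT_ENTRY(Name, argc, ressize) + 1
    static const int kDemoEntryCount = 0 DEMO_FUNCTION_LIST(COUNT_ENTRY);
    #undef COUNT_ENTRY

    struct DemoEntry {
      const char* name;
      int argc;
      int ressize;
    };

    // ...and a second expansion yields the per-entry table.
    #define MAKE_ENTRY(Name, argc, ressize) { #Name, argc, ressize },
    static const DemoEntry kDemoEntries[] = { DEMO_FUNCTION_LIST(MAKE_ENTRY) };
    #undef MAKE_ENTRY

    int main() {
      for (int i = 0; i < kDemoEntryCount; i++) {
        std::printf("%%%s takes %d argument(s)\n",
                    kDemoEntries[i].name, kDemoEntries[i].argc);
      }
      return 0;
    }

Because count and table come from one list, dropping a line (for example the LiveObjectList entries removed above) needs no separate bookkeeping update.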
- StaticResource<StringInputBuffer> string_input_buffer_; + StaticResource<ConsStringIteratorOp> string_iterator_; unibrow::Mapping<unibrow::ToUppercase, 128> to_upper_mapping_; unibrow::Mapping<unibrow::ToLowercase, 128> to_lower_mapping_; - StringInputBuffer string_input_buffer_compare_bufx_; - StringInputBuffer string_input_buffer_compare_bufy_; - StringInputBuffer string_locale_compare_buf1_; - StringInputBuffer string_locale_compare_buf2_; + ConsStringIteratorOp string_iterator_compare_x_; + ConsStringIteratorOp string_iterator_compare_y_; + ConsStringIteratorOp string_locale_compare_it1_; + ConsStringIteratorOp string_locale_compare_it2_; friend class Isolate; friend class Runtime; @@ -634,15 +648,16 @@ class Runtime : public AllStatic { static const int kNotFound = -1; - // Add symbols for all the intrinsic function names to a StringDictionary. + // Add internalized strings for all the intrinsic function names to a + // StringDictionary. // Returns failure if an allocation fails. In this case, it must be // retried with a new, empty StringDictionary, not with the same one. // Alternatively, heap initialization can be completely restarted. MUST_USE_RESULT static MaybeObject* InitializeIntrinsicFunctionNames( Heap* heap, Object* dictionary); - // Get the intrinsic function with the given name, which must be a symbol. - static const Function* FunctionForSymbol(Handle<String> name); + // Get the intrinsic function with the given name, which must be internalized. + static const Function* FunctionForName(Handle<String> name); // Get the intrinsic function with the given FunctionId. static const Function* FunctionForId(FunctionId id); diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js index 6b487349d2..09b39ffe1d 100644 --- a/deps/v8/src/runtime.js +++ b/deps/v8/src/runtime.js @@ -346,7 +346,7 @@ function SHR(y) { // ECMA-262, section 11.4.1, page 46. function DELETE(key, strict) { - return %DeleteProperty(%ToObject(this), %ToString(key), strict); + return %DeleteProperty(%ToObject(this), %ToName(key), strict); } @@ -356,7 +356,7 @@ function IN(x) { throw %MakeTypeError('invalid_in_operator_use', [this, x]); } return %_IsNonNegativeSmi(this) ? - %HasElement(x, this) : %HasProperty(x, %ToString(this)); + %HasElement(x, this) : %HasProperty(x, %ToName(this)); } @@ -396,7 +396,7 @@ function INSTANCE_OF(F) { // has a property with the given key; return the key as a string if // it has. Otherwise returns 0 (smi). Used in for-in statements. function FILTER_KEY(key) { - var string = %ToString(key); + var string = %ToName(key); if (%HasProperty(this, string)) return string; return 0; } @@ -563,6 +563,12 @@ function NonStringToString(x) { } +// ES6 symbols +function ToName(x) { + return IS_SYMBOL(x) ? x : %ToString(x); +} + + // ECMA-262, section 9.9, page 36. 
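The runtime.h hunk above renames FunctionForSymbol to FunctionForName and now speaks of internalized strings: intrinsic names are expected to be interned, so lookups here (and the Match() predicate in scopes.cc further down, which simply compares String pointers) can use pointer identity instead of character-by-character comparison. A minimal sketch of that idea in plain C++; the Interner class and its names are hypothetical, not the V8 API:

  #include <cassert>
  #include <string>
  #include <unordered_set>

  class Interner {
   public:
    // Equal strings always intern to the same stable address.
    const std::string* Intern(const std::string& s) {
      return &*table_.insert(s).first;
    }
   private:
    std::unordered_set<std::string> table_;
  };

  int main() {
    Interner interner;
    const std::string* a = interner.Intern("length");
    const std::string* b = interner.Intern(std::string("len") + "gth");
    assert(a == b);  // pointer identity stands in for full string comparison
    return 0;
  }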
function ToObject(x) { if (IS_STRING(x)) return new $String(x); diff --git a/deps/v8/src/safepoint-table.cc b/deps/v8/src/safepoint-table.cc index 714e5c3977..9e423045ae 100644 --- a/deps/v8/src/safepoint-table.cc +++ b/deps/v8/src/safepoint-table.cc @@ -59,7 +59,8 @@ bool SafepointEntry::HasRegisterAt(int reg_index) const { SafepointTable::SafepointTable(Code* code) { - ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION); + ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION || + code->kind() == Code::COMPILED_STUB); code_ = code; Address header = code->instruction_start() + code->safepoint_table_offset(); length_ = Memory::uint32_at(header + kLengthOffset); @@ -158,14 +159,6 @@ unsigned SafepointTableBuilder::GetCodeOffset() const { void SafepointTableBuilder::Emit(Assembler* assembler, int bits_per_entry) { - // For lazy deoptimization we need space to patch a call after every call. - // Ensure there is always space for such patching, even if the code ends - // in a call. - int target_offset = assembler->pc_offset() + Deoptimizer::patch_size(); - while (assembler->pc_offset() < target_offset) { - assembler->nop(); - } - // Make sure the safepoint table is properly aligned. Pad with nops. assembler->Align(kIntSize); assembler->RecordComment(";;; Safepoint table."); diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h index 4de413b885..a454750de9 100644 --- a/deps/v8/src/scanner.h +++ b/deps/v8/src/scanner.h @@ -145,7 +145,7 @@ class UnicodeCache { // Caching predicates used by scanners. public: UnicodeCache() {} - typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder; + typedef unibrow::Utf8Decoder<512> Utf8Decoder; StaticResource<Utf8Decoder>* utf8_decoder() { return &utf8_decoder_; @@ -183,9 +183,9 @@ class LiteralBuffer { INLINE(void AddChar(uint32_t code_unit)) { if (position_ >= backing_store_.length()) ExpandBuffer(); if (is_ascii_) { - if (code_unit < kMaxAsciiCharCodeU) { + if (code_unit <= unibrow::Latin1::kMaxChar) { backing_store_[position_] = static_cast<byte>(code_unit); - position_ += kASCIISize; + position_ += kOneByteSize; return; } ConvertToUtf16(); @@ -250,7 +250,7 @@ class LiteralBuffer { } else { new_store = backing_store_; } - char* src = reinterpret_cast<char*>(backing_store_.start()); + uint8_t* src = backing_store_.start(); uc16* dst = reinterpret_cast<uc16*>(new_store.start()); for (int i = position_ - 1; i >= 0; i--) { dst[i] = src[i]; @@ -315,8 +315,6 @@ class Scanner { // -1 is outside of the range of any real source code. static const int kNoOctalLocation = -1; - typedef unibrow::Utf8InputBuffer<1024> Utf8Decoder; - explicit Scanner(UnicodeCache* scanner_contants); void Initialize(Utf16CharacterStream* source); @@ -432,10 +430,6 @@ class Scanner { // be empty). bool ScanRegExpFlags(); - // Tells whether the buffer contains an identifier (no escapes). - // Used for checking if a property name is an identifier. - static bool IsIdentifier(unibrow::CharacterStream* buffer); - private: // The current and look-ahead token. 
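The scanner.h hunk above widens LiteralBuffer::AddChar's narrow fast path from ASCII to Latin-1 (code units up to unibrow::Latin1::kMaxChar, i.e. 0xFF) and converts the buffer to UTF-16 only when a wider code unit arrives. A rough model of that behaviour in standard C++, assuming BMP code units only (the surrogate-pair handling of the real AddChar is omitted); the class below is illustrative, not the V8 type:

  #include <cstdint>
  #include <vector>

  class OneByteLiteralBuffer {
   public:
    // Append a BMP code unit, staying in the one-byte buffer while possible.
    void AddChar(uint32_t code_unit) {
      if (is_one_byte_) {
        if (code_unit <= 0xFF) {  // unibrow::Latin1::kMaxChar
          one_byte_.push_back(static_cast<uint8_t>(code_unit));
          return;
        }
        // First wide code unit: widen the existing contents once.
        two_byte_.assign(one_byte_.begin(), one_byte_.end());
        one_byte_.clear();
        is_one_byte_ = false;
      }
      two_byte_.push_back(static_cast<uint16_t>(code_unit));
    }
   private:
    bool is_one_byte_ = true;
    std::vector<uint8_t> one_byte_;
    std::vector<uint16_t> two_byte_;
  };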
struct TokenDesc { diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc index 02b4323980..15ee29f9fc 100644 --- a/deps/v8/src/scopeinfo.cc +++ b/deps/v8/src/scopeinfo.cc @@ -149,8 +149,8 @@ Handle<ScopeInfo> ScopeInfo::Create(Scope* scope, Zone* zone) { } -ScopeInfo* ScopeInfo::Empty() { - return reinterpret_cast<ScopeInfo*>(HEAP->empty_fixed_array()); +ScopeInfo* ScopeInfo::Empty(Isolate* isolate) { + return reinterpret_cast<ScopeInfo*>(isolate->heap()->empty_fixed_array()); } @@ -280,7 +280,7 @@ InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) { int ScopeInfo::StackSlotIndex(String* name) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsInternalizedString()); if (length() > 0) { int start = StackLocalEntriesIndex(); int end = StackLocalEntriesIndex() + StackLocalCount(); @@ -297,7 +297,7 @@ int ScopeInfo::StackSlotIndex(String* name) { int ScopeInfo::ContextSlotIndex(String* name, VariableMode* mode, InitializationFlag* init_flag) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsInternalizedString()); ASSERT(mode != NULL); ASSERT(init_flag != NULL); if (length() > 0) { @@ -321,6 +321,7 @@ int ScopeInfo::ContextSlotIndex(String* name, return result; } } + // Cache as not found. Mode and init flag don't matter. context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1); } return -1; @@ -328,7 +329,7 @@ int ScopeInfo::ContextSlotIndex(String* name, int ScopeInfo::ParameterIndex(String* name) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsInternalizedString()); if (length() > 0) { // We must read parameters from the end since for // multiply declared parameters the value of the @@ -348,7 +349,7 @@ int ScopeInfo::ParameterIndex(String* name) { int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsInternalizedString()); ASSERT(mode != NULL); if (length() > 0) { if (FunctionVariableField::decode(Flags()) == CONTEXT && @@ -361,6 +362,31 @@ int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) { } +bool ScopeInfo::CopyContextLocalsToScopeObject( + Isolate* isolate, + Handle<Context> context, + Handle<JSObject> scope_object) { + int local_count = ContextLocalCount(); + if (local_count == 0) return true; + // Fill all context locals to the context extension. + int start = ContextLocalNameEntriesIndex(); + int end = start + local_count; + for (int i = start; i < end; ++i) { + int context_index = Context::MIN_CONTEXT_SLOTS + i - start; + RETURN_IF_EMPTY_HANDLE_VALUE( + isolate, + SetProperty(isolate, + scope_object, + Handle<String>(String::cast(get(i))), + Handle<Object>(context->get(context_index), isolate), + ::NONE, + kNonStrictMode), + false); + } + return true; +} + + int ScopeInfo::ParameterEntriesIndex() { ASSERT(length() > 0); return kVariablePartIndex; @@ -416,13 +442,13 @@ void ContextSlotCache::Update(Object* data, VariableMode mode, InitializationFlag init_flag, int slot_index) { - String* symbol; + String* internalized_name; ASSERT(slot_index > kNotFound); - if (HEAP->LookupSymbolIfExists(name, &symbol)) { - int index = Hash(data, symbol); + if (HEAP->InternalizeStringIfExists(name, &internalized_name)) { + int index = Hash(data, internalized_name); Key& key = keys_[index]; key.data = data; - key.name = symbol; + key.name = internalized_name; // Please note value only takes a uint as index. 
values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw(); #ifdef DEBUG @@ -444,8 +470,8 @@ void ContextSlotCache::ValidateEntry(Object* data, VariableMode mode, InitializationFlag init_flag, int slot_index) { - String* symbol; - if (HEAP->LookupSymbolIfExists(name, &symbol)) { + String* internalized_name; + if (HEAP->InternalizeStringIfExists(name, &internalized_name)) { int index = Hash(data, name); Key& key = keys_[index]; ASSERT(key.data == data); @@ -504,4 +530,32 @@ void ScopeInfo::Print() { } #endif // DEBUG + +//--------------------------------------------------------------------------- +// ModuleInfo. + +Handle<ModuleInfo> ModuleInfo::Create( + Isolate* isolate, Interface* interface, Scope* scope) { + Handle<ModuleInfo> info = Allocate(isolate, interface->Length()); + info->set_host_index(interface->Index()); + int i = 0; + for (Interface::Iterator it = interface->iterator(); + !it.done(); it.Advance(), ++i) { + Variable* var = scope->LocalLookup(it.name()); + info->set_name(i, *it.name()); + info->set_mode(i, var->mode()); + ASSERT((var->mode() == MODULE) == (it.interface()->IsModule())); + if (var->mode() == MODULE) { + ASSERT(it.interface()->IsFrozen()); + ASSERT(it.interface()->Index() >= 0); + info->set_index(i, it.interface()->Index()); + } else { + ASSERT(var->index() >= 0); + info->set_index(i, var->index()); + } + } + ASSERT(i == info->length()); + return info; +} + } } // namespace v8::internal diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h index 93734f5a16..a884b3b9ed 100644 --- a/deps/v8/src/scopeinfo.h +++ b/deps/v8/src/scopeinfo.h @@ -114,9 +114,9 @@ class ContextSlotCache { // Bit fields in value_ (type, shift, size). Must be public so the // constants can be embedded in generated code. - class ModeField: public BitField<VariableMode, 0, 3> {}; - class InitField: public BitField<InitializationFlag, 3, 1> {}; - class IndexField: public BitField<int, 4, 32-4> {}; + class ModeField: public BitField<VariableMode, 0, 4> {}; + class InitField: public BitField<InitializationFlag, 4, 1> {}; + class IndexField: public BitField<int, 5, 32-5> {}; private: uint32_t value_; @@ -130,6 +130,67 @@ class ContextSlotCache { }; + + +//--------------------------------------------------------------------------- +// Auxiliary class used for the description of module instances. +// Used by Runtime_DeclareModules. + +class ModuleInfo: public FixedArray { + public: + static ModuleInfo* cast(Object* description) { + return static_cast<ModuleInfo*>(FixedArray::cast(description)); + } + + static Handle<ModuleInfo> Create( + Isolate* isolate, Interface* interface, Scope* scope); + + // Index of module's context in host context. + int host_index() { return Smi::cast(get(HOST_OFFSET))->value(); } + + // Name, mode, and index of the i-th export, respectively. + // For value exports, the index is the slot of the value in the module + // context, for exported modules it is the slot index of the + // referred module's context in the host context. + // TODO(rossberg): This format cannot yet handle exports of modules declared + // in earlier scripts. 
+ String* name(int i) { return String::cast(get(name_offset(i))); } + VariableMode mode(int i) { + return static_cast<VariableMode>(Smi::cast(get(mode_offset(i)))->value()); + } + int index(int i) { return Smi::cast(get(index_offset(i)))->value(); } + + int length() { return (FixedArray::length() - HEADER_SIZE) / ITEM_SIZE; } + + private: + // The internal format is: Index, (Name, VariableMode, Index)* + enum { + HOST_OFFSET, + NAME_OFFSET, + MODE_OFFSET, + INDEX_OFFSET, + HEADER_SIZE = NAME_OFFSET, + ITEM_SIZE = INDEX_OFFSET - NAME_OFFSET + 1 + }; + inline int name_offset(int i) { return NAME_OFFSET + i * ITEM_SIZE; } + inline int mode_offset(int i) { return MODE_OFFSET + i * ITEM_SIZE; } + inline int index_offset(int i) { return INDEX_OFFSET + i * ITEM_SIZE; } + + static Handle<ModuleInfo> Allocate(Isolate* isolate, int length) { + return Handle<ModuleInfo>::cast( + isolate->factory()->NewFixedArray(HEADER_SIZE + ITEM_SIZE * length)); + } + void set_host_index(int index) { set(HOST_OFFSET, Smi::FromInt(index)); } + void set_name(int i, String* name) { set(name_offset(i), name); } + void set_mode(int i, VariableMode mode) { + set(mode_offset(i), Smi::FromInt(mode)); + } + void set_index(int i, int index) { + set(index_offset(i), Smi::FromInt(index)); + } +}; + + } } // namespace v8::internal #endif // V8_SCOPEINFO_H_ diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc index 434479ca5d..4ac9d0e6a4 100644 --- a/deps/v8/src/scopes.cc +++ b/deps/v8/src/scopes.cc @@ -52,8 +52,8 @@ namespace internal { static bool Match(void* key1, void* key2) { String* name1 = *reinterpret_cast<String**>(key1); String* name2 = *reinterpret_cast<String**>(key2); - ASSERT(name1->IsSymbol()); - ASSERT(name2->IsSymbol()); + ASSERT(name1->IsInternalizedString()); + ASSERT(name2->IsInternalizedString()); return name1 == name2; } @@ -105,9 +105,10 @@ Variable* VariableMap::Lookup(Handle<String> name) { // Implementation of Scope Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone) - : isolate_(Isolate::Current()), + : isolate_(zone->isolate()), inner_scopes_(4, zone), variables_(zone), + internals_(4, zone), temps_(4, zone), params_(4, zone), unresolved_(16, zone), @@ -131,6 +132,7 @@ Scope::Scope(Scope* inner_scope, : isolate_(Isolate::Current()), inner_scopes_(4, zone), variables_(zone), + internals_(4, zone), temps_(4, zone), params_(4, zone), unresolved_(16, zone), @@ -153,6 +155,7 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone) : isolate_(Isolate::Current()), inner_scopes_(1, zone), variables_(zone), + internals_(0, zone), temps_(0, zone), params_(0, zone), unresolved_(0, zone), @@ -179,7 +182,7 @@ void Scope::SetDefaults(ScopeType type, Handle<ScopeInfo> scope_info) { outer_scope_ = outer_scope; type_ = type; - scope_name_ = isolate_->factory()->empty_symbol(); + scope_name_ = isolate_->factory()->empty_string(); dynamics_ = NULL; receiver_ = NULL; function_ = NULL; @@ -197,6 +200,8 @@ void Scope::SetDefaults(ScopeType type, num_var_or_const_ = 0; num_stack_slots_ = 0; num_heap_slots_ = 0; + num_modules_ = 0; + module_var_ = NULL, scope_info_ = scope_info; start_position_ = RelocInfo::kNoPosition; end_position_ = RelocInfo::kNoPosition; @@ -303,23 +308,6 @@ bool Scope::Analyze(CompilationInfo* info) { } #endif - if (FLAG_harmony_scoping) { - VariableProxy* proxy = scope->CheckAssignmentToConst(); - if (proxy != NULL) { - // Found an assignment to const. Throw a syntax error. 
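The scopeinfo.h hunk above widens ContextSlotCache's ModeField from 3 to 4 bits and shifts InitField and IndexField up by one, presumably because VariableMode now needs more than eight values (the scopes.cc changes below add an INTERNAL mode). The resulting packing of a cached value is, in effect, the following; the helper names are invented for illustration, only the bit widths come from the new BitField declarations:

  #include <cassert>
  #include <cstdint>

  // Mode in bits 0..3, init flag in bit 4, index in the remaining 27 bits.
  uint32_t Pack(uint32_t mode, uint32_t init, uint32_t index) {
    assert(mode < (1u << 4) && init < (1u << 1) && index < (1u << 27));
    return mode | (init << 4) | (index << 5);
  }
  uint32_t ModeOf(uint32_t v)  { return v & 0xF; }
  uint32_t InitOf(uint32_t v)  { return (v >> 4) & 0x1; }
  uint32_t IndexOf(uint32_t v) { return v >> 5; }

As the Update() code above shows, the cache stores slot_index - kNotFound rather than the raw index, so the not-found sentinel itself packs as zero.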
- MessageLocation location(info->script(), - proxy->position(), - proxy->position()); - Isolate* isolate = info->isolate(); - Factory* factory = isolate->factory(); - Handle<JSArray> array = factory->NewJSArray(0); - Handle<Object> result = - factory->NewSyntaxError("harmony_const_assign", array); - isolate->Throw(*result, &location); - return false; - } - } - info->SetScope(scope); return true; } @@ -347,7 +335,7 @@ void Scope::Initialize() { if (is_declaration_scope()) { Variable* var = variables_.Declare(this, - isolate_->factory()->this_symbol(), + isolate_->factory()->this_string(), VAR, false, Variable::THIS, @@ -364,7 +352,7 @@ void Scope::Initialize() { // Note that it might never be accessed, in which case it won't be // allocated during variable allocation. variables_.Declare(this, - isolate_->factory()->arguments_symbol(), + isolate_->factory()->arguments_string(), VAR, true, Variable::ARGUMENTS, @@ -375,6 +363,7 @@ void Scope::Initialize() { Scope* Scope::FinalizeBlockScope() { ASSERT(is_block_scope()); + ASSERT(internals_.is_empty()); ASSERT(temps_.is_empty()); ASSERT(params_.is_empty()); @@ -515,6 +504,19 @@ void Scope::RemoveUnresolved(VariableProxy* var) { } +Variable* Scope::NewInternal(Handle<String> name) { + ASSERT(!already_resolved()); + Variable* var = new(zone()) Variable(this, + name, + INTERNAL, + false, + Variable::NORMAL, + kCreatedInitialized); + internals_.Add(var, zone()); + return var; +} + + Variable* Scope::NewTemporary(Handle<String> name) { ASSERT(!already_resolved()); Variable* var = new(zone()) Variable(this, @@ -572,29 +574,6 @@ Declaration* Scope::CheckConflictingVarDeclarations() { } -VariableProxy* Scope::CheckAssignmentToConst() { - // Check this scope. - if (is_extended_mode()) { - for (int i = 0; i < unresolved_.length(); i++) { - ASSERT(unresolved_[i]->var() != NULL); - if (unresolved_[i]->var()->is_const_mode() && - unresolved_[i]->IsLValue()) { - return unresolved_[i]; - } - } - } - - // Check inner scopes. - for (int i = 0; i < inner_scopes_.length(); i++) { - VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst(); - if (proxy != NULL) return proxy; - } - - // No assignments to const found. - return NULL; -} - - class VarAndOrder { public: VarAndOrder(Variable* var, int order) : var_(var), order_(order) { } @@ -615,6 +594,15 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals, ASSERT(stack_locals != NULL); ASSERT(context_locals != NULL); + // Collect internals which are always allocated on the heap. + for (int i = 0; i < internals_.length(); i++) { + Variable* var = internals_[i]; + if (var->is_used()) { + ASSERT(var->IsContextSlot()); + context_locals->Add(var, zone()); + } + } + // Collect temporaries which are always allocated on the stack. for (int i = 0; i < temps_.length(); i++) { Variable* var = temps_[i]; @@ -624,9 +612,8 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals, } } - ZoneList<VarAndOrder> vars(variables_.occupancy(), zone()); - // Collect declared local variables. + ZoneList<VarAndOrder> vars(variables_.occupancy(), zone()); for (VariableMap::Entry* p = variables_.Start(); p != NULL; p = variables_.Next(p)) { @@ -659,18 +646,18 @@ bool Scope::AllocateVariables(CompilationInfo* info, } PropagateScopeInfo(outer_scope_calls_non_strict_eval); - // 2) Resolve variables. + // 2) Allocate module instances. 
+ if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) { + ASSERT(num_modules_ == 0); + AllocateModulesRecursively(this); + } + + // 3) Resolve variables. if (!ResolveVariablesRecursively(info, factory)) return false; - // 3) Allocate variables. + // 4) Allocate variables. AllocateVariablesRecursively(); - // 4) Allocate and link module instance objects. - if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) { - AllocateModules(info); - LinkModules(info); - } - return true; } @@ -737,6 +724,15 @@ int Scope::ContextChainLength(Scope* scope) { } +Scope* Scope::GlobalScope() { + Scope* scope = this; + while (!scope->is_global_scope()) { + scope = scope->outer_scope(); + } + return scope; +} + + Scope* Scope::DeclarationScope() { Scope* scope = this; while (!scope->is_declaration_scope()) { @@ -910,6 +906,11 @@ void Scope::Print(int n) { PrintVar(n1, temps_[i]); } + Indent(n1, "// internal vars\n"); + for (int i = 0; i < internals_.length(); i++) { + PrintVar(n1, internals_[i]); + } + Indent(n1, "// local vars\n"); PrintMap(n1, &variables_); @@ -1060,7 +1061,20 @@ bool Scope::ResolveVariable(CompilationInfo* info, } ASSERT(var != NULL); - proxy->BindTo(var); + + if (FLAG_harmony_scoping && is_extended_mode() && + var->is_const_mode() && proxy->IsLValue()) { + // Assignment to const. Throw a syntax error. + MessageLocation location( + info->script(), proxy->position(), proxy->position()); + Isolate* isolate = Isolate::Current(); + Factory* factory = isolate->factory(); + Handle<JSArray> array = factory->NewJSArray(0); + Handle<Object> result = + factory->NewSyntaxError("harmony_const_assign", array); + isolate->Throw(*result, &location); + return false; + } if (FLAG_harmony_modules) { bool ok; @@ -1082,9 +1096,8 @@ bool Scope::ResolveVariable(CompilationInfo* info, // Inconsistent use of module. Throw a syntax error. // TODO(rossberg): generate more helpful error message. - MessageLocation location(info->script(), - proxy->position(), - proxy->position()); + MessageLocation location( + info->script(), proxy->position(), proxy->position()); Isolate* isolate = Isolate::Current(); Factory* factory = isolate->factory(); Handle<JSArray> array = factory->NewJSArray(1); @@ -1096,6 +1109,8 @@ bool Scope::ResolveVariable(CompilationInfo* info, } } + proxy->BindTo(var); + return true; } @@ -1170,6 +1185,7 @@ bool Scope::MustAllocateInContext(Variable* var) { // Exceptions: temporary variables are never allocated in a context; // catch-bound variables are always allocated in a context. 
if (var->mode() == TEMPORARY) return false; + if (var->mode() == INTERNAL) return true; if (is_catch_scope() || is_block_scope() || is_module_scope()) return true; if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true; return var->has_forced_context_allocation() || @@ -1182,7 +1198,7 @@ bool Scope::MustAllocateInContext(Variable* var) { bool Scope::HasArgumentsParameter() { for (int i = 0; i < params_.length(); i++) { if (params_[i]->name().is_identical_to( - isolate_->factory()->arguments_symbol())) { + isolate_->factory()->arguments_string())) { return true; } } @@ -1202,7 +1218,7 @@ void Scope::AllocateHeapSlot(Variable* var) { void Scope::AllocateParameterLocals() { ASSERT(is_function_scope()); - Variable* arguments = LocalLookup(isolate_->factory()->arguments_symbol()); + Variable* arguments = LocalLookup(isolate_->factory()->arguments_string()); ASSERT(arguments != NULL); // functions have 'arguments' declared implicitly bool uses_nonstrict_arguments = false; @@ -1258,7 +1274,7 @@ void Scope::AllocateParameterLocals() { void Scope::AllocateNonParameterLocal(Variable* var) { ASSERT(var->scope() == this); - ASSERT(!var->IsVariable(isolate_->factory()->result_symbol()) || + ASSERT(!var->IsVariable(isolate_->factory()->result_string()) || !var->IsStackLocal()); if (var->IsUnallocated() && MustAllocate(var)) { if (MustAllocateInContext(var)) { @@ -1276,15 +1292,17 @@ void Scope::AllocateNonParameterLocals() { AllocateNonParameterLocal(temps_[i]); } - ZoneList<VarAndOrder> vars(variables_.occupancy(), zone()); + for (int i = 0; i < internals_.length(); i++) { + AllocateNonParameterLocal(internals_[i]); + } + ZoneList<VarAndOrder> vars(variables_.occupancy(), zone()); for (VariableMap::Entry* p = variables_.Start(); p != NULL; p = variables_.Next(p)) { Variable* var = reinterpret_cast<Variable*>(p->value); vars.Add(VarAndOrder(var, p->order), zone()); } - vars.Sort(VarAndOrder::Compare); int var_count = vars.length(); for (int i = 0; i < var_count; i++) { @@ -1337,89 +1355,34 @@ void Scope::AllocateVariablesRecursively() { } -int Scope::StackLocalCount() const { - return num_stack_slots() - - (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0); -} - - -int Scope::ContextLocalCount() const { - if (num_heap_slots() == 0) return 0; - return num_heap_slots() - Context::MIN_CONTEXT_SLOTS - - (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0); -} - - -void Scope::AllocateModules(CompilationInfo* info) { - ASSERT(is_global_scope() || is_module_scope()); - +void Scope::AllocateModulesRecursively(Scope* host_scope) { + if (already_resolved()) return; if (is_module_scope()) { ASSERT(interface_->IsFrozen()); - ASSERT(scope_info_.is_null()); - - // TODO(rossberg): This has to be the initial compilation of this code. - // We currently do not allow recompiling any module definitions. - Handle<ScopeInfo> scope_info = GetScopeInfo(); - Factory* factory = info->isolate()->factory(); - Handle<Context> context = factory->NewModuleContext(scope_info); - Handle<JSModule> instance = factory->NewJSModule(context, scope_info); - context->set_module(*instance); - - bool ok; - interface_->MakeSingleton(instance, &ok); - ASSERT(ok); + Handle<String> name = isolate_->factory()->InternalizeOneByteString( + STATIC_ASCII_VECTOR(".module")); + ASSERT(module_var_ == NULL); + module_var_ = host_scope->NewInternal(name); + ++host_scope->num_modules_; } - // Allocate nested modules. 
for (int i = 0; i < inner_scopes_.length(); i++) { Scope* inner_scope = inner_scopes_.at(i); - if (inner_scope->is_module_scope()) { - inner_scope->AllocateModules(info); - } + inner_scope->AllocateModulesRecursively(host_scope); } } -void Scope::LinkModules(CompilationInfo* info) { - ASSERT(is_global_scope() || is_module_scope()); +int Scope::StackLocalCount() const { + return num_stack_slots() - + (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0); +} - if (is_module_scope()) { - Handle<JSModule> instance = interface_->Instance(); - - // Populate the module instance object. - const PropertyAttributes ro_attr = - static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM); - const PropertyAttributes rw_attr = - static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM); - for (Interface::Iterator it = interface_->iterator(); - !it.done(); it.Advance()) { - if (it.interface()->IsModule()) { - Handle<Object> value = it.interface()->Instance(); - ASSERT(!value.is_null()); - JSReceiver::SetProperty( - instance, it.name(), value, ro_attr, kStrictMode); - } else { - Variable* var = LocalLookup(it.name()); - ASSERT(var != NULL && var->IsContextSlot()); - PropertyAttributes attr = var->is_const_mode() ? ro_attr : rw_attr; - Handle<AccessorInfo> info = - Accessors::MakeModuleExport(it.name(), var->index(), attr); - Handle<Object> result = SetAccessor(instance, info); - ASSERT(!(result.is_null() || result->IsUndefined())); - USE(result); - } - } - USE(JSObject::PreventExtensions(instance)); - } - // Link nested modules. - for (int i = 0; i < inner_scopes_.length(); i++) { - Scope* inner_scope = inner_scopes_.at(i); - if (inner_scope->is_module_scope()) { - inner_scope->LinkModules(info); - } - } +int Scope::ContextLocalCount() const { + if (num_heap_slots() == 0) return 0; + return num_heap_slots() - Context::MIN_CONTEXT_SLOTS - + (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0); } - } } // namespace v8::internal diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h index b9d151cba5..3ca2dcf0ce 100644 --- a/deps/v8/src/scopes.h +++ b/deps/v8/src/scopes.h @@ -186,6 +186,12 @@ class Scope: public ZoneObject { // such a variable again if it was added; otherwise this is a no-op. void RemoveUnresolved(VariableProxy* var); + // Creates a new internal variable in this scope. The name is only used + // for printing and cannot be used to find the variable. In particular, + // the only way to get hold of the temporary is by keeping the Variable* + // around. + Variable* NewInternal(Handle<String> name); + // Creates a new temporary variable in this scope. The name is only used // for printing and cannot be used to find the variable. In particular, // the only way to get hold of the temporary is by keeping the Variable* @@ -218,11 +224,6 @@ class Scope: public ZoneObject { // scope over a let binding of the same name. Declaration* CheckConflictingVarDeclarations(); - // For harmony block scoping mode: Check if the scope has variable proxies - // that are used as lvalues and point to const variables. Assumes that scopes - // have been analyzed and variables been resolved. - VariableProxy* CheckAssignmentToConst(); - // --------------------------------------------------------------------------- // Scope-specific info. @@ -369,6 +370,12 @@ class Scope: public ZoneObject { int StackLocalCount() const; int ContextLocalCount() const; + // For global scopes, the number of module literals (including nested ones). 
+ int num_modules() const { return num_modules_; } + + // For module scopes, the host scope's internal variable binding this module. + Variable* module_var() const { return module_var_; } + // Make sure this scope and all outer scopes are eagerly compiled. void ForceEagerCompilation() { force_eager_compilation_ = true; } @@ -387,6 +394,9 @@ class Scope: public ZoneObject { // The number of contexts between this and scope; zero if this == scope. int ContextChainLength(Scope* scope); + // Find the innermost global scope. + Scope* GlobalScope(); + // Find the first function, global, or eval scope. This is the scope // where var declarations will be hoisted to in the implementation. Scope* DeclarationScope(); @@ -441,6 +451,8 @@ class Scope: public ZoneObject { // variables may be implicitly 'declared' by being used (possibly in // an inner scope) with no intervening with statements or eval calls. VariableMap variables_; + // Compiler-allocated (user-invisible) internals. + ZoneList<Variable*> internals_; // Compiler-allocated (user-invisible) temporaries. ZoneList<Variable*> temps_; // Parameter list in source order. @@ -494,6 +506,12 @@ class Scope: public ZoneObject { int num_stack_slots_; int num_heap_slots_; + // The number of modules (including nested ones). + int num_modules_; + + // For module scopes, the host scope's internal variable binding this module. + Variable* module_var_; + // Serialized scope info support. Handle<ScopeInfo> scope_info_; bool already_resolved() { return already_resolved_; } @@ -578,6 +596,7 @@ class Scope: public ZoneObject { void AllocateNonParameterLocal(Variable* var); void AllocateNonParameterLocals(); void AllocateVariablesRecursively(); + void AllocateModulesRecursively(Scope* host_scope); // Resolve and fill in the allocation information for all variables // in this scopes. Must be called *after* all scopes have been @@ -591,13 +610,6 @@ class Scope: public ZoneObject { bool AllocateVariables(CompilationInfo* info, AstNodeFactory<AstNullVisitor>* factory); - // Instance objects have to be created ahead of time (before code generation) - // because of potentially cyclic references between them. - // Linking also has to be a separate stage, since populating one object may - // potentially require (forward) references to others. - void AllocateModules(CompilationInfo* info); - void LinkModules(CompilationInfo* info); - private: // Construct a scope based on the scope info. 
Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info, diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc index 2ea09f89c3..0ba730fd72 100644 --- a/deps/v8/src/serialize.cc +++ b/deps/v8/src/serialize.cc @@ -30,6 +30,7 @@ #include "accessors.h" #include "api.h" #include "bootstrapper.h" +#include "deoptimizer.h" #include "execution.h" #include "global-handles.h" #include "ic-inl.h" @@ -443,15 +444,15 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) { UNCLASSIFIED, 30, "TranscendentalCache::caches()"); - Add(ExternalReference::handle_scope_next_address().address(), + Add(ExternalReference::handle_scope_next_address(isolate).address(), UNCLASSIFIED, 31, "HandleScope::next"); - Add(ExternalReference::handle_scope_limit_address().address(), + Add(ExternalReference::handle_scope_limit_address(isolate).address(), UNCLASSIFIED, 32, "HandleScope::limit"); - Add(ExternalReference::handle_scope_level_address().address(), + Add(ExternalReference::handle_scope_level_address(isolate).address(), UNCLASSIFIED, 33, "HandleScope::level"); @@ -523,6 +524,52 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) { UNCLASSIFIED, 50, "pending_message_script"); + Add(ExternalReference::get_make_code_young_function(isolate).address(), + UNCLASSIFIED, + 51, + "Code::MakeCodeYoung"); + Add(ExternalReference::cpu_features().address(), + UNCLASSIFIED, + 52, + "cpu_features"); + Add(ExternalReference::new_space_allocation_top_address(isolate).address(), + UNCLASSIFIED, + 53, + "Heap::NewSpaceAllocationTopAddress"); + Add(ExternalReference::new_space_allocation_limit_address(isolate).address(), + UNCLASSIFIED, + 54, + "Heap::NewSpaceAllocationLimitAddress"); + Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(), + UNCLASSIFIED, + 55, + "Runtime::AllocateInNewSpace"); + Add(ExternalReference::old_pointer_space_allocation_top_address( + isolate).address(), + UNCLASSIFIED, + 56, + "Heap::OldPointerSpaceAllocationTopAddress"); + Add(ExternalReference::old_pointer_space_allocation_limit_address( + isolate).address(), + UNCLASSIFIED, + 57, + "Heap::OldPointerSpaceAllocationLimitAddress"); + Add(ExternalReference(Runtime::kAllocateInOldPointerSpace, isolate).address(), + UNCLASSIFIED, + 58, + "Runtime::AllocateInOldPointerSpace"); + + // Add a small set of deopt entry addresses to encoder without generating the + // deopt table code, which isn't possible at deserialization time. + HandleScope scope(isolate); + for (int entry = 0; entry < kDeoptTableSerializeEntryCount; ++entry) { + Address address = Deoptimizer::GetDeoptimizationEntry( + isolate, + entry, + Deoptimizer::LAZY, + Deoptimizer::CALCULATE_ENTRY_ADDRESS); + Add(address, LAZY_DEOPTIMIZATION, 59 + entry, "lazy_deopt"); + } } @@ -1297,7 +1344,7 @@ void PartialSerializer::SerializeObject( // The code-caches link to context-specific code objects, which // the startup and context serializes cannot currently handle. ASSERT(Map::cast(heap_object)->code_cache() == - heap_object->GetHeap()->raw_unchecked_empty_fixed_array()); + heap_object->GetHeap()->empty_fixed_array()); } int root_index; @@ -1323,9 +1370,9 @@ void PartialSerializer::SerializeObject( // should go through the root array or through the partial snapshot cache. // If this is not the case you may have to add something to the root array. ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object)); - // All the symbols that the partial snapshot needs should be either in the - // root table or in the partial snapshot cache. 
- ASSERT(!heap_object->IsSymbol()); + // All the internalized strings that the partial snapshot needs should be + // either in the root table or in the partial snapshot cache. + ASSERT(!heap_object->IsInternalizedString()); if (address_mapper_.IsMapped(heap_object)) { int space = SpaceOfObject(heap_object); diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h index 2041792856..e0bcf4e187 100644 --- a/deps/v8/src/serialize.h +++ b/deps/v8/src/serialize.h @@ -47,10 +47,11 @@ enum TypeCode { EXTENSION, ACCESSOR, RUNTIME_ENTRY, - STUB_CACHE_TABLE + STUB_CACHE_TABLE, + LAZY_DEOPTIMIZATION }; -const int kTypeCodeCount = STUB_CACHE_TABLE + 1; +const int kTypeCodeCount = LAZY_DEOPTIMIZATION + 1; const int kFirstTypeCode = UNCLASSIFIED; const int kReferenceIdBits = 16; @@ -59,6 +60,7 @@ const int kReferenceTypeShift = kReferenceIdBits; const int kDebugRegisterBits = 4; const int kDebugIdShift = kDebugRegisterBits; +const int kDeoptTableSerializeEntryCount = 8; // ExternalReferenceTable is a helper class that defines the relationship // between external references and their encodings. It is used to build @@ -636,7 +638,7 @@ class StartupSerializer : public Serializer { // Serialize the current state of the heap. The order is: // 1) Strong references. // 2) Partial snapshot cache. - // 3) Weak references (e.g. the symbol table). + // 3) Weak references (e.g. the string table). virtual void SerializeStrongReferences(); virtual void SerializeObject(Object* o, HowToCode how_to_code, diff --git a/deps/v8/src/smart-pointers.h b/deps/v8/src/smart-pointers.h index 345c4d47fb..7c35b2aff2 100644 --- a/deps/v8/src/smart-pointers.h +++ b/deps/v8/src/smart-pointers.h @@ -58,11 +58,16 @@ class SmartPointerBase { // You can get the underlying pointer out with the * operator. inline T* operator*() { return p_; } - // You can use [n] to index as if it was a plain pointer + // You can use [n] to index as if it was a plain pointer. inline T& operator[](size_t i) { return p_[i]; } + // You can use [n] to index as if it was a plain pointer. + const inline T& operator[](size_t i) const { + return p_[i]; + } + // We don't have implicit conversion to a T* since that hinders migration: // You would not be able to change a method from returning a T* to // returning an SmartArrayPointer<T> and then get errors wherever it is used. @@ -77,6 +82,11 @@ class SmartPointerBase { return temp; } + inline void Reset(T* new_value) { + if (p_) Deallocator::Delete(p_); + p_ = new_value; + } + // Assignment requires an empty (NULL) SmartArrayPointer as the receiver. Like // the copy constructor it removes the pointer in the original to avoid // double freeing. 
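smart-pointers.h above gains a const operator[] and a Reset(T*) that frees the currently held object before taking ownership of the new one. A minimal owning wrapper with the same Reset semantics, sketched in standard C++ (Owned is a stand-in, not the V8 SmartPointerBase):

  #include <cassert>

  template <typename T>
  class Owned {
   public:
    explicit Owned(T* p = 0) : p_(p) {}
    ~Owned() { delete p_; }

    // Free the currently held object (if any), then take ownership of new_value.
    void Reset(T* new_value) {
      if (p_ != 0) delete p_;
      p_ = new_value;
    }

    T* get() const { return p_; }

   private:
    T* p_;
    Owned(const Owned&);             // copying omitted for brevity
    Owned& operator=(const Owned&);
  };

  int main() {
    Owned<int> value(new int(1));
    value.Reset(new int(2));  // the first int is deleted here
    assert(*value.get() == 2);
    return 0;
  }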
@@ -119,11 +129,12 @@ class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> { template<typename T> struct ObjectDeallocator { - static void Delete(T* array) { - Malloced::Delete(array); + static void Delete(T* object) { + delete object; } }; + template<typename T> class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> { public: diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h index c64772775a..aeac570f4e 100644 --- a/deps/v8/src/spaces-inl.h +++ b/deps/v8/src/spaces-inl.h @@ -164,7 +164,7 @@ Page* Page::Initialize(Heap* heap, Executability executable, PagedSpace* owner) { Page* page = reinterpret_cast<Page*>(chunk); - ASSERT(chunk->size() <= static_cast<size_t>(kPageSize)); + ASSERT(page->area_size() <= kNonCodeObjectAreaSize); ASSERT(chunk->owner() == owner); owner->IncreaseCapacity(page->area_size()); owner->Free(page->area_start(), page->area_size()); @@ -214,6 +214,19 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) { } +void MemoryChunk::UpdateHighWaterMark(Address mark) { + if (mark == NULL) return; + // Need to subtract one from the mark because when a chunk is full the + // top points to the next address after the chunk, which effectively belongs + // to another chunk. See the comment to Page::FromAllocationTop. + MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); + int new_mark = static_cast<int>(mark - chunk->address()); + if (new_mark > chunk->high_water_mark_) { + chunk->high_water_mark_ = new_mark; + } +} + + PointerChunkIterator::PointerChunkIterator(Heap* heap) : state_(kOldPointerState), old_pointer_iterator_(heap->old_pointer_space()), diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc index cc841806b6..2952fd52cf 100644 --- a/deps/v8/src/spaces.cc +++ b/deps/v8/src/spaces.cc @@ -27,7 +27,6 @@ #include "v8.h" -#include "liveobjectlist-inl.h" #include "macro-assembler.h" #include "mark-compact.h" #include "platform.h" @@ -69,11 +68,11 @@ HeapObjectIterator::HeapObjectIterator(PagedSpace* space, HeapObjectIterator::HeapObjectIterator(Page* page, HeapObjectCallback size_func) { Space* owner = page->owner(); - ASSERT(owner == HEAP->old_pointer_space() || - owner == HEAP->old_data_space() || - owner == HEAP->map_space() || - owner == HEAP->cell_space() || - owner == HEAP->code_space()); + ASSERT(owner == page->heap()->old_pointer_space() || + owner == page->heap()->old_data_space() || + owner == page->heap()->map_space() || + owner == page->heap()->cell_space() || + owner == page->heap()->code_space()); Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(), page->area_end(), @@ -207,17 +206,18 @@ void CodeRange::GetNextAllocationBlock(size_t requested) { } - -Address CodeRange::AllocateRawMemory(const size_t requested, +Address CodeRange::AllocateRawMemory(const size_t requested_size, + const size_t commit_size, size_t* allocated) { + ASSERT(commit_size <= requested_size); ASSERT(current_allocation_block_index_ < allocation_list_.length()); - if (requested > allocation_list_[current_allocation_block_index_].size) { + if (requested_size > allocation_list_[current_allocation_block_index_].size) { // Find an allocation block large enough. This function call may // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. - GetNextAllocationBlock(requested); + GetNextAllocationBlock(requested_size); } // Commit the requested memory at the start of the current allocation block. 
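CodeRange::AllocateRawMemory above now takes the requested (reserved) size and a separate, smaller commit size; the same split is applied to MemoryAllocator::AllocateAlignedMemory and MemoryChunk::CommitArea further down, so a chunk can reserve address space up front and commit or uncommit pages lazily as it grows and shrinks. The arithmetic, in a plain C++ sketch with hypothetical names (only the "round both sizes up to the OS page size, commit no more than was reserved" rule is taken from the hunks):

  #include <cassert>
  #include <cstddef>

  // Round value up to a multiple of granularity (granularity > 0).
  size_t RoundUpTo(size_t value, size_t granularity) {
    return (value + granularity - 1) / granularity * granularity;
  }

  struct ChunkRequest {
    size_t reserved;   // address space reserved up front
    size_t committed;  // pages actually committed now
  };

  ChunkRequest MakeRequest(size_t reserve_size, size_t commit_size,
                           size_t page_size) {
    assert(commit_size <= reserve_size);
    ChunkRequest r = { RoundUpTo(reserve_size, page_size),
                       RoundUpTo(commit_size, page_size) };
    return r;
  }

  int main() {
    // Reserve 64 KB of address space but commit only the first 8 KB; the rest
    // can be committed later (cf. MemoryChunk::CommitArea below) as the space
    // grows, or uncommitted again when it shrinks.
    ChunkRequest r = MakeRequest(64 * 1024, 8 * 1024, 4096);
    assert(r.reserved == 64 * 1024 && r.committed == 8 * 1024);
    return 0;
  }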
- size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); + size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); FreeBlock current = allocation_list_[current_allocation_block_index_]; if (aligned_requested >= (current.size - Page::kPageSize)) { // Don't leave a small free block, useless for a large object or chunk. @@ -227,9 +227,10 @@ Address CodeRange::AllocateRawMemory(const size_t requested, } ASSERT(*allocated <= current.size); ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); - if (!MemoryAllocator::CommitCodePage(code_range_, - current.start, - *allocated)) { + if (!MemoryAllocator::CommitExecutableMemory(code_range_, + current.start, + commit_size, + *allocated)) { *allocated = 0; return NULL; } @@ -242,6 +243,16 @@ Address CodeRange::AllocateRawMemory(const size_t requested, } +bool CodeRange::CommitRawMemory(Address start, size_t length) { + return code_range_->Commit(start, length, true); +} + + +bool CodeRange::UncommitRawMemory(Address start, size_t length) { + return code_range_->Uncommit(start, length); +} + + void CodeRange::FreeRawMemory(Address address, size_t length) { ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); free_list_.Add(FreeBlock(address, length)); @@ -353,20 +364,25 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, } -Address MemoryAllocator::AllocateAlignedMemory(size_t size, +Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size, + size_t commit_size, size_t alignment, Executability executable, VirtualMemory* controller) { + ASSERT(commit_size <= reserve_size); VirtualMemory reservation; - Address base = ReserveAlignedMemory(size, alignment, &reservation); + Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); if (base == NULL) return NULL; if (executable == EXECUTABLE) { - if (!CommitCodePage(&reservation, base, size)) { + if (!CommitExecutableMemory(&reservation, + base, + commit_size, + reserve_size)) { base = NULL; } } else { - if (!reservation.Commit(base, size, false)) { + if (!reservation.Commit(base, commit_size, false)) { base = NULL; } } @@ -448,6 +464,9 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, chunk->slots_buffer_ = NULL; chunk->skip_list_ = NULL; chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; + chunk->progress_bar_ = 0; + chunk->high_water_mark_ = static_cast<int>(area_start - base); + chunk->parallel_sweeping_ = 0; chunk->ResetLiveBytes(); Bitmap::Clear(chunk); chunk->initialize_scan_on_scavenge(false); @@ -468,9 +487,67 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, } +// Commit MemoryChunk area to the requested size. +bool MemoryChunk::CommitArea(size_t requested) { + size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? + MemoryAllocator::CodePageGuardSize() : 0; + size_t header_size = area_start() - address() - guard_size; + size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize()); + size_t committed_size = RoundUp(header_size + (area_end() - area_start()), + OS::CommitPageSize()); + + if (commit_size > committed_size) { + // Commit size should be less or equal than the reserved size. + ASSERT(commit_size <= size() - 2 * guard_size); + // Append the committed area. 
+ Address start = address() + committed_size + guard_size; + size_t length = commit_size - committed_size; + if (reservation_.IsReserved()) { + if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) { + return false; + } + } else { + CodeRange* code_range = heap_->isolate()->code_range(); + ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE)); + if (!code_range->CommitRawMemory(start, length)) return false; + } + + if (Heap::ShouldZapGarbage()) { + heap_->isolate()->memory_allocator()->ZapBlock(start, length); + } + } else if (commit_size < committed_size) { + ASSERT(commit_size > 0); + // Shrink the committed area. + size_t length = committed_size - commit_size; + Address start = address() + committed_size + guard_size - length; + if (reservation_.IsReserved()) { + if (!reservation_.Uncommit(start, length)) return false; + } else { + CodeRange* code_range = heap_->isolate()->code_range(); + ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE)); + if (!code_range->UncommitRawMemory(start, length)) return false; + } + } + + area_end_ = area_start_ + requested; + return true; +} + + void MemoryChunk::InsertAfter(MemoryChunk* other) { next_chunk_ = other->next_chunk_; prev_chunk_ = other; + + // This memory barrier is needed since concurrent sweeper threads may iterate + // over the list of pages while a new page is inserted. + // TODO(hpayer): find a cleaner way to guarantee that the page list can be + // expanded concurrently + MemoryBarrier(); + + // The following two write operations can take effect in arbitrary order + // since pages are always iterated by the sweeper threads in LIFO order, i.e, + // the inserted page becomes visible for the sweeper threads after + // other->next_chunk_ = this; other->next_chunk_->prev_chunk_ = this; other->next_chunk_ = this; } @@ -488,9 +565,12 @@ void MemoryChunk::Unlink() { } -MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, +MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, + intptr_t commit_area_size, Executability executable, Space* owner) { + ASSERT(commit_area_size <= reserve_area_size); + size_t chunk_size; Heap* heap = isolate_->heap(); Address base = NULL; @@ -498,8 +578,38 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, Address area_start = NULL; Address area_end = NULL; + // + // MemoryChunk layout: + // + // Executable + // +----------------------------+<- base aligned with MemoryChunk::kAlignment + // | Header | + // +----------------------------+<- base + CodePageGuardStartOffset + // | Guard | + // +----------------------------+<- area_start_ + // | Area | + // +----------------------------+<- area_end_ (area_start + commit_area_size) + // | Committed but not used | + // +----------------------------+<- aligned at OS page boundary + // | Reserved but not committed | + // +----------------------------+<- aligned at OS page boundary + // | Guard | + // +----------------------------+<- base + chunk_size + // + // Non-executable + // +----------------------------+<- base aligned with MemoryChunk::kAlignment + // | Header | + // +----------------------------+<- area_start_ (base + kObjectStartOffset) + // | Area | + // +----------------------------+<- area_end_ (area_start + commit_area_size) + // | Committed but not used | + // +----------------------------+<- aligned at OS page boundary + // | Reserved but not committed | + // +----------------------------+<- base + chunk_size + // + if (executable == EXECUTABLE) { - chunk_size = RoundUp(CodePageAreaStartOffset() + 
body_size, + chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, OS::CommitPageSize()) + CodePageGuardSize(); // Check executable memory limit. @@ -510,10 +620,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, return NULL; } + // Size of header (not executable) plus area (executable). + size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, + OS::CommitPageSize()); // Allocate executable memory either from code range or from the // OS. if (isolate_->code_range()->exists()) { - base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); + base = isolate_->code_range()->AllocateRawMemory(chunk_size, + commit_size, + &chunk_size); ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment)); if (base == NULL) return NULL; @@ -522,6 +637,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, size_executable_ += chunk_size; } else { base = AllocateAlignedMemory(chunk_size, + commit_size, MemoryChunk::kAlignment, executable, &reservation); @@ -532,14 +648,18 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, if (Heap::ShouldZapGarbage()) { ZapBlock(base, CodePageGuardStartOffset()); - ZapBlock(base + CodePageAreaStartOffset(), body_size); + ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); } area_start = base + CodePageAreaStartOffset(); - area_end = area_start + body_size; + area_end = area_start + commit_area_size; } else { - chunk_size = MemoryChunk::kObjectStartOffset + body_size; + chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, + OS::CommitPageSize()); + size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset + + commit_area_size, OS::CommitPageSize()); base = AllocateAlignedMemory(chunk_size, + commit_size, MemoryChunk::kAlignment, executable, &reservation); @@ -547,13 +667,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, if (base == NULL) return NULL; if (Heap::ShouldZapGarbage()) { - ZapBlock(base, chunk_size); + ZapBlock(base, Page::kObjectStartOffset + commit_area_size); } area_start = base + Page::kObjectStartOffset; - area_end = base + chunk_size; + area_end = area_start + commit_area_size; } + // Use chunk_size for statistics and callbacks because we assume that they + // treat reserved but not-yet committed memory regions of chunks as allocated. 
isolate_->counters()->memory_allocated()-> Increment(static_cast<int>(chunk_size)); @@ -578,7 +700,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner, Executability executable) { - MemoryChunk* chunk = AllocateChunk(size, executable, owner); + MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); if (chunk == NULL) return NULL; @@ -589,7 +711,10 @@ Page* MemoryAllocator::AllocatePage(intptr_t size, LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, Space* owner, Executability executable) { - MemoryChunk* chunk = AllocateChunk(object_size, executable, owner); + MemoryChunk* chunk = AllocateChunk(object_size, + object_size, + executable, + owner); if (chunk == NULL) return NULL; return LargePage::Initialize(isolate_->heap(), chunk); } @@ -731,9 +856,10 @@ int MemoryAllocator::CodePageAreaEndOffset() { } -bool MemoryAllocator::CommitCodePage(VirtualMemory* vm, - Address start, - size_t size) { +bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, + Address start, + size_t commit_size, + size_t reserved_size) { // Commit page header (not executable). if (!vm->Commit(start, CodePageGuardStartOffset(), @@ -747,15 +873,14 @@ bool MemoryAllocator::CommitCodePage(VirtualMemory* vm, } // Commit page body (executable). - size_t area_size = size - CodePageAreaStartOffset() - CodePageGuardSize(); if (!vm->Commit(start + CodePageAreaStartOffset(), - area_size, + commit_size - CodePageGuardStartOffset(), true)) { return false; } - // Create guard page after the allocatable area. - if (!vm->Guard(start + CodePageAreaStartOffset() + area_size)) { + // Create guard page before the end. + if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { return false; } @@ -824,6 +949,18 @@ void PagedSpace::TearDown() { } +size_t PagedSpace::CommittedPhysicalMemory() { + if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top); + size_t size = 0; + PageIterator it(this); + while (it.has_next()) { + size += it.next()->CommittedPhysicalMemory(); + } + return size; +} + + MaybeObject* PagedSpace::FindObject(Address addr) { // Note: this function can only be called on precisely swept spaces. ASSERT(!heap()->mark_compact_collector()->in_use()); @@ -855,6 +992,7 @@ bool PagedSpace::CanExpand() { return true; } + bool PagedSpace::Expand() { if (!CanExpand()) return false; @@ -919,7 +1057,7 @@ int PagedSpace::CountTotalPages() { } -void PagedSpace::ReleasePage(Page* page) { +void PagedSpace::ReleasePage(Page* page, bool unlink) { ASSERT(page->LiveBytes() == 0); ASSERT(AreaSize() == page->area_size()); @@ -943,7 +1081,9 @@ void PagedSpace::ReleasePage(Page* page) { allocation_info_.top = allocation_info_.limit = NULL; } - page->Unlink(); + if (unlink) { + page->Unlink(); + } if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { heap()->isolate()->memory_allocator()->Free(page); } else { @@ -955,36 +1095,6 @@ void PagedSpace::ReleasePage(Page* page) { } -void PagedSpace::ReleaseAllUnusedPages() { - PageIterator it(this); - while (it.has_next()) { - Page* page = it.next(); - if (!page->WasSwept()) { - if (page->LiveBytes() == 0) ReleasePage(page); - } else { - HeapObject* obj = HeapObject::FromAddress(page->area_start()); - if (obj->IsFreeSpace() && - FreeSpace::cast(obj)->size() == AreaSize()) { - // Sometimes we allocate memory from free list but don't - // immediately initialize it (e.g. 
see PagedSpace::ReserveSpace - // called from Heap::ReserveSpace that can cause GC before - // reserved space is actually initialized). - // Thus we can't simply assume that obj represents a valid - // node still owned by a free list - // Instead we should verify that the page is fully covered - // by free list items. - FreeList::SizeStats sizes; - free_list_.CountFreeListItems(page, &sizes); - if (sizes.Total() == AreaSize()) { - ReleasePage(page); - } - } - } - } - heap()->FreeQueuedChunks(); -} - - #ifdef DEBUG void PagedSpace::Print() { } #endif @@ -1175,6 +1285,7 @@ void NewSpace::Shrink() { void NewSpace::UpdateAllocationInfo() { + MemoryChunk::UpdateHighWaterMark(allocation_info_.top); allocation_info_.top = to_space_.page_low(); allocation_info_.limit = to_space_.page_high(); @@ -1387,6 +1498,17 @@ bool SemiSpace::Uncommit() { } +size_t SemiSpace::CommittedPhysicalMemory() { + if (!is_committed()) return 0; + size_t size = 0; + NewSpacePageIterator it(this); + while (it.has_next()) { + size += it.next()->CommittedPhysicalMemory(); + } + return size; +} + + bool SemiSpace::GrowTo(int new_capacity) { if (!is_committed()) { if (!Commit()) return false; @@ -1654,6 +1776,7 @@ static void ReportCodeKindStatistics() { CASE(FUNCTION); CASE(OPTIMIZED_FUNCTION); CASE(STUB); + CASE(COMPILED_STUB); CASE(BUILTIN); CASE(LOAD_IC); CASE(KEYED_LOAD_IC); @@ -1821,6 +1944,17 @@ void NewSpace::RecordPromotion(HeapObject* obj) { promoted_histogram_[type].increment_bytes(obj->Size()); } + +size_t NewSpace::CommittedPhysicalMemory() { + if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top); + size_t size = to_space_.CommittedPhysicalMemory(); + if (from_space_.is_committed()) { + size += from_space_.CommittedPhysicalMemory(); + } + return size; +} + // ----------------------------------------------------------------------------- // Free lists for old object spaces implementation @@ -1854,7 +1988,7 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) { FreeListNode* FreeListNode::next() { ASSERT(IsFreeListNode(this)); - if (map() == HEAP->raw_unchecked_free_space_map()) { + if (map() == GetHeap()->raw_unchecked_free_space_map()) { ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); return reinterpret_cast<FreeListNode*>( Memory::Address_at(address() + kNextOffset)); @@ -1867,7 +2001,7 @@ FreeListNode* FreeListNode::next() { FreeListNode** FreeListNode::next_address() { ASSERT(IsFreeListNode(this)); - if (map() == HEAP->raw_unchecked_free_space_map()) { + if (map() == GetHeap()->raw_unchecked_free_space_map()) { ASSERT(Size() >= kNextOffset + kPointerSize); return reinterpret_cast<FreeListNode**>(address() + kNextOffset); } else { @@ -1881,7 +2015,7 @@ void FreeListNode::set_next(FreeListNode* next) { // While we are booting the VM the free space map will actually be null. So // we have to make sure that we don't try to use it for anything at that // stage. 
- if (map() == HEAP->raw_unchecked_free_space_map()) { + if (map() == GetHeap()->raw_unchecked_free_space_map()) { ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); Memory::Address_at(address() + kNextOffset) = reinterpret_cast<Address>(next); @@ -1892,52 +2026,72 @@ void FreeListNode::set_next(FreeListNode* next) { } -FreeList::FreeList(PagedSpace* owner) - : owner_(owner), heap_(owner->heap()) { - Reset(); +intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { + intptr_t free_bytes = 0; + if (category->top_ != NULL) { + ASSERT(category->end_ != NULL); + // This is safe (not going to deadlock) since Concatenate operations + // are never performed on the same free lists at the same time in + // reverse order. + ScopedLock lock_target(mutex_); + ScopedLock lock_source(category->mutex()); + free_bytes = category->available(); + if (end_ == NULL) { + end_ = category->end(); + } else { + category->end()->set_next(top_); + } + top_ = category->top(); + available_ += category->available(); + category->Reset(); + } + return free_bytes; } -void FreeList::Reset() { +void FreeListCategory::Reset() { + top_ = NULL; + end_ = NULL; available_ = 0; - small_list_ = NULL; - medium_list_ = NULL; - large_list_ = NULL; - huge_list_ = NULL; } -int FreeList::Free(Address start, int size_in_bytes) { - if (size_in_bytes == 0) return 0; - FreeListNode* node = FreeListNode::FromAddress(start); - node->set_size(heap_, size_in_bytes); +intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) { + int sum = 0; + FreeListNode* n = top_; + while (n != NULL) { + if (Page::FromAddress(n->address()) == p) { + FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n); + sum += free_space->Size(); + } + n = n->next(); + } + return sum; +} - // Early return to drop too-small blocks on the floor. - if (size_in_bytes < kSmallListMin) return size_in_bytes; - // Insert other blocks at the head of a free list of the appropriate - // magnitude. 
- if (size_in_bytes <= kSmallListMax) { - node->set_next(small_list_); - small_list_ = node; - } else if (size_in_bytes <= kMediumListMax) { - node->set_next(medium_list_); - medium_list_ = node; - } else if (size_in_bytes <= kLargeListMax) { - node->set_next(large_list_); - large_list_ = node; - } else { - node->set_next(huge_list_); - huge_list_ = node; +intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) { + int sum = 0; + FreeListNode** n = &top_; + while (*n != NULL) { + if (Page::FromAddress((*n)->address()) == p) { + FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); + sum += free_space->Size(); + *n = (*n)->next(); + } else { + n = (*n)->next_address(); + } } - available_ += size_in_bytes; - ASSERT(IsVeryLong() || available_ == SumFreeLists()); - return 0; + if (top_ == NULL) { + end_ = NULL; + } + available_ -= sum; + return sum; } -FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { - FreeListNode* node = *list; +FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) { + FreeListNode* node = top_; if (node == NULL) return NULL; @@ -1948,59 +2102,150 @@ FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) { } if (node != NULL) { + set_top(node->next()); *node_size = node->Size(); - *list = node->next(); + available_ -= *node_size; } else { - *list = NULL; + set_top(NULL); + } + + if (top() == NULL) { + set_end(NULL); } return node; } +void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) { + node->set_next(top_); + top_ = node; + if (end_ == NULL) { + end_ = node; + } + available_ += size_in_bytes; +} + + +void FreeListCategory::RepairFreeList(Heap* heap) { + FreeListNode* n = top_; + while (n != NULL) { + Map** map_location = reinterpret_cast<Map**>(n->address()); + if (*map_location == NULL) { + *map_location = heap->free_space_map(); + } else { + ASSERT(*map_location == heap->free_space_map()); + } + n = n->next(); + } +} + + +FreeList::FreeList(PagedSpace* owner) + : owner_(owner), heap_(owner->heap()) { + Reset(); +} + + +intptr_t FreeList::Concatenate(FreeList* free_list) { + intptr_t free_bytes = 0; + free_bytes += small_list_.Concatenate(free_list->small_list()); + free_bytes += medium_list_.Concatenate(free_list->medium_list()); + free_bytes += large_list_.Concatenate(free_list->large_list()); + free_bytes += huge_list_.Concatenate(free_list->huge_list()); + return free_bytes; +} + + +void FreeList::Reset() { + small_list_.Reset(); + medium_list_.Reset(); + large_list_.Reset(); + huge_list_.Reset(); +} + + +int FreeList::Free(Address start, int size_in_bytes) { + if (size_in_bytes == 0) return 0; + + FreeListNode* node = FreeListNode::FromAddress(start); + node->set_size(heap_, size_in_bytes); + + // Early return to drop too-small blocks on the floor. + if (size_in_bytes < kSmallListMin) return size_in_bytes; + + // Insert other blocks at the head of a free list of the appropriate + // magnitude. 
+ if (size_in_bytes <= kSmallListMax) { + small_list_.Free(node, size_in_bytes); + } else if (size_in_bytes <= kMediumListMax) { + medium_list_.Free(node, size_in_bytes); + } else if (size_in_bytes <= kLargeListMax) { + large_list_.Free(node, size_in_bytes); + } else { + huge_list_.Free(node, size_in_bytes); + } + + ASSERT(IsVeryLong() || available() == SumFreeLists()); + return 0; +} + + FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { FreeListNode* node = NULL; if (size_in_bytes <= kSmallAllocationMax) { - node = PickNodeFromList(&small_list_, node_size); + node = small_list_.PickNodeFromList(node_size); if (node != NULL) return node; } if (size_in_bytes <= kMediumAllocationMax) { - node = PickNodeFromList(&medium_list_, node_size); + node = medium_list_.PickNodeFromList(node_size); if (node != NULL) return node; } if (size_in_bytes <= kLargeAllocationMax) { - node = PickNodeFromList(&large_list_, node_size); + node = large_list_.PickNodeFromList(node_size); if (node != NULL) return node; } - for (FreeListNode** cur = &huge_list_; + int huge_list_available = huge_list_.available(); + for (FreeListNode** cur = huge_list_.GetTopAddress(); *cur != NULL; cur = (*cur)->next_address()) { FreeListNode* cur_node = *cur; while (cur_node != NULL && Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { - available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); + huge_list_available -= reinterpret_cast<FreeSpace*>(cur_node)->Size(); cur_node = cur_node->next(); } *cur = cur_node; - if (cur_node == NULL) break; + if (cur_node == NULL) { + huge_list_.set_end(NULL); + break; + } - ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map()); + ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map()); FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); int size = cur_as_free_space->Size(); if (size >= size_in_bytes) { // Large enough node found. Unlink it from the list. 
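// --- Illustrative aside (not part of the diff) ----------------------------
// A minimal sketch of the size-class dispatch used by FreeList::Free above:
// a freed block is routed to the small/medium/large/huge category by its
// size. The thresholds mirror the constants declared in the spaces.h hunk
// later in this diff; kPointerSize = 8 and the kLargeListMax value are
// assumptions here (Page::kMaxNonCodeHeapObjectSize is not shown).
#include <cstdio>

namespace sketch {
const int kPointerSize = 8;                          // assumed 64-bit build
const int kSmallListMin = 0x20 * kPointerSize;       // 256 bytes
const int kSmallListMax = 0xff * kPointerSize;       // 2040 bytes
const int kMediumListMax = 0x7ff * kPointerSize;     // 16376 bytes
const int kLargeListMax = 16 * 1024 * kPointerSize;  // placeholder cutoff

enum Category { kDropped, kSmall, kMedium, kLarge, kHuge };

Category Classify(int size_in_bytes) {
  if (size_in_bytes < kSmallListMin) return kDropped;  // too small to track
  if (size_in_bytes <= kSmallListMax) return kSmall;
  if (size_in_bytes <= kMediumListMax) return kMedium;
  if (size_in_bytes <= kLargeListMax) return kLarge;
  return kHuge;
}
}  // namespace sketch

int main() {
  // 100 bytes is dropped, 1024 goes to the small list, 10000 to the medium list.
  std::printf("%d %d %d\n", sketch::Classify(100), sketch::Classify(1024),
              sketch::Classify(10000));  // prints: 0 1 2
  return 0;
}
// ---------------------------------------------------------------------------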
node = *cur; - *node_size = size; *cur = node->next(); + *node_size = size; + huge_list_available -= size; break; } } + if (huge_list_.top() == NULL) { + huge_list_.set_end(NULL); + } + + huge_list_.set_available(huge_list_available); + ASSERT(IsVeryLong() || available() == SumFreeLists()); + return node; } @@ -2020,8 +2265,6 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); if (new_node == NULL) return NULL; - available_ -= new_node_size; - ASSERT(IsVeryLong() || available_ == SumFreeLists()); int bytes_left = new_node_size - size_in_bytes; ASSERT(bytes_left >= 0); @@ -2079,25 +2322,12 @@ HeapObject* FreeList::Allocate(int size_in_bytes) { } -static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { - intptr_t sum = 0; - while (n != NULL) { - if (Page::FromAddress(n->address()) == p) { - FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n); - sum += free_space->Size(); - } - n = n->next(); - } - return sum; -} - - void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) { - sizes->huge_size_ = CountFreeListItemsInList(huge_list_, p); + sizes->huge_size_ = huge_list_.CountFreeListItemsInList(p); if (sizes->huge_size_ < p->area_size()) { - sizes->small_size_ = CountFreeListItemsInList(small_list_, p); - sizes->medium_size_ = CountFreeListItemsInList(medium_list_, p); - sizes->large_size_ = CountFreeListItemsInList(large_list_, p); + sizes->small_size_ = small_list_.CountFreeListItemsInList(p); + sizes->medium_size_ = medium_list_.CountFreeListItemsInList(p); + sizes->large_size_ = large_list_.CountFreeListItemsInList(p); } else { sizes->small_size_ = 0; sizes->medium_size_ = 0; @@ -2106,41 +2336,33 @@ void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) { } -static intptr_t EvictFreeListItemsInList(FreeListNode** n, Page* p) { - intptr_t sum = 0; - while (*n != NULL) { - if (Page::FromAddress((*n)->address()) == p) { - FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); - sum += free_space->Size(); - *n = (*n)->next(); - } else { - n = (*n)->next_address(); - } - } - return sum; -} - - intptr_t FreeList::EvictFreeListItems(Page* p) { - intptr_t sum = EvictFreeListItemsInList(&huge_list_, p); + intptr_t sum = huge_list_.EvictFreeListItemsInList(p); if (sum < p->area_size()) { - sum += EvictFreeListItemsInList(&small_list_, p) + - EvictFreeListItemsInList(&medium_list_, p) + - EvictFreeListItemsInList(&large_list_, p); + sum += small_list_.EvictFreeListItemsInList(p) + + medium_list_.EvictFreeListItemsInList(p) + + large_list_.EvictFreeListItemsInList(p); } - available_ -= static_cast<int>(sum); - return sum; } +void FreeList::RepairLists(Heap* heap) { + small_list_.RepairFreeList(heap); + medium_list_.RepairFreeList(heap); + large_list_.RepairFreeList(heap); + huge_list_.RepairFreeList(heap); +} + + #ifdef DEBUG -intptr_t FreeList::SumFreeList(FreeListNode* cur) { +intptr_t FreeListCategory::SumFreeList() { intptr_t sum = 0; + FreeListNode* cur = top_; while (cur != NULL) { - ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map()); + ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map()); FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); sum += cur_as_free_space->Size(); cur = cur->next(); @@ -2152,8 +2374,9 @@ intptr_t FreeList::SumFreeList(FreeListNode* cur) { static const int kVeryLongFreeList = 500; -int FreeList::FreeListLength(FreeListNode* cur) { +int FreeListCategory::FreeListLength() { int length = 0; + FreeListNode* cur = top_; while (cur != 
NULL) { length++; cur = cur->next(); @@ -2164,10 +2387,10 @@ int FreeList::FreeListLength(FreeListNode* cur) { bool FreeList::IsVeryLong() { - if (FreeListLength(small_list_) == kVeryLongFreeList) return true; - if (FreeListLength(medium_list_) == kVeryLongFreeList) return true; - if (FreeListLength(large_list_) == kVeryLongFreeList) return true; - if (FreeListLength(huge_list_) == kVeryLongFreeList) return true; + if (small_list_.FreeListLength() == kVeryLongFreeList) return true; + if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; + if (large_list_.FreeListLength() == kVeryLongFreeList) return true; + if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; return false; } @@ -2176,10 +2399,10 @@ bool FreeList::IsVeryLong() { // on the free list, so it should not be called if FreeListLength returns // kVeryLongFreeList. intptr_t FreeList::SumFreeLists() { - intptr_t sum = SumFreeList(small_list_); - sum += SumFreeList(medium_list_); - sum += SumFreeList(large_list_); - sum += SumFreeList(huge_list_); + intptr_t sum = small_list_.SumFreeList(); + sum += medium_list_.SumFreeList(); + sum += large_list_.SumFreeList(); + sum += huge_list_.SumFreeList(); return sum; } #endif @@ -2267,24 +2490,9 @@ bool PagedSpace::ReserveSpace(int size_in_bytes) { } -static void RepairFreeList(Heap* heap, FreeListNode* n) { - while (n != NULL) { - Map** map_location = reinterpret_cast<Map**>(n->address()); - if (*map_location == NULL) { - *map_location = heap->free_space_map(); - } else { - ASSERT(*map_location == heap->free_space_map()); - } - n = n->next(); - } -} - - -void FreeList::RepairLists(Heap* heap) { - RepairFreeList(heap, small_list_); - RepairFreeList(heap, medium_list_); - RepairFreeList(heap, large_list_); - RepairFreeList(heap, huge_list_); +intptr_t PagedSpace::SizeOfObjects() { + ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0)); + return Size() - unswept_free_bytes_ - (limit() - top()); } @@ -2307,7 +2515,7 @@ bool LargeObjectSpace::ReserveSpace(int bytes) { bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { - if (IsSweepingComplete()) return true; + if (IsLazySweepingComplete()) return true; intptr_t freed_bytes = 0; Page* p = first_unswept_page_; @@ -2319,7 +2527,10 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { reinterpret_cast<intptr_t>(p)); } DecreaseUnsweptFreeBytes(p); - freed_bytes += MarkCompactCollector::SweepConservatively(this, p); + freed_bytes += + MarkCompactCollector:: + SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>( + this, NULL, p); } p = next_page; } while (p != anchor() && freed_bytes < bytes_to_sweep); @@ -2332,7 +2543,7 @@ bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { heap()->FreeQueuedChunks(); - return IsSweepingComplete(); + return IsLazySweepingComplete(); } @@ -2351,13 +2562,35 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { } +bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { + MarkCompactCollector* collector = heap()->mark_compact_collector(); + if (collector->AreSweeperThreadsActivated()) { + if (collector->IsConcurrentSweepingInProgress()) { + if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) { + if (!collector->sequential_sweeping()) { + collector->WaitUntilSweepingCompleted(); + return true; + } + } + return false; + } + return true; + } else { + return AdvanceSweeper(size_in_bytes); + } +} + + HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { // Allocation in this space has failed. 
-  // If there are unswept pages advance lazy sweeper then sweep one page before
-  // allocating a new page.
-  if (first_unswept_page_->is_valid()) {
-    AdvanceSweeper(size_in_bytes);
+  // If there are unswept pages advance lazy sweeper a bounded number of times
+  // until we find a size_in_bytes contiguous piece of memory.
+  const int kMaxSweepingTries = 5;
+  bool sweeping_complete = false;
+
+  for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
+    sweeping_complete = EnsureSweeperProgress(size_in_bytes);
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2379,8 +2612,8 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Last ditch, sweep all the remaining pages to try to find space. This may
   // cause a pause.
-  if (!IsSweepingComplete()) {
-    AdvanceSweeper(kMaxInt);
+  if (!IsLazySweepingComplete()) {
+    EnsureSweeperProgress(kMaxInt);
     // Retry the free list allocation.
     HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2698,6 +2931,18 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
 }
+
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  size_t size = 0;
+  LargePage* current = first_page_;
+  while (current != NULL) {
+    size += current->CommittedPhysicalMemory();
+    current = current->next_page();
+  }
+  return size;
+}
+
+
 // GC support
 MaybeObject* LargeObjectSpace::FindObject(Address a) {
   LargePage* page = FindPage(a);
@@ -2736,7 +2981,8 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
     MarkBit mark_bit = Marking::MarkBitFrom(object);
     if (mark_bit.Get()) {
       mark_bit.Clear();
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+      Page::FromAddress(object->address())->ResetProgressBar();
+      Page::FromAddress(object->address())->ResetLiveBytes();
       previous = current;
       current = current->next_page();
     } else {
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 95c63d6b61..39c19a4e39 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -320,7 +320,8 @@ class MemoryChunk {
   Space* owner() const {
     if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == kFailureTag) {
-      return reinterpret_cast<Space*>(owner_ - kFailureTag);
+      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+                                      kFailureTag);
     } else {
       return NULL;
     }
@@ -397,6 +398,12 @@ class MemoryChunk {
     WAS_SWEPT_PRECISELY,
     WAS_SWEPT_CONSERVATIVELY,
+    // Large objects can have a progress bar in their page header. These objects
+    // are scanned in increments and will be kept black while being scanned.
+    // Even if the mutator writes to them they will be kept black and a white
+    // to grey transition is performed in the value.
+    HAS_PROGRESS_BAR,
+
     // Last flag, keep at bottom.
     NUM_MEMORY_CHUNK_FLAGS
   };
@@ -448,6 +455,18 @@ class MemoryChunk {
   // Return all current flags.
   intptr_t GetFlags() { return flags_; }
+  intptr_t parallel_sweeping() const {
+    return parallel_sweeping_;
+  }
+
+  void set_parallel_sweeping(intptr_t state) {
+    parallel_sweeping_ = state;
+  }
+
+  bool TryParallelSweeping() {
+    return NoBarrier_CompareAndSwap(&parallel_sweeping_, 1, 0) == 1;
+  }
+
   // Manage live byte count (count of bytes known to be live,
   // because they are marked black).
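// --- Illustrative aside (not part of the diff) ----------------------------
// A minimal sketch of the idea behind TryParallelSweeping() above: a page
// carries a flag, and a sweeper thread claims the page by atomically
// swapping it from 1 ("sweeping pending") to 0 ("claimed"), so exactly one
// thread ever sweeps a given page. V8 uses its own NoBarrier_CompareAndSwap;
// std::atomic is used here purely for illustration.
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct PageLike {
  std::atomic<int> parallel_sweeping{1};  // 1 = pending, 0 = claimed
  bool TryClaim() {
    int expected = 1;
    return parallel_sweeping.compare_exchange_strong(expected, 0);
  }
};

int main() {
  PageLike page;
  std::atomic<int> winners{0};
  std::vector<std::thread> threads;
  for (int i = 0; i < 4; i++) {
    threads.emplace_back([&] { if (page.TryClaim()) ++winners; });
  }
  for (auto& t : threads) t.join();
  std::printf("winners: %d\n", winners.load());  // always prints: winners: 1
  return 0;
}
// ---------------------------------------------------------------------------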
void ResetLiveBytes() { @@ -480,6 +499,29 @@ class MemoryChunk { write_barrier_counter_ = counter; } + int progress_bar() { + ASSERT(IsFlagSet(HAS_PROGRESS_BAR)); + return progress_bar_; + } + + void set_progress_bar(int progress_bar) { + ASSERT(IsFlagSet(HAS_PROGRESS_BAR)); + progress_bar_ = progress_bar; + } + + void ResetProgressBar() { + if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { + set_progress_bar(0); + ClearFlag(MemoryChunk::HAS_PROGRESS_BAR); + } + } + + bool IsLeftOfProgressBar(Object** slot) { + Address slot_address = reinterpret_cast<Address>(slot); + ASSERT(slot_address > this->address()); + return (slot_address - (this->address() + kObjectStartOffset)) < + progress_bar(); + } static void IncrementLiveBytesFromGC(Address address, int by) { MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); @@ -504,7 +546,8 @@ class MemoryChunk { static const size_t kWriteBarrierCounterOffset = kSlotsBufferOffset + kPointerSize + kPointerSize; - static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize; + static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize + + kIntSize + kIntSize + kPointerSize; static const int kBodyOffset = CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); @@ -615,6 +658,14 @@ class MemoryChunk { int area_size() { return static_cast<int>(area_end() - area_start()); } + bool CommitArea(size_t requested); + + // Approximate amount of physical memory committed for this chunk. + size_t CommittedPhysicalMemory() { + return high_water_mark_; + } + + static inline void UpdateHighWaterMark(Address mark); protected: MemoryChunk* next_chunk_; @@ -641,6 +692,14 @@ class MemoryChunk { SlotsBuffer* slots_buffer_; SkipList* skip_list_; intptr_t write_barrier_counter_; + // Used by the incremental marker to keep track of the scanning progress in + // large objects that have a progress bar and are scanned in increments. + int progress_bar_; + // Assuming the initial allocation on a page is sequential, + // count highest number of bytes ever allocated on the page. + int high_water_mark_; + + intptr_t parallel_sweeping_; static MemoryChunk* Initialize(Heap* heap, Address base, @@ -835,6 +894,10 @@ class CodeRange { void TearDown(); bool exists() { return this != NULL && code_range_ != NULL; } + Address start() { + if (this == NULL || code_range_ == NULL) return NULL; + return static_cast<Address>(code_range_->address()); + } bool contains(Address address) { if (this == NULL || code_range_ == NULL) return false; Address start = static_cast<Address>(code_range_->address()); @@ -844,8 +907,11 @@ class CodeRange { // Allocates a chunk of memory from the large-object portion of // the code range. On platforms with no separate code range, should // not be called. - MUST_USE_RESULT Address AllocateRawMemory(const size_t requested, + MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size, + const size_t commit_size, size_t* allocated); + bool CommitRawMemory(Address start, size_t length); + bool UncommitRawMemory(Address start, size_t length); void FreeRawMemory(Address buf, size_t length); private: @@ -993,14 +1059,19 @@ class MemoryAllocator { void ReportStatistics(); #endif - MemoryChunk* AllocateChunk(intptr_t body_size, + // Returns a MemoryChunk in which the memory region from commit_area_size to + // reserve_area_size of the chunk area is reserved but not committed, it + // could be committed later by calling MemoryChunk::CommitArea. 
+ MemoryChunk* AllocateChunk(intptr_t reserve_area_size, + intptr_t commit_area_size, Executability executable, Space* space); Address ReserveAlignedMemory(size_t requested, size_t alignment, VirtualMemory* controller); - Address AllocateAlignedMemory(size_t requested, + Address AllocateAlignedMemory(size_t reserve_size, + size_t commit_size, size_t alignment, Executability executable, VirtualMemory* controller); @@ -1050,9 +1121,10 @@ class MemoryAllocator { return CodePageAreaEndOffset() - CodePageAreaStartOffset(); } - MUST_USE_RESULT static bool CommitCodePage(VirtualMemory* vm, - Address start, - size_t size); + MUST_USE_RESULT static bool CommitExecutableMemory(VirtualMemory* vm, + Address start, + size_t commit_size, + size_t reserved_size); private: Isolate* isolate_; @@ -1338,6 +1410,63 @@ class FreeListNode: public HeapObject { }; +// The free list category holds a pointer to the top element and a pointer to +// the end element of the linked list of free memory blocks. +class FreeListCategory { + public: + FreeListCategory() : + top_(NULL), + end_(NULL), + mutex_(OS::CreateMutex()), + available_(0) {} + + ~FreeListCategory() { + delete mutex_; + } + + intptr_t Concatenate(FreeListCategory* category); + + void Reset(); + + void Free(FreeListNode* node, int size_in_bytes); + + FreeListNode* PickNodeFromList(int *node_size); + + intptr_t CountFreeListItemsInList(Page* p); + + intptr_t EvictFreeListItemsInList(Page* p); + + void RepairFreeList(Heap* heap); + + FreeListNode** GetTopAddress() { return &top_; } + FreeListNode* top() const { return top_; } + void set_top(FreeListNode* top) { top_ = top; } + + FreeListNode** GetEndAddress() { return &end_; } + FreeListNode* end() const { return end_; } + void set_end(FreeListNode* end) { end_ = end; } + + int* GetAvailableAddress() { return &available_; } + int available() const { return available_; } + void set_available(int available) { available_ = available; } + + Mutex* mutex() { return mutex_; } + +#ifdef DEBUG + intptr_t SumFreeList(); + int FreeListLength(); +#endif + + private: + FreeListNode* top_; + FreeListNode* end_; + Mutex* mutex_; + + // Total available bytes in all blocks of this free list category. + int available_; +}; + + // The free list for the old space. The free list is organized in such a way // as to encourage objects allocated around the same time to be near each // other. The normal way to allocate is intended to be by bumping a 'top' @@ -1365,11 +1494,16 @@ class FreeList BASE_EMBEDDED { public: explicit FreeList(PagedSpace* owner); + intptr_t Concatenate(FreeList* free_list); + // Clear the free list. void Reset(); // Return the number of bytes available on the free list. - intptr_t available() { return available_; } + intptr_t available() { + return small_list_.available() + medium_list_.available() + + large_list_.available() + huge_list_.available(); + } // Place a node on the free list. The block of size 'size_in_bytes' // starting at 'start' is placed on the free list. 
The return value is the @@ -1387,8 +1521,6 @@ class FreeList BASE_EMBEDDED { #ifdef DEBUG void Zap(); - static intptr_t SumFreeList(FreeListNode* node); - static int FreeListLength(FreeListNode* cur); intptr_t SumFreeLists(); bool IsVeryLong(); #endif @@ -1411,21 +1543,21 @@ class FreeList BASE_EMBEDDED { intptr_t EvictFreeListItems(Page* p); + FreeListCategory* small_list() { return &small_list_; } + FreeListCategory* medium_list() { return &medium_list_; } + FreeListCategory* large_list() { return &large_list_; } + FreeListCategory* huge_list() { return &huge_list_; } + private: // The size range of blocks, in bytes. static const int kMinBlockSize = 3 * kPointerSize; static const int kMaxBlockSize = Page::kMaxNonCodeHeapObjectSize; - FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); - FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); PagedSpace* owner_; Heap* heap_; - // Total available bytes in all blocks on this free list. - int available_; - static const int kSmallListMin = 0x20 * kPointerSize; static const int kSmallListMax = 0xff * kPointerSize; static const int kMediumListMax = 0x7ff * kPointerSize; @@ -1433,10 +1565,10 @@ class FreeList BASE_EMBEDDED { static const int kSmallAllocationMax = kSmallListMin - kPointerSize; static const int kMediumAllocationMax = kSmallListMax; static const int kLargeAllocationMax = kMediumListMax; - FreeListNode* small_list_; - FreeListNode* medium_list_; - FreeListNode* large_list_; - FreeListNode* huge_list_; + FreeListCategory small_list_; + FreeListCategory medium_list_; + FreeListCategory large_list_; + FreeListCategory huge_list_; DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); }; @@ -1490,6 +1622,9 @@ class PagedSpace : public Space { // spaces this equals the capacity. intptr_t CommittedMemory() { return Capacity(); } + // Approximate amount of physical memory committed for this space. + size_t CommittedPhysicalMemory(); + // Sets the capacity, the available space and the wasted space to zero. // The stats are rebuilt during sweeping by adding each page to the // capacity and the size when it is encountered. As free spaces are @@ -1499,6 +1634,11 @@ class PagedSpace : public Space { accounting_stats_.ClearSizeWaste(); } + // Increases the number of available bytes of that space. + void AddToAccountingStats(intptr_t bytes) { + accounting_stats_.DeallocateBytes(bytes); + } + // Available bytes without growing. These are the bytes on the free list. // The bytes in the linear allocation area are not included in this total // because updating the stats would slow down allocation. New pages are @@ -1512,10 +1652,7 @@ class PagedSpace : public Space { // As size, but the bytes in lazily swept pages are estimated and the bytes // in the current linear allocation area are not included. - virtual intptr_t SizeOfObjects() { - ASSERT(!IsSweepingComplete() || (unswept_free_bytes_ == 0)); - return Size() - unswept_free_bytes_ - (limit() - top()); - } + virtual intptr_t SizeOfObjects(); // Wasted bytes in this space. These are just the bytes that were thrown away // due to being too small to use for allocation. They do not include the @@ -1526,6 +1663,10 @@ class PagedSpace : public Space { Address top() { return allocation_info_.top; } Address limit() { return allocation_info_.limit; } + // The allocation top and limit addresses. 
+ Address* allocation_top_address() { return &allocation_info_.top; } + Address* allocation_limit_address() { return &allocation_info_.limit; } + // Allocate the requested number of bytes in the space if possible, return a // failure object if not. MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); @@ -1550,6 +1691,7 @@ class PagedSpace : public Space { void SetTop(Address top, Address limit) { ASSERT(top == limit || Page::FromAddress(top) == Page::FromAddress(limit - 1)); + MemoryChunk::UpdateHighWaterMark(allocation_info_.top); allocation_info_.top = top; allocation_info_.limit = limit; } @@ -1563,10 +1705,7 @@ class PagedSpace : public Space { } // Releases an unused page and shrinks the space. - void ReleasePage(Page* page); - - // Releases all of the unused pages. - void ReleaseAllUnusedPages(); + void ReleasePage(Page* page, bool unlink); // The dummy page that anchors the linked list of pages. Page* anchor() { return &anchor_; } @@ -1610,7 +1749,7 @@ class PagedSpace : public Space { first_unswept_page_ = first; } - void IncrementUnsweptFreeBytes(int by) { + void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; } @@ -1619,14 +1758,27 @@ class PagedSpace : public Space { unswept_free_bytes_ += (p->area_size() - p->LiveBytes()); } + void DecrementUnsweptFreeBytes(intptr_t by) { + unswept_free_bytes_ -= by; + } + void DecreaseUnsweptFreeBytes(Page* p) { ASSERT(ShouldBeSweptLazily(p)); unswept_free_bytes_ -= (p->area_size() - p->LiveBytes()); } + void ResetUnsweptFreeBytes() { + unswept_free_bytes_ = 0; + } + bool AdvanceSweeper(intptr_t bytes_to_sweep); - bool IsSweepingComplete() { + // When parallel sweeper threads are active and the main thread finished + // its sweeping phase, this function waits for them to complete, otherwise + // AdvanceSweeper with size_in_bytes is called. + bool EnsureSweeperProgress(intptr_t size_in_bytes); + + bool IsLazySweepingComplete() { return !first_unswept_page_->is_valid(); } @@ -1650,6 +1802,8 @@ class PagedSpace : public Space { } protected: + FreeList* free_list() { return &free_list_; } + int area_size_; // Maximum capacity of this space. @@ -1699,6 +1853,7 @@ class PagedSpace : public Space { MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); friend class PageIterator; + friend class SweeperThread; }; @@ -1961,6 +2116,9 @@ class SemiSpace : public Space { static void Swap(SemiSpace* from, SemiSpace* to); + // Approximate amount of physical memory committed for this space. + size_t CommittedPhysicalMemory(); + private: // Flips the semispace between being from-space and to-space. // Copies the flags into the masked positions on all pages in the space. @@ -2158,6 +2316,9 @@ class NewSpace : public Space { return Capacity(); } + // Approximate amount of physical memory committed for this space. + size_t CommittedPhysicalMemory(); + // Return the available bytes without growing. intptr_t Available() { return Capacity() - Size(); @@ -2387,11 +2548,9 @@ class FixedSpace : public PagedSpace { FixedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, - int object_size_in_bytes, - const char* name) + int object_size_in_bytes) : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), - object_size_in_bytes_(object_size_in_bytes), - name_(name) { + object_size_in_bytes_(object_size_in_bytes) { page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes; } @@ -2408,9 +2567,6 @@ class FixedSpace : public PagedSpace { private: // The size of objects in this space. 
int object_size_in_bytes_; - - // The name of this space. - const char* name_; }; @@ -2421,7 +2577,7 @@ class MapSpace : public FixedSpace { public: // Creates a map space object with a maximum capacity. MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) - : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), + : FixedSpace(heap, max_capacity, id, Map::kSize), max_map_space_pages_(kMaxMapPageIndex - 1) { } @@ -2462,7 +2618,7 @@ class CellSpace : public FixedSpace { public: // Creates a property cell space object with a maximum capacity. CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) - : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell") + : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize) {} virtual int RoundSizeDownToObjectAlignment(int size) { @@ -2524,6 +2680,9 @@ class LargeObjectSpace : public Space { return Size(); } + // Approximate amount of physical memory committed for this space. + size_t CommittedPhysicalMemory(); + int PageCount() { return page_count_; } diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc index 66488ae504..8a69164039 100644 --- a/deps/v8/src/store-buffer.cc +++ b/deps/v8/src/store-buffer.cc @@ -687,10 +687,15 @@ void StoreBuffer::Compact() { uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); // Shift out the last bits including any tags. int_addr >>= kPointerSizeLog2; - int hash1 = - ((int_addr ^ (int_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1)); + // The upper part of an address is basically random because of ASLR and OS + // non-determinism, so we use only the bits within a page for hashing to + // make v8's behavior (more) deterministic. + uintptr_t hash_addr = + int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2); + int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) & + (kHashSetLength - 1)); if (hash_set_1_[hash1] == int_addr) continue; - uintptr_t hash2 = (int_addr - (int_addr >> kHashSetLengthLog2)); + uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2)); hash2 ^= hash2 >> (kHashSetLengthLog2 * 2); hash2 &= (kHashSetLength - 1); if (hash_set_2_[hash2] == int_addr) continue; diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h index 0ade8cee17..79046d1540 100644 --- a/deps/v8/src/store-buffer.h +++ b/deps/v8/src/store-buffer.h @@ -210,8 +210,7 @@ class StoreBufferRebuildScope { explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer, StoreBufferCallback callback) - : heap_(heap), - store_buffer_(store_buffer), + : store_buffer_(store_buffer), stored_state_(store_buffer->store_buffer_rebuilding_enabled_), stored_callback_(store_buffer->callback_) { store_buffer_->store_buffer_rebuilding_enabled_ = true; @@ -226,7 +225,6 @@ class StoreBufferRebuildScope { } private: - Heap* heap_; StoreBuffer* store_buffer_; bool stored_state_; StoreBufferCallback stored_callback_; diff --git a/deps/v8/src/string-search.h b/deps/v8/src/string-search.h index 8c3456aa0a..bc685ffe58 100644 --- a/deps/v8/src/string-search.h +++ b/deps/v8/src/string-search.h @@ -53,7 +53,7 @@ class StringSearchBase { // a potentially less efficient searching, but is a safe approximation. // For needles using only characters in the same Unicode 256-code point page, // there is no search speed degradation. - static const int kAsciiAlphabetSize = 128; + static const int kAsciiAlphabetSize = 256; static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize; // Bad-char shift table stored in the state. 
It's length is the alphabet size. @@ -61,12 +61,12 @@ class StringSearchBase { // to compensate for the algorithmic overhead compared to simple brute force. static const int kBMMinPatternLength = 7; - static inline bool IsAsciiString(Vector<const char>) { + static inline bool IsOneByteString(Vector<const uint8_t> string) { return true; } - static inline bool IsAsciiString(Vector<const uc16> string) { - return String::IsAscii(string.start(), string.length()); + static inline bool IsOneByteString(Vector<const uc16> string) { + return String::IsOneByte(string.start(), string.length()); } friend class Isolate; @@ -81,7 +81,7 @@ class StringSearch : private StringSearchBase { pattern_(pattern), start_(Max(0, pattern.length() - kBMMaxShift)) { if (sizeof(PatternChar) > sizeof(SubjectChar)) { - if (!IsAsciiString(pattern_)) { + if (!IsOneByteString(pattern_)) { strategy_ = &FailSearch; return; } @@ -150,13 +150,21 @@ class StringSearch : private StringSearchBase { void PopulateBoyerMooreTable(); + static inline bool exceedsOneByte(uint8_t c) { + return false; + } + + static inline bool exceedsOneByte(uint16_t c) { + return c > String::kMaxOneByteCharCodeU; + } + static inline int CharOccurrence(int* bad_char_occurrence, SubjectChar char_code) { if (sizeof(SubjectChar) == 1) { return bad_char_occurrence[static_cast<int>(char_code)]; } if (sizeof(PatternChar) == 1) { - if (static_cast<unsigned int>(char_code) > String::kMaxAsciiCharCodeU) { + if (exceedsOneByte(char_code)) { return -1; } return bad_char_occurrence[static_cast<unsigned int>(char_code)]; @@ -223,7 +231,7 @@ int StringSearch<PatternChar, SubjectChar>::SingleCharSearch( return static_cast<int>(pos - subject.start()); } else { if (sizeof(PatternChar) > sizeof(SubjectChar)) { - if (static_cast<uc16>(pattern_first_char) > String::kMaxAsciiCharCodeU) { + if (exceedsOneByte(pattern_first_char)) { return -1; } } diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc index 30519b5610..bcc30f9ab3 100644 --- a/deps/v8/src/string-stream.cc +++ b/deps/v8/src/string-stream.cc @@ -311,14 +311,14 @@ bool StringStream::Put(String* str) { bool StringStream::Put(String* str, int start, int end) { - StringInputBuffer name_buffer(str); - name_buffer.Seek(start); - for (int i = start; i < end && name_buffer.has_more(); i++) { - int c = name_buffer.GetNext(); + ConsStringIteratorOp op; + StringCharacterStream stream(str, &op, start); + for (int i = start; i < end && stream.HasMore(); i++) { + uint16_t c = stream.GetNext(); if (c >= 127 || c < 32) { c = '?'; } - if (!Put(c)) { + if (!Put(static_cast<char>(c))) { return false; // Output was truncated. } } @@ -493,7 +493,7 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) { // Common case: on-stack function present and resolved. PrintPrototype(fun, receiver); *code = fun->code(); - } else if (f->IsSymbol()) { + } else if (f->IsInternalizedString()) { // Unresolved and megamorphic calls: Instead of the function // we have the function name on the stack. 
PrintName(f); @@ -533,11 +533,13 @@ void StringStream::PrintFunction(Object* f, Object* receiver, Code** code) { void StringStream::PrintPrototype(JSFunction* fun, Object* receiver) { Object* name = fun->shared()->name(); bool print_name = false; - Heap* heap = HEAP; - for (Object* p = receiver; p != heap->null_value(); p = p->GetPrototype()) { + Isolate* isolate = fun->GetIsolate(); + for (Object* p = receiver; + p != isolate->heap()->null_value(); + p = p->GetPrototype(isolate)) { if (p->IsJSObject()) { Object* key = JSObject::cast(p)->SlowReverseLookup(fun); - if (key != heap->undefined_value()) { + if (key != isolate->heap()->undefined_value()) { if (!name->IsString() || !key->IsString() || !String::cast(name)->Equals(String::cast(key))) { diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js index 6115930b6c..2f8043c609 100644 --- a/deps/v8/src/string.js +++ b/deps/v8/src/string.js @@ -186,11 +186,15 @@ function StringMatch(regexp) { } var subject = TO_STRING_INLINE(this); if (IS_REGEXP(regexp)) { + // Emulate RegExp.prototype.exec's side effect in step 5, even though + // value is discarded. + ToInteger(regexp.lastIndex); if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0); %_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]); // lastMatchInfo is defined in regexp.js. var result = %StringMatch(subject, regexp, lastMatchInfo); if (result !== null) lastMatchInfoOverride = null; + regexp.lastIndex = 0; return result; } // Non-regexp argument. @@ -199,16 +203,6 @@ function StringMatch(regexp) { } -// SubString is an internal function that returns the sub string of 'string'. -// If resulting string is of length 1, we use the one character cache -// otherwise we call the runtime system. -function SubString(string, start, end) { - // Use the one character string cache. - if (start + 1 == end) return %_StringCharAt(string, start); - return %_SubString(string, start, end); -} - - // This has the same size as the lastMatchInfo array, and can be used for // functions that expect that structure to be returned. It is used when the // needle is a string rather than a regexp. In this case we can't update @@ -225,33 +219,61 @@ function StringReplace(search, replace) { } var subject = TO_STRING_INLINE(this); - // Delegate to one of the regular expression variants if necessary. + // Decision tree for dispatch + // .. regexp search + // .... string replace + // ...... non-global search + // ........ empty string replace + // ........ non-empty string replace (with $-expansion) + // ...... global search + // ........ no need to circumvent last match info override + // ........ need to circument last match info override + // .... function replace + // ...... global search + // ...... non-global search + // .. string search + // .... special case that replaces with one single character + // ...... function replace + // ...... string replace (with $-expansion) + if (IS_REGEXP(search)) { + // Emulate RegExp.prototype.exec's side effect in step 5, even if + // value is discarded. + ToInteger(search.lastIndex); %_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]); - if (IS_SPEC_FUNCTION(replace)) { - if (search.global) { - return StringReplaceGlobalRegExpWithFunction(subject, search, replace); - } else { - return StringReplaceNonGlobalRegExpWithFunction(subject, - search, - replace); + + if (!IS_SPEC_FUNCTION(replace)) { + replace = TO_STRING_INLINE(replace); + + if (!search.global) { + // Non-global regexp search, string replace. 
+ var match = DoRegExpExec(search, subject, 0); + if (match == null) { + search.lastIndex = 0 + return subject; + } + if (replace.length == 0) { + return %_SubString(subject, 0, match[CAPTURE0]) + + %_SubString(subject, match[CAPTURE1], subject.length) + } + return ExpandReplacement(replace, subject, lastMatchInfo, + %_SubString(subject, 0, match[CAPTURE0])) + + %_SubString(subject, match[CAPTURE1], subject.length); } - } else { + + // Global regexp search, string replace. + search.lastIndex = 0; if (lastMatchInfoOverride == null) { - return %StringReplaceRegExpWithString(subject, - search, - TO_STRING_INLINE(replace), - lastMatchInfo); + return %StringReplaceGlobalRegExpWithString( + subject, search, replace, lastMatchInfo); } else { // We use this hack to detect whether StringReplaceRegExpWithString - // found at least one hit. In that case we need to remove any + // found at least one hit. In that case we need to remove any // override. var saved_subject = lastMatchInfo[LAST_SUBJECT_INDEX]; lastMatchInfo[LAST_SUBJECT_INDEX] = 0; - var answer = %StringReplaceRegExpWithString(subject, - search, - TO_STRING_INLINE(replace), - lastMatchInfo); + var answer = %StringReplaceGlobalRegExpWithString( + subject, search, replace, lastMatchInfo); if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) { lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject; } else { @@ -260,10 +282,17 @@ function StringReplace(search, replace) { return answer; } } + + if (search.global) { + // Global regexp search, function replace. + return StringReplaceGlobalRegExpWithFunction(subject, search, replace); + } + // Non-global regexp search, function replace. + return StringReplaceNonGlobalRegExpWithFunction(subject, search, replace); } - // Convert the search argument to a string and search for it. search = TO_STRING_INLINE(search); + if (search.length == 1 && subject.length > 0xFF && IS_STRING(replace) && @@ -277,7 +306,7 @@ function StringReplace(search, replace) { if (start < 0) return subject; var end = start + search.length; - var result = SubString(subject, 0, start); + var result = %_SubString(subject, 0, start); // Compute the string to replace with. 
if (IS_SPEC_FUNCTION(replace)) { @@ -286,11 +315,13 @@ function StringReplace(search, replace) { } else { reusableMatchInfo[CAPTURE0] = start; reusableMatchInfo[CAPTURE1] = end; - replace = TO_STRING_INLINE(replace); - result = ExpandReplacement(replace, subject, reusableMatchInfo, result); + result = ExpandReplacement(TO_STRING_INLINE(replace), + subject, + reusableMatchInfo, + result); } - return result + SubString(subject, end, subject.length); + return result + %_SubString(subject, end, subject.length); } @@ -304,7 +335,7 @@ function ExpandReplacement(string, subject, matchInfo, result) { return result; } - if (next > 0) result += SubString(string, 0, next); + if (next > 0) result += %_SubString(string, 0, next); while (true) { var expansion = '$'; @@ -316,13 +347,39 @@ function ExpandReplacement(string, subject, matchInfo, result) { result += '$'; } else if (peek == 38) { // $& - match ++position; - result += SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]); + result += + %_SubString(subject, matchInfo[CAPTURE0], matchInfo[CAPTURE1]); } else if (peek == 96) { // $` - prefix ++position; - result += SubString(subject, 0, matchInfo[CAPTURE0]); + result += %_SubString(subject, 0, matchInfo[CAPTURE0]); } else if (peek == 39) { // $' - suffix ++position; - result += SubString(subject, matchInfo[CAPTURE1], subject.length); + result += %_SubString(subject, matchInfo[CAPTURE1], subject.length); + } else if (peek >= 48 && peek <= 57) { + // Valid indices are $1 .. $9, $01 .. $09 and $10 .. $99 + var scaled_index = (peek - 48) << 1; + var advance = 1; + var number_of_captures = NUMBER_OF_CAPTURES(matchInfo); + if (position + 1 < string.length) { + var next = %_StringCharCodeAt(string, position + 1); + if (next >= 48 && next <= 57) { + var new_scaled_index = scaled_index * 10 + ((next - 48) << 1); + if (new_scaled_index < number_of_captures) { + scaled_index = new_scaled_index; + advance = 2; + } + } + } + if (scaled_index != 0 && scaled_index < number_of_captures) { + var start = matchInfo[CAPTURE(scaled_index)]; + if (start >= 0) { + result += + %_SubString(subject, start, matchInfo[CAPTURE(scaled_index + 1)]); + } + position += advance; + } else { + result += '$'; + } } else { result += '$'; } @@ -337,14 +394,14 @@ function ExpandReplacement(string, subject, matchInfo, result) { // haven't reached the end, we need to append the suffix. if (next < 0) { if (position < length) { - result += SubString(string, position, length); + result += %_SubString(string, position, length); } return result; } // Append substring between the previous and the next $ character. if (next > position) { - result += SubString(string, position, next); + result += %_SubString(string, position, next); } } return result; @@ -360,7 +417,7 @@ function CaptureString(string, lastCaptureInfo, index) { // If start isn't valid, return undefined. if (start < 0) return; var end = lastCaptureInfo[CAPTURE(scaled + 1)]; - return SubString(string, start, end); + return %_SubString(string, start, end); } @@ -401,7 +458,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) { // input string and some replacements that were returned from the replace // function. 
var match_start = 0; - var override = new InternalArray(null, 0, subject); + var override = new InternalPackedArray(null, 0, subject); var receiver = %GetDefaultReceiver(replace); for (var i = 0; i < len; i++) { var elem = res[i]; @@ -451,9 +508,12 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) { function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) { var matchInfo = DoRegExpExec(regexp, subject, 0); - if (IS_NULL(matchInfo)) return subject; + if (IS_NULL(matchInfo)) { + regexp.lastIndex = 0; + return subject; + } var index = matchInfo[CAPTURE0]; - var result = SubString(subject, 0, index); + var result = %_SubString(subject, 0, index); var endOfMatch = matchInfo[CAPTURE1]; // Compute the parameter list consisting of the match, captures, index, // and subject for the replace function invocation. @@ -463,7 +523,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) { var receiver = %GetDefaultReceiver(replace); if (m == 1) { // No captures, only the match, which is always valid. - var s = SubString(subject, index, endOfMatch); + var s = %_SubString(subject, index, endOfMatch); // Don't call directly to avoid exposing the built-in global object. replacement = %_CallFunction(receiver, s, index, subject, replace); } else { @@ -480,7 +540,7 @@ function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) { result += replacement; // The add method converts to string if necessary. // Can't use matchInfo any more from here, since the function could // overwrite it. - return result + SubString(subject, endOfMatch, subject.length); + return result + %_SubString(subject, endOfMatch, subject.length); } @@ -546,7 +606,7 @@ function StringSlice(start, end) { return ''; } - return SubString(s, start_i, end_i); + return %_SubString(s, start_i, end_i); } @@ -607,13 +667,13 @@ function StringSplitOnRegExp(subject, separator, limit, length) { while (true) { if (startIndex === length) { - result.push(SubString(subject, currentIndex, length)); + result.push(%_SubString(subject, currentIndex, length)); break; } var matchInfo = DoRegExpExec(separator, subject, startIndex); if (matchInfo == null || length === (startMatch = matchInfo[CAPTURE0])) { - result.push(SubString(subject, currentIndex, length)); + result.push(%_SubString(subject, currentIndex, length)); break; } var endIndex = matchInfo[CAPTURE1]; @@ -624,11 +684,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) { continue; } - if (currentIndex + 1 == startMatch) { - result.push(%_StringCharAt(subject, currentIndex)); - } else { - result.push(%_SubString(subject, currentIndex, startMatch)); - } + result.push(%_SubString(subject, currentIndex, startMatch)); if (result.length === limit) break; @@ -637,11 +693,7 @@ function StringSplitOnRegExp(subject, separator, limit, length) { var start = matchInfo[i++]; var end = matchInfo[i++]; if (end != -1) { - if (start + 1 == end) { - result.push(%_StringCharAt(subject, start)); - } else { - result.push(%_SubString(subject, start, end)); - } + result.push(%_SubString(subject, start, end)); } else { result.push(void 0); } @@ -685,9 +737,7 @@ function StringSubstring(start, end) { } } - return ((start_i + 1 == end_i) - ? %_StringCharAt(s, start_i) - : %_SubString(s, start_i, end_i)); + return %_SubString(s, start_i, end_i); } @@ -729,9 +779,7 @@ function StringSubstr(start, n) { var end = start + len; if (end > s.length) end = s.length; - return ((start + 1 == end) - ? 
%_StringCharAt(s, start) - : %_SubString(s, start, end)); + return %_SubString(s, start, end); } @@ -799,7 +847,6 @@ function StringTrimRight() { return %StringTrim(TO_STRING_INLINE(this), false, true); } -var static_charcode_array = new InternalArray(4); // ECMA-262, section 15.5.3.2 function StringFromCharCode(code) { @@ -809,17 +856,25 @@ function StringFromCharCode(code) { return %_StringCharFromCode(code & 0xffff); } - // NOTE: This is not super-efficient, but it is necessary because we - // want to avoid converting to numbers from within the virtual - // machine. Maybe we can find another way of doing this? - var codes = static_charcode_array; - for (var i = 0; i < n; i++) { + var one_byte = %NewString(n, NEW_ONE_BYTE_STRING); + var i; + for (i = 0; i < n; i++) { var code = %_Arguments(i); - if (!%_IsSmi(code)) code = ToNumber(code); - codes[i] = code; + if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff; + if (code < 0) code = code & 0xffff; + if (code > 0xff) break; + %_OneByteSeqStringSetChar(one_byte, i, code); + } + if (i == n) return one_byte; + one_byte = %TruncateString(one_byte, i); + + var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING); + for (var j = 0; i < n; i++, j++) { + var code = %_Arguments(i); + if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff; + %_TwoByteSeqStringSetChar(two_byte, j, code); } - codes.length = n; - return %StringFromCharCodeArray(codes); + return one_byte + two_byte; } diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc index 411914719c..a0d98e27d5 100644 --- a/deps/v8/src/stub-cache.cc +++ b/deps/v8/src/stub-cache.cc @@ -56,14 +56,14 @@ void StubCache::Initialize() { } -Code* StubCache::Set(String* name, Map* map, Code* code) { +Code* StubCache::Set(Name* name, Map* map, Code* code) { // Get the flags from the code. Code::Flags flags = Code::RemoveTypeFromFlags(code->flags()); // Validate that the name does not move on scavenge, and that we - // can use identity checks instead of string equality checks. + // can use identity checks instead of structural equality checks. ASSERT(!heap()->InNewSpace(name)); - ASSERT(name->IsSymbol()); + ASSERT(name->IsUniqueName()); // The state bits are not important to the hash function because // the stub cache only contains monomorphic stubs. 
Make sure that @@ -100,379 +100,359 @@ Code* StubCache::Set(String* name, Map* map, Code* code) { } -Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name, +Handle<JSObject> StubCache::StubHolder(Handle<JSObject> receiver, + Handle<JSObject> holder) { + InlineCacheHolderFlag cache_holder = + IC::GetCodeCacheForObject(*receiver, *holder); + return Handle<JSObject>(IC::GetCodeCacheHolder( + isolate_, *receiver, cache_holder)); +} + + +Handle<Code> StubCache::FindIC(Handle<Name> name, + Handle<JSObject> stub_holder, + Code::Kind kind, + Code::StubType type, + Code::ExtraICState extra_ic_state) { + Code::Flags flags = Code::ComputeMonomorphicFlags(kind, extra_ic_state, type); + Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + isolate_); + if (probe->IsCode()) return Handle<Code>::cast(probe); + return Handle<Code>::null(); +} + + +Handle<Code> StubCache::FindHandler(Handle<Name> name, + Handle<JSObject> stub_holder, + Code::Kind kind, + Code::StubType type, + Code::ExtraICState extra_ic_state) { + ASSERT(type != Code::NORMAL); + Code::Flags flags = Code::ComputeMonomorphicFlags( + Code::STUB, extra_ic_state, type, kind); + Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + isolate_); + if (probe->IsCode()) return Handle<Code>::cast(probe); + return Handle<Code>::null(); +} + + +Handle<Code> StubCache::ComputeMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<Name> name) { + Handle<Code> ic = FindIC(name, receiver, Code::LOAD_IC, handler->type()); + if (!ic.is_null()) return ic; + + LoadStubCompiler ic_compiler(isolate()); + ic = ic_compiler.CompileMonomorphicIC( + Handle<Map>(receiver->map()), handler, name); + + JSObject::UpdateMapCodeCache(receiver, name, ic); + return ic; +} + + +Handle<Code> StubCache::ComputeKeyedMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<Name> name) { + Handle<Code> ic = FindIC( + name, receiver, Code::KEYED_LOAD_IC, handler->type()); + if (!ic.is_null()) return ic; + + KeyedLoadStubCompiler ic_compiler(isolate()); + ic = ic_compiler.CompileMonomorphicIC( + Handle<Map>(receiver->map()), handler, name); + + JSObject::UpdateMapCodeCache(receiver, name, ic); + return ic; +} + + +Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name, Handle<JSObject> receiver) { - ASSERT(receiver->IsGlobalObject() || receiver->HasFastProperties()); // If no global objects are present in the prototype chain, the load // nonexistent IC stub can be shared for all names for a given map // and we use the empty string for the map cache in that case. If // there are global objects involved, we need to check global // property cells in the stub and therefore the stub will be // specific to the name. 
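// --- Illustrative aside (not part of the diff) ----------------------------
// A minimal sketch of the cache-key choice described in the comment above: a
// "load nonexistent" stub may be cached under one shared (empty) key as long
// as no global object -- and, with this change, no slow-properties object --
// appears on the prototype chain; otherwise it is keyed by the concrete
// property name. The Proto struct below is purely illustrative.
#include <cstdio>
#include <string>

struct Proto {
  bool is_global;
  bool has_fast_properties;
  const Proto* prototype;  // nullptr terminates the chain
};

std::string CacheName(const Proto* receiver, const std::string& name) {
  std::string cache_name;  // empty string plays the role of the shared key
  for (const Proto* p = receiver; p != nullptr; p = p->prototype) {
    if (p->is_global || !p->has_fast_properties) cache_name = name;
  }
  return cache_name;
}

int main() {
  Proto object_proto{false, true, nullptr};
  Proto plain{false, true, &object_proto};
  Proto global{true, true, &object_proto};
  std::printf("[%s] [%s]\n",
              CacheName(&plain, "foo").c_str(),    // [] -> shared stub
              CacheName(&global, "foo").c_str());  // [foo] -> name-specific
  return 0;
}
// ---------------------------------------------------------------------------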
- Handle<String> cache_name = factory()->empty_string(); - if (receiver->IsGlobalObject()) cache_name = name; - Handle<JSObject> last = receiver; - while (last->GetPrototype() != heap()->null_value()) { - last = Handle<JSObject>(JSObject::cast(last->GetPrototype())); - if (last->IsGlobalObject()) cache_name = name; - } + Handle<Name> cache_name = factory()->empty_string(); + Handle<JSObject> current; + Handle<Object> next = receiver; + Handle<GlobalObject> global; + do { + current = Handle<JSObject>::cast(next); + next = Handle<Object>(current->GetPrototype(), isolate_); + if (current->IsGlobalObject()) { + global = Handle<GlobalObject>::cast(current); + cache_name = name; + } else if (!current->HasFastProperties()) { + cache_name = name; + } + } while (!next->IsNull()); + // Compile the stub that is either shared for all names or // name specific if there are global objects involved. - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NONEXISTENT); - Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<Code> handler = FindHandler( + cache_name, receiver, Code::LOAD_IC, Code::NONEXISTENT); + if (!handler.is_null()) return handler; LoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadNonexistent(cache_name, receiver, last); - PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *cache_name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *cache_name, *code)); - JSObject::UpdateMapCodeCache(receiver, cache_name, code); - return code; + handler = + compiler.CompileLoadNonexistent(receiver, current, cache_name, global); + JSObject::UpdateMapCodeCache(receiver, cache_name, handler); + return handler; } -Handle<Code> StubCache::ComputeLoadField(Handle<String> name, +Handle<Code> StubCache::ComputeLoadField(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder, - int field_index) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + PropertyIndex field) { + if (receiver.is_identical_to(holder)) { + LoadFieldStub stub(LoadStubCompiler::receiver(), + field.is_inobject(holder), + field.translate(holder)); + return stub.GetCode(isolate()); + } + + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindHandler( + name, stub_holder, Code::LOAD_IC, Code::FIELD); + if (!stub.is_null()) return stub; LoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadField(receiver, holder, field_index, name); - PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + Handle<Code> handler = + compiler.CompileLoadField(receiver, holder, name, field); + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } -Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { +Handle<Code> StubCache::ComputeLoadCallback( + Handle<Name> name, + Handle<JSObject> receiver, + Handle<JSObject> holder, + Handle<ExecutableAccessorInfo> callback) { ASSERT(v8::ToCData<Address>(callback->getter()) != 0); - 
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindHandler( + name, stub_holder, Code::LOAD_IC, Code::CALLBACKS); + if (!stub.is_null()) return stub; LoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadCallback(name, receiver, holder, callback); - PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + Handle<Code> handler = + compiler.CompileLoadCallback(receiver, holder, name, callback); + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } -Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name, +Handle<Code> StubCache::ComputeLoadViaGetter(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder, Handle<JSFunction> getter) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindHandler( + name, stub_holder, Code::LOAD_IC, Code::CALLBACKS); + if (!stub.is_null()) return stub; LoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadViaGetter(name, receiver, holder, getter); - PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + Handle<Code> handler = + compiler.CompileLoadViaGetter(receiver, holder, name, getter); + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } -Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name, +Handle<Code> StubCache::ComputeLoadConstant(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder, Handle<JSFunction> value) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> handler = FindHandler( + name, stub_holder, Code::LOAD_IC, Code::CONSTANT_FUNCTION); + if (!handler.is_null()) return handler; LoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadConstant(receiver, holder, value, name); - PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + handler = compiler.CompileLoadConstant(receiver, holder, name, value); + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + + return handler; } -Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name, +Handle<Code> StubCache::ComputeLoadInterceptor(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder) { - 
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindHandler( + name, stub_holder, Code::LOAD_IC, Code::INTERCEPTOR); + if (!stub.is_null()) return stub; LoadStubCompiler compiler(isolate_); - Handle<Code> code = + Handle<Code> handler = compiler.CompileLoadInterceptor(receiver, holder, name); - PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } -Handle<Code> StubCache::ComputeLoadNormal() { +Handle<Code> StubCache::ComputeLoadNormal(Handle<Name> name, + Handle<JSObject> receiver) { return isolate_->builtins()->LoadIC_Normal(); } -Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name, +Handle<Code> StubCache::ComputeLoadGlobal(Handle<Name> name, Handle<JSObject> receiver, Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, bool is_dont_delete) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindIC(name, stub_holder, Code::LOAD_IC, Code::NORMAL); + if (!stub.is_null()) return stub; LoadStubCompiler compiler(isolate_); - Handle<Code> code = + Handle<Code> ic = compiler.CompileLoadGlobal(receiver, holder, cell, name, is_dont_delete); - PROFILE(isolate_, CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + JSObject::UpdateMapCodeCache(stub_holder, name, ic); + return ic; } -Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name, +Handle<Code> StubCache::ComputeKeyedLoadField(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder, - int field_index) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + PropertyIndex field) { + if (receiver.is_identical_to(holder)) { + LoadFieldStub stub(KeyedLoadStubCompiler::receiver(), + field.is_inobject(holder), + field.translate(holder)); + return stub.GetCode(isolate()); + } + + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindHandler( + name, stub_holder, Code::KEYED_LOAD_IC, Code::FIELD); + if (!stub.is_null()) return stub; KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadField(name, receiver, holder, field_index); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + Handle<Code> handler = + compiler.CompileLoadField(receiver, holder, name, field); + 
JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } -Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name, +Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder, Handle<JSFunction> value) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, - Code::CONSTANT_FUNCTION); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> handler = FindHandler( + name, stub_holder, Code::KEYED_LOAD_IC, Code::CONSTANT_FUNCTION); + if (!handler.is_null()) return handler; KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadConstant(name, receiver, holder, value); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + handler = compiler.CompileLoadConstant(receiver, holder, name, value); + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } -Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name, +Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindHandler( + name, stub_holder, Code::KEYED_LOAD_IC, Code::INTERCEPTOR); + if (!stub.is_null()) return stub; KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> code = compiler.CompileLoadInterceptor(receiver, holder, name); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + Handle<Code> handler = + compiler.CompileLoadInterceptor(receiver, holder, name); + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } Handle<Code> StubCache::ComputeKeyedLoadCallback( - Handle<String> name, + Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP); - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<ExecutableAccessorInfo> callback) { + Handle<JSObject> stub_holder = StubHolder(receiver, holder); + Handle<Code> stub = FindHandler( + name, stub_holder, Code::KEYED_LOAD_IC, Code::CALLBACKS); + if (!stub.is_null()) return stub; KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> code = - compiler.CompileLoadCallback(name, receiver, holder, callback); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; + Handle<Code> 
handler = + compiler.CompileLoadCallback(receiver, holder, name, callback); + JSObject::UpdateMapCodeCache(stub_holder, name, handler); + return handler; } -Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name, - Handle<JSArray> receiver) { - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); - - KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> code = compiler.CompileLoadArrayLength(name); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; -} - - -Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name, - Handle<String> receiver) { - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS); - Handle<Map> map(receiver->map()); - Handle<Object> probe(map->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); - - KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> code = compiler.CompileLoadStringLength(name); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); - Map::UpdateCodeCache(map, name, code); - return code; -} - - -Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype( - Handle<String> name, - Handle<JSFunction> receiver) { - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); - - KeyedLoadStubCompiler compiler(isolate_); - Handle<Code> code = compiler.CompileLoadFunctionPrototype(name); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); - JSObject::UpdateMapCodeCache(receiver, name, code); - return code; -} - - -Handle<Code> StubCache::ComputeStoreField(Handle<String> name, +Handle<Code> StubCache::ComputeStoreField(Handle<Name> name, Handle<JSObject> receiver, int field_index, Handle<Map> transition, StrictModeFlag strict_mode) { Code::StubType type = - (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION; - Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, type, strict_mode); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + transition.is_null() ? 
Code::FIELD : Code::MAP_TRANSITION; + + Handle<Code> stub = FindIC( + name, receiver, Code::STORE_IC, type, strict_mode); + if (!stub.is_null()) return stub; StoreStubCompiler compiler(isolate_, strict_mode); Handle<Code> code = compiler.CompileStoreField(receiver, field_index, transition, name); - PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); JSObject::UpdateMapCodeCache(receiver, name, code); return code; } -Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement( +Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) { + Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC); + Handle<Name> name = + isolate()->factory()->KeyedLoadElementMonomorphic_string(); + + Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_); + if (probe->IsCode()) return Handle<Code>::cast(probe); + + KeyedLoadStubCompiler compiler(isolate()); + Handle<Code> code = compiler.CompileLoadElement(receiver_map); + + Map::UpdateCodeCache(receiver_map, name, code); + return code; +} + + +Handle<Code> StubCache::ComputeKeyedStoreElement( Handle<Map> receiver_map, - KeyedIC::StubKind stub_kind, - StrictModeFlag strict_mode) { - KeyedAccessGrowMode grow_mode = - KeyedIC::GetGrowModeFromStubKind(stub_kind); + StrictModeFlag strict_mode, + KeyedAccessStoreMode store_mode) { Code::ExtraICState extra_state = - Code::ComputeExtraICState(grow_mode, strict_mode); - Code::Flags flags = - Code::ComputeMonomorphicFlags( - stub_kind == KeyedIC::LOAD ? Code::KEYED_LOAD_IC - : Code::KEYED_STORE_IC, - Code::NORMAL, - extra_state); - Handle<String> name; - switch (stub_kind) { - case KeyedIC::LOAD: - name = isolate()->factory()->KeyedLoadElementMonomorphic_symbol(); - break; - case KeyedIC::STORE_NO_TRANSITION: - name = isolate()->factory()->KeyedStoreElementMonomorphic_symbol(); - break; - case KeyedIC::STORE_AND_GROW_NO_TRANSITION: - name = isolate()->factory()->KeyedStoreAndGrowElementMonomorphic_symbol(); - break; - default: - UNREACHABLE(); - break; - } - Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Code::ComputeExtraICState(store_mode, strict_mode); + Code::Flags flags = Code::ComputeMonomorphicFlags( + Code::KEYED_STORE_IC, extra_state); - Handle<Code> code; - switch (stub_kind) { - case KeyedIC::LOAD: { - KeyedLoadStubCompiler compiler(isolate_); - code = compiler.CompileLoadElement(receiver_map); - break; - } - case KeyedIC::STORE_AND_GROW_NO_TRANSITION: { - KeyedStoreStubCompiler compiler(isolate_, strict_mode, - ALLOW_JSARRAY_GROWTH); - code = compiler.CompileStoreElement(receiver_map); - break; - } - case KeyedIC::STORE_NO_TRANSITION: { - KeyedStoreStubCompiler compiler(isolate_, strict_mode, - DO_NOT_ALLOW_JSARRAY_GROWTH); - code = compiler.CompileStoreElement(receiver_map); - break; - } - default: - UNREACHABLE(); - break; - } + ASSERT(store_mode == STANDARD_STORE || + store_mode == STORE_AND_GROW_NO_TRANSITION); - ASSERT(!code.is_null()); + Handle<String> name = + isolate()->factory()->KeyedStoreElementMonomorphic_string(); + Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_); + if (probe->IsCode()) return Handle<Code>::cast(probe); + + KeyedStoreStubCompiler compiler(isolate(), strict_mode, store_mode); + Handle<Code> code = compiler.CompileStoreElement(receiver_map); - if (stub_kind == KeyedIC::LOAD) { - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, 0)); 
- } else { - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, 0)); - } Map::UpdateCodeCache(receiver_map, name, code); + ASSERT(Code::GetKeyedAccessStoreMode(code->extra_ic_state()) == store_mode); return code; } @@ -484,99 +464,84 @@ Handle<Code> StubCache::ComputeStoreNormal(StrictModeFlag strict_mode) { } -Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name, +Handle<Code> StubCache::ComputeStoreGlobal(Handle<Name> name, Handle<GlobalObject> receiver, Handle<JSGlobalPropertyCell> cell, StrictModeFlag strict_mode) { - Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, Code::NORMAL, strict_mode); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<Code> stub = FindIC( + name, receiver, Code::STORE_IC, Code::NORMAL, strict_mode); + if (!stub.is_null()) return stub; StoreStubCompiler compiler(isolate_, strict_mode); Handle<Code> code = compiler.CompileStoreGlobal(receiver, cell, name); - PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); JSObject::UpdateMapCodeCache(receiver, name, code); return code; } -Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<AccessorInfo> callback, - StrictModeFlag strict_mode) { +Handle<Code> StubCache::ComputeStoreCallback( + Handle<Name> name, + Handle<JSObject> receiver, + Handle<JSObject> holder, + Handle<ExecutableAccessorInfo> callback, + StrictModeFlag strict_mode) { ASSERT(v8::ToCData<Address>(callback->setter()) != 0); - Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, Code::CALLBACKS, strict_mode); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<Code> stub = FindIC( + name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode); + if (!stub.is_null()) return stub; StoreStubCompiler compiler(isolate_, strict_mode); Handle<Code> code = compiler.CompileStoreCallback(name, receiver, holder, callback); - PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); JSObject::UpdateMapCodeCache(receiver, name, code); return code; } -Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name, +Handle<Code> StubCache::ComputeStoreViaSetter(Handle<Name> name, Handle<JSObject> receiver, Handle<JSObject> holder, Handle<JSFunction> setter, StrictModeFlag strict_mode) { - Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::STORE_IC, Code::CALLBACKS, strict_mode); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<Code> stub = FindIC( + name, receiver, Code::STORE_IC, Code::CALLBACKS, strict_mode); + if (!stub.is_null()) return stub; StoreStubCompiler compiler(isolate_, strict_mode); Handle<Code> code = compiler.CompileStoreViaSetter(name, receiver, holder, setter); - PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); JSObject::UpdateMapCodeCache(receiver, name, code); return code; } -Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name, +Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name, Handle<JSObject> receiver, StrictModeFlag strict_mode) { - Code::Flags flags = 
Code::ComputeMonomorphicFlags( - Code::STORE_IC, Code::INTERCEPTOR, strict_mode); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<Code> stub = FindIC( + name, receiver, Code::STORE_IC, Code::INTERCEPTOR, strict_mode); + if (!stub.is_null()) return stub; StoreStubCompiler compiler(isolate_, strict_mode); Handle<Code> code = compiler.CompileStoreInterceptor(receiver, name); - PROFILE(isolate_, CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); JSObject::UpdateMapCodeCache(receiver, name, code); return code; } -Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name, +Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name, Handle<JSObject> receiver, int field_index, Handle<Map> transition, StrictModeFlag strict_mode) { Code::StubType type = (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION; - Code::Flags flags = Code::ComputeMonomorphicFlags( - Code::KEYED_STORE_IC, type, strict_mode); - Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags)); - if (probe->IsCode()) return Handle<Code>::cast(probe); + Handle<Code> stub = FindIC( + name, receiver, Code::KEYED_STORE_IC, type, strict_mode); + if (!stub.is_null()) return stub; - KeyedStoreStubCompiler compiler(isolate(), strict_mode, - DO_NOT_ALLOW_JSARRAY_GROWTH); + KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE); Handle<Code> code = compiler.CompileStoreField(receiver, field_index, transition, name); - PROFILE(isolate_, CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code)); JSObject::UpdateMapCodeCache(receiver, name, code); return code; } @@ -588,40 +553,51 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name, Handle<Code> StubCache::ComputeCallConstant(int argc, Code::Kind kind, Code::ExtraICState extra_state, - Handle<String> name, + Handle<Name> name, Handle<Object> object, Handle<JSObject> holder, Handle<JSFunction> function) { // Compute the check type and the map. InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object, *holder); - Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder)); + Handle<JSObject> stub_holder(IC::GetCodeCacheHolder( + isolate_, *object, cache_holder)); // Compute check type based on receiver/holder. CheckType check = RECEIVER_MAP_CHECK; if (object->IsString()) { check = STRING_CHECK; + } else if (object->IsSymbol()) { + check = SYMBOL_CHECK; } else if (object->IsNumber()) { check = NUMBER_CHECK; } else if (object->IsBoolean()) { check = BOOLEAN_CHECK; } - Code::Flags flags = - Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state, - cache_holder, argc); - Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags)); + if (check != RECEIVER_MAP_CHECK && + !function->IsBuiltin() && + function->shared()->is_classic_mode()) { + // Calling non-strict non-builtins with a value as the receiver + // requires boxing. 
+ return Handle<Code>::null(); + } + + Code::Flags flags = Code::ComputeMonomorphicFlags( + kind, extra_state, Code::CONSTANT_FUNCTION, argc, cache_holder); + Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder); Handle<Code> code = - compiler.CompileCallConstant(object, holder, function, name, check); + compiler.CompileCallConstant(object, holder, name, check, function); code->set_check_type(check); - ASSERT_EQ(flags, code->flags()); + ASSERT(flags == code->flags()); PROFILE(isolate_, CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); - JSObject::UpdateMapCodeCache(map_holder, name, code); + JSObject::UpdateMapCodeCache(stub_holder, name, code); return code; } @@ -629,37 +605,39 @@ Handle<Code> StubCache::ComputeCallConstant(int argc, Handle<Code> StubCache::ComputeCallField(int argc, Code::Kind kind, Code::ExtraICState extra_state, - Handle<String> name, + Handle<Name> name, Handle<Object> object, Handle<JSObject> holder, - int index) { + PropertyIndex index) { // Compute the check type and the map. InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object, *holder); - Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder)); + Handle<JSObject> stub_holder(IC::GetCodeCacheHolder( + isolate_, *object, cache_holder)); // TODO(1233596): We cannot do receiver map check for non-JS objects // because they may be represented as immediates without a // map. Instead, we check against the map in the holder. - if (object->IsNumber() || object->IsBoolean() || object->IsString()) { + if (object->IsNumber() || object->IsSymbol() || + object->IsBoolean() || object->IsString()) { object = holder; } - Code::Flags flags = - Code::ComputeMonomorphicFlags(kind, Code::FIELD, extra_state, - cache_holder, argc); - Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags)); + Code::Flags flags = Code::ComputeMonomorphicFlags( + kind, extra_state, Code::FIELD, argc, cache_holder); + Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder); Handle<Code> code = compiler.CompileCallField(Handle<JSObject>::cast(object), holder, index, name); - ASSERT_EQ(flags, code->flags()); + ASSERT(flags == code->flags()); PROFILE(isolate_, CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); - JSObject::UpdateMapCodeCache(map_holder, name, code); + JSObject::UpdateMapCodeCache(stub_holder, name, code); return code; } @@ -667,36 +645,38 @@ Handle<Code> StubCache::ComputeCallField(int argc, Handle<Code> StubCache::ComputeCallInterceptor(int argc, Code::Kind kind, Code::ExtraICState extra_state, - Handle<String> name, + Handle<Name> name, Handle<Object> object, Handle<JSObject> holder) { // Compute the check type and the map. InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*object, *holder); - Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*object, cache_holder)); + Handle<JSObject> stub_holder(IC::GetCodeCacheHolder( + isolate_, *object, cache_holder)); // TODO(1233596): We cannot do receiver map check for non-JS objects // because they may be represented as immediates without a // map. 
Instead, we check against the map in the holder. - if (object->IsNumber() || object->IsBoolean() || object->IsString()) { + if (object->IsNumber() || object->IsSymbol() || + object->IsBoolean() || object->IsString()) { object = holder; } - Code::Flags flags = - Code::ComputeMonomorphicFlags(kind, Code::INTERCEPTOR, extra_state, - cache_holder, argc); - Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags)); + Code::Flags flags = Code::ComputeMonomorphicFlags( + kind, extra_state, Code::INTERCEPTOR, argc, cache_holder); + Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder); Handle<Code> code = compiler.CompileCallInterceptor(Handle<JSObject>::cast(object), holder, name); - ASSERT_EQ(flags, code->flags()); + ASSERT(flags == code->flags()); PROFILE(isolate(), CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); - JSObject::UpdateMapCodeCache(map_holder, name, code); + JSObject::UpdateMapCodeCache(stub_holder, name, code); return code; } @@ -704,28 +684,29 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc, Handle<Code> StubCache::ComputeCallGlobal(int argc, Code::Kind kind, Code::ExtraICState extra_state, - Handle<String> name, + Handle<Name> name, Handle<JSObject> receiver, Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, Handle<JSFunction> function) { InlineCacheHolderFlag cache_holder = IC::GetCodeCacheForObject(*receiver, *holder); - Handle<JSObject> map_holder(IC::GetCodeCacheHolder(*receiver, cache_holder)); - Code::Flags flags = - Code::ComputeMonomorphicFlags(kind, Code::NORMAL, extra_state, - cache_holder, argc); - Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags)); + Handle<JSObject> stub_holder(IC::GetCodeCacheHolder( + isolate_, *receiver, cache_holder)); + Code::Flags flags = Code::ComputeMonomorphicFlags( + kind, extra_state, Code::NORMAL, argc, cache_holder); + Handle<Object> probe(stub_holder->map()->FindInCodeCache(*name, flags), + isolate_); if (probe->IsCode()) return Handle<Code>::cast(probe); CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder); Handle<Code> code = compiler.CompileCallGlobal(receiver, holder, cell, function, name); - ASSERT_EQ(flags, code->flags()); + ASSERT(flags == code->flags()); PROFILE(isolate(), CodeCreateEvent(CALL_LOGGER_TAG(kind, CALL_IC_TAG), *code, *name)); GDBJIT(AddCode(GDBJITInterface::CALL_IC, *name, *code)); - JSObject::UpdateMapCodeCache(map_holder, name, code); + JSObject::UpdateMapCodeCache(stub_holder, name, code); return code; } @@ -747,10 +728,8 @@ Code* StubCache::FindCallInitialize(int argc, CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT); Code::Flags flags = Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc); - - // Use raw_unchecked... so we don't get assert failures during GC. 
UnseededNumberDictionary* dictionary = - isolate()->heap()->raw_unchecked_non_monomorphic_cache(); + isolate()->heap()->non_monomorphic_cache(); int entry = dictionary->FindEntry(isolate(), flags); ASSERT(entry != -1); Object* code = dictionary->ValueAt(entry); @@ -826,11 +805,10 @@ Handle<Code> StubCache::ComputeCallNormal(int argc, } -Handle<Code> StubCache::ComputeCallArguments(int argc, Code::Kind kind) { - ASSERT(kind == Code::KEYED_CALL_IC); +Handle<Code> StubCache::ComputeCallArguments(int argc) { Code::Flags flags = - Code::ComputeFlags(kind, MEGAMORPHIC, Code::kNoExtraICState, - Code::NORMAL, argc); + Code::ComputeFlags(Code::KEYED_CALL_IC, MEGAMORPHIC, + Code::kNoExtraICState, Code::NORMAL, argc); Handle<UnseededNumberDictionary> cache = isolate_->factory()->non_monomorphic_cache(); int entry = cache->FindEntry(isolate_, flags); @@ -882,13 +860,69 @@ Handle<Code> StubCache::ComputeCallMiss(int argc, } +Handle<Code> StubCache::ComputeLoadElementPolymorphic( + MapHandleList* receiver_maps) { + Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC); + Handle<PolymorphicCodeCache> cache = + isolate_->factory()->polymorphic_code_cache(); + Handle<Object> probe = cache->Lookup(receiver_maps, flags); + if (probe->IsCode()) return Handle<Code>::cast(probe); + + CodeHandleList handlers(receiver_maps->length()); + KeyedLoadStubCompiler compiler(isolate_); + compiler.CompileElementHandlers(receiver_maps, &handlers); + Handle<Code> code = compiler.CompilePolymorphicIC( + receiver_maps, &handlers, factory()->empty_string(), + Code::NORMAL, ELEMENT); + + isolate()->counters()->keyed_load_polymorphic_stubs()->Increment(); + + PolymorphicCodeCache::Update(cache, receiver_maps, flags, code); + return code; +} + + +Handle<Code> StubCache::ComputePolymorphicIC(MapHandleList* receiver_maps, + CodeHandleList* handlers, + Handle<Name> name) { + LoadStubCompiler ic_compiler(isolate_); + Code::StubType type = handlers->length() == 1 ? handlers->at(0)->type() + : Code::NORMAL; + Handle<Code> ic = ic_compiler.CompilePolymorphicIC( + receiver_maps, handlers, name, type, PROPERTY); + return ic; +} + + +Handle<Code> StubCache::ComputeStoreElementPolymorphic( + MapHandleList* receiver_maps, + KeyedAccessStoreMode store_mode, + StrictModeFlag strict_mode) { + ASSERT(store_mode == STANDARD_STORE || + store_mode == STORE_AND_GROW_NO_TRANSITION); + Handle<PolymorphicCodeCache> cache = + isolate_->factory()->polymorphic_code_cache(); + Code::ExtraICState extra_state = Code::ComputeExtraICState(store_mode, + strict_mode); + Code::Flags flags = + Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state); + Handle<Object> probe = cache->Lookup(receiver_maps, flags); + if (probe->IsCode()) return Handle<Code>::cast(probe); + + KeyedStoreStubCompiler compiler(isolate_, strict_mode, store_mode); + Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps); + PolymorphicCodeCache::Update(cache, receiver_maps, flags, code); + return code; +} + + #ifdef ENABLE_DEBUGGER_SUPPORT Handle<Code> StubCache::ComputeCallDebugBreak(int argc, Code::Kind kind) { // Extra IC state is irrelevant for debug break ICs. They jump to // the actual call ic to carry out the work. 
Code::Flags flags = - Code::ComputeFlags(kind, DEBUG_BREAK, Code::kNoExtraICState, + Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_BREAK, Code::NORMAL, argc); Handle<UnseededNumberDictionary> cache = isolate_->factory()->non_monomorphic_cache(); @@ -907,7 +941,7 @@ Handle<Code> StubCache::ComputeCallDebugPrepareStepIn(int argc, // Extra IC state is irrelevant for debug break ICs. They jump to // the actual call ic to carry out the work. Code::Flags flags = - Code::ComputeFlags(kind, DEBUG_PREPARE_STEP_IN, Code::kNoExtraICState, + Code::ComputeFlags(kind, DEBUG_STUB, DEBUG_PREPARE_STEP_IN, Code::NORMAL, argc); Handle<UnseededNumberDictionary> cache = isolate_->factory()->non_monomorphic_cache(); @@ -936,13 +970,13 @@ void StubCache::Clear() { void StubCache::CollectMatchingMaps(SmallMapList* types, - String* name, + Name* name, Code::Flags flags, Handle<Context> native_context, Zone* zone) { for (int i = 0; i < kPrimaryTableSize; i++) { if (primary_[i].key == name) { - Map* map = primary_[i].value->FindFirstMap(); + Map* map = primary_[i].map; // Map can be NULL, if the stub is constant function call // with a primitive receiver. if (map == NULL) continue; @@ -957,7 +991,7 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, for (int i = 0; i < kSecondaryTableSize; i++) { if (secondary_[i].key == name) { - Map* map = secondary_[i].value->FindFirstMap(); + Map* map = secondary_[i].map; // Map can be NULL, if the stub is constant function call // with a primitive receiver. if (map == NULL) continue; @@ -966,7 +1000,7 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, int primary_offset = PrimaryOffset(name, flags, map); Entry* primary_entry = entry(primary_, primary_offset); if (primary_entry->key == name) { - Map* primary_map = primary_entry->value->FindFirstMap(); + Map* primary_map = primary_entry->map; if (map == primary_map) continue; } @@ -985,42 +1019,21 @@ void StubCache::CollectMatchingMaps(SmallMapList* types, // StubCompiler implementation. -RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty) { - ASSERT(args[0]->IsJSObject()); - ASSERT(args[1]->IsJSObject()); - ASSERT(args[3]->IsSmi()); - AccessorInfo* callback = AccessorInfo::cast(args[4]); - Address getter_address = v8::ToCData<Address>(callback->getter()); - v8::AccessorGetter fun = FUNCTION_CAST<v8::AccessorGetter>(getter_address); - ASSERT(fun != NULL); - ASSERT(callback->IsCompatibleReceiver(args[0])); - v8::AccessorInfo info(&args[0]); - HandleScope scope(isolate); - v8::Handle<v8::Value> result; - { - // Leaving JavaScript. 
- VMState state(isolate, EXTERNAL); - ExternalCallbackScope call_scope(isolate, getter_address); - result = fun(v8::Utils::ToLocal(args.at<String>(5)), info); - } - RETURN_IF_SCHEDULED_EXCEPTION(isolate); - if (result.IsEmpty()) return HEAP->undefined_value(); - Handle<Object> result_internal = v8::Utils::OpenHandle(*result); - result_internal->VerifyApiCallResultType(); - return *result_internal; -} - - RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) { JSObject* recv = JSObject::cast(args[0]); - AccessorInfo* callback = AccessorInfo::cast(args[1]); + ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[1]); Address setter_address = v8::ToCData<Address>(callback->setter()); v8::AccessorSetter fun = FUNCTION_CAST<v8::AccessorSetter>(setter_address); ASSERT(fun != NULL); ASSERT(callback->IsCompatibleReceiver(recv)); - Handle<String> name = args.at<String>(2); + Handle<Name> name = args.at<Name>(2); Handle<Object> value = args.at<Object>(3); HandleScope scope(isolate); + + // TODO(rossberg): Support symbols in the API. + if (name->IsSymbol()) return *value; + Handle<String> str = Handle<String>::cast(name); + LOG(isolate, ApiNamedPropertyAccess("store", recv, *name)); CustomArguments custom_args(isolate, callback->data(), recv, recv); v8::AccessorInfo info(custom_args.end()); @@ -1028,7 +1041,7 @@ RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty) { // Leaving JavaScript. VMState state(isolate, EXTERNAL); ExternalCallbackScope call_scope(isolate, setter_address); - fun(v8::Utils::ToLocal(name), v8::Utils::ToLocal(value), info); + fun(v8::Utils::ToLocal(str), v8::Utils::ToLocal(value), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); return *value; @@ -1046,7 +1059,7 @@ static const int kAccessorInfoOffsetInInterceptorArgs = 2; * provide any value for the given name. */ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) { - Handle<String> name_handle = args.at<String>(0); + Handle<Name> name_handle = args.at<Name>(0); Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(1); ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2); ASSERT(args[2]->IsJSObject()); // Receiver. @@ -1054,6 +1067,11 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) { ASSERT(args[5]->IsSmi()); // Isolate. ASSERT(args.length() == 6); + // TODO(rossberg): Support symbols in the API. + if (name_handle->IsSymbol()) + return isolate->heap()->no_interceptor_result_sentinel(); + Handle<String> name = Handle<String>::cast(name_handle); + Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); v8::NamedPropertyGetter getter = FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address); @@ -1068,7 +1086,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) { { // Leaving JavaScript. VMState state(isolate, EXTERNAL); - r = getter(v8::Utils::ToLocal(name_handle), info); + r = getter(v8::Utils::ToLocal(name), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); if (!r.IsEmpty()) { @@ -1082,27 +1100,27 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorOnly) { } -static MaybeObject* ThrowReferenceError(String* name) { +static MaybeObject* ThrowReferenceError(Isolate* isolate, Name* name) { // If the load is non-contextual, just return the undefined result. // Note that both keyed and non-keyed loads may end up here, so we // can't use either LoadIC or KeyedLoadIC constructors. 
- IC ic(IC::NO_EXTRA_FRAME, Isolate::Current()); + IC ic(IC::NO_EXTRA_FRAME, isolate); ASSERT(ic.target()->is_load_stub() || ic.target()->is_keyed_load_stub()); - if (!ic.SlowIsContextual()) return HEAP->undefined_value(); + if (!ic.SlowIsUndeclaredGlobal()) return HEAP->undefined_value(); // Throw a reference error. - HandleScope scope; - Handle<String> name_handle(name); + HandleScope scope(isolate); + Handle<Name> name_handle(name); Handle<Object> error = FACTORY->NewReferenceError("not_defined", HandleVector(&name_handle, 1)); - return Isolate::Current()->Throw(*error); + return isolate->Throw(*error); } static MaybeObject* LoadWithInterceptor(Arguments* args, PropertyAttributes* attrs) { - Handle<String> name_handle = args->at<String>(0); + Handle<Name> name_handle = args->at<Name>(0); Handle<InterceptorInfo> interceptor_info = args->at<InterceptorInfo>(1); ASSERT(kAccessorInfoOffsetInInterceptorArgs == 2); Handle<JSObject> receiver_handle = args->at<JSObject>(2); @@ -1111,6 +1129,12 @@ static MaybeObject* LoadWithInterceptor(Arguments* args, Isolate* isolate = receiver_handle->GetIsolate(); + // TODO(rossberg): Support symbols in the API. + if (name_handle->IsSymbol()) + return holder_handle->GetPropertyPostInterceptor( + *receiver_handle, *name_handle, attrs); + Handle<String> name = Handle<String>::cast(name_handle); + Address getter_address = v8::ToCData<Address>(interceptor_info->getter()); v8::NamedPropertyGetter getter = FUNCTION_CAST<v8::NamedPropertyGetter>(getter_address); @@ -1125,7 +1149,7 @@ static MaybeObject* LoadWithInterceptor(Arguments* args, { // Leaving JavaScript. VMState state(isolate, EXTERNAL); - r = getter(v8::Utils::ToLocal(name_handle), info); + r = getter(v8::Utils::ToLocal(name), info); } RETURN_IF_SCHEDULED_EXCEPTION(isolate); if (!r.IsEmpty()) { @@ -1158,7 +1182,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForLoad) { // If the property is present, return it. if (attr != ABSENT) return result; - return ThrowReferenceError(String::cast(args[0])); + return ThrowReferenceError(isolate, Name::cast(args[0])); } @@ -1176,7 +1200,7 @@ RUNTIME_FUNCTION(MaybeObject*, LoadPropertyWithInterceptorForCall) { RUNTIME_FUNCTION(MaybeObject*, StoreInterceptorProperty) { ASSERT(args.length() == 4); JSObject* recv = JSObject::cast(args[0]); - String* name = String::cast(args[1]); + Name* name = Name::cast(args[1]); Object* value = args[2]; ASSERT(args.smi_at(3) == kStrictMode || args.smi_at(3) == kNonStrictMode); StrictModeFlag strict_mode = static_cast<StrictModeFlag>(args.smi_at(3)); @@ -1358,15 +1382,15 @@ Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags, Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags, - Handle<String> name) { - return (FLAG_print_code_stubs && !name.is_null()) - ? GetCodeWithFlags(flags, *name->ToCString()) + Handle<Name> name) { + return (FLAG_print_code_stubs && !name.is_null() && name->IsString()) + ? 
GetCodeWithFlags(flags, *Handle<String>::cast(name)->ToCString()) : GetCodeWithFlags(flags, reinterpret_cast<char*>(NULL)); } void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, LookupResult* lookup) { holder->LocalLookupRealNamedProperty(*name, lookup); if (lookup->IsFound()) return; @@ -1375,49 +1399,430 @@ void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder, } -Handle<Code> LoadStubCompiler::GetCode(Code::StubType type, - Handle<String> name) { - Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, type); - Handle<Code> code = GetCodeWithFlags(flags, name); - PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_IC_TAG, *code, *name)); +#define __ ACCESS_MASM(masm()) + + +Register BaseLoadStubCompiler::HandlerFrontendHeader(Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* miss) { + // Check the prototype chain. + return CheckPrototypes(object, object_reg, holder, + scratch1(), scratch2(), scratch3(), + name, miss, SKIP_RECEIVER); +} + + +Register BaseLoadStubCompiler::HandlerFrontend(Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success) { + Label miss; + + Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss); + + HandlerFrontendFooter(success, &miss); + return reg; +} + + +Handle<Code> BaseLoadStubCompiler::CompileLoadField(Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + PropertyIndex field) { + Label miss; + + Register reg = HandlerFrontendHeader(object, receiver(), holder, name, &miss); + + LoadFieldStub stub(reg, field.is_inobject(holder), field.translate(holder)); + GenerateTailCall(masm(), stub.GetCode(isolate())); + + __ bind(&miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + + // Return the generated code. + return GetCode(kind(), Code::FIELD, name); +} + + +// Load a fast property out of a holder object (src). In-object properties +// are loaded directly otherwise the property is loaded from the properties +// fixed array. +void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, + Register dst, + Register src, + Handle<JSObject> holder, + PropertyIndex index) { + DoGenerateFastPropertyLoad( + masm, dst, src, index.is_inobject(holder), index.translate(holder)); +} + + +Handle<Code> BaseLoadStubCompiler::CompileLoadConstant( + Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + Handle<JSFunction> value) { + Label success; + HandlerFrontend(object, receiver(), holder, name, &success); + __ bind(&success); + GenerateLoadConstant(value); + + // Return the generated code. + return GetCode(kind(), Code::CONSTANT_FUNCTION, name); +} + + +Handle<Code> BaseLoadStubCompiler::CompileLoadCallback( + Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + Handle<ExecutableAccessorInfo> callback) { + Label success; + + Register reg = CallbackHandlerFrontend( + object, receiver(), holder, name, &success, callback); + __ bind(&success); + GenerateLoadCallback(reg, callback); + + // Return the generated code. 
+ return GetCode(kind(), Code::CALLBACKS, name); +} + + +Handle<Code> BaseLoadStubCompiler::CompileLoadInterceptor( + Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name) { + Label success; + + LookupResult lookup(isolate()); + LookupPostInterceptor(holder, name, &lookup); + + Register reg = HandlerFrontend(object, receiver(), holder, name, &success); + __ bind(&success); + // TODO(368): Compile in the whole chain: all the interceptors in + // prototypes and ultimate answer. + GenerateLoadInterceptor(reg, object, holder, &lookup, name); + + // Return the generated code. + return GetCode(kind(), Code::INTERCEPTOR, name); +} + + +void BaseLoadStubCompiler::GenerateLoadPostInterceptor( + Register interceptor_reg, + Handle<JSObject> interceptor_holder, + Handle<Name> name, + LookupResult* lookup) { + Label success; + Handle<JSObject> holder(lookup->holder()); + if (lookup->IsField()) { + PropertyIndex field = lookup->GetFieldIndex(); + if (interceptor_holder.is_identical_to(holder)) { + LoadFieldStub stub(interceptor_reg, + field.is_inobject(holder), + field.translate(holder)); + GenerateTailCall(masm(), stub.GetCode(isolate())); + } else { + // We found FIELD property in prototype chain of interceptor's holder. + // Retrieve a field from field's holder. + Register reg = HandlerFrontend( + interceptor_holder, interceptor_reg, holder, name, &success); + __ bind(&success); + GenerateLoadField(reg, holder, field); + } + } else { + // We found CALLBACKS property in prototype chain of interceptor's + // holder. + ASSERT(lookup->type() == CALLBACKS); + Handle<ExecutableAccessorInfo> callback( + ExecutableAccessorInfo::cast(lookup->GetCallbackObject())); + ASSERT(callback->getter() != NULL); + + Register reg = CallbackHandlerFrontend( + interceptor_holder, interceptor_reg, holder, name, &success, callback); + __ bind(&success); + GenerateLoadCallback(reg, callback); + } +} + + +Handle<Code> BaseLoadStubCompiler::CompileMonomorphicIC( + Handle<Map> receiver_map, + Handle<Code> handler, + Handle<Name> name) { + MapHandleList receiver_maps(1); + receiver_maps.Add(receiver_map); + CodeHandleList handlers(1); + handlers.Add(handler); + Code::StubType type = handler->type(); + return CompilePolymorphicIC(&receiver_maps, &handlers, name, type, PROPERTY); +} + + +Handle<Code> LoadStubCompiler::CompileLoadViaGetter( + Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + Handle<JSFunction> getter) { + Label success; + HandlerFrontend(object, receiver(), holder, name, &success); + + __ bind(&success); + GenerateLoadViaGetter(masm(), getter); + + // Return the generated code. + return GetCode(kind(), Code::CALLBACKS, name); +} + + +Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object, + int index, + Handle<Map> transition, + Handle<Name> name) { + Label miss, miss_restore_name; + + GenerateNameCheck(name, this->name(), &miss); + + // Generate store field code. + GenerateStoreField(masm(), + object, + index, + transition, + name, + receiver(), this->name(), value(), scratch1(), scratch2(), + &miss, + &miss_restore_name); + + // Handle store cache miss. + GenerateRestoreName(masm(), &miss_restore_name, name); + __ bind(&miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + + // Return the generated code. + return GetICCode(kind(), + transition.is_null() ? 
Code::FIELD : Code::MAP_TRANSITION, + name); +} + + +Handle<Code> StoreStubCompiler::CompileStoreViaSetter( + Handle<Name> name, + Handle<JSObject> object, + Handle<JSObject> holder, + Handle<JSFunction> setter) { + Label miss, miss_restore_name; + + // Check that the maps haven't changed, preserving the name register. + __ JumpIfSmi(receiver(), &miss); + CheckPrototypes(object, receiver(), holder, + this->name(), scratch1(), scratch2(), + name, &miss_restore_name); + + GenerateStoreViaSetter(masm(), setter); + + GenerateRestoreName(masm(), &miss_restore_name, name); + + __ bind(&miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + + // Return the generated code. + return GetICCode(kind(), Code::CALLBACKS, name); +} + + +Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( + Handle<Map> receiver_map) { + ElementsKind elements_kind = receiver_map->elements_kind(); + if (receiver_map->has_fast_elements() || + receiver_map->has_external_array_elements()) { + Handle<Code> stub = KeyedLoadFastElementStub( + receiver_map->instance_type() == JS_ARRAY_TYPE, + elements_kind).GetCode(isolate()); + __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK); + } else { + Handle<Code> stub = + KeyedLoadDictionaryElementStub().GetCode(isolate()); + __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK); + } + + TailCallBuiltin(masm(), Builtins::kKeyedLoadIC_Miss); + + // Return the generated code. + return GetICCode(kind(), Code::NORMAL, factory()->empty_string()); +} + + +Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( + Handle<Map> receiver_map) { + ElementsKind elements_kind = receiver_map->elements_kind(); + bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE; + Handle<Code> stub = + KeyedStoreElementStub(is_jsarray, + elements_kind, + store_mode_).GetCode(isolate()); + + __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK); + + TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss); + + // Return the generated code. 
+ return GetICCode(kind(), Code::NORMAL, factory()->empty_string()); +} + + +#undef __ + + +void StubCompiler::TailCallBuiltin(MacroAssembler* masm, Builtins::Name name) { + Handle<Code> code(masm->isolate()->builtins()->builtin(name)); + GenerateTailCall(masm, code); +} + + +void LoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); +} + + +void KeyedLoadStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { + GDBJIT(AddCode(GDBJITInterface::KEYED_LOAD_IC, *name, *code)); +} + + +void StoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { + GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); +} + + +void KeyedStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) { + GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code)); +} + + +Handle<Code> BaseLoadStubCompiler::GetICCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name, + InlineCacheState state) { + Code::Flags flags = Code::ComputeFlags( + kind, state, Code::kNoExtraICState, type); + Handle<Code> code = GetCodeWithFlags(flags, name); + PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); + JitEvent(name, code); return code; } -Handle<Code> KeyedLoadStubCompiler::GetCode(Code::StubType type, - Handle<String> name, - InlineCacheState state) { +Handle<Code> BaseLoadStubCompiler::GetCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name) { + ASSERT(type != Code::NORMAL); Code::Flags flags = Code::ComputeFlags( - Code::KEYED_LOAD_IC, state, Code::kNoExtraICState, type); + Code::STUB, MONOMORPHIC, Code::kNoExtraICState, type, kind); Handle<Code> code = GetCodeWithFlags(flags, name); - PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_LOAD_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *name, *code)); + PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); + JitEvent(name, code); return code; } -Handle<Code> StoreStubCompiler::GetCode(Code::StubType type, - Handle<String> name) { - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::STORE_IC, type, strict_mode_); +Handle<Code> BaseStoreStubCompiler::GetICCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name, + InlineCacheState state) { + Code::Flags flags = Code::ComputeFlags( + kind, state, extra_state(), type); Handle<Code> code = GetCodeWithFlags(flags, name); - PROFILE(isolate(), CodeCreateEvent(Logger::STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::STORE_IC, *name, *code)); + PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); + JitEvent(name, code); return code; } -Handle<Code> KeyedStoreStubCompiler::GetCode(Code::StubType type, - Handle<String> name, - InlineCacheState state) { - Code::ExtraICState extra_state = - Code::ComputeExtraICState(grow_mode_, strict_mode_); - Code::Flags flags = - Code::ComputeFlags(Code::KEYED_STORE_IC, state, extra_state, type); +Handle<Code> BaseStoreStubCompiler::GetCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name) { + ASSERT(type != Code::NORMAL); + Code::Flags flags = Code::ComputeFlags( + Code::STUB, MONOMORPHIC, extra_state(), type, kind); Handle<Code> code = GetCodeWithFlags(flags, name); - PROFILE(isolate(), CodeCreateEvent(Logger::KEYED_STORE_IC_TAG, *code, *name)); - GDBJIT(AddCode(GDBJITInterface::KEYED_STORE_IC, *name, *code)); + PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name)); + JitEvent(name, code); + return code; +} + + +void 
KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps, + CodeHandleList* handlers) { + for (int i = 0; i < receiver_maps->length(); ++i) { + Handle<Map> receiver_map = receiver_maps->at(i); + Handle<Code> cached_stub; + + if ((receiver_map->instance_type() & kNotStringTag) == 0) { + cached_stub = isolate()->builtins()->KeyedLoadIC_String(); + } else { + bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; + ElementsKind elements_kind = receiver_map->elements_kind(); + + if (IsFastElementsKind(elements_kind) || + IsExternalArrayElementsKind(elements_kind)) { + cached_stub = + KeyedLoadFastElementStub(is_js_array, + elements_kind).GetCode(isolate()); + } else { + ASSERT(elements_kind == DICTIONARY_ELEMENTS); + cached_stub = KeyedLoadDictionaryElementStub().GetCode(isolate()); + } + } + + handlers->Add(cached_stub); + } +} + + +Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic( + MapHandleList* receiver_maps) { + // Collect MONOMORPHIC stubs for all |receiver_maps|. + CodeHandleList handlers(receiver_maps->length()); + MapHandleList transitioned_maps(receiver_maps->length()); + for (int i = 0; i < receiver_maps->length(); ++i) { + Handle<Map> receiver_map(receiver_maps->at(i)); + Handle<Code> cached_stub; + Handle<Map> transitioned_map = + receiver_map->FindTransitionedMap(receiver_maps); + + // TODO(mvstanton): The code below is doing pessimistic elements + // transitions. I would like to stop doing that and rely on Allocation Site + // Tracking to do a better job of ensuring the data types are what they need + // to be. Not all the elements are in place yet, pessimistic elements + // transitions are still important for performance. + bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; + ElementsKind elements_kind = receiver_map->elements_kind(); + if (!transitioned_map.is_null()) { + cached_stub = ElementsTransitionAndStoreStub( + elements_kind, + transitioned_map->elements_kind(), + is_js_array, + strict_mode(), + store_mode_).GetCode(isolate()); + } else { + cached_stub = KeyedStoreElementStub( + is_js_array, + elements_kind, + store_mode_).GetCode(isolate()); + } + ASSERT(!cached_stub.is_null()); + handlers.Add(cached_stub); + transitioned_maps.Add(transitioned_map); + } + Handle<Code> code = + CompileStorePolymorphic(receiver_maps, &handlers, &transitioned_maps); + isolate()->counters()->keyed_store_polymorphic_stubs()->Increment(); + PROFILE(isolate(), + CodeCreateEvent(Logger::KEYED_STORE_POLYMORPHIC_IC_TAG, *code, 0)); return code; } @@ -1487,13 +1892,13 @@ Handle<Code> CallStubCompiler::CompileCustomCall( Handle<Code> CallStubCompiler::GetCode(Code::StubType type, - Handle<String> name) { + Handle<Name> name) { int argc = arguments_.immediate(); Code::Flags flags = Code::ComputeMonomorphicFlags(kind_, - type, extra_state_, - cache_holder_, - argc); + type, + argc, + cache_holder_); return GetCodeWithFlags(flags, name); } @@ -1541,6 +1946,7 @@ int CallOptimization::GetPrototypeDepthOfExpectedType( while (!object.is_identical_to(holder)) { if (object->IsInstanceOf(*expected_receiver_type_)) return depth; object = Handle<JSObject>(JSObject::cast(object->GetPrototype())); + if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth; ++depth; } if (holder->IsInstanceOf(*expected_receiver_type_)) return depth; diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h index 005c537ab1..7c455664c4 100644 --- a/deps/v8/src/stub-cache.h +++ b/deps/v8/src/stub-cache.h @@ -67,138 +67,155 @@ class 
SCTableReference { class StubCache { public: struct Entry { - String* key; + Name* key; Code* value; Map* map; }; void Initialize(); + Handle<JSObject> StubHolder(Handle<JSObject> receiver, + Handle<JSObject> holder); + + Handle<Code> FindIC(Handle<Name> name, + Handle<JSObject> stub_holder, + Code::Kind kind, + Code::StubType type, + Code::ExtraICState extra_state = Code::kNoExtraICState); + + Handle<Code> FindHandler( + Handle<Name> name, + Handle<JSObject> stub_holder, + Code::Kind kind, + Code::StubType type, + Code::ExtraICState extra_state = Code::kNoExtraICState); + + Handle<Code> ComputeMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<Name> name); + Handle<Code> ComputeKeyedMonomorphicIC(Handle<JSObject> receiver, + Handle<Code> handler, + Handle<Name> name); // Computes the right stub matching. Inserts the result in the // cache before returning. This might compile a stub if needed. - Handle<Code> ComputeLoadNonexistent(Handle<String> name, - Handle<JSObject> receiver); + Handle<Code> ComputeLoadNonexistent(Handle<Name> name, + Handle<JSObject> object); - Handle<Code> ComputeLoadField(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeLoadField(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - int field_index); + PropertyIndex field_index); - Handle<Code> ComputeLoadCallback(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeLoadCallback(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - Handle<AccessorInfo> callback); + Handle<ExecutableAccessorInfo> callback); - Handle<Code> ComputeLoadViaGetter(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeLoadViaGetter(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, Handle<JSFunction> getter); - Handle<Code> ComputeLoadConstant(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeLoadConstant(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, Handle<JSFunction> value); - Handle<Code> ComputeLoadInterceptor(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeLoadInterceptor(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder); - Handle<Code> ComputeLoadNormal(); + Handle<Code> ComputeLoadNormal(Handle<Name> name, + Handle<JSObject> object); - Handle<Code> ComputeLoadGlobal(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeLoadGlobal(Handle<Name> name, + Handle<JSObject> object, Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, bool is_dont_delete); // --- - Handle<Code> ComputeKeyedLoadField(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeKeyedLoadField(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - int field_index); + PropertyIndex field_index); - Handle<Code> ComputeKeyedLoadCallback(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<AccessorInfo> callback); + Handle<Code> ComputeKeyedLoadCallback( + Handle<Name> name, + Handle<JSObject> object, + Handle<JSObject> holder, + Handle<ExecutableAccessorInfo> callback); - Handle<Code> ComputeKeyedLoadConstant(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeKeyedLoadConstant(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, Handle<JSFunction> value); - Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeKeyedLoadInterceptor(Handle<Name> name, 
+ Handle<JSObject> object, Handle<JSObject> holder); - Handle<Code> ComputeKeyedLoadArrayLength(Handle<String> name, - Handle<JSArray> receiver); - - Handle<Code> ComputeKeyedLoadStringLength(Handle<String> name, - Handle<String> receiver); - - Handle<Code> ComputeKeyedLoadFunctionPrototype(Handle<String> name, - Handle<JSFunction> receiver); - // --- - Handle<Code> ComputeStoreField(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeStoreField(Handle<Name> name, + Handle<JSObject> object, int field_index, Handle<Map> transition, StrictModeFlag strict_mode); Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode); - Handle<Code> ComputeStoreGlobal(Handle<String> name, - Handle<GlobalObject> receiver, + Handle<Code> ComputeStoreGlobal(Handle<Name> name, + Handle<GlobalObject> object, Handle<JSGlobalPropertyCell> cell, StrictModeFlag strict_mode); - Handle<Code> ComputeStoreCallback(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeStoreCallback(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - Handle<AccessorInfo> callback, + Handle<ExecutableAccessorInfo> callback, StrictModeFlag strict_mode); - Handle<Code> ComputeStoreViaSetter(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeStoreViaSetter(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, Handle<JSFunction> setter, StrictModeFlag strict_mode); - Handle<Code> ComputeStoreInterceptor(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeStoreInterceptor(Handle<Name> name, + Handle<JSObject> object, StrictModeFlag strict_mode); // --- - Handle<Code> ComputeKeyedStoreField(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> ComputeKeyedStoreField(Handle<Name> name, + Handle<JSObject> object, int field_index, Handle<Map> transition, StrictModeFlag strict_mode); - Handle<Code> ComputeKeyedLoadOrStoreElement(Handle<Map> receiver_map, - KeyedIC::StubKind stub_kind, - StrictModeFlag strict_mode); + Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map); + + Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map, + StrictModeFlag strict_mode, + KeyedAccessStoreMode store_mode); // --- Handle<Code> ComputeCallField(int argc, Code::Kind, Code::ExtraICState extra_state, - Handle<String> name, + Handle<Name> name, Handle<Object> object, Handle<JSObject> holder, - int index); + PropertyIndex index); Handle<Code> ComputeCallConstant(int argc, Code::Kind, Code::ExtraICState extra_state, - Handle<String> name, + Handle<Name> name, Handle<Object> object, Handle<JSObject> holder, Handle<JSFunction> function); @@ -206,15 +223,15 @@ class StubCache { Handle<Code> ComputeCallInterceptor(int argc, Code::Kind, Code::ExtraICState extra_state, - Handle<String> name, + Handle<Name> name, Handle<Object> object, Handle<JSObject> holder); Handle<Code> ComputeCallGlobal(int argc, Code::Kind, Code::ExtraICState extra_state, - Handle<String> name, - Handle<JSObject> receiver, + Handle<Name> name, + Handle<JSObject> object, Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, Handle<JSFunction> function); @@ -233,7 +250,7 @@ class StubCache { Code::Kind kind, Code::ExtraICState state); - Handle<Code> ComputeCallArguments(int argc, Code::Kind kind); + Handle<Code> ComputeCallArguments(int argc); Handle<Code> ComputeCallMegamorphic(int argc, Code::Kind kind, @@ -243,6 +260,17 @@ class StubCache { Code::Kind kind, Code::ExtraICState state); + // --- + + Handle<Code> 
ComputeLoadElementPolymorphic(MapHandleList* receiver_maps); + Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps, + KeyedAccessStoreMode store_mode, + StrictModeFlag strict_mode); + + Handle<Code> ComputePolymorphicIC(MapHandleList* receiver_maps, + CodeHandleList* handlers, + Handle<Name> name); + // Finds the Code object stored in the Heap::non_monomorphic_cache(). Code* FindCallInitialize(int argc, RelocInfo::Mode mode, Code::Kind kind); @@ -253,14 +281,14 @@ class StubCache { #endif // Update cache for entry hash(name, map). - Code* Set(String* name, Map* map, Code* code); + Code* Set(Name* name, Map* map, Code* code); // Clear the lookup table (@ mark compact collection). void Clear(); // Collect all maps that match the name and flags. void CollectMatchingMaps(SmallMapList* types, - String* name, + Name* name, Code::Flags flags, Handle<Context> native_context, Zone* zone); @@ -331,12 +359,12 @@ class StubCache { // Hash algorithm for the primary table. This algorithm is replicated in // assembler for every architecture. Returns an index into the table that // is scaled by 1 << kHeapObjectTagSize. - static int PrimaryOffset(String* name, Code::Flags flags, Map* map) { + static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) { // This works well because the heap object tag size and the hash // shift are equal. Shifting down the length field to get the // hash code would effectively throw away two bits of the hash // code. - STATIC_ASSERT(kHeapObjectTagSize == String::kHashShift); + STATIC_ASSERT(kHeapObjectTagSize == Name::kHashShift); // Compute the hash of the name (use entire hash field). ASSERT(name->HasHashCode()); uint32_t field = name->hash_field(); @@ -357,25 +385,25 @@ class StubCache { // Hash algorithm for the secondary table. This algorithm is replicated in // assembler for every architecture. Returns an index into the table that // is scaled by 1 << kHeapObjectTagSize. - static int SecondaryOffset(String* name, Code::Flags flags, int seed) { + static int SecondaryOffset(Name* name, Code::Flags flags, int seed) { // Use the seed from the primary cache in the secondary cache. - uint32_t string_low32bits = + uint32_t name_low32bits = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)); // We always set the in_loop bit to zero when generating the lookup code // so do it here too so the hash codes match. uint32_t iflags = (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup); - uint32_t key = (seed - string_low32bits) + iflags; + uint32_t key = (seed - name_low32bits) + iflags; return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize); } // Compute the entry for a given offset in exactly the same way as // we do in generated code. We generate an hash code that already - // ends in String::kHashShift 0s. Then we multiply it so it is a multiple + // ends in Name::kHashShift 0s. Then we multiply it so it is a multiple // of sizeof(Entry). This makes it easier to avoid making mistakes // in the hashed offset computations. static Entry* entry(Entry* table, int offset) { - const int multiplier = sizeof(*table) >> String::kHashShift; + const int multiplier = sizeof(*table) >> Name::kHashShift; return reinterpret_cast<Entry*>( reinterpret_cast<Address>(table) + offset * multiplier); } @@ -400,7 +428,6 @@ class StubCache { // Support functions for IC stubs for callbacks. 
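[Editor's note] The PrimaryOffset/SecondaryOffset pair above implements the stub cache's two-way probing: the primary slot mixes the name's hash field, the receiver map and the (masked) code flags, and a miss there falls back to a secondary slot derived from the primary seed. Below is a minimal, self-contained sketch of that probing scheme; the table sizes, placeholder pointer values and flag constants are assumptions for illustration only, and the scaling by 1 << kHeapObjectTagSize used in the real code is omitted.

#include <cstdint>
#include <cstdio>

static const int kPrimaryTableSize = 2048;    // assumed size, power of two
static const int kSecondaryTableSize = 512;   // assumed size, power of two

// Primary probe: combine the name's hash with the map address and the flags,
// then wrap the result into the primary table.
static int PrimaryOffset(uint32_t name_hash, uint32_t flags, uintptr_t map) {
  uint32_t map_low32bits = static_cast<uint32_t>(map);
  uint32_t key = (map_low32bits + name_hash) ^ flags;
  return static_cast<int>(key & (kPrimaryTableSize - 1));
}

// Secondary probe: reuse the primary offset as a seed so an entry evicted
// from its primary slot lands at a deterministic secondary slot.
static int SecondaryOffset(uintptr_t name, uint32_t flags, int seed) {
  uint32_t name_low32bits = static_cast<uint32_t>(name);
  uint32_t key = (static_cast<uint32_t>(seed) - name_low32bits) + flags;
  return static_cast<int>(key & (kSecondaryTableSize - 1));
}

int main() {
  uint32_t name_hash = 0x5a5a1234;           // pretend Name::hash_field()
  uint32_t flags = 0x00000040;               // pretend Code::Flags
  uintptr_t map = 0x1000, name = 0x2000;     // pretend heap addresses
  int primary = PrimaryOffset(name_hash, flags, map);
  int secondary = SecondaryOffset(name, flags, primary);
  std::printf("primary slot %d, secondary slot %d\n", primary, secondary);
  return 0;
}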
-DECLARE_RUNTIME_FUNCTION(MaybeObject*, LoadCallbackProperty); DECLARE_RUNTIME_FUNCTION(MaybeObject*, StoreCallbackProperty); @@ -413,6 +440,10 @@ DECLARE_RUNTIME_FUNCTION(MaybeObject*, CallInterceptorProperty); DECLARE_RUNTIME_FUNCTION(MaybeObject*, KeyedLoadPropertyWithInterceptor); +enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER }; +enum IcCheckType { ELEMENT, PROPERTY }; + + // The stub compilers compile stubs for the stub cache. class StubCompiler BASE_EMBEDDED { public: @@ -453,7 +484,12 @@ class StubCompiler BASE_EMBEDDED { Register dst, Register src, Handle<JSObject> holder, - int index); + PropertyIndex index); + static void DoGenerateFastPropertyLoad(MacroAssembler* masm, + Register dst, + Register src, + bool inobject, + int index); static void GenerateLoadArrayLength(MacroAssembler* masm, Register receiver, @@ -477,17 +513,26 @@ class StubCompiler BASE_EMBEDDED { Handle<JSObject> object, int index, Handle<Map> transition, - Handle<String> name, + Handle<Name> name, Register receiver_reg, Register name_reg, + Register value_reg, Register scratch1, Register scratch2, - Label* miss_label); - - static void GenerateLoadMiss(MacroAssembler* masm, - Code::Kind kind); - - static void GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm); + Label* miss_label, + Label* miss_restore_name); + + static Builtins::Name MissBuiltin(Code::Kind kind) { + switch (kind) { + case Code::LOAD_IC: return Builtins::kLoadIC_Miss; + case Code::STORE_IC: return Builtins::kStoreIC_Miss; + case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss; + case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss; + default: UNREACHABLE(); + } + return Builtins::kLoadIC_Miss; + } + static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name); // Generates code that verifies that the property holder has not changed // (checking maps of objects in the prototype chain for fast and global @@ -510,10 +555,11 @@ class StubCompiler BASE_EMBEDDED { Register holder_reg, Register scratch1, Register scratch2, - Handle<String> name, - Label* miss) { + Handle<Name> name, + Label* miss, + PrototypeCheckType check = CHECK_ALL_MAPS) { return CheckPrototypes(object, object_reg, holder, holder_reg, scratch1, - scratch2, name, kInvalidProtoDepth, miss); + scratch2, name, kInvalidProtoDepth, miss, check); } Register CheckPrototypes(Handle<JSObject> object, @@ -522,78 +568,29 @@ class StubCompiler BASE_EMBEDDED { Register holder_reg, Register scratch1, Register scratch2, - Handle<String> name, + Handle<Name> name, int save_at_depth, - Label* miss); + Label* miss, + PrototypeCheckType check = CHECK_ALL_MAPS); protected: Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name); - Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<String> name); + Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name); MacroAssembler* masm() { return &masm_; } void set_failure(Failure* failure) { failure_ = failure; } - void GenerateLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - int index, - Handle<String> name, - Label* miss); - - void GenerateLoadCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss); - - void GenerateDictionaryLoadCallback(Register receiver, - Register name_reg, - Register 
scratch1, - Register scratch2, - Register scratch3, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss); - - void GenerateLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<JSFunction> value, - Handle<String> name, - Label* miss); - - void GenerateLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> holder, - LookupResult* lookup, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<String> name, - Label* miss); - static void LookupPostInterceptor(Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, LookupResult* lookup); Isolate* isolate() { return isolate_; } Heap* heap() { return isolate()->heap(); } Factory* factory() { return isolate()->factory(); } + static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code); + private: Isolate* isolate_; MacroAssembler masm_; @@ -601,153 +598,274 @@ class StubCompiler BASE_EMBEDDED { }; -class LoadStubCompiler: public StubCompiler { - public: - explicit LoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { } +enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS }; - Handle<Code> CompileLoadNonexistent(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> last); + +class BaseLoadStubCompiler: public StubCompiler { + public: + BaseLoadStubCompiler(Isolate* isolate, Register* registers) + : StubCompiler(isolate), registers_(registers) { } + virtual ~BaseLoadStubCompiler() { } Handle<Code> CompileLoadField(Handle<JSObject> object, Handle<JSObject> holder, - int index, - Handle<String> name); + Handle<Name> name, + PropertyIndex index); - Handle<Code> CompileLoadCallback(Handle<String> name, - Handle<JSObject> object, + Handle<Code> CompileLoadCallback(Handle<JSObject> object, Handle<JSObject> holder, - Handle<AccessorInfo> callback); - - static void GenerateLoadViaGetter(MacroAssembler* masm, - Handle<JSFunction> getter); - - Handle<Code> CompileLoadViaGetter(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> getter); + Handle<Name> name, + Handle<ExecutableAccessorInfo> callback); Handle<Code> CompileLoadConstant(Handle<JSObject> object, Handle<JSObject> holder, - Handle<JSFunction> value, - Handle<String> name); + Handle<Name> name, + Handle<JSFunction> value); Handle<Code> CompileLoadInterceptor(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name); + Handle<Name> name); - Handle<Code> CompileLoadGlobal(Handle<JSObject> object, - Handle<GlobalObject> holder, - Handle<JSGlobalPropertyCell> cell, - Handle<String> name, - bool is_dont_delete); + Handle<Code> CompileMonomorphicIC(Handle<Map> receiver_map, + Handle<Code> handler, + Handle<Name> name); + Handle<Code> CompilePolymorphicIC(MapHandleList* receiver_maps, + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check); + + protected: + Register HandlerFrontendHeader(Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success); + void HandlerFrontendFooter(Label* success, Label* miss); + + Register HandlerFrontend(Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success); + Register CallbackHandlerFrontend(Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success, + 
Handle<ExecutableAccessorInfo> callback); + void NonexistentHandlerFrontend(Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Label* success, + Handle<GlobalObject> global); + + void GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex index); + void GenerateLoadConstant(Handle<JSFunction> value); + void GenerateLoadCallback(Register reg, + Handle<ExecutableAccessorInfo> callback); + void GenerateLoadInterceptor(Register holder_reg, + Handle<JSObject> object, + Handle<JSObject> holder, + LookupResult* lookup, + Handle<Name> name); + void GenerateLoadPostInterceptor(Register reg, + Handle<JSObject> interceptor_holder, + Handle<Name> name, + LookupResult* lookup); + + Handle<Code> GetICCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name, + InlineCacheState state = MONOMORPHIC); + + Handle<Code> GetCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name); + + Register receiver() { return registers_[0]; } + Register name() { return registers_[1]; } + Register scratch1() { return registers_[2]; } + Register scratch2() { return registers_[3]; } + Register scratch3() { return registers_[4]; } + Register scratch4() { return registers_[5]; } private: - Handle<Code> GetCode(Code::StubType type, Handle<String> name); + virtual Code::Kind kind() = 0; + virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0; + virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0; + virtual void GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { } + Register* registers_; }; -class KeyedLoadStubCompiler: public StubCompiler { +class LoadStubCompiler: public BaseLoadStubCompiler { public: - explicit KeyedLoadStubCompiler(Isolate* isolate) : StubCompiler(isolate) { } + explicit LoadStubCompiler(Isolate* isolate) + : BaseLoadStubCompiler(isolate, registers()) { } - Handle<Code> CompileLoadField(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> holder, - int index); + Handle<Code> CompileLoadNonexistent(Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Handle<GlobalObject> global); - Handle<Code> CompileLoadCallback(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<AccessorInfo> callback); - - Handle<Code> CompileLoadConstant(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<JSFunction> value); - - Handle<Code> CompileLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<String> name); + static void GenerateLoadViaGetter(MacroAssembler* masm, + Handle<JSFunction> getter); - Handle<Code> CompileLoadArrayLength(Handle<String> name); + Handle<Code> CompileLoadViaGetter(Handle<JSObject> object, + Handle<JSObject> holder, + Handle<Name> name, + Handle<JSFunction> getter); - Handle<Code> CompileLoadStringLength(Handle<String> name); + Handle<Code> CompileLoadGlobal(Handle<JSObject> object, + Handle<GlobalObject> holder, + Handle<JSGlobalPropertyCell> cell, + Handle<Name> name, + bool is_dont_delete); - Handle<Code> CompileLoadFunctionPrototype(Handle<String> name); + static Register receiver() { return registers()[0]; } - Handle<Code> CompileLoadElement(Handle<Map> receiver_map); + private: + static Register* registers(); + virtual Code::Kind kind() { return Code::LOAD_IC; } + virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { + if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; + return code->ic_state() == MONOMORPHIC + ? 
Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG; + } + virtual void JitEvent(Handle<Name> name, Handle<Code> code); +}; - Handle<Code> CompileLoadPolymorphic(MapHandleList* receiver_maps, - CodeHandleList* handler_ics); - static void GenerateLoadExternalArray(MacroAssembler* masm, - ElementsKind elements_kind); +class KeyedLoadStubCompiler: public BaseLoadStubCompiler { + public: + explicit KeyedLoadStubCompiler(Isolate* isolate) + : BaseLoadStubCompiler(isolate, registers()) { } - static void GenerateLoadFastElement(MacroAssembler* masm); + Handle<Code> CompileLoadElement(Handle<Map> receiver_map); - static void GenerateLoadFastDoubleElement(MacroAssembler* masm); + void CompileElementHandlers(MapHandleList* receiver_maps, + CodeHandleList* handlers); static void GenerateLoadDictionaryElement(MacroAssembler* masm); + static Register receiver() { return registers()[0]; } + private: - Handle<Code> GetCode(Code::StubType type, - Handle<String> name, - InlineCacheState state = MONOMORPHIC); + static Register* registers(); + virtual Code::Kind kind() { return Code::KEYED_LOAD_IC; } + virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { + if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; + return code->ic_state() == MONOMORPHIC + ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG; + } + virtual void JitEvent(Handle<Name> name, Handle<Code> code); + virtual void GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss); }; -class StoreStubCompiler: public StubCompiler { +class BaseStoreStubCompiler: public StubCompiler { public: - StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode) - : StubCompiler(isolate), strict_mode_(strict_mode) { } + BaseStoreStubCompiler(Isolate* isolate, + StrictModeFlag strict_mode, + Register* registers) + : StubCompiler(isolate), + strict_mode_(strict_mode), + registers_(registers) { } + virtual ~BaseStoreStubCompiler() { } Handle<Code> CompileStoreField(Handle<JSObject> object, int index, Handle<Map> transition, - Handle<String> name); + Handle<Name> name); + + protected: + Handle<Code> GetICCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name, + InlineCacheState state = MONOMORPHIC); + + Handle<Code> GetCode(Code::Kind kind, + Code::StubType type, + Handle<Name> name); + + void GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name); + + Register receiver() { return registers_[0]; } + Register name() { return registers_[1]; } + Register value() { return registers_[2]; } + Register scratch1() { return registers_[3]; } + Register scratch2() { return registers_[4]; } + Register scratch3() { return registers_[5]; } + StrictModeFlag strict_mode() { return strict_mode_; } + virtual Code::ExtraICState extra_state() { return strict_mode_; } + + private: + virtual Code::Kind kind() = 0; + virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) = 0; + virtual void JitEvent(Handle<Name> name, Handle<Code> code) = 0; + virtual void GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { } + StrictModeFlag strict_mode_; + Register* registers_; +}; + + +class StoreStubCompiler: public BaseStoreStubCompiler { + public: + StoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode) + : BaseStoreStubCompiler(isolate, strict_mode, registers()) { } - Handle<Code> CompileStoreCallback(Handle<String> name, - Handle<JSObject> receiver, + + Handle<Code> CompileStoreCallback(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - 
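[Editor's note] The new Base{Load,Store}StubCompiler classes above stop hard-coding machine registers and instead index a per-platform registers_ array by role (receiver, name, scratch1, ...), with each platform's registers() function supplying the concrete layout. A rough, self-contained sketch of that pattern follows; the register numbers are made up and do not correspond to any real calling convention.

#include <cstdio>

typedef int Register;  // stand-in for V8's Register type

class BaseLoadSketch {
 public:
  explicit BaseLoadSketch(Register* registers) : registers_(registers) {}
  // Role-based accessors shared by every platform.
  Register receiver() { return registers_[0]; }
  Register name()     { return registers_[1]; }
  Register scratch1() { return registers_[2]; }
 private:
  Register* registers_;
};

// A platform-specific backend supplies the concrete assignment.
static Register* LoadRegisters() {
  static Register regs[] = { 0, 2, 3 };  // illustrative only
  return regs;
}

int main() {
  BaseLoadSketch compiler(LoadRegisters());
  std::printf("receiver=r%d, name=r%d, scratch1=r%d\n",
              compiler.receiver(), compiler.name(), compiler.scratch1());
  return 0;
}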
Handle<AccessorInfo> callback); + Handle<ExecutableAccessorInfo> callback); static void GenerateStoreViaSetter(MacroAssembler* masm, Handle<JSFunction> setter); - Handle<Code> CompileStoreViaSetter(Handle<String> name, - Handle<JSObject> receiver, + Handle<Code> CompileStoreViaSetter(Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, Handle<JSFunction> setter); Handle<Code> CompileStoreInterceptor(Handle<JSObject> object, - Handle<String> name); + Handle<Name> name); Handle<Code> CompileStoreGlobal(Handle<GlobalObject> object, Handle<JSGlobalPropertyCell> holder, - Handle<String> name); + Handle<Name> name); private: - Handle<Code> GetCode(Code::StubType type, Handle<String> name); - - StrictModeFlag strict_mode_; + static Register* registers(); + virtual Code::Kind kind() { return Code::STORE_IC; } + virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { + if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; + return code->ic_state() == MONOMORPHIC + ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG; + } + virtual void JitEvent(Handle<Name> name, Handle<Code> code); }; -class KeyedStoreStubCompiler: public StubCompiler { +class KeyedStoreStubCompiler: public BaseStoreStubCompiler { public: KeyedStoreStubCompiler(Isolate* isolate, StrictModeFlag strict_mode, - KeyedAccessGrowMode grow_mode) - : StubCompiler(isolate), - strict_mode_(strict_mode), - grow_mode_(grow_mode) { } - - Handle<Code> CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name); + KeyedAccessStoreMode store_mode) + : BaseStoreStubCompiler(isolate, strict_mode, registers()), + store_mode_(store_mode) { } Handle<Code> CompileStoreElement(Handle<Map> receiver_map); @@ -755,27 +873,44 @@ class KeyedStoreStubCompiler: public StubCompiler { CodeHandleList* handler_stubs, MapHandleList* transitioned_maps); + Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps); + static void GenerateStoreFastElement(MacroAssembler* masm, bool is_js_array, ElementsKind element_kind, - KeyedAccessGrowMode grow_mode); + KeyedAccessStoreMode store_mode); static void GenerateStoreFastDoubleElement(MacroAssembler* masm, bool is_js_array, - KeyedAccessGrowMode grow_mode); + KeyedAccessStoreMode store_mode); static void GenerateStoreExternalArray(MacroAssembler* masm, ElementsKind elements_kind); static void GenerateStoreDictionaryElement(MacroAssembler* masm); + protected: + virtual Code::ExtraICState extra_state() { + return Code::ComputeExtraICState(store_mode_, strict_mode()); + } + private: - Handle<Code> GetCode(Code::StubType type, - Handle<String> name, - InlineCacheState state = MONOMORPHIC); + Register transition_map() { + return registers()[3]; + } - StrictModeFlag strict_mode_; - KeyedAccessGrowMode grow_mode_; + static Register* registers(); + virtual Code::Kind kind() { return Code::KEYED_STORE_IC; } + virtual Logger::LogEventsAndTags log_kind(Handle<Code> code) { + if (!code->is_inline_cache_stub()) return Logger::STUB_TAG; + return code->ic_state() == MONOMORPHIC + ? 
Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG; + } + virtual void JitEvent(Handle<Name> name, Handle<Code> code); + virtual void GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss); + KeyedAccessStoreMode store_mode_; }; @@ -803,24 +938,32 @@ class CallStubCompiler: public StubCompiler { Handle<Code> CompileCallField(Handle<JSObject> object, Handle<JSObject> holder, - int index, - Handle<String> name); + PropertyIndex index, + Handle<Name> name); + + void CompileHandlerFrontend(Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Label* success); + + void CompileHandlerBackend(Handle<JSFunction> function); Handle<Code> CompileCallConstant(Handle<Object> object, Handle<JSObject> holder, - Handle<JSFunction> function, - Handle<String> name, - CheckType check); + Handle<Name> name, + CheckType check, + Handle<JSFunction> function); Handle<Code> CompileCallInterceptor(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name); + Handle<Name> name); Handle<Code> CompileCallGlobal(Handle<JSObject> object, Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, Handle<JSFunction> function, - Handle<String> name); + Handle<Name> name); static bool HasCustomCallGenerator(Handle<JSFunction> function); @@ -850,16 +993,16 @@ class CallStubCompiler: public StubCompiler { Handle<JSFunction> function, Handle<String> name); - Handle<Code> GetCode(Code::StubType type, Handle<String> name); + Handle<Code> GetCode(Code::StubType type, Handle<Name> name); Handle<Code> GetCode(Handle<JSFunction> function); const ParameterCount& arguments() { return arguments_; } - void GenerateNameCheck(Handle<String> name, Label* miss); + void GenerateNameCheck(Handle<Name> name, Label* miss); void GenerateGlobalReceiverCheck(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Label* miss); // Generates code to load the function from the cell checking that diff --git a/deps/v8/src/sweeper-thread.cc b/deps/v8/src/sweeper-thread.cc new file mode 100644 index 0000000000..f08fcfbc6f --- /dev/null +++ b/deps/v8/src/sweeper-thread.cc @@ -0,0 +1,103 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "sweeper-thread.h" + +#include "v8.h" + +#include "isolate.h" +#include "v8threads.h" + +namespace v8 { +namespace internal { + +static const int kSweeperThreadStackSize = 64 * KB; + +SweeperThread::SweeperThread(Isolate* isolate) + : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)), + isolate_(isolate), + heap_(isolate->heap()), + collector_(heap_->mark_compact_collector()), + start_sweeping_semaphore_(OS::CreateSemaphore(0)), + end_sweeping_semaphore_(OS::CreateSemaphore(0)), + stop_semaphore_(OS::CreateSemaphore(0)), + free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)), + free_list_old_pointer_space_(heap_->paged_space(OLD_POINTER_SPACE)), + private_free_list_old_data_space_(heap_->paged_space(OLD_DATA_SPACE)), + private_free_list_old_pointer_space_( + heap_->paged_space(OLD_POINTER_SPACE)) { + NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false)); +} + + +void SweeperThread::Run() { + Isolate::SetIsolateThreadLocals(isolate_, NULL); + while (true) { + start_sweeping_semaphore_->Wait(); + + if (Acquire_Load(&stop_thread_)) { + stop_semaphore_->Signal(); + return; + } + + collector_->SweepInParallel(heap_->old_data_space(), + &private_free_list_old_data_space_, + &free_list_old_data_space_); + collector_->SweepInParallel(heap_->old_pointer_space(), + &private_free_list_old_pointer_space_, + &free_list_old_pointer_space_); + end_sweeping_semaphore_->Signal(); + } +} + + +intptr_t SweeperThread::StealMemory(PagedSpace* space) { + if (space->identity() == OLD_POINTER_SPACE) { + return space->free_list()->Concatenate(&free_list_old_pointer_space_); + } else if (space->identity() == OLD_DATA_SPACE) { + return space->free_list()->Concatenate(&free_list_old_data_space_); + } + return 0; +} + + +void SweeperThread::Stop() { + Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); + start_sweeping_semaphore_->Signal(); + stop_semaphore_->Wait(); +} + + +void SweeperThread::StartSweeping() { + start_sweeping_semaphore_->Signal(); +} + + +void SweeperThread::WaitForSweeperThread() { + end_sweeping_semaphore_->Wait(); +} +} } // namespace v8::internal diff --git a/deps/v8/src/sweeper-thread.h b/deps/v8/src/sweeper-thread.h new file mode 100644 index 0000000000..a170982141 --- /dev/null +++ b/deps/v8/src/sweeper-thread.h @@ -0,0 +1,75 @@ +// Copyright 2012 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#ifndef V8_SWEEPER_THREAD_H_ +#define V8_SWEEPER_THREAD_H_ + +#include "atomicops.h" +#include "flags.h" +#include "platform.h" +#include "v8utils.h" + +#include "spaces.h" + +#include "heap.h" + +namespace v8 { +namespace internal { + +class SweeperThread : public Thread { + public: + explicit SweeperThread(Isolate* isolate); + + void Run(); + void Stop(); + void StartSweeping(); + void WaitForSweeperThread(); + intptr_t StealMemory(PagedSpace* space); + + ~SweeperThread() { + delete start_sweeping_semaphore_; + delete end_sweeping_semaphore_; + delete stop_semaphore_; + } + + private: + Isolate* isolate_; + Heap* heap_; + MarkCompactCollector* collector_; + Semaphore* start_sweeping_semaphore_; + Semaphore* end_sweeping_semaphore_; + Semaphore* stop_semaphore_; + FreeList free_list_old_data_space_; + FreeList free_list_old_pointer_space_; + FreeList private_free_list_old_data_space_; + FreeList private_free_list_old_pointer_space_; + volatile AtomicWord stop_thread_; +}; + +} } // namespace v8::internal + +#endif // V8_SWEEPER_THREAD_H_ diff --git a/deps/v8/src/inspector.h b/deps/v8/src/symbol.js index 6962e21f4f..b7f9dc9496 100644 --- a/deps/v8/src/inspector.h +++ b/deps/v8/src/symbol.js @@ -1,4 +1,4 @@ -// Copyright 2011 the V8 project authors. All rights reserved. +// Copyright 2013 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -25,36 +25,15 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +"use strict"; -#ifndef V8_INSPECTOR_H_ -#define V8_INSPECTOR_H_ +var $Symbol = function() { return %CreateSymbol() } +global.Symbol = $Symbol -// Only build this code if we're configured with the INSPECTOR. -#ifdef INSPECTOR +// Symbols only have a toString method and no prototype. 
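[Editor's note] The SweeperThread added in sweeper-thread.{h,cc} above coordinates with the main thread through three semaphores and an atomic stop flag: StartSweeping() signals the worker, the worker sweeps and signals completion, WaitForSweeperThread() blocks on that completion signal, and Stop() sets the flag and wakes the worker so it can acknowledge shutdown. A compact sketch of that handshake using standard C++20 primitives in place of V8's Thread/Semaphore abstractions; the sweeping body is a placeholder.

#include <atomic>
#include <cstdio>
#include <semaphore>
#include <thread>

class SweeperSketch {
 public:
  SweeperSketch() : start_(0), end_(0), stop_(0), stop_requested_(false),
                    thread_(&SweeperSketch::Run, this) {}

  void StartSweeping() { start_.release(); }        // main -> worker
  void WaitForSweeperThread() { end_.acquire(); }   // main waits for worker

  void Stop() {                                     // shut the worker down
    stop_requested_.store(true);
    start_.release();
    stop_.acquire();
    thread_.join();
  }

 private:
  void Run() {
    while (true) {
      start_.acquire();                             // wait for a sweep request
      if (stop_requested_.load()) { stop_.release(); return; }
      std::printf("sweeping in the background...\n");  // placeholder for the parallel sweep
      end_.release();                               // report completion
    }
  }

  std::binary_semaphore start_, end_, stop_;
  std::atomic<bool> stop_requested_;
  std::thread thread_;
};

int main() {
  SweeperSketch sweeper;
  sweeper.StartSweeping();
  sweeper.WaitForSweeperThread();
  sweeper.Stop();
  return 0;
}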
+var SymbolDelegate = { + __proto__: null, + toString: $Object.prototype.toString +} -#include "v8.h" - -#include "objects.h" - -namespace v8 { -namespace internal { - -class Inspector { - public: - static void DumpObjectType(FILE* out, Object* obj, bool print_more); - static void DumpObjectType(FILE* out, Object* obj) { - DumpObjectType(out, obj, false); - } - static void DumpObjectType(Object* obj, bool print_more) { - DumpObjectType(stdout, obj, print_more); - } - static void DumpObjectType(Object* obj) { - DumpObjectType(stdout, obj, false); - } -}; - -} } // namespace v8::internal - -#endif // INSPECTOR - -#endif // V8_INSPECTOR_H_ +$Object.freeze(SymbolDelegate) diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h index 3036e5512a..4078a15cdb 100644 --- a/deps/v8/src/token.h +++ b/deps/v8/src/token.h @@ -99,6 +99,7 @@ namespace internal { T(SHL, "<<", 11) \ T(SAR, ">>", 11) \ T(SHR, ">>>", 11) \ + T(ROR, "rotate right", 11) /* only used by Crankshaft */ \ T(ADD, "+", 12) \ T(SUB, "-", 12) \ T(MUL, "*", 13) \ @@ -229,26 +230,30 @@ class Token { case EQ: return NE; case NE: return EQ; case EQ_STRICT: return NE_STRICT; + case NE_STRICT: return EQ_STRICT; case LT: return GTE; case GT: return LTE; case LTE: return GT; case GTE: return LT; default: + UNREACHABLE(); return op; } } - static Value InvertCompareOp(Value op) { + static Value ReverseCompareOp(Value op) { ASSERT(IsCompareOp(op)); switch (op) { - case EQ: return NE; - case NE: return EQ; - case EQ_STRICT: return NE_STRICT; + case EQ: return EQ; + case NE: return NE; + case EQ_STRICT: return EQ_STRICT; + case NE_STRICT: return NE_STRICT; case LT: return GT; case GT: return LT; case LTE: return GTE; case GTE: return LTE; default: + UNREACHABLE(); return op; } } diff --git a/deps/v8/src/transitions-inl.h b/deps/v8/src/transitions-inl.h index cfaa99d737..45b6457245 100644 --- a/deps/v8/src/transitions-inl.h +++ b/deps/v8/src/transitions-inl.h @@ -143,19 +143,19 @@ Object** TransitionArray::GetKeySlot(int transition_number) { } -String* TransitionArray::GetKey(int transition_number) { +Name* TransitionArray::GetKey(int transition_number) { if (IsSimpleTransition()) { Map* target = GetTarget(kSimpleTransitionIndex); int descriptor = target->LastAdded(); - String* key = target->instance_descriptors()->GetKey(descriptor); + Name* key = target->instance_descriptors()->GetKey(descriptor); return key; } ASSERT(transition_number < number_of_transitions()); - return String::cast(get(ToKeyIndex(transition_number))); + return Name::cast(get(ToKeyIndex(transition_number))); } -void TransitionArray::SetKey(int transition_number, String* key) { +void TransitionArray::SetKey(int transition_number, Name* key) { ASSERT(!IsSimpleTransition()); ASSERT(transition_number < number_of_transitions()); set(ToKeyIndex(transition_number), key); @@ -190,9 +190,9 @@ PropertyDetails TransitionArray::GetTargetDetails(int transition_number) { } -int TransitionArray::Search(String* name) { +int TransitionArray::Search(Name* name) { if (IsSimpleTransition()) { - String* key = GetKey(kSimpleTransitionIndex); + Name* key = GetKey(kSimpleTransitionIndex); if (key->Equals(name)) return kSimpleTransitionIndex; return kNotFound; } @@ -201,7 +201,7 @@ int TransitionArray::Search(String* name) { void TransitionArray::NoIncrementalWriteBarrierSet(int transition_number, - String* key, + Name* key, Map* target) { FixedArray::NoIncrementalWriteBarrierSet( this, ToKeyIndex(transition_number), key); diff --git a/deps/v8/src/transitions.cc b/deps/v8/src/transitions.cc index 
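[Editor's note] The token.h change above distinguishes NegateCompareOp (the logical complement used when a branch is inverted: a < b becomes a >= b) from the new ReverseCompareOp (the operator obtained by swapping the operands: a < b becomes b > a, while the equality operators map to themselves). A toy illustration of the difference; the enum and helpers below are stand-ins, not V8's Token API.

#include <cstdio>

enum CompareOp { EQ, NE, LT, GT, LTE, GTE };

// Complement: !(a op b) is equivalent to (a NegateCompareOp(op) b).
static CompareOp NegateCompareOp(CompareOp op) {
  switch (op) {
    case EQ:  return NE;
    case NE:  return EQ;
    case LT:  return GTE;
    case GT:  return LTE;
    case LTE: return GT;
    case GTE: return LT;
  }
  return op;
}

// Operand swap: (a op b) is equivalent to (b ReverseCompareOp(op) a).
static CompareOp ReverseCompareOp(CompareOp op) {
  switch (op) {
    case EQ:  return EQ;
    case NE:  return NE;
    case LT:  return GT;
    case GT:  return LT;
    case LTE: return GTE;
    case GTE: return LTE;
  }
  return op;
}

int main() {
  // For LT: the complement is GTE, the reversal is GT.
  std::printf("negate(LT)=%d (GTE=%d), reverse(LT)=%d (GT=%d)\n",
              NegateCompareOp(LT), GTE, ReverseCompareOp(LT), GT);
  return 0;
}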
56b6caf3db..adbe6a1b32 100644 --- a/deps/v8/src/transitions.cc +++ b/deps/v8/src/transitions.cc @@ -65,13 +65,13 @@ void TransitionArray::NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin, } -static bool InsertionPointFound(String* key1, String* key2) { +static bool InsertionPointFound(Name* key1, Name* key2) { return key1->Hash() > key2->Hash(); } MaybeObject* TransitionArray::NewWith(SimpleTransitionFlag flag, - String* key, + Name* key, Map* target, Object* back_pointer) { TransitionArray* result; @@ -107,7 +107,7 @@ MaybeObject* TransitionArray::ExtendToFullTransitionArray() { } -MaybeObject* TransitionArray::CopyInsert(String* name, Map* target) { +MaybeObject* TransitionArray::CopyInsert(Name* name, Map* target) { TransitionArray* result; int number_of_transitions = this->number_of_transitions(); diff --git a/deps/v8/src/transitions.h b/deps/v8/src/transitions.h index 0a660261c5..7abef47346 100644 --- a/deps/v8/src/transitions.h +++ b/deps/v8/src/transitions.h @@ -59,12 +59,12 @@ namespace internal { class TransitionArray: public FixedArray { public: // Accessors for fetching instance transition at transition number. - inline String* GetKey(int transition_number); - inline void SetKey(int transition_number, String* value); + inline Name* GetKey(int transition_number); + inline void SetKey(int transition_number, Name* value); inline Object** GetKeySlot(int transition_number); int GetSortedKeyIndex(int transition_number) { return transition_number; } - String* GetSortedKey(int transition_number) { + Name* GetSortedKey(int transition_number) { return GetKey(transition_number); } @@ -105,7 +105,7 @@ class TransitionArray: public FixedArray { // Allocate a new transition array with a single entry. static MUST_USE_RESULT MaybeObject* NewWith( SimpleTransitionFlag flag, - String* key, + Name* key, Map* target, Object* back_pointer); @@ -114,7 +114,7 @@ class TransitionArray: public FixedArray { // Copy the transition array, inserting a new transition. // TODO(verwaest): This should not cause an existing transition to be // overwritten. - MUST_USE_RESULT MaybeObject* CopyInsert(String* name, Map* target); + MUST_USE_RESULT MaybeObject* CopyInsert(Name* name, Map* target); // Copy a single transition from the origin array. inline void NoIncrementalWriteBarrierCopyFrom(TransitionArray* origin, @@ -122,7 +122,7 @@ class TransitionArray: public FixedArray { int target_transition); // Search a transition for a given property name. - inline int Search(String* name); + inline int Search(Name* name); // Allocates a TransitionArray. 
MUST_USE_RESULT static MaybeObject* Allocate(int number_of_transitions); @@ -195,7 +195,7 @@ class TransitionArray: public FixedArray { } inline void NoIncrementalWriteBarrierSet(int transition_number, - String* key, + Name* key, Map* target); DISALLOW_IMPLICIT_CONSTRUCTORS(TransitionArray); diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc index bc6a46b4b6..6ac05547aa 100644 --- a/deps/v8/src/type-info.cc +++ b/deps/v8/src/type-info.cc @@ -62,10 +62,10 @@ TypeInfo TypeInfo::TypeFromValue(Handle<Object> value) { TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code, Handle<Context> native_context, Isolate* isolate, - Zone* zone) { - native_context_ = native_context; - isolate_ = isolate; - zone_ = zone; + Zone* zone) + : native_context_(native_context), + isolate_(isolate), + zone_(zone) { BuildDictionary(code); ASSERT(reinterpret_cast<Address>(*dictionary_.location()) != kHandleZapValue); } @@ -79,7 +79,7 @@ static uint32_t IdToKey(TypeFeedbackId ast_id) { Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) { int entry = dictionary_->FindEntry(IdToKey(ast_id)); return entry != UnseededNumberDictionary::kNotFound - ? Handle<Object>(dictionary_->ValueAt(entry)) + ? Handle<Object>(dictionary_->ValueAt(entry), isolate_) : Handle<Object>::cast(isolate_->factory()->undefined_value()); } @@ -111,14 +111,11 @@ bool TypeFeedbackOracle::LoadIsMonomorphicNormal(Property* expr) { } -bool TypeFeedbackOracle::LoadIsMegamorphicWithTypeInfo(Property* expr) { +bool TypeFeedbackOracle::LoadIsPolymorphic(Property* expr) { Handle<Object> map_or_code = GetInfo(expr->PropertyFeedbackId()); if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); - Builtins* builtins = isolate_->builtins(); - return code->is_keyed_load_stub() && - *code != builtins->builtin(Builtins::kKeyedLoadIC_Generic) && - code->ic_state() == MEGAMORPHIC; + return code->is_keyed_load_stub() && code->ic_state() == POLYMORPHIC; } return false; } @@ -129,12 +126,12 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) { if (map_or_code->IsMap()) return true; if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); - bool allow_growth = - Code::GetKeyedAccessGrowMode(code->extra_ic_state()) == - ALLOW_JSARRAY_GROWTH; + bool standard_store = + Code::GetKeyedAccessStoreMode(code->extra_ic_state()) == + STANDARD_STORE; bool preliminary_checks = code->is_keyed_store_stub() && - !allow_growth && + standard_store && code->ic_state() == MONOMORPHIC && Code::ExtractTypeFromFlags(code->flags()) == Code::NORMAL; if (!preliminary_checks) return false; @@ -145,19 +142,15 @@ bool TypeFeedbackOracle::StoreIsMonomorphicNormal(TypeFeedbackId ast_id) { } -bool TypeFeedbackOracle::StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id) { +bool TypeFeedbackOracle::StoreIsPolymorphic(TypeFeedbackId ast_id) { Handle<Object> map_or_code = GetInfo(ast_id); if (map_or_code->IsCode()) { Handle<Code> code = Handle<Code>::cast(map_or_code); - Builtins* builtins = isolate_->builtins(); - bool allow_growth = - Code::GetKeyedAccessGrowMode(code->extra_ic_state()) == - ALLOW_JSARRAY_GROWTH; - return code->is_keyed_store_stub() && - !allow_growth && - *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic) && - *code != builtins->builtin(Builtins::kKeyedStoreIC_Generic_Strict) && - code->ic_state() == MEGAMORPHIC; + bool standard_store = + Code::GetKeyedAccessStoreMode(code->extra_ic_state()) == + STANDARD_STORE; + return code->is_keyed_store_stub() && standard_store && + 
code->ic_state() == POLYMORPHIC; } return false; } @@ -170,8 +163,13 @@ bool TypeFeedbackOracle::CallIsMonomorphic(Call* expr) { bool TypeFeedbackOracle::CallNewIsMonomorphic(CallNew* expr) { - Handle<Object> value = GetInfo(expr->CallNewFeedbackId()); - return value->IsJSFunction(); + Handle<Object> info = GetInfo(expr->CallNewFeedbackId()); + if (info->IsSmi()) { + ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <= + LAST_FAST_ELEMENTS_KIND); + return isolate_->global_context()->array_function(); + } + return info->IsJSFunction(); } @@ -220,11 +218,23 @@ Handle<Map> TypeFeedbackOracle::StoreMonomorphicReceiverType( } +KeyedAccessStoreMode TypeFeedbackOracle::GetStoreMode( + TypeFeedbackId ast_id) { + Handle<Object> map_or_code = GetInfo(ast_id); + if (map_or_code->IsCode()) { + Handle<Code> code = Handle<Code>::cast(map_or_code); + if (code->kind() == Code::KEYED_STORE_IC) { + return Code::GetKeyedAccessStoreMode(code->extra_ic_state()); + } + } + return STANDARD_STORE; +} + + void TypeFeedbackOracle::LoadReceiverTypes(Property* expr, Handle<String> name, SmallMapList* types) { - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL); + Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC); CollectReceiverTypes(expr->PropertyFeedbackId(), name, flags, types); } @@ -232,8 +242,7 @@ void TypeFeedbackOracle::LoadReceiverTypes(Property* expr, void TypeFeedbackOracle::StoreReceiverTypes(Assignment* expr, Handle<String> name, SmallMapList* types) { - Code::Flags flags = - Code::ComputeMonomorphicFlags(Code::STORE_IC, Code::NORMAL); + Code::Flags flags = Code::ComputeMonomorphicFlags(Code::STORE_IC); CollectReceiverTypes(expr->AssignmentFeedbackId(), name, flags, types); } @@ -250,10 +259,10 @@ void TypeFeedbackOracle::CallReceiverTypes(Call* expr, CallIC::Contextual::encode(call_kind == CALL_AS_FUNCTION); Code::Flags flags = Code::ComputeMonomorphicFlags(Code::CALL_IC, - Code::NORMAL, extra_ic_state, - OWN_MAP, - arity); + Code::NORMAL, + arity, + OWN_MAP); CollectReceiverTypes(expr->CallFeedbackId(), name, flags, types); } @@ -274,6 +283,8 @@ Handle<JSObject> TypeFeedbackOracle::GetPrototypeForPrimitiveCheck( case RECEIVER_MAP_CHECK: UNREACHABLE(); break; + case SYMBOL_CHECK: + return Handle<JSObject>(native_context_->symbol_delegate()); case STRING_CHECK: function = native_context_->string_function(); break; @@ -295,10 +306,32 @@ Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(Call* expr) { Handle<JSFunction> TypeFeedbackOracle::GetCallNewTarget(CallNew* expr) { - return Handle<JSFunction>::cast(GetInfo(expr->CallNewFeedbackId())); + Handle<Object> info = GetInfo(expr->CallNewFeedbackId()); + if (info->IsSmi()) { + ASSERT(static_cast<ElementsKind>(Smi::cast(*info)->value()) <= + LAST_FAST_ELEMENTS_KIND); + return Handle<JSFunction>(isolate_->global_context()->array_function()); + } else { + return Handle<JSFunction>::cast(info); + } } +ElementsKind TypeFeedbackOracle::GetCallNewElementsKind(CallNew* expr) { + Handle<Object> info = GetInfo(expr->CallNewFeedbackId()); + if (info->IsSmi()) { + return static_cast<ElementsKind>(Smi::cast(*info)->value()); + } else { + // TODO(mvstanton): avoided calling GetInitialFastElementsKind() for perf + // reasons. Is there a better fix? 
+ if (FLAG_packed_arrays) { + return FAST_SMI_ELEMENTS; + } else { + return FAST_HOLEY_SMI_ELEMENTS; + } + } +} + Handle<Map> TypeFeedbackOracle::GetObjectLiteralStoreMap( ObjectLiteral::Property* prop) { ASSERT(ObjectLiteralStoreIsMonomorphic(prop)); @@ -312,43 +345,63 @@ bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) { } -TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) { - Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId()); - TypeInfo unknown = TypeInfo::Unknown(); - if (!object->IsCode()) return unknown; +bool TypeFeedbackOracle::LoadIsStub(Property* expr, ICStub* stub) { + Handle<Object> object = GetInfo(expr->PropertyFeedbackId()); + if (!object->IsCode()) return false; Handle<Code> code = Handle<Code>::cast(object); - if (!code->is_compare_ic_stub()) return unknown; + if (!code->is_load_stub()) return false; + if (code->ic_state() != MONOMORPHIC) return false; + return stub->Describes(*code); +} - CompareIC::State state = static_cast<CompareIC::State>(code->compare_state()); + +static TypeInfo TypeFromCompareType(CompareIC::State state) { switch (state) { case CompareIC::UNINITIALIZED: // Uninitialized means never executed. return TypeInfo::Uninitialized(); - case CompareIC::SMIS: + case CompareIC::SMI: return TypeInfo::Smi(); - case CompareIC::HEAP_NUMBERS: + case CompareIC::NUMBER: return TypeInfo::Number(); - case CompareIC::SYMBOLS: - case CompareIC::STRINGS: + case CompareIC::INTERNALIZED_STRING: + return TypeInfo::InternalizedString(); + case CompareIC::STRING: return TypeInfo::String(); - case CompareIC::OBJECTS: - case CompareIC::KNOWN_OBJECTS: + case CompareIC::OBJECT: + case CompareIC::KNOWN_OBJECT: // TODO(kasperl): We really need a type for JS objects here. return TypeInfo::NonPrimitive(); case CompareIC::GENERIC: default: - return unknown; + return TypeInfo::Unknown(); } } -bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) { +void TypeFeedbackOracle::CompareType(CompareOperation* expr, + TypeInfo* left_type, + TypeInfo* right_type, + TypeInfo* overall_type) { Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId()); - if (!object->IsCode()) return false; + TypeInfo unknown = TypeInfo::Unknown(); + if (!object->IsCode()) { + *left_type = *right_type = *overall_type = unknown; + return; + } Handle<Code> code = Handle<Code>::cast(object); - if (!code->is_compare_ic_stub()) return false; - CompareIC::State state = static_cast<CompareIC::State>(code->compare_state()); - return state == CompareIC::SYMBOLS; + if (!code->is_compare_ic_stub()) { + *left_type = *right_type = *overall_type = unknown; + return; + } + + int stub_minor_key = code->stub_info(); + CompareIC::State left_state, right_state, handler_state; + ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state, + &handler_state, NULL); + *left_type = TypeFromCompareType(left_state); + *right_type = TypeFromCompareType(right_state); + *overall_type = TypeFromCompareType(handler_state); } @@ -357,8 +410,8 @@ Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) { if (!object->IsCode()) return Handle<Map>::null(); Handle<Code> code = Handle<Code>::cast(object); if (!code->is_compare_ic_stub()) return Handle<Map>::null(); - CompareIC::State state = static_cast<CompareIC::State>(code->compare_state()); - if (state != CompareIC::KNOWN_OBJECTS) { + CompareIC::State state = ICCompareStub::CompareState(code->stub_info()); + if (state != CompareIC::KNOWN_OBJECT) { return Handle<Map>::null(); } Map* first_map = 
code->FindFirstMap(); @@ -380,7 +433,7 @@ TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) { switch (type) { case UnaryOpIC::SMI: return TypeInfo::Smi(); - case UnaryOpIC::HEAP_NUMBER: + case UnaryOpIC::NUMBER: return TypeInfo::Double(); default: return unknown; @@ -388,55 +441,44 @@ TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) { } -TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) { +static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) { + switch (binary_type) { + // Uninitialized means never executed. + case BinaryOpIC::UNINITIALIZED: return TypeInfo::Uninitialized(); + case BinaryOpIC::SMI: return TypeInfo::Smi(); + case BinaryOpIC::INT32: return TypeInfo::Integer32(); + case BinaryOpIC::NUMBER: return TypeInfo::Double(); + case BinaryOpIC::ODDBALL: return TypeInfo::Unknown(); + case BinaryOpIC::STRING: return TypeInfo::String(); + case BinaryOpIC::GENERIC: return TypeInfo::Unknown(); + } + UNREACHABLE(); + return TypeInfo::Unknown(); +} + + +void TypeFeedbackOracle::BinaryType(BinaryOperation* expr, + TypeInfo* left, + TypeInfo* right, + TypeInfo* result) { Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId()); TypeInfo unknown = TypeInfo::Unknown(); - if (!object->IsCode()) return unknown; + if (!object->IsCode()) { + *left = *right = *result = unknown; + return; + } Handle<Code> code = Handle<Code>::cast(object); if (code->is_binary_op_stub()) { - BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>( - code->binary_op_type()); - BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>( - code->binary_op_result_type()); - - switch (type) { - case BinaryOpIC::UNINITIALIZED: - // Uninitialized means never executed. - return TypeInfo::Uninitialized(); - case BinaryOpIC::SMI: - switch (result_type) { - case BinaryOpIC::UNINITIALIZED: - if (expr->op() == Token::DIV) { - return TypeInfo::Double(); - } - return TypeInfo::Smi(); - case BinaryOpIC::SMI: - return TypeInfo::Smi(); - case BinaryOpIC::INT32: - return TypeInfo::Integer32(); - case BinaryOpIC::HEAP_NUMBER: - return TypeInfo::Double(); - default: - return unknown; - } - case BinaryOpIC::INT32: - if (expr->op() == Token::DIV || - result_type == BinaryOpIC::HEAP_NUMBER) { - return TypeInfo::Double(); - } - return TypeInfo::Integer32(); - case BinaryOpIC::HEAP_NUMBER: - return TypeInfo::Double(); - case BinaryOpIC::BOTH_STRING: - return TypeInfo::String(); - case BinaryOpIC::STRING: - case BinaryOpIC::GENERIC: - return unknown; - default: - return unknown; - } + BinaryOpIC::TypeInfo left_type, right_type, result_type; + BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type, + &right_type, &result_type); + *left = TypeFromBinaryOpType(left_type); + *right = TypeFromBinaryOpType(right_type); + *result = TypeFromBinaryOpType(result_type); + return; } - return unknown; + // Not a binary op stub. + *left = *right = *result = unknown; } @@ -447,28 +489,8 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) { Handle<Code> code = Handle<Code>::cast(object); if (!code->is_compare_ic_stub()) return unknown; - CompareIC::State state = static_cast<CompareIC::State>(code->compare_state()); - switch (state) { - case CompareIC::UNINITIALIZED: - // Uninitialized means never executed. - // TODO(fschneider): Introduce a separate value for never-executed ICs. 
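[Editor's note] The compare and binary-op ICs above now record separate left, right and result states packed into the stub's minor key, which the oracle unpacks via ICCompareStub::DecodeMinorKey and BinaryOpStub::decode_types_from_minor_key. The following is only a schematic sketch of that pack/unpack idea; the field widths, bit positions and state values are assumptions and do not reflect V8's real encoding.

#include <cstdint>
#include <cstdio>

// Pretend 3-bit operand/result states (values are illustrative).
enum State { UNINITIALIZED = 0, SMI = 1, INT32 = 2, NUMBER = 3, STRING = 4, GENERIC = 5 };

// Pack left/right/result into one integer key, three bits per field.
static uint32_t EncodeMinorKey(State left, State right, State result) {
  return static_cast<uint32_t>(left) |
         (static_cast<uint32_t>(right) << 3) |
         (static_cast<uint32_t>(result) << 6);
}

// Decode the three fields back out of the key.
static void DecodeMinorKey(uint32_t key, State* left, State* right, State* result) {
  *left = static_cast<State>(key & 0x7);
  *right = static_cast<State>((key >> 3) & 0x7);
  *result = static_cast<State>((key >> 6) & 0x7);
}

int main() {
  uint32_t key = EncodeMinorKey(SMI, SMI, INT32);
  State l, r, res;
  DecodeMinorKey(key, &l, &r, &res);
  std::printf("key=0x%x -> left=%d right=%d result=%d\n", key, l, r, res);
  return 0;
}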
- return unknown; - case CompareIC::SMIS: - return TypeInfo::Smi(); - case CompareIC::STRINGS: - return TypeInfo::String(); - case CompareIC::SYMBOLS: - return TypeInfo::Symbol(); - case CompareIC::HEAP_NUMBERS: - return TypeInfo::Number(); - case CompareIC::OBJECTS: - case CompareIC::KNOWN_OBJECTS: - // TODO(kasperl): We really need a type for JS objects here. - return TypeInfo::NonPrimitive(); - case CompareIC::GENERIC: - default: - return unknown; - } + CompareIC::State state = ICCompareStub::CompareState(code->stub_info()); + return TypeFromCompareType(state); } @@ -479,17 +501,21 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) { Handle<Code> code = Handle<Code>::cast(object); if (!code->is_binary_op_stub()) return unknown; - BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>( - code->binary_op_type()); - switch (type) { + BinaryOpIC::TypeInfo left_type, right_type, unused_result_type; + BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type, + &right_type, &unused_result_type); + // CountOperations should always have +1 or -1 as their right input. + ASSERT(right_type == BinaryOpIC::SMI || + right_type == BinaryOpIC::UNINITIALIZED); + + switch (left_type) { case BinaryOpIC::UNINITIALIZED: case BinaryOpIC::SMI: return TypeInfo::Smi(); case BinaryOpIC::INT32: return TypeInfo::Integer32(); - case BinaryOpIC::HEAP_NUMBER: + case BinaryOpIC::NUMBER: return TypeInfo::Double(); - case BinaryOpIC::BOTH_STRING: case BinaryOpIC::STRING: case BinaryOpIC::GENERIC: return unknown; @@ -501,6 +527,29 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) { } +static void AddMapIfMissing(Handle<Map> map, SmallMapList* list, + Zone* zone) { + for (int i = 0; i < list->length(); ++i) { + if (list->at(i).is_identical_to(map)) return; + } + list->Add(map, zone); +} + + +void TypeFeedbackOracle::CollectPolymorphicMaps(Handle<Code> code, + SmallMapList* types) { + MapHandleList maps; + code->FindAllMaps(&maps); + types->Reserve(maps.length(), zone()); + for (int i = 0; i < maps.length(); i++) { + Handle<Map> map(maps.at(i)); + if (!CanRetainOtherContext(*map, *native_context_)) { + AddMapIfMissing(map, types, zone()); + } + } +} + + void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id, Handle<String> name, Code::Flags flags, @@ -508,13 +557,14 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id, Handle<Object> object = GetInfo(ast_id); if (object->IsUndefined() || object->IsSmi()) return; - if (*object == - isolate_->builtins()->builtin(Builtins::kStoreIC_GlobalProxy)) { + if (object.is_identical_to(isolate_->builtins()->StoreIC_GlobalProxy())) { // TODO(fschneider): We could collect the maps and signal that // we need a generic store (or load) here. 
- ASSERT(Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC); + ASSERT(Handle<Code>::cast(object)->ic_state() == GENERIC); } else if (object->IsMap()) { types->Add(Handle<Map>::cast(object), zone()); + } else if (Handle<Code>::cast(object)->ic_state() == POLYMORPHIC) { + CollectPolymorphicMaps(Handle<Code>::cast(object), types); } else if (FLAG_collect_megamorphic_maps_from_stub_cache && Handle<Code>::cast(object)->ic_state() == MEGAMORPHIC) { types->Reserve(4, zone()); @@ -562,15 +612,6 @@ bool TypeFeedbackOracle::CanRetainOtherContext(JSFunction* function, } -static void AddMapIfMissing(Handle<Map> map, SmallMapList* list, - Zone* zone) { - for (int i = 0; i < list->length(); ++i) { - if (list->at(i).is_identical_to(map)) return; - } - list->Add(map, zone); -} - - void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id, SmallMapList* types) { Handle<Object> object = GetInfo(ast_id); @@ -578,18 +619,7 @@ void TypeFeedbackOracle::CollectKeyedReceiverTypes(TypeFeedbackId ast_id, Handle<Code> code = Handle<Code>::cast(object); if (code->kind() == Code::KEYED_LOAD_IC || code->kind() == Code::KEYED_STORE_IC) { - AssertNoAllocation no_allocation; - int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); - for (RelocIterator it(*code, mask); !it.done(); it.next()) { - RelocInfo* info = it.rinfo(); - Object* object = info->target_object(); - if (object->IsMap()) { - Map* map = Map::cast(object); - if (!CanRetainOtherContext(map, *native_context_)) { - AddMapIfMissing(Handle<Map>(map), types, zone()); - } - } - } + CollectPolymorphicMaps(code, types); } } @@ -607,7 +637,7 @@ byte TypeFeedbackOracle::ToBooleanTypes(TypeFeedbackId ast_id) { void TypeFeedbackOracle::BuildDictionary(Handle<Code> code) { AssertNoAllocation no_allocation; ZoneList<RelocInfo> infos(16, zone()); - HandleScope scope; + HandleScope scope(isolate_); GetRelocInfos(code, &infos); CreateDictionary(code, &infos); ProcessRelocInfos(&infos); @@ -684,7 +714,7 @@ void TypeFeedbackOracle::ProcessRelocInfos(ZoneList<RelocInfo>* infos) { case Code::KEYED_LOAD_IC: case Code::KEYED_STORE_IC: if (target->ic_state() == MONOMORPHIC || - target->ic_state() == MEGAMORPHIC) { + target->ic_state() == POLYMORPHIC) { SetInfo(ast_id, target); } break; diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h index 00d88c2afc..583c3fc520 100644 --- a/deps/v8/src/type-info.h +++ b/deps/v8/src/type-info.h @@ -65,12 +65,12 @@ class TypeInfo { static TypeInfo Integer32() { return TypeInfo(kInteger32); } // We know it's a Smi. static TypeInfo Smi() { return TypeInfo(kSmi); } - // We know it's a Symbol. - static TypeInfo Symbol() { return TypeInfo(kSymbol); } // We know it's a heap number. static TypeInfo Double() { return TypeInfo(kDouble); } // We know it's a string. static TypeInfo String() { return TypeInfo(kString); } + // We know it's an internalized string. + static TypeInfo InternalizedString() { return TypeInfo(kInternalizedString); } // We know it's a non-primitive (object) type. static TypeInfo NonPrimitive() { return TypeInfo(kNonPrimitive); } // We haven't started collecting info yet. 
@@ -140,14 +140,14 @@ class TypeInfo { return ((type_ & kSmi) == kSmi); } - inline bool IsSymbol() { + inline bool IsInternalizedString() { ASSERT(type_ != kUninitialized); - return ((type_ & kSymbol) == kSymbol); + return ((type_ & kInternalizedString) == kInternalizedString); } - inline bool IsNonSymbol() { + inline bool IsNonInternalizedString() { ASSERT(type_ != kUninitialized); - return ((type_ & kSymbol) == kString); + return ((type_ & kInternalizedString) == kString); } inline bool IsInteger32() { @@ -181,7 +181,7 @@ class TypeInfo { case kNumber: return "Number"; case kInteger32: return "Integer32"; case kSmi: return "Smi"; - case kSymbol: return "Symbol"; + case kInternalizedString: return "InternalizedString"; case kDouble: return "Double"; case kString: return "String"; case kNonPrimitive: return "Object"; @@ -193,17 +193,18 @@ class TypeInfo { private: enum Type { - kUnknown = 0, // 0000000 - kPrimitive = 0x10, // 0010000 - kNumber = 0x11, // 0010001 - kInteger32 = 0x13, // 0010011 - kSmi = 0x17, // 0010111 - kDouble = 0x19, // 0011001 - kString = 0x30, // 0110000 - kSymbol = 0x32, // 0110010 - kNonPrimitive = 0x40, // 1000000 - kUninitialized = 0x7f // 1111111 + kUnknown = 0, // 0000000 + kPrimitive = 0x10, // 0010000 + kNumber = 0x11, // 0010001 + kInteger32 = 0x13, // 0010011 + kSmi = 0x17, // 0010111 + kDouble = 0x19, // 0011001 + kString = 0x30, // 0110000 + kInternalizedString = 0x32, // 0110010 + kNonPrimitive = 0x40, // 1000000 + kUninitialized = 0x7f // 1111111 }; + explicit inline TypeInfo(Type t) : type_(t) { } Type type_; @@ -226,10 +227,11 @@ class CompareOperation; class CompilationInfo; class CountOperation; class Expression; +class ForInStatement; +class ICStub; class Property; class SmallMapList; class UnaryOperation; -class ForInStatement; class TypeFeedbackOracle: public ZoneObject { @@ -241,9 +243,9 @@ class TypeFeedbackOracle: public ZoneObject { bool LoadIsMonomorphicNormal(Property* expr); bool LoadIsUninitialized(Property* expr); - bool LoadIsMegamorphicWithTypeInfo(Property* expr); + bool LoadIsPolymorphic(Property* expr); bool StoreIsMonomorphicNormal(TypeFeedbackId ast_id); - bool StoreIsMegamorphicWithTypeInfo(TypeFeedbackId ast_id); + bool StoreIsPolymorphic(TypeFeedbackId ast_id); bool CallIsMonomorphic(Call* expr); bool CallNewIsMonomorphic(CallNew* expr); bool ObjectLiteralStoreIsMonomorphic(ObjectLiteral::Property* prop); @@ -253,6 +255,8 @@ class TypeFeedbackOracle: public ZoneObject { Handle<Map> LoadMonomorphicReceiverType(Property* expr); Handle<Map> StoreMonomorphicReceiverType(TypeFeedbackId ast_id); + KeyedAccessStoreMode GetStoreMode(TypeFeedbackId ast_id); + void LoadReceiverTypes(Property* expr, Handle<String> name, SmallMapList* types); @@ -270,15 +274,19 @@ class TypeFeedbackOracle: public ZoneObject { static bool CanRetainOtherContext(JSFunction* function, Context* native_context); + void CollectPolymorphicMaps(Handle<Code> code, SmallMapList* types); + CheckType GetCallCheckType(Call* expr); Handle<JSObject> GetPrototypeForPrimitiveCheck(CheckType check); Handle<JSFunction> GetCallTarget(Call* expr); Handle<JSFunction> GetCallNewTarget(CallNew* expr); + ElementsKind GetCallNewElementsKind(CallNew* expr); Handle<Map> GetObjectLiteralStoreMap(ObjectLiteral::Property* prop); bool LoadIsBuiltin(Property* expr, Builtins::Name id); + bool LoadIsStub(Property* expr, ICStub* stub); // TODO(1571) We can't use ToBooleanStub::Types as the return value because // of various cylces in our headers. 
Death to tons of implementations in @@ -287,9 +295,14 @@ class TypeFeedbackOracle: public ZoneObject { // Get type information for arithmetic operations and compares. TypeInfo UnaryType(UnaryOperation* expr); - TypeInfo BinaryType(BinaryOperation* expr); - TypeInfo CompareType(CompareOperation* expr); - bool IsSymbolCompare(CompareOperation* expr); + void BinaryType(BinaryOperation* expr, + TypeInfo* left, + TypeInfo* right, + TypeInfo* result); + void CompareType(CompareOperation* expr, + TypeInfo* left_type, + TypeInfo* right_type, + TypeInfo* overall_type); Handle<Map> GetCompareMap(CompareOperation* expr); TypeInfo SwitchType(CaseClause* clause); TypeInfo IncrementType(CountOperation* expr); @@ -315,12 +328,16 @@ class TypeFeedbackOracle: public ZoneObject { // Returns an element from the backing store. Returns undefined if // there is no information. + public: + // TODO(mvstanton): how to get this information without making the method + // public? Handle<Object> GetInfo(TypeFeedbackId ast_id); + private: Handle<Context> native_context_; Isolate* isolate_; - Handle<UnseededNumberDictionary> dictionary_; Zone* zone_; + Handle<UnseededNumberDictionary> dictionary_; DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle); }; diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h index ec9c69f8da..02be457688 100644 --- a/deps/v8/src/unicode-inl.h +++ b/deps/v8/src/unicode-inl.h @@ -79,6 +79,34 @@ template <class T, int s> int Mapping<T, s>::CalculateValue(uchar c, uchar n, } +uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) { + ASSERT(c > Latin1::kMaxChar); + switch (c) { + // This are equivalent characters in unicode. + case 0x39c: + case 0x3bc: + return 0xb5; + // This is an uppercase of a Latin-1 character + // outside of Latin-1. + case 0x178: + return 0xff; + } + return 0; +} + + +unsigned Utf8::EncodeOneByte(char* str, uint8_t c) { + static const int kMask = ~(1 << 6); + if (c <= kMaxOneByteChar) { + str[0] = c; + return 1; + } + str[0] = 0xC0 | (c >> 6); + str[1] = 0x80 | (c & kMask); + return 2; +} + + unsigned Utf8::Encode(char* str, uchar c, int previous) { static const int kMask = ~(1 << 6); if (c <= kMaxOneByteChar) { @@ -137,113 +165,51 @@ unsigned Utf8::Length(uchar c, int previous) { } } -uchar CharacterStream::GetNext() { - uchar result = DecodeCharacter(buffer_, &cursor_); - if (remaining_ == 1) { - cursor_ = 0; - FillBuffer(); - } else { - remaining_--; - } - ASSERT(BoundsCheck(cursor_)); - return result; -} - -#if __BYTE_ORDER == __LITTLE_ENDIAN -#define IF_LITTLE(expr) expr -#define IF_BIG(expr) ((void) 0) -#elif __BYTE_ORDER == __BIG_ENDIAN -#define IF_LITTLE(expr) ((void) 0) -#define IF_BIG(expr) expr -#else -#warning Unknown byte ordering -#endif - -bool CharacterStream::EncodeAsciiCharacter(uchar c, byte* buffer, - unsigned capacity, unsigned& offset) { - if (offset >= capacity) return false; - buffer[offset] = c; - offset += 1; - return true; -} - -bool CharacterStream::EncodeNonAsciiCharacter(uchar c, byte* buffer, - unsigned capacity, unsigned& offset) { - unsigned aligned = (offset + 0x3) & ~0x3; - if ((aligned + sizeof(uchar)) > capacity) - return false; - if (offset == aligned) { - IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = (c << 8) | 0x80); - IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c | (1 << 31)); - } else { - buffer[offset] = 0x80; - IF_LITTLE(*reinterpret_cast<uchar*>(buffer + aligned) = c << 8); - IF_BIG(*reinterpret_cast<uchar*>(buffer + aligned) = c); - } - offset = aligned + sizeof(uchar); - return true; -} - -bool 
CharacterStream::EncodeCharacter(uchar c, byte* buffer, unsigned capacity, - unsigned& offset) { - if (c <= Utf8::kMaxOneByteChar) { - return EncodeAsciiCharacter(c, buffer, capacity, offset); - } else { - return EncodeNonAsciiCharacter(c, buffer, capacity, offset); - } -} - -uchar CharacterStream::DecodeCharacter(const byte* buffer, unsigned* offset) { - byte b = buffer[*offset]; - if (b <= Utf8::kMaxOneByteChar) { - (*offset)++; - return b; - } else { - unsigned aligned = (*offset + 0x3) & ~0x3; - *offset = aligned + sizeof(uchar); - IF_LITTLE(return *reinterpret_cast<const uchar*>(buffer + aligned) >> 8); - IF_BIG(return *reinterpret_cast<const uchar*>(buffer + aligned) & - ~(1 << 31)); - } -} - -#undef IF_LITTLE -#undef IF_BIG - -template <class R, class I, unsigned s> -void InputBuffer<R, I, s>::FillBuffer() { - buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_); -} - -template <class R, class I, unsigned s> -void InputBuffer<R, I, s>::Rewind() { - Reset(input_); -} - -template <class R, class I, unsigned s> -void InputBuffer<R, I, s>::Reset(unsigned position, I input) { - input_ = input; - remaining_ = 0; - cursor_ = 0; - offset_ = position; - buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_); -} - -template <class R, class I, unsigned s> -void InputBuffer<R, I, s>::Reset(I input) { - Reset(0, input); -} - -template <class R, class I, unsigned s> -void InputBuffer<R, I, s>::Seek(unsigned position) { - offset_ = position; - buffer_ = R::ReadBlock(input_, util_buffer_, s, &remaining_, &offset_); -} - -template <unsigned s> -Utf8InputBuffer<s>::Utf8InputBuffer(const char* data, unsigned length) - : InputBuffer<Utf8, Buffer<const char*>, s>(Buffer<const char*>(data, - length)) { +Utf8DecoderBase::Utf8DecoderBase() + : unbuffered_start_(NULL), + utf16_length_(0), + last_byte_of_buffer_unused_(false) {} + +Utf8DecoderBase::Utf8DecoderBase(uint16_t* buffer, + unsigned buffer_length, + const uint8_t* stream, + unsigned stream_length) { + Reset(buffer, buffer_length, stream, stream_length); +} + +template<unsigned kBufferSize> +Utf8Decoder<kBufferSize>::Utf8Decoder(const char* stream, unsigned length) + : Utf8DecoderBase(buffer_, + kBufferSize, + reinterpret_cast<const uint8_t*>(stream), + length) { +} + +template<unsigned kBufferSize> +void Utf8Decoder<kBufferSize>::Reset(const char* stream, unsigned length) { + Utf8DecoderBase::Reset(buffer_, + kBufferSize, + reinterpret_cast<const uint8_t*>(stream), + length); +} + +template <unsigned kBufferSize> +unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data, + unsigned length) const { + ASSERT(length > 0); + if (length > utf16_length_) length = utf16_length_; + // memcpy everything in buffer. + unsigned buffer_length = + last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize; + unsigned memcpy_length = length <= buffer_length ? length : buffer_length; + memcpy(data, buffer_, memcpy_length*sizeof(uint16_t)); + if (length <= buffer_length) return length; + ASSERT(unbuffered_start_ != NULL); + // Copy the rest the slow way. 
+ WriteUtf16Slow(unbuffered_start_, + data + buffer_length, + length - buffer_length); + return length; } } // namespace unibrow diff --git a/deps/v8/src/unicode.cc b/deps/v8/src/unicode.cc index 14f380642a..04065b0479 100644 --- a/deps/v8/src/unicode.cc +++ b/deps/v8/src/unicode.cc @@ -277,84 +277,74 @@ uchar Utf8::CalculateValue(const byte* str, } -const byte* Utf8::ReadBlock(Buffer<const char*> str, byte* buffer, - unsigned capacity, unsigned* chars_read_ptr, unsigned* offset_ptr) { - unsigned offset = *offset_ptr; - // Bail out early if we've reached the end of the string. - if (offset == str.length()) { - *chars_read_ptr = 0; - return NULL; - } - const byte* data = reinterpret_cast<const byte*>(str.data()); - if (data[offset] <= kMaxOneByteChar) { - // The next character is an ASCII char so we scan forward over - // the following ASCII characters and return the next pure ASCII - // substring - const byte* result = data + offset; - offset++; - while ((offset < str.length()) && (data[offset] <= kMaxOneByteChar)) - offset++; - *chars_read_ptr = offset - *offset_ptr; - *offset_ptr = offset; - return result; - } else { - // The next character is non-ASCII so we just fill the buffer +void Utf8DecoderBase::Reset(uint16_t* buffer, + unsigned buffer_length, + const uint8_t* stream, + unsigned stream_length) { + // Assume everything will fit in the buffer and stream won't be needed. + last_byte_of_buffer_unused_ = false; + unbuffered_start_ = NULL; + bool writing_to_buffer = true; + // Loop until stream is read, writing to buffer as long as buffer has space. + unsigned utf16_length = 0; + while (stream_length != 0) { unsigned cursor = 0; - unsigned chars_read = 0; - while (offset < str.length()) { - uchar c = data[offset]; - if (c <= kMaxOneByteChar) { - // Fast case for ASCII characters - if (!CharacterStream::EncodeAsciiCharacter(c, - buffer, - capacity, - cursor)) - break; - offset += 1; + uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor); + ASSERT(cursor > 0 && cursor <= stream_length); + stream += cursor; + stream_length -= cursor; + bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode; + utf16_length += is_two_characters ? 2 : 1; + // Don't need to write to the buffer, but still need utf16_length. + if (!writing_to_buffer) continue; + // Write out the characters to the buffer. + // Must check for equality with buffer_length as we've already updated it. + if (utf16_length <= buffer_length) { + if (is_two_characters) { + *buffer++ = Utf16::LeadSurrogate(character); + *buffer++ = Utf16::TrailSurrogate(character); } else { - unsigned chars = 0; - c = Utf8::ValueOf(data + offset, str.length() - offset, &chars); - if (!CharacterStream::EncodeNonAsciiCharacter(c, - buffer, - capacity, - cursor)) - break; - offset += chars; + *buffer++ = character; + } + if (utf16_length == buffer_length) { + // Just wrote last character of buffer + writing_to_buffer = false; + unbuffered_start_ = stream; } - chars_read++; + continue; } - *offset_ptr = offset; - *chars_read_ptr = chars_read; - return buffer; + // Have gone over buffer. + // Last char of buffer is unused, set cursor back. 
+ ASSERT(is_two_characters); + writing_to_buffer = false; + last_byte_of_buffer_unused_ = true; + unbuffered_start_ = stream - cursor; } + utf16_length_ = utf16_length; } -unsigned CharacterStream::Length() { - unsigned result = 0; - while (has_more()) { - result++; - GetNext(); - } - Rewind(); - return result; -} -unsigned CharacterStream::Utf16Length() { - unsigned result = 0; - while (has_more()) { - uchar c = GetNext(); - result += c > Utf16::kMaxNonSurrogateCharCode ? 2 : 1; +void Utf8DecoderBase::WriteUtf16Slow(const uint8_t* stream, + uint16_t* data, + unsigned data_length) { + while (data_length != 0) { + unsigned cursor = 0; + uint32_t character = Utf8::ValueOf(stream, Utf8::kMaxEncodedSize, &cursor); + // There's a total lack of bounds checking for stream + // as it was already done in Reset. + stream += cursor; + if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) { + *data++ = Utf16::LeadSurrogate(character); + *data++ = Utf16::TrailSurrogate(character); + ASSERT(data_length > 1); + data_length -= 2; + } else { + *data++ = character; + data_length -= 1; + } } - Rewind(); - return result; } -void CharacterStream::Seek(unsigned position) { - Rewind(); - for (unsigned i = 0; i < position; i++) { - GetNext(); - } -} // Uppercase: point.category == 'Lu' diff --git a/deps/v8/src/unicode.h b/deps/v8/src/unicode.h index 91b16c9f35..42a81824ba 100644 --- a/deps/v8/src/unicode.h +++ b/deps/v8/src/unicode.h @@ -29,7 +29,7 @@ #define V8_UNICODE_H_ #include <sys/types.h> - +#include <globals.h> /** * \file * Definitions and convenience functions for working with unicode. @@ -100,21 +100,6 @@ class UnicodeData { static const uchar kMaxCodePoint; }; -// --- U t f 8 a n d 16 --- - -template <typename Data> -class Buffer { - public: - inline Buffer(Data data, unsigned length) : data_(data), length_(length) { } - inline Buffer() : data_(0), length_(0) { } - Data data() { return data_; } - unsigned length() { return length_; } - private: - Data data_; - unsigned length_; -}; - - class Utf16 { public: static inline bool IsLeadSurrogate(int code) { @@ -140,22 +125,29 @@ class Utf16 { // One UTF-16 surrogate is endoded (illegally) as 3 UTF-8 bytes. // The illegality stems from the surrogate not being part of a pair. static const int kUtf8BytesToCodeASurrogate = 3; - static inline uchar LeadSurrogate(int char_code) { + static inline uint16_t LeadSurrogate(uint32_t char_code) { return 0xd800 + (((char_code - 0x10000) >> 10) & 0x3ff); } - static inline uchar TrailSurrogate(int char_code) { + static inline uint16_t TrailSurrogate(uint32_t char_code) { return 0xdc00 + (char_code & 0x3ff); } }; +class Latin1 { + public: + static const unsigned kMaxChar = 0xff; + // Returns 0 if character does not convert to single latin-1 character + // or if the character doesn't not convert back to latin-1 via inverse + // operation (upper to lower, etc). + static inline uint16_t ConvertNonLatin1ToLatin1(uint16_t); +}; class Utf8 { public: static inline uchar Length(uchar chr, int previous); + static inline unsigned EncodeOneByte(char* out, uint8_t c); static inline unsigned Encode( char* out, uchar c, int previous); - static const byte* ReadBlock(Buffer<const char*> str, byte* buffer, - unsigned capacity, unsigned* chars_read, unsigned* offset); static uchar CalculateValue(const byte* str, unsigned length, unsigned* cursor); @@ -170,92 +162,47 @@ class Utf8 { // that match are coded as a 4 byte UTF-8 sequence. 
static const unsigned kBytesSavedByCombiningSurrogates = 2; static const unsigned kSizeOfUnmatchedSurrogate = 3; - - private: - template <unsigned s> friend class Utf8InputBuffer; - friend class Test; static inline uchar ValueOf(const byte* str, unsigned length, unsigned* cursor); }; -// --- C h a r a c t e r S t r e a m --- - -class CharacterStream { - public: - inline uchar GetNext(); - inline bool has_more() { return remaining_ != 0; } - // Note that default implementation is not efficient. - virtual void Seek(unsigned); - unsigned Length(); - unsigned Utf16Length(); - virtual ~CharacterStream() { } - static inline bool EncodeCharacter(uchar c, byte* buffer, unsigned capacity, - unsigned& offset); - static inline bool EncodeAsciiCharacter(uchar c, byte* buffer, - unsigned capacity, unsigned& offset); - static inline bool EncodeNonAsciiCharacter(uchar c, byte* buffer, - unsigned capacity, unsigned& offset); - static inline uchar DecodeCharacter(const byte* buffer, unsigned* offset); - virtual void Rewind() = 0; - - protected: - virtual void FillBuffer() = 0; - virtual bool BoundsCheck(unsigned offset) = 0; - // The number of characters left in the current buffer - unsigned remaining_; - // The current offset within the buffer - unsigned cursor_; - // The buffer containing the decoded characters. - const byte* buffer_; -}; - -// --- I n p u t B u f f e r --- -/** - * Provides efficient access to encoded characters in strings. It - * does so by reading characters one block at a time, rather than one - * character at a time, which gives string implementations an - * opportunity to optimize the decoding. - */ -template <class Reader, class Input = Reader*, unsigned kSize = 256> -class InputBuffer : public CharacterStream { +class Utf8DecoderBase { public: - virtual void Rewind(); - inline void Reset(Input input); - void Seek(unsigned position); - inline void Reset(unsigned position, Input input); + // Initialization done in subclass. + inline Utf8DecoderBase(); + inline Utf8DecoderBase(uint16_t* buffer, + unsigned buffer_length, + const uint8_t* stream, + unsigned stream_length); + inline unsigned Utf16Length() const { return utf16_length_; } protected: - InputBuffer() { } - explicit InputBuffer(Input input) { Reset(input); } - virtual void FillBuffer(); - virtual bool BoundsCheck(unsigned offset) { - return (buffer_ != util_buffer_) || (offset < kSize); - } - - // A custom offset that can be used by the string implementation to - // mark progress within the encoded string. - unsigned offset_; - // The input string - Input input_; - // To avoid heap allocation, we keep an internal buffer to which - // the encoded string can write its characters. The string - // implementation is free to decide whether it wants to use this - // buffer or not. - byte util_buffer_[kSize]; + // This reads all characters and sets the utf16_length_. + // The first buffer_length utf16 chars are cached in the buffer. 
+ void Reset(uint16_t* buffer, + unsigned buffer_length, + const uint8_t* stream, + unsigned stream_length); + static void WriteUtf16Slow(const uint8_t* stream, + uint16_t* data, + unsigned length); + const uint8_t* unbuffered_start_; + unsigned utf16_length_; + bool last_byte_of_buffer_unused_; + private: + DISALLOW_COPY_AND_ASSIGN(Utf8DecoderBase); }; -// --- U t f 8 I n p u t B u f f e r --- - -template <unsigned s = 256> -class Utf8InputBuffer : public InputBuffer<Utf8, Buffer<const char*>, s> { +template <unsigned kBufferSize> +class Utf8Decoder : public Utf8DecoderBase { public: - inline Utf8InputBuffer() { } - inline Utf8InputBuffer(const char* data, unsigned length); - inline void Reset(const char* data, unsigned length) { - InputBuffer<Utf8, Buffer<const char*>, s>::Reset( - Buffer<const char*>(data, length)); - } + inline Utf8Decoder() {} + inline Utf8Decoder(const char* stream, unsigned length); + inline void Reset(const char* stream, unsigned length); + inline unsigned WriteUtf16(uint16_t* data, unsigned length) const; + private: + uint16_t buffer_[kBufferSize]; }; diff --git a/deps/v8/src/uri.h b/deps/v8/src/uri.h new file mode 100644 index 0000000000..c7a6301f12 --- /dev/null +++ b/deps/v8/src/uri.h @@ -0,0 +1,309 @@ +// Copyright 2013 the V8 project authors. All rights reserved. +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following +// disclaimer in the documentation and/or other materials provided +// with the distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
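The new uri.h below defines URIEscape and URIUnescape as header templates. As a standalone illustration of the two-digit hex decoding that URIUnescape::TwoDigitHex performs via its kHexValue table, here is an editor's sketch (hypothetical helper names, not the patch's code):

    #include <cassert>
    #include <stdint.h>

    // Stand-in for the kHexValue lookup: -1 unless c is a hex digit.
    static int HexValue(uint16_t c) {
      if (c >= '0' && c <= '9') return c - '0';
      if (c >= 'A' && c <= 'F') return c - 'A' + 10;
      if (c >= 'a' && c <= 'f') return c - 'a' + 10;
      return -1;
    }

    // Stand-in for URIUnescape::TwoDigitHex(): decodes a "%XY" byte or fails.
    static int TwoDigitHex(uint16_t c1, uint16_t c2) {
      int hi = HexValue(c1);
      int lo = HexValue(c2);
      return (hi < 0 || lo < 0) ? -1 : (hi << 4) + lo;
    }

    int main() {
      assert(TwoDigitHex('4', '1') == 0x41);  // "%41" unescapes to 'A'
      assert(TwoDigitHex('g', '0') == -1);    // non-hex digit is rejected
      return 0;
    }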
+ +#ifndef V8_URI_H_ +#define V8_URI_H_ + +#include "v8.h" + +#include "string-search.h" +#include "v8utils.h" +#include "v8conversions.h" + +namespace v8 { +namespace internal { + + +template <typename Char> +static INLINE(Vector<const Char> GetCharVector(Handle<String> string)); + + +template <> +Vector<const uint8_t> GetCharVector(Handle<String> string) { + String::FlatContent flat = string->GetFlatContent(); + ASSERT(flat.IsAscii()); + return flat.ToOneByteVector(); +} + + +template <> +Vector<const uc16> GetCharVector(Handle<String> string) { + String::FlatContent flat = string->GetFlatContent(); + ASSERT(flat.IsTwoByte()); + return flat.ToUC16Vector(); +} + + +class URIUnescape : public AllStatic { + public: + template<typename Char> + static Handle<String> Unescape(Isolate* isolate, Handle<String> source); + + private: + static const signed char kHexValue['g']; + + template<typename Char> + static Handle<String> UnescapeSlow( + Isolate* isolate, Handle<String> string, int start_index); + + static INLINE(int TwoDigitHex(uint16_t character1, uint16_t character2)); + + template <typename Char> + static INLINE(int UnescapeChar(Vector<const Char> vector, + int i, + int length, + int* step)); +}; + + +const signed char URIUnescape::kHexValue[] = { + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -0, 1, 2, 3, 4, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, + -1, 10, 11, 12, 13, 14, 15 }; + + +template<typename Char> +Handle<String> URIUnescape::Unescape(Isolate* isolate, Handle<String> source) { + int index; + { AssertNoAllocation no_allocation; + StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%")); + index = search.Search(GetCharVector<Char>(source), 0); + if (index < 0) return source; + } + return UnescapeSlow<Char>(isolate, source, index); +} + + +template <typename Char> +Handle<String> URIUnescape::UnescapeSlow( + Isolate* isolate, Handle<String> string, int start_index) { + bool one_byte = true; + int length = string->length(); + + int unescaped_length = 0; + { AssertNoAllocation no_allocation; + Vector<const Char> vector = GetCharVector<Char>(string); + for (int i = start_index; i < length; unescaped_length++) { + int step; + if (UnescapeChar(vector, i, length, &step) > + String::kMaxOneByteCharCode) { + one_byte = false; + } + i += step; + } + } + + ASSERT(start_index < length); + Handle<String> first_part = + isolate->factory()->NewProperSubString(string, 0, start_index); + + int dest_position = 0; + Handle<String> second_part; + if (one_byte) { + Handle<SeqOneByteString> dest = + isolate->factory()->NewRawOneByteString(unescaped_length); + AssertNoAllocation no_allocation; + Vector<const Char> vector = GetCharVector<Char>(string); + for (int i = start_index; i < length; dest_position++) { + int step; + dest->SeqOneByteStringSet(dest_position, + UnescapeChar(vector, i, length, &step)); + i += step; + } + second_part = dest; + } else { + Handle<SeqTwoByteString> dest = + isolate->factory()->NewRawTwoByteString(unescaped_length); + AssertNoAllocation no_allocation; + Vector<const Char> vector = GetCharVector<Char>(string); + for (int i = start_index; i < length; dest_position++) { + int step; + dest->SeqTwoByteStringSet(dest_position, + UnescapeChar(vector, i, length, &step)); + i += step; + } 
+ second_part = dest; + } + return isolate->factory()->NewConsString(first_part, second_part); +} + + +int URIUnescape::TwoDigitHex(uint16_t character1, uint16_t character2) { + if (character1 > 'f') return -1; + int hi = kHexValue[character1]; + if (hi == -1) return -1; + if (character2 > 'f') return -1; + int lo = kHexValue[character2]; + if (lo == -1) return -1; + return (hi << 4) + lo; +} + + +template <typename Char> +int URIUnescape::UnescapeChar(Vector<const Char> vector, + int i, + int length, + int* step) { + uint16_t character = vector[i]; + int32_t hi = 0; + int32_t lo = 0; + if (character == '%' && + i <= length - 6 && + vector[i + 1] == 'u' && + (hi = TwoDigitHex(vector[i + 2], + vector[i + 3])) != -1 && + (lo = TwoDigitHex(vector[i + 4], + vector[i + 5])) != -1) { + *step = 6; + return (hi << 8) + lo; + } else if (character == '%' && + i <= length - 3 && + (lo = TwoDigitHex(vector[i + 1], + vector[i + 2])) != -1) { + *step = 3; + return lo; + } else { + *step = 1; + return character; + } +} + + +class URIEscape : public AllStatic { + public: + template<typename Char> + static Handle<String> Escape(Isolate* isolate, Handle<String> string); + + private: + static const char kHexChars[17]; + static const char kNotEscaped[256]; + + static bool IsNotEscaped(uint16_t c) { return kNotEscaped[c] != 0; } +}; + + +const char URIEscape::kHexChars[] = "0123456789ABCDEF"; + + +// kNotEscaped is generated by the following: +// +// #!/bin/perl +// for (my $i = 0; $i < 256; $i++) { +// print "\n" if $i % 16 == 0; +// my $c = chr($i); +// my $escaped = 1; +// $escaped = 0 if $c =~ m#[A-Za-z0-9@*_+./-]#; +// print $escaped ? "0, " : "1, "; +// } + +const char URIEscape::kNotEscaped[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, + 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + + +template<typename Char> +Handle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) { + ASSERT(string->IsFlat()); + int escaped_length = 0; + int length = string->length(); + + { AssertNoAllocation no_allocation; + Vector<const Char> vector = GetCharVector<Char>(string); + for (int i = 0; i < length; i++) { + uint16_t c = vector[i]; + if (c >= 256) { + escaped_length += 6; + } else if (IsNotEscaped(c)) { + escaped_length++; + } else { + escaped_length += 3; + } + + // We don't allow strings that are longer than a maximal length. + ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow. + if (escaped_length > String::kMaxLength) { + isolate->context()->mark_out_of_memory(); + return Handle<String>::null(); + } + } + } + + // No length change implies no change. Return original string if no change. 
+ if (escaped_length == length) return string; + + Handle<SeqOneByteString> dest = + isolate->factory()->NewRawOneByteString(escaped_length); + int dest_position = 0; + + { AssertNoAllocation no_allocation; + Vector<const Char> vector = GetCharVector<Char>(string); + for (int i = 0; i < length; i++) { + uint16_t c = vector[i]; + if (c >= 256) { + dest->SeqOneByteStringSet(dest_position, '%'); + dest->SeqOneByteStringSet(dest_position+1, 'u'); + dest->SeqOneByteStringSet(dest_position+2, kHexChars[c >> 12]); + dest->SeqOneByteStringSet(dest_position+3, kHexChars[(c >> 8) & 0xf]); + dest->SeqOneByteStringSet(dest_position+4, kHexChars[(c >> 4) & 0xf]); + dest->SeqOneByteStringSet(dest_position+5, kHexChars[c & 0xf]); + dest_position += 6; + } else if (IsNotEscaped(c)) { + dest->SeqOneByteStringSet(dest_position, c); + dest_position++; + } else { + dest->SeqOneByteStringSet(dest_position, '%'); + dest->SeqOneByteStringSet(dest_position+1, kHexChars[c >> 4]); + dest->SeqOneByteStringSet(dest_position+2, kHexChars[c & 0xf]); + dest_position += 3; + } + } + } + + return dest; +} + +} } // namespace v8::internal + +#endif // V8_URI_H_ diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js index b195f3da79..1de22f8aeb 100644 --- a/deps/v8/src/uri.js +++ b/deps/v8/src/uri.js @@ -165,11 +165,11 @@ function URIDecodeOctets(octets, result, index) { throw new $URIError("URI malformed"); } if (value < 0x10000) { - result[index++] = value; + %_TwoByteSeqStringSetChar(result, index++, value); return index; } else { - result[index++] = (value >> 10) + 0xd7c0; - result[index++] = (value & 0x3ff) + 0xdc00; + %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0); + %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00); return index; } } @@ -178,43 +178,72 @@ function URIDecodeOctets(octets, result, index) { // ECMA-262, section 15.1.3 function Encode(uri, unescape) { var uriLength = uri.length; - // We are going to pass result to %StringFromCharCodeArray - // which does not expect any getters/setters installed - // on the incoming array. - var result = new InternalArray(uriLength); + var array = new InternalArray(uriLength); var index = 0; for (var k = 0; k < uriLength; k++) { var cc1 = uri.charCodeAt(k); if (unescape(cc1)) { - result[index++] = cc1; + array[index++] = cc1; } else { if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed"); if (cc1 < 0xD800 || cc1 > 0xDBFF) { - index = URIEncodeSingle(cc1, result, index); + index = URIEncodeSingle(cc1, array, index); } else { k++; if (k == uriLength) throw new $URIError("URI malformed"); var cc2 = uri.charCodeAt(k); if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed"); - index = URIEncodePair(cc1, cc2, result, index); + index = URIEncodePair(cc1, cc2, array, index); } } } - return %StringFromCharCodeArray(result); + + var result = %NewString(array.length, NEW_ONE_BYTE_STRING); + for (var i = 0; i < array.length; i++) { + %_OneByteSeqStringSetChar(result, i, array[i]); + } + return result; } // ECMA-262, section 15.1.3 function Decode(uri, reserved) { var uriLength = uri.length; - // We are going to pass result to %StringFromCharCodeArray - // which does not expect any getters/setters installed - // on the incoming array. - var result = new InternalArray(uriLength); + var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING); var index = 0; - for (var k = 0; k < uriLength; k++) { - var ch = uri.charAt(k); - if (ch == '%') { + var k = 0; + + // Optimistically assume ascii string. 
+ for ( ; k < uriLength; k++) { + var code = uri.charCodeAt(k); + if (code == 37) { // '%' + if (k + 2 >= uriLength) throw new $URIError("URI malformed"); + var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2)); + if (cc >> 7) break; // Assumption wrong, two byte string. + if (reserved(cc)) { + %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'. + %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1)); + %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2)); + } else { + %_OneByteSeqStringSetChar(one_byte, index++, cc); + } + k += 2; + } else { + if (code > 0x7f) break; // Assumption wrong, two byte string. + %_OneByteSeqStringSetChar(one_byte, index++, code); + } + } + + one_byte = %TruncateString(one_byte, index); + if (k == uriLength) return one_byte; + + // Write into two byte string. + var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING); + index = 0; + + for ( ; k < uriLength; k++) { + var code = uri.charCodeAt(k); + if (code == 37) { // '%' if (k + 2 >= uriLength) throw new $URIError("URI malformed"); var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k)); if (cc >> 7) { @@ -229,22 +258,21 @@ function Decode(uri, reserved) { octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k)); } - index = URIDecodeOctets(octets, result, index); + index = URIDecodeOctets(octets, two_byte, index); + } else if (reserved(cc)) { + %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'. + %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1)); + %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k)); } else { - if (reserved(cc)) { - result[index++] = 37; // Char code of '%'. - result[index++] = uri.charCodeAt(k - 1); - result[index++] = uri.charCodeAt(k); - } else { - result[index++] = cc; - } + %_TwoByteSeqStringSetChar(two_byte, index++, cc); } } else { - result[index++] = ch.charCodeAt(0); + %_TwoByteSeqStringSetChar(two_byte, index++, code); } } - result.length = index; - return %StringFromCharCodeArray(result); + + two_byte = %TruncateString(two_byte, index); + return one_byte + two_byte; } diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h index e03f96f6e5..c391b9c432 100644 --- a/deps/v8/src/utils.h +++ b/deps/v8/src/utils.h @@ -249,6 +249,7 @@ class BitField { // using a shift count of 32. static const uint32_t kMask = ((1U << shift) << size) - (1U << shift); static const uint32_t kShift = shift; + static const uint32_t kSize = size; // Value for the field with all bits set. 
static const T kMax = static_cast<T>((1U << size) - 1); @@ -304,7 +305,7 @@ inline uint32_t ComputeLongHash(uint64_t key) { hash = hash ^ (hash >> 11); hash = hash + (hash << 6); hash = hash ^ (hash >> 22); - return (uint32_t) hash; + return static_cast<uint32_t>(hash); } @@ -522,11 +523,22 @@ class ScopedVector : public Vector<T> { DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedVector); }; +#define STATIC_ASCII_VECTOR(x) \ + v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \ + ARRAY_SIZE(x)-1) inline Vector<const char> CStrVector(const char* data) { return Vector<const char>(data, StrLength(data)); } +inline Vector<const uint8_t> OneByteVector(const char* data, int length) { + return Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), length); +} + +inline Vector<const uint8_t> OneByteVector(const char* data) { + return OneByteVector(data, StrLength(data)); +} + inline Vector<char> MutableCStrVector(char* data) { return Vector<char>(data, StrLength(data)); } @@ -765,7 +777,9 @@ class SequenceCollector : public Collector<T, growth_factor, max_growth> { // Compare ASCII/16bit chars to ASCII/16bit chars. template <typename lchar, typename rchar> -inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) { +inline int CompareCharsUnsigned(const lchar* lhs, + const rchar* rhs, + int chars) { const lchar* limit = lhs + chars; #ifdef V8_HOST_CAN_READ_UNALIGNED if (sizeof(*lhs) == sizeof(*rhs)) { @@ -790,6 +804,33 @@ inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) { return 0; } +template<typename lchar, typename rchar> +inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) { + ASSERT(sizeof(lchar) <= 2); + ASSERT(sizeof(rchar) <= 2); + if (sizeof(lchar) == 1) { + if (sizeof(rchar) == 1) { + return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs), + reinterpret_cast<const uint8_t*>(rhs), + chars); + } else { + return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs), + reinterpret_cast<const uint16_t*>(rhs), + chars); + } + } else { + if (sizeof(rchar) == 1) { + return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs), + reinterpret_cast<const uint8_t*>(rhs), + chars); + } else { + return CompareCharsUnsigned(reinterpret_cast<const uint16_t*>(lhs), + reinterpret_cast<const uint16_t*>(rhs), + chars); + } + } +} + // Calculate 10^exponent. inline int TenToThe(int exponent) { @@ -1015,6 +1056,7 @@ class BailoutId { static BailoutId FunctionEntry() { return BailoutId(kFunctionEntryId); } static BailoutId Declarations() { return BailoutId(kDeclarationsId); } static BailoutId FirstUsable() { return BailoutId(kFirstUsableId); } + static BailoutId StubEntry() { return BailoutId(kStubEntryId); } bool IsNone() const { return id_ == kNoneId; } bool operator==(const BailoutId& other) const { return id_ == other.id_; } @@ -1030,9 +1072,12 @@ class BailoutId { // code (function declarations). static const int kDeclarationsId = 3; - // Ever FunctionState starts with this id. + // Every FunctionState starts with this id. static const int kFirstUsableId = 4; + // Every compiled stub starts with this id. 
+ static const int kStubEntryId = 5; + int id_; }; diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc index 3f83dffcae..4107dd3e48 100644 --- a/deps/v8/src/v8-counters.cc +++ b/deps/v8/src/v8-counters.cc @@ -86,17 +86,6 @@ Counters::Counters() { size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name; FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) #undef SC - - StatsCounter state_counters[] = { -#define COUNTER_NAME(name) \ - { "c:V8.State" #name, NULL, false }, - STATE_TAG_LIST(COUNTER_NAME) -#undef COUNTER_NAME - }; - - for (int i = 0; i < kSlidingStateWindowCounterCount; ++i) { - state_counters_[i] = state_counters[i]; - } } void Counters::ResetHistograms() { diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h index fad3454812..374ebbcd89 100644 --- a/deps/v8/src/v8-counters.h +++ b/deps/v8/src/v8-counters.h @@ -50,7 +50,6 @@ namespace internal { HT(compile_eval, V8.CompileEval) \ HT(compile_lazy, V8.CompileLazy) - #define HISTOGRAM_PERCENTAGE_LIST(HP) \ HP(external_fragmentation_total, \ V8.MemoryExternalFragmentationTotal) \ @@ -99,7 +98,7 @@ namespace internal { SC(alive_after_last_gc, V8.AliveAfterLastGC) \ SC(objs_since_last_young, V8.ObjsSinceLastYoung) \ SC(objs_since_last_full, V8.ObjsSinceLastFull) \ - SC(symbol_table_capacity, V8.SymbolTableCapacity) \ + SC(string_table_capacity, V8.StringTableCapacity) \ SC(number_of_symbols, V8.NumberOfSymbols) \ SC(script_wrappers, V8.ScriptWrappers) \ SC(call_initialize_stubs, V8.CallInitializeStubs) \ @@ -374,16 +373,9 @@ class Counters { kSizeOfFIXED_ARRAY__##name, FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID) #undef COUNTER_ID -#define COUNTER_ID(name) k_##name, - STATE_TAG_LIST(COUNTER_ID) -#undef COUNTER_ID stats_counter_count }; - StatsCounter* state_counters(StateTag state) { - return &state_counters_[state]; - } - void ResetHistograms(); private: @@ -426,15 +418,6 @@ class Counters { FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC) #undef SC - enum { -#define COUNTER_ID(name) __##name, - STATE_TAG_LIST(COUNTER_ID) -#undef COUNTER_ID - kSlidingStateWindowCounterCount - }; - - // Sliding state window counters. - StatsCounter state_counters_[kSlidingStateWindowCounterCount]; friend class Isolate; DISALLOW_IMPLICIT_CONSTRUCTORS(Counters); diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc index 2407037b32..2e8cd50e61 100644 --- a/deps/v8/src/v8.cc +++ b/deps/v8/src/v8.cc @@ -38,6 +38,7 @@ #include "hydrogen.h" #include "lithium-allocator.h" #include "log.h" +#include "objects.h" #include "once.h" #include "platform.h" #include "runtime-profiler.h" @@ -62,8 +63,6 @@ static EntropySource entropy_source; bool V8::Initialize(Deserializer* des) { - FlagList::EnforceFlagImplications(); - InitializeOncePerProcess(); // The current thread may not yet had entered an isolate to run. 
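Returning to the one-line utils.h change above (the new BitField kSize constant): a self-contained sketch of the encode/decode pattern that kShift, kMask and kSize describe, with illustrative field parameters rather than any real V8 field:

    #include <assert.h>
    #include <stdint.h>

    // Illustrative instantiation of the BitField pattern from utils.h:
    // a 3-bit field stored at bit offset 4 of a uint32_t.
    template <class T, int shift, int size>
    struct BitField {
      static const uint32_t kMask  = ((1U << shift) << size) - (1U << shift);
      static const uint32_t kShift = shift;
      static const uint32_t kSize  = size;  // the constant added by this patch
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << shift; }
      static T decode(uint32_t bits) { return static_cast<T>((bits & kMask) >> shift); }
    };

    typedef BitField<int, 4, 3> ExampleField;

    int main() {
      uint32_t bits = ExampleField::encode(5);           // 5 << 4 == 0x50
      assert(ExampleField::decode(bits | 0xF00F) == 5);  // unrelated bits are masked off
      assert(ExampleField::kSize == 3);
      return 0;
    }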
@@ -114,7 +113,9 @@ void V8::TearDown() { ElementsAccessor::TearDown(); LOperand::TearDownCaches(); + ExternalReference::TearDownMathExpData(); RegisteredExtension::UnregisterAll(); + Isolate::GlobalTearDown(); is_running_ = false; has_been_disposed_ = true; @@ -216,14 +217,22 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) { void V8::FireCallCompletedCallback(Isolate* isolate) { - if (call_completed_callbacks_ == NULL) return; + bool has_call_completed_callbacks = call_completed_callbacks_ != NULL; + bool observer_delivery_pending = + FLAG_harmony_observation && isolate->observer_delivery_pending(); + if (!has_call_completed_callbacks && !observer_delivery_pending) return; HandleScopeImplementer* handle_scope_implementer = isolate->handle_scope_implementer(); if (!handle_scope_implementer->CallDepthIsZero()) return; // Fire callbacks. Increase call depth to prevent recursive callbacks. handle_scope_implementer->IncrementCallDepth(); - for (int i = 0; i < call_completed_callbacks_->length(); i++) { - call_completed_callbacks_->at(i)(); + if (observer_delivery_pending) { + JSObject::DeliverChangeRecords(isolate); + } + if (has_call_completed_callbacks) { + for (int i = 0; i < call_completed_callbacks_->length(); i++) { + call_completed_callbacks_->at(i)(); + } } handle_scope_implementer->DecrementCallDepth(); } @@ -253,31 +262,21 @@ Object* V8::FillHeapNumberWithRandom(Object* heap_number, } void V8::InitializeOncePerProcessImpl() { - OS::SetUp(); - - use_crankshaft_ = FLAG_crankshaft; - - if (Serializer::enabled()) { - use_crankshaft_ = false; - } - - CPU::SetUp(); - if (!CPU::SupportsCrankshaft()) { - use_crankshaft_ = false; - } - - OS::PostSetUp(); - - RuntimeProfiler::GlobalSetUp(); - - ElementsAccessor::InitializeOncePerProcess(); - + FlagList::EnforceFlagImplications(); if (FLAG_stress_compaction) { FLAG_force_marking_deque_overflows = true; FLAG_gc_global = true; FLAG_max_new_space_size = (1 << (kPageSizeBits - 10)) * 2; } - + if (FLAG_trace_hydrogen) FLAG_parallel_recompilation = false; + OS::SetUp(); + CPU::SetUp(); + use_crankshaft_ = FLAG_crankshaft + && !Serializer::enabled() + && CPU::SupportsCrankshaft(); + OS::PostSetUp(); + RuntimeProfiler::GlobalSetUp(); + ElementsAccessor::InitializeOncePerProcess(); LOperand::SetUpCaches(); SetUpJSCallerSavedCodeData(); SamplerRegistry::SetUp(); diff --git a/deps/v8/src/v8.h b/deps/v8/src/v8.h index 67716d8107..b041fc2ba8 100644 --- a/deps/v8/src/v8.h +++ b/deps/v8/src/v8.h @@ -65,6 +65,7 @@ #include "log-inl.h" #include "cpu-profiler-inl.h" #include "handles-inl.h" +#include "heap-snapshot-generator-inl.h" #include "zone-inl.h" namespace v8 { diff --git a/deps/v8/src/v8conversions.cc b/deps/v8/src/v8conversions.cc index bf175e50b5..900b62d10b 100644 --- a/deps/v8/src/v8conversions.cc +++ b/deps/v8/src/v8conversions.cc @@ -41,40 +41,40 @@ namespace internal { namespace { -// C++-style iterator adaptor for StringInputBuffer +// C++-style iterator adaptor for StringCharacterStream // (unlike C++ iterators the end-marker has different type). 
-class StringInputBufferIterator { +class StringCharacterStreamIterator { public: class EndMarker {}; - explicit StringInputBufferIterator(StringInputBuffer* buffer); + explicit StringCharacterStreamIterator(StringCharacterStream* stream); - int operator*() const; + uint16_t operator*() const; void operator++(); bool operator==(EndMarker const&) const { return end_; } bool operator!=(EndMarker const& m) const { return !end_; } private: - StringInputBuffer* const buffer_; - int current_; + StringCharacterStream* const stream_; + uint16_t current_; bool end_; }; -StringInputBufferIterator::StringInputBufferIterator( - StringInputBuffer* buffer) : buffer_(buffer) { +StringCharacterStreamIterator::StringCharacterStreamIterator( + StringCharacterStream* stream) : stream_(stream) { ++(*this); } -int StringInputBufferIterator::operator*() const { +uint16_t StringCharacterStreamIterator::operator*() const { return current_; } -void StringInputBufferIterator::operator++() { - end_ = !buffer_->has_more(); +void StringCharacterStreamIterator::operator++() { + end_ = !stream_->HasMore(); if (!end_) { - current_ = buffer_->GetNext(); + current_ = stream_->GetNext(); } } } // End anonymous namespace. @@ -83,9 +83,10 @@ void StringInputBufferIterator::operator++() { double StringToDouble(UnicodeCache* unicode_cache, String* str, int flags, double empty_string_val) { StringShape shape(str); + // TODO(dcarney): Use a Visitor here. if (shape.IsSequentialAscii()) { - const char* begin = SeqAsciiString::cast(str)->GetChars(); - const char* end = begin + str->length(); + const uint8_t* begin = SeqOneByteString::cast(str)->GetChars(); + const uint8_t* end = begin + str->length(); return InternalStringToDouble(unicode_cache, begin, end, flags, empty_string_val); } else if (shape.IsSequentialTwoByte()) { @@ -94,10 +95,11 @@ double StringToDouble(UnicodeCache* unicode_cache, return InternalStringToDouble(unicode_cache, begin, end, flags, empty_string_val); } else { - StringInputBuffer buffer(str); + ConsStringIteratorOp op; + StringCharacterStream stream(str, &op); return InternalStringToDouble(unicode_cache, - StringInputBufferIterator(&buffer), - StringInputBufferIterator::EndMarker(), + StringCharacterStreamIterator(&stream), + StringCharacterStreamIterator::EndMarker(), flags, empty_string_val); } @@ -108,19 +110,21 @@ double StringToInt(UnicodeCache* unicode_cache, String* str, int radix) { StringShape shape(str); + // TODO(dcarney): Use a Visitor here. 
if (shape.IsSequentialAscii()) { - const char* begin = SeqAsciiString::cast(str)->GetChars(); - const char* end = begin + str->length(); + const uint8_t* begin = SeqOneByteString::cast(str)->GetChars(); + const uint8_t* end = begin + str->length(); return InternalStringToInt(unicode_cache, begin, end, radix); } else if (shape.IsSequentialTwoByte()) { const uc16* begin = SeqTwoByteString::cast(str)->GetChars(); const uc16* end = begin + str->length(); return InternalStringToInt(unicode_cache, begin, end, radix); } else { - StringInputBuffer buffer(str); + ConsStringIteratorOp op; + StringCharacterStream stream(str, &op); return InternalStringToInt(unicode_cache, - StringInputBufferIterator(&buffer), - StringInputBufferIterator::EndMarker(), + StringCharacterStreamIterator(&stream), + StringCharacterStreamIterator::EndMarker(), radix); } } diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h index 95390adcf6..8874cfb25a 100644 --- a/deps/v8/src/v8globals.h +++ b/deps/v8/src/v8globals.h @@ -71,6 +71,8 @@ const Address kZapValue = reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeef)); const Address kHandleZapValue = reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddeaf)); +const Address kGlobalHandleZapValue = + reinterpret_cast<Address>(V8_UINT64_C(0x1baffed00baffedf)); const Address kFromSpaceZapValue = reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdaf)); const uint64_t kDebugZapValue = V8_UINT64_C(0xbadbaddbbadbaddb); @@ -79,6 +81,7 @@ const uint64_t kFreeListZapValue = 0xfeed1eaffeed1eaf; #else const Address kZapValue = reinterpret_cast<Address>(0xdeadbeef); const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddeaf); +const Address kGlobalHandleZapValue = reinterpret_cast<Address>(0xbaffedf); const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdaf); const uint32_t kSlotsZapValue = 0xbeefdeef; const uint32_t kDebugZapValue = 0xbadbaddb; @@ -125,12 +128,13 @@ class FunctionTemplateInfo; class MemoryChunk; class SeededNumberDictionary; class UnseededNumberDictionary; -class StringDictionary; +class NameDictionary; template <typename T> class Handle; class Heap; class HeapObject; class IC; class InterceptorInfo; +class JSReceiver; class JSArray; class JSFunction; class JSObject; @@ -152,6 +156,7 @@ class Smi; template <typename Config, class Allocator = FreeStoreAllocationPolicy> class SplayTree; class String; +class Name; class Struct; class Variable; class RelocInfo; @@ -260,16 +265,20 @@ enum InlineCacheState { // Like MONOMORPHIC but check failed due to prototype. MONOMORPHIC_PROTOTYPE_FAILURE, // Multiple receiver types have been seen. + POLYMORPHIC, + // Many receiver types have been seen. MEGAMORPHIC, - // Special states for debug break or step in prepare stubs. - DEBUG_BREAK, - DEBUG_PREPARE_STEP_IN + // A generic handler is installed and no extra typefeedback is recorded. + GENERIC, + // Special state for debug break or step in prepare stubs. + DEBUG_STUB }; enum CheckType { RECEIVER_MAP_CHECK, STRING_CHECK, + SYMBOL_CHECK, NUMBER_CHECK, BOOLEAN_CHECK }; @@ -287,7 +296,7 @@ enum CallFunctionFlags { enum InlineCacheHolderFlag { OWN_MAP, // For fast properties objects. - PROTOTYPE_MAP // For slow properties objects (except GlobalObjects). + DELEGATE_MAP // For slow properties objects (except GlobalObjects). }; @@ -351,20 +360,13 @@ struct AccessorDescriptor { // VMState object leaves a state by popping the current state from the // stack. 
-#define STATE_TAG_LIST(V) \ - V(JS) \ - V(GC) \ - V(COMPILER) \ - V(PARALLEL_COMPILER_PROLOGUE) \ - V(OTHER) \ - V(EXTERNAL) - enum StateTag { -#define DEF_STATE_TAG(name) name, - STATE_TAG_LIST(DEF_STATE_TAG) -#undef DEF_STATE_TAG - // Pseudo-types. - state_tag_count + JS, + GC, + COMPILER, + PARALLEL_COMPILER, + OTHER, + EXTERNAL }; @@ -435,6 +437,7 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86 SUDIV = 4, // ARM UNALIGNED_ACCESSES = 5, // ARM MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM + VFP32DREGS = 7, // ARM SAHF = 0, // x86 FPU = 1}; // MIPS @@ -483,11 +486,19 @@ enum VariableMode { CONST, // declared via 'const' declarations - LET, // declared via 'let' declarations + LET, // declared via 'let' declarations (first lexical) CONST_HARMONY, // declared via 'const' declarations in harmony mode + MODULE, // declared via 'module' declaration (last lexical) + // Variables introduced by the compiler: + INTERNAL, // like VAR, but not user-visible (may or may not + // be in a context) + + TEMPORARY, // temporary variables (not user-visible), never + // in a context + DYNAMIC, // always require dynamic lookup (we don't know // the declaration) @@ -495,16 +506,10 @@ enum VariableMode { // variable is global unless it has been shadowed // by an eval-introduced variable - DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the + DYNAMIC_LOCAL // requires dynamic lookup, but we know that the // variable is local and where it is unless it // has been shadowed by an eval-introduced // variable - - INTERNAL, // like VAR, but not user-visible (may or may not - // be in a context) - - TEMPORARY // temporary variables (not user-visible), never - // in a context }; @@ -514,17 +519,17 @@ inline bool IsDynamicVariableMode(VariableMode mode) { inline bool IsDeclaredVariableMode(VariableMode mode) { - return mode >= VAR && mode <= CONST_HARMONY; + return mode >= VAR && mode <= MODULE; } inline bool IsLexicalVariableMode(VariableMode mode) { - return mode >= LET && mode <= CONST_HARMONY; + return mode >= LET && mode <= MODULE; } inline bool IsImmutableVariableMode(VariableMode mode) { - return mode == CONST || mode == CONST_HARMONY; + return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE); } diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js index e2e642941e..24ad22d96d 100644 --- a/deps/v8/src/v8natives.js +++ b/deps/v8/src/v8natives.js @@ -60,7 +60,17 @@ function InstallFunctions(object, attributes, functions) { %ToFastProperties(object); } -// Prevents changes to the prototype of a built-infunction. + +// Helper function to install a getter only property. +function InstallGetter(object, name, getter) { + %FunctionSetName(getter, name); + %FunctionRemovePrototype(getter); + %DefineOrRedefineAccessorProperty(object, name, getter, null, DONT_ENUM); + %SetNativeFlag(getter); +} + + +// Prevents changes to the prototype of a built-in function. // The "prototype" property of the function object is made non-configurable, // and the prototype object is made non-extensible. The latter prevents // changing the __proto__ property. 
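The reordering of VariableMode in v8globals.h above is what keeps the range-based predicates down to one comparison pair each: VAR through MODULE count as declared, LET through MODULE as lexical, and the compiler-introduced modes sit after MODULE. A compilable sketch of that invariant using an illustrative subset of the enum (the DYNAMIC* modes are omitted):

    #include <assert.h>

    // Same relative order as the patched enum.
    enum VariableMode { VAR, CONST, LET, CONST_HARMONY, MODULE, INTERNAL, TEMPORARY };

    static bool IsDeclaredVariableMode(VariableMode mode) { return mode >= VAR && mode <= MODULE; }
    static bool IsLexicalVariableMode(VariableMode mode)  { return mode >= LET && mode <= MODULE; }

    int main() {
      assert(IsDeclaredVariableMode(CONST_HARMONY) && IsDeclaredVariableMode(MODULE));
      assert(!IsDeclaredVariableMode(TEMPORARY));  // compiler-introduced, not user-declared
      assert(IsLexicalVariableMode(LET) && !IsLexicalVariableMode(VAR));
      return 0;
    }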
@@ -84,7 +94,7 @@ function SetUpLockedPrototype(constructor, fields, methods) { %SetProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY); %SetNativeFlag(f); } - prototype.__proto__ = null; + %SetPrototype(prototype, null); %ToFastProperties(prototype); } @@ -164,7 +174,7 @@ function GlobalEval(x) { 'be the global object from which eval originated'); } - var f = %CompileString(x); + var f = %CompileString(x, false); if (!IS_FUNCTION(f)) return f; return %_CallFunction(global_receiver, f); @@ -221,10 +231,9 @@ $Object.prototype.constructor = $Object; // ECMA-262 - 15.2.4.2 function ObjectToString() { - if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { - return '[object Undefined]'; - } - if (IS_NULL(this)) return '[object Null]'; + if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]"; + if (IS_NULL(this)) return "[object Null]"; + if (IS_SYMBOL(this)) return "[object Symbol]"; return "[object " + %_ClassOf(ToObject(this)) + "]"; } @@ -249,9 +258,9 @@ function ObjectValueOf() { function ObjectHasOwnProperty(V) { if (%IsJSProxy(this)) { var handler = %GetHandler(this); - return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, TO_STRING_INLINE(V)); + return CallTrap1(handler, "hasOwn", DerivedHasOwnTrap, ToName(V)); } - return %HasLocalProperty(TO_OBJECT_INLINE(this), TO_STRING_INLINE(V)); + return %HasLocalProperty(TO_OBJECT_INLINE(this), ToName(V)); } @@ -268,7 +277,7 @@ function ObjectIsPrototypeOf(V) { // ECMA-262 - 15.2.4.6 function ObjectPropertyIsEnumerable(V) { - var P = ToString(V); + var P = ToName(V); if (%IsJSProxy(this)) { var desc = GetOwnProperty(this, P); return IS_UNDEFINED(desc) ? false : desc.isEnumerable(); @@ -291,7 +300,7 @@ function ObjectDefineGetter(name, fun) { desc.setGet(fun); desc.setEnumerable(true); desc.setConfigurable(true); - DefineOwnProperty(ToObject(receiver), ToString(name), desc, false); + DefineOwnProperty(ToObject(receiver), ToName(name), desc, false); } @@ -300,7 +309,7 @@ function ObjectLookupGetter(name) { if (receiver == null && !IS_UNDETECTABLE(receiver)) { receiver = %GlobalReceiver(global); } - return %LookupAccessor(ToObject(receiver), ToString(name), GETTER); + return %LookupAccessor(ToObject(receiver), ToName(name), GETTER); } @@ -317,7 +326,7 @@ function ObjectDefineSetter(name, fun) { desc.setSet(fun); desc.setEnumerable(true); desc.setConfigurable(true); - DefineOwnProperty(ToObject(receiver), ToString(name), desc, false); + DefineOwnProperty(ToObject(receiver), ToName(name), desc, false); } @@ -326,7 +335,7 @@ function ObjectLookupSetter(name) { if (receiver == null && !IS_UNDETECTABLE(receiver)) { receiver = %GlobalReceiver(global); } - return %LookupAccessor(ToObject(receiver), ToString(name), SETTER); + return %LookupAccessor(ToObject(receiver), ToName(name), SETTER); } @@ -337,7 +346,7 @@ function ObjectKeys(obj) { if (%IsJSProxy(obj)) { var handler = %GetHandler(obj); var names = CallTrap0(handler, "keys", DerivedKeysTrap); - return ToStringArray(names, "keys"); + return ToNameArray(names, "keys", false); } return %LocalKeys(obj); } @@ -635,7 +644,7 @@ function CallTrap2(handler, name, defaultTrap, x, y) { // ES5 section 8.12.1. function GetOwnProperty(obj, v) { - var p = ToString(v); + var p = ToName(v); if (%IsJSProxy(obj)) { var handler = %GetHandler(obj); var descriptor = CallTrap1(handler, "getOwnPropertyDescriptor", void 0, p); @@ -651,7 +660,7 @@ function GetOwnProperty(obj, v) { // GetOwnProperty returns an array indexed by the constants // defined in macros.py. 
// If p is not a property on obj undefined is returned. - var props = %GetOwnProperty(ToObject(obj), ToString(v)); + var props = %GetOwnProperty(ToObject(obj), p); // A false value here means that access checks failed. if (props === false) return void 0; @@ -693,7 +702,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) { // ES5 8.12.9. function DefineObjectProperty(obj, p, desc, should_throw) { - var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p)); + var current_or_access = %GetOwnProperty(ToObject(obj), ToName(p)); // A false value here means that access checks failed. if (current_or_access === false) return void 0; @@ -883,16 +892,35 @@ function DefineArrayProperty(obj, p, desc, should_throw) { } // Make sure the below call to DefineObjectProperty() doesn't overwrite // any magic "length" property by removing the value. + // TODO(mstarzinger): This hack should be removed once we have addressed the + // respective TODO in Runtime_DefineOrRedefineDataProperty. + // For the time being, we need a hack to prevent Object.observe from + // generating two change records. + var isObserved = %IsObserved(obj); + if (isObserved) %SetIsObserved(obj, false); obj.length = new_length; desc.value_ = void 0; desc.hasValue_ = false; - if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) { + threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw; + if (isObserved) %SetIsObserved(obj, true); + if (threw) { if (should_throw) { throw MakeTypeError("redefine_disallowed", [p]); } else { return false; } } + if (isObserved) { + var new_desc = GetOwnProperty(obj, "length"); + var updated = length_desc.value_ !== new_desc.value_; + var reconfigured = length_desc.writable_ !== new_desc.writable_ || + length_desc.configurable_ !== new_desc.configurable_ || + length_desc.enumerable_ !== new_desc.configurable_; + if (updated || reconfigured) { + NotifyChange(reconfigured ? "reconfigured" : "updated", + obj, "length", length_desc.value_); + } + } return true; } @@ -954,15 +982,16 @@ function ObjectGetOwnPropertyDescriptor(obj, p) { // For Harmony proxies -function ToStringArray(obj, trap) { +function ToNameArray(obj, trap, includeSymbols) { if (!IS_SPEC_OBJECT(obj)) { throw MakeTypeError("proxy_non_object_prop_names", [obj, trap]); } var n = ToUint32(obj.length); var array = new $Array(n); - var names = {}; // TODO(rossberg): use sets once they are ready. + var names = { __proto__: null }; // TODO(rossberg): use sets once ready. for (var index = 0; index < n; index++) { - var s = ToString(obj[index]); + var s = ToName(obj[index]); + if (IS_SYMBOL(s) && !includeSymbols) continue; if (%HasLocalProperty(names, s)) { throw MakeTypeError("proxy_repeated_prop_name", [obj, trap, s]); } @@ -982,53 +1011,64 @@ function ObjectGetOwnPropertyNames(obj) { if (%IsJSProxy(obj)) { var handler = %GetHandler(obj); var names = CallTrap0(handler, "getOwnPropertyNames", void 0); - return ToStringArray(names, "getOwnPropertyNames"); + return ToNameArray(names, "getOwnPropertyNames", true); } + var nameArrays = new InternalArray(); + // Find all the indexed properties. // Get the local element names. - var propertyNames = %GetLocalElementNames(obj); + var localElementNames = %GetLocalElementNames(obj); + for (var i = 0; i < localElementNames.length; ++i) { + localElementNames[i] = %_NumberToString(localElementNames[i]); + } + nameArrays.push(localElementNames); // Get names for indexed interceptor properties. 
- if (%GetInterceptorInfo(obj) & 1) { - var indexedInterceptorNames = - %GetIndexedInterceptorElementNames(obj); - if (indexedInterceptorNames) { - propertyNames = propertyNames.concat(indexedInterceptorNames); + var interceptorInfo = %GetInterceptorInfo(obj); + if ((interceptorInfo & 1) != 0) { + var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj); + if (!IS_UNDEFINED(indexedInterceptorNames)) { + nameArrays.push(indexedInterceptorNames); } } // Find all the named properties. // Get the local property names. - propertyNames = propertyNames.concat(%GetLocalPropertyNames(obj)); + nameArrays.push(%GetLocalPropertyNames(obj)); // Get names for named interceptor properties if any. - - if (%GetInterceptorInfo(obj) & 2) { - var namedInterceptorNames = - %GetNamedInterceptorPropertyNames(obj); - if (namedInterceptorNames) { - propertyNames = propertyNames.concat(namedInterceptorNames); + if ((interceptorInfo & 2) != 0) { + var namedInterceptorNames = %GetNamedInterceptorPropertyNames(obj); + if (!IS_UNDEFINED(namedInterceptorNames)) { + nameArrays.push(namedInterceptorNames); } } - // Property names are expected to be unique strings. - var propertySet = {}; - var j = 0; - for (var i = 0; i < propertyNames.length; ++i) { - var name = ToString(propertyNames[i]); - // We need to check for the exact property value since for intrinsic - // properties like toString if(propertySet["toString"]) will always - // succeed. - if (propertySet[name] === true) { - continue; + var propertyNames = + %Apply(InternalArray.prototype.concat, + nameArrays[0], nameArrays, 1, nameArrays.length - 1); + + // Property names are expected to be unique strings, + // but interceptors can interfere with that assumption. + if (interceptorInfo != 0) { + var propertySet = { __proto__: null }; + var j = 0; + for (var i = 0; i < propertyNames.length; ++i) { + var name = ToName(propertyNames[i]); + // We need to check for the exact property value since for intrinsic + // properties like toString if(propertySet["toString"]) will always + // succeed. + if (propertySet[name] === true) { + continue; + } + propertySet[name] = true; + propertyNames[j++] = name; } - propertySet[name] = true; - propertyNames[j++] = name; + propertyNames.length = j; } - propertyNames.length = j; return propertyNames; } @@ -1039,8 +1079,7 @@ function ObjectCreate(proto, properties) { if (!IS_SPEC_OBJECT(proto) && proto !== null) { throw MakeTypeError("proto_object_or_null", [proto]); } - var obj = new $Object(); - obj.__proto__ = proto; + var obj = { __proto__: proto }; if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties); return obj; } @@ -1051,12 +1090,12 @@ function ObjectDefineProperty(obj, p, attributes) { if (!IS_SPEC_OBJECT(obj)) { throw MakeTypeError("called_on_non_object", ["Object.defineProperty"]); } - var name = ToString(p); + var name = ToName(p); if (%IsJSProxy(obj)) { // Clone the attributes object for protection. // TODO(rossberg): not spec'ed yet, so not sure if this should involve // non-own properties as it does (or non-enumerable ones, as it doesn't?). 
- var attributesClone = {}; + var attributesClone = { __proto__: null }; for (var a in attributes) { attributesClone[a] = attributes[a]; } @@ -1211,16 +1250,16 @@ function ObjectIsSealed(obj) { if (%IsJSProxy(obj)) { return false; } + if (%IsExtensible(obj)) { + return false; + } var names = ObjectGetOwnPropertyNames(obj); for (var i = 0; i < names.length; i++) { var name = names[i]; var desc = GetOwnProperty(obj, name); if (desc.isConfigurable()) return false; } - if (!ObjectIsExtensible(obj)) { - return true; - } - return false; + return true; } @@ -1232,6 +1271,9 @@ function ObjectIsFrozen(obj) { if (%IsJSProxy(obj)) { return false; } + if (%IsExtensible(obj)) { + return false; + } var names = ObjectGetOwnPropertyNames(obj); for (var i = 0; i < names.length; i++) { var name = names[i]; @@ -1239,10 +1281,7 @@ function ObjectIsFrozen(obj) { if (IsDataDescriptor(desc) && desc.isWritable()) return false; if (desc.isConfigurable()) return false; } - if (!ObjectIsExtensible(obj)) { - return true; - } - return false; + return true; } @@ -1403,11 +1442,7 @@ function NumberToString(radix) { // ECMA-262 section 15.7.4.3 function NumberToLocaleString() { - if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { - throw MakeTypeError("called_on_null_or_undefined", - ["Number.prototype.toLocaleString"]); - } - return this.toString(); + return %_CallFunction(this, NumberToString); } @@ -1424,50 +1459,76 @@ function NumberValueOf() { // ECMA-262 section 15.7.4.5 function NumberToFixed(fractionDigits) { + var x = this; + if (!IS_NUMBER(this)) { + if (!IS_NUMBER_WRAPPER(this)) { + throw MakeTypeError("incompatible_method_receiver", + ["Number.prototype.toFixed", this]); + } + // Get the value of this number in case it's an object. + x = %_ValueOf(this); + } var f = TO_INTEGER(fractionDigits); + if (f < 0 || f > 20) { throw new $RangeError("toFixed() digits argument must be between 0 and 20"); } - if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { - throw MakeTypeError("called_on_null_or_undefined", - ["Number.prototype.toFixed"]); - } - var x = ToNumber(this); + + if (NUMBER_IS_NAN(x)) return "NaN"; + if (x == 1/0) return "Infinity"; + if (x == -1/0) return "-Infinity"; + return %NumberToFixed(x, f); } // ECMA-262 section 15.7.4.6 function NumberToExponential(fractionDigits) { - var f = -1; - if (!IS_UNDEFINED(fractionDigits)) { - f = TO_INTEGER(fractionDigits); - if (f < 0 || f > 20) { - throw new $RangeError( - "toExponential() argument must be between 0 and 20"); + var x = this; + if (!IS_NUMBER(this)) { + if (!IS_NUMBER_WRAPPER(this)) { + throw MakeTypeError("incompatible_method_receiver", + ["Number.prototype.toExponential", this]); } + // Get the value of this number in case it's an object. + x = %_ValueOf(this); } - if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { - throw MakeTypeError("called_on_null_or_undefined", - ["Number.prototype.toExponential"]); + var f = IS_UNDEFINED(fractionDigits) ? void 0 : TO_INTEGER(fractionDigits); + + if (NUMBER_IS_NAN(x)) return "NaN"; + if (x == 1/0) return "Infinity"; + if (x == -1/0) return "-Infinity"; + + if (IS_UNDEFINED(f)) { + f = -1; // Signal for runtime function that f is not defined. 
+ } else if (f < 0 || f > 20) { + throw new $RangeError("toExponential() argument must be between 0 and 20"); } - var x = ToNumber(this); return %NumberToExponential(x, f); } // ECMA-262 section 15.7.4.7 function NumberToPrecision(precision) { - if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) { - throw MakeTypeError("called_on_null_or_undefined", - ["Number.prototype.toPrecision"]); + var x = this; + if (!IS_NUMBER(this)) { + if (!IS_NUMBER_WRAPPER(this)) { + throw MakeTypeError("incompatible_method_receiver", + ["Number.prototype.toPrecision", this]); + } + // Get the value of this number in case it's an object. + x = %_ValueOf(this); } if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this)); var p = TO_INTEGER(precision); + + if (NUMBER_IS_NAN(x)) return "NaN"; + if (x == 1/0) return "Infinity"; + if (x == -1/0) return "-Infinity"; + if (p < 1 || p > 21) { throw new $RangeError("toPrecision() argument must be between 1 and 21"); } - var x = ToNumber(this); return %NumberToPrecision(x, p); } @@ -1648,6 +1709,10 @@ function NewFunction(arg1) { // length == 1 // character - it may make the combined function expression // compile. We avoid this problem by checking for this early on. if (p.indexOf(')') != -1) throw MakeSyntaxError('unable_to_parse',[]); + // If the formal parameters include an unbalanced block comment, the + // function must be rejected. Since JavaScript does not allow nested + // comments we can include a trailing block comment to catch this. + p += '\n/' + '**/'; } var body = (n > 0) ? ToString(%_Arguments(n - 1)) : ''; var source = '(function(' + p + ') {\n' + body + '\n})'; @@ -1655,7 +1720,7 @@ function NewFunction(arg1) { // length == 1 // The call to SetNewFunctionAttributes will ensure the prototype // property of the resulting function is enumerable (ECMA262, 15.3.5.2). var global_receiver = %GlobalReceiver(global); - var f = %_CallFunction(global_receiver, %CompileString(source)); + var f = %_CallFunction(global_receiver, %CompileString(source, true)); %FunctionMarkNameShouldPrintAsAnonymous(f); return %SetNewFunctionAttributes(f); diff --git a/deps/v8/src/v8threads.cc b/deps/v8/src/v8threads.cc index 32ea5e197c..925e1982c0 100644 --- a/deps/v8/src/v8threads.cc +++ b/deps/v8/src/v8threads.cc @@ -42,15 +42,18 @@ namespace v8 { bool Locker::active_ = false; -// Constructor for the Locker object. Once the Locker is constructed the -// current thread will be guaranteed to have the lock for a given isolate. -Locker::Locker(v8::Isolate* isolate) - : has_lock_(false), - top_level_(true), - isolate_(reinterpret_cast<i::Isolate*>(isolate)) { - if (isolate_ == NULL) { - isolate_ = i::Isolate::GetDefaultIsolateForLocking(); - } +Locker::Locker() { + Initialize(i::Isolate::GetDefaultIsolateForLocking()); +} + + +// Once the Locker is initialized, the current thread will be guaranteed to have +// the lock for a given isolate. +void Locker::Initialize(v8::Isolate* isolate) { + ASSERT(isolate != NULL); + has_lock_= false; + top_level_ = true; + isolate_ = reinterpret_cast<i::Isolate*>(isolate); // Record that the Locker has been used at least once. active_ = true; // Get the big lock if necessary. 
@@ -86,10 +89,8 @@ Locker::Locker(v8::Isolate* isolate) bool Locker::IsLocked(v8::Isolate* isolate) { + ASSERT(isolate != NULL); i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate); - if (internal_isolate == NULL) { - internal_isolate = i::Isolate::GetDefaultIsolateForLocking(); - } return internal_isolate->thread_manager()->IsLockedByCurrentThread(); } @@ -115,11 +116,14 @@ Locker::~Locker() { } -Unlocker::Unlocker(v8::Isolate* isolate) - : isolate_(reinterpret_cast<i::Isolate*>(isolate)) { - if (isolate_ == NULL) { - isolate_ = i::Isolate::GetDefaultIsolateForLocking(); - } +Unlocker::Unlocker() { + Initialize(i::Isolate::GetDefaultIsolateForLocking()); +} + + +void Unlocker::Initialize(v8::Isolate* isolate) { + ASSERT(isolate != NULL); + isolate_ = reinterpret_cast<i::Isolate*>(isolate); ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread()); if (isolate_->IsDefaultIsolate()) { isolate_->Exit(); @@ -479,7 +483,7 @@ void ContextSwitcher::Run() { // Acknowledge the preemption by the receiving thread. void ContextSwitcher::PreemptionReceived() { - ASSERT(Locker::IsLocked()); + ASSERT(Locker::IsLocked(i::Isolate::GetDefaultIsolateForLocking())); // There is currently no accounting being done for this. But could be in the // future, which is why we leave this in. } diff --git a/deps/v8/src/v8utils.cc b/deps/v8/src/v8utils.cc index 627169e709..58ad4e5edd 100644 --- a/deps/v8/src/v8utils.cc +++ b/deps/v8/src/v8utils.cc @@ -273,97 +273,4 @@ void StringBuilder::AddFormattedList(const char* format, va_list list) { } } - -MemoryMappedExternalResource::MemoryMappedExternalResource(const char* filename) - : filename_(NULL), - data_(NULL), - length_(0), - remove_file_on_cleanup_(false) { - Init(filename); -} - - -MemoryMappedExternalResource:: - MemoryMappedExternalResource(const char* filename, - bool remove_file_on_cleanup) - : filename_(NULL), - data_(NULL), - length_(0), - remove_file_on_cleanup_(remove_file_on_cleanup) { - Init(filename); -} - - -MemoryMappedExternalResource::~MemoryMappedExternalResource() { - // Release the resources if we had successfully acquired them: - if (file_ != NULL) { - delete file_; - if (remove_file_on_cleanup_) { - OS::Remove(filename_); - } - DeleteArray<char>(filename_); - } -} - - -void MemoryMappedExternalResource::Init(const char* filename) { - file_ = OS::MemoryMappedFile::open(filename); - if (file_ != NULL) { - filename_ = StrDup(filename); - data_ = reinterpret_cast<char*>(file_->memory()); - length_ = file_->size(); - } -} - - -bool MemoryMappedExternalResource::EnsureIsAscii(bool abort_if_failed) const { - bool is_ascii = true; - - int line_no = 1; - const char* start_of_line = data_; - const char* end = data_ + length_; - for (const char* p = data_; p < end; p++) { - char c = *p; - if ((c & 0x80) != 0) { - // Non-ASCII detected: - is_ascii = false; - - // Report the error and abort if appropriate: - if (abort_if_failed) { - int char_no = static_cast<int>(p - start_of_line) - 1; - - ASSERT(filename_ != NULL); - PrintF("\n\n\n" - "Abort: Non-Ascii character 0x%.2x in file %s line %d char %d", - c, filename_, line_no, char_no); - - // Allow for some context up to kNumberOfLeadingContextChars chars - // before the offending non-ASCII char to help the user see where - // the offending char is. - const int kNumberOfLeadingContextChars = 10; - const char* err_context = p - kNumberOfLeadingContextChars; - if (err_context < data_) { - err_context = data_; - } - // Compute the length of the error context and print it. 
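The v8threads.cc hunks above make the lock API isolate-explicit: the parameterless Locker/Unlocker constructors now fall back to GetDefaultIsolateForLocking(), and Locker::IsLocked() asserts a non-NULL isolate instead of substituting one. A short embedder-side sketch of the intended usage, assuming the public v8.h API of this release; RunInIsolate is a hypothetical helper, not part of V8:

#include <v8.h>

// Hypothetical helper: take the per-isolate lock, enter the isolate, and
// temporarily drop the lock around blocking work.
void RunInIsolate(v8::Isolate* isolate) {
  v8::Locker locker(isolate);              // lock is tied to this specific isolate
  v8::Isolate::Scope isolate_scope(isolate);
  // ... run JavaScript ...
  {
    v8::Unlocker unlocker(isolate);        // release the lock while blocking
    // ... work that must not hold the isolate lock ...
  }
  // The Locker is re-acquired here when the Unlocker goes out of scope.
}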
- int err_context_length = static_cast<int>(p - err_context); - if (err_context_length != 0) { - PrintF(" after \"%.*s\"", err_context_length, err_context); - } - PrintF(".\n\n\n"); - OS::Abort(); - } - - break; // Non-ASCII detected. No need to continue scanning. - } - if (c == '\n') { - start_of_line = p; - line_no++; - } - } - - return is_ascii; -} - - } } // namespace v8::internal diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h index 111abdf8b8..793d34d9cc 100644 --- a/deps/v8/src/v8utils.h +++ b/deps/v8/src/v8utils.h @@ -202,15 +202,44 @@ Vector<const char> ReadFile(FILE* file, bool verbose = true); +template <typename sourcechar, typename sinkchar> +INLINE(static void CopyCharsUnsigned(sinkchar* dest, + const sourcechar* src, + int chars)); + // Copy from ASCII/16bit chars to ASCII/16bit chars. template <typename sourcechar, typename sinkchar> INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars)); +template<typename sourcechar, typename sinkchar> +void CopyChars(sinkchar* dest, const sourcechar* src, int chars) { + ASSERT(sizeof(sourcechar) <= 2); + ASSERT(sizeof(sinkchar) <= 2); + if (sizeof(sinkchar) == 1) { + if (sizeof(sourcechar) == 1) { + CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), + reinterpret_cast<const uint8_t*>(src), + chars); + } else { + CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest), + reinterpret_cast<const uint16_t*>(src), + chars); + } + } else { + if (sizeof(sourcechar) == 1) { + CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), + reinterpret_cast<const uint8_t*>(src), + chars); + } else { + CopyCharsUnsigned(reinterpret_cast<uint16_t*>(dest), + reinterpret_cast<const uint16_t*>(src), + chars); + } + } +} template <typename sourcechar, typename sinkchar> -void CopyChars(sinkchar* dest, const sourcechar* src, int chars) { - ASSERT(chars >= 0); - if (chars == 0) return; +void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) { sinkchar* limit = dest + chars; #ifdef V8_HOST_CAN_READ_UNALIGNED if (sizeof(*dest) == sizeof(*src)) { @@ -220,7 +249,8 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) { } // Number of characters in a uintptr_t. static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest); // NOLINT - while (dest <= limit - kStepSize) { + ASSERT(dest + kStepSize > dest); // Check for overflow. + while (dest + kStepSize <= limit) { *reinterpret_cast<uintptr_t*>(dest) = *reinterpret_cast<const uintptr_t*>(src); dest += kStepSize; @@ -234,37 +264,6 @@ void CopyChars(sinkchar* dest, const sourcechar* src, int chars) { } -// A resource for using mmapped files to back external strings that are read -// from files. 
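One plausible motivation for the CopyChars/CopyCharsUnsigned split in the v8utils.h hunk above — an assumption, not something the diff states — is that funnelling every copy through uint8_t/uint16_t avoids sign-extension when 8-bit characters are widened. A tiny sketch of that pitfall (the latin1 variable and constants are invented for illustration):

#include <cassert>
#include <cstdint>

int main() {
  char latin1 = static_cast<char>(0xE9);  // e-acute in Latin-1
  // Widening the plain char sign-extends on targets where char is signed:
  uint16_t maybe_wrong = static_cast<uint16_t>(latin1);                  // 0xFFE9 there
  // Widening through uint8_t always yields the intended 16-bit code unit:
  uint16_t right = static_cast<uint16_t>(static_cast<uint8_t>(latin1));  // 0x00E9
  assert(right == 0x00E9);
  (void)maybe_wrong;
  return 0;
}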
-class MemoryMappedExternalResource: public - v8::String::ExternalAsciiStringResource { - public: - explicit MemoryMappedExternalResource(const char* filename); - MemoryMappedExternalResource(const char* filename, - bool remove_file_on_cleanup); - virtual ~MemoryMappedExternalResource(); - - virtual const char* data() const { return data_; } - virtual size_t length() const { return length_; } - - bool exists() const { return file_ != NULL; } - bool is_empty() const { return length_ == 0; } - - bool EnsureIsAscii(bool abort_if_failed) const; - bool EnsureIsAscii() const { return EnsureIsAscii(true); } - bool IsAscii() const { return EnsureIsAscii(false); } - - private: - void Init(const char* filename); - - char* filename_; - OS::MemoryMappedFile* file_; - - const char* data_; - size_t length_; - bool remove_file_on_cleanup_; -}; - class StringBuilder : public SimpleStringBuilder { public: explicit StringBuilder(int size) : SimpleStringBuilder(size) { } diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc index 0416f3a390..488da42ce6 100644 --- a/deps/v8/src/variables.cc +++ b/deps/v8/src/variables.cc @@ -41,8 +41,9 @@ const char* Variable::Mode2String(VariableMode mode) { switch (mode) { case VAR: return "VAR"; case CONST: return "CONST"; - case CONST_HARMONY: return "CONST_HARMONY"; case LET: return "LET"; + case CONST_HARMONY: return "CONST_HARMONY"; + case MODULE: return "MODULE"; case DYNAMIC: return "DYNAMIC"; case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL"; case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL"; @@ -75,7 +76,7 @@ Variable::Variable(Scope* scope, initialization_flag_(initialization_flag), interface_(interface) { // Names must be canonicalized for fast equality checks. - ASSERT(name->IsSymbol()); + ASSERT(name->IsInternalizedString()); // Var declared variables never need initialization. ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization)); } @@ -84,7 +85,8 @@ Variable::Variable(Scope* scope, bool Variable::IsGlobalObjectProperty() const { // Temporaries are never global, they must always be allocated in the // activation frame. - return mode_ != TEMPORARY && !IsLexicalVariableMode(mode_) + return (IsDynamicVariableMode(mode_) || + (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_))) && scope_ != NULL && scope_->is_global_scope(); } diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h index ba26b80472..39451d5dfb 100644 --- a/deps/v8/src/variables.h +++ b/deps/v8/src/variables.h @@ -130,8 +130,8 @@ class Variable: public ZoneObject { bool is_arguments() const { return kind_ == ARGUMENTS; } // True if the variable is named eval and not known to be shadowed. - bool is_possibly_eval() const { - return IsVariable(FACTORY->eval_symbol()); + bool is_possibly_eval(Isolate* isolate) const { + return IsVariable(isolate->factory()->eval_string()); } Variable* local_if_not_shadowed() const { diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc index 715c2e5393..4bb5a499d0 100644 --- a/deps/v8/src/version.cc +++ b/deps/v8/src/version.cc @@ -33,9 +33,9 @@ // NOTE these macros are used by the SCons build script so their names // cannot be changed without changing the SCons build script. #define MAJOR_VERSION 3 -#define MINOR_VERSION 14 -#define BUILD_NUMBER 5 -#define PATCH_LEVEL 8 +#define MINOR_VERSION 17 +#define BUILD_NUMBER 13 +#define PATCH_LEVEL 0 // Use 1 for candidates and 0 otherwise. // (Boolean macro values are not supported by all preprocessors.) 
#define IS_CANDIDATE_VERSION 0 diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h index 384940dfa5..fae68ebeea 100644 --- a/deps/v8/src/vm-state-inl.h +++ b/deps/v8/src/vm-state-inl.h @@ -47,8 +47,8 @@ inline const char* StateToString(StateTag state) { return "GC"; case COMPILER: return "COMPILER"; - case PARALLEL_COMPILER_PROLOGUE: - return "PARALLEL_COMPILER_PROLOGUE"; + case PARALLEL_COMPILER: + return "PARALLEL_COMPILER"; case OTHER: return "OTHER"; case EXTERNAL: @@ -67,6 +67,10 @@ VMState::VMState(Isolate* isolate, StateTag tag) LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_))); } + if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && tag == EXTERNAL) { + LOG(isolate_, EnterExternal()); + } + isolate_->SetCurrentVMState(tag); } @@ -80,6 +84,11 @@ VMState::~VMState() { UncheckedStringEvent("To", StateToString(previous_tag_))); } + if (FLAG_log_timer_events && + previous_tag_ != EXTERNAL && isolate_->current_vm_state() == EXTERNAL) { + LOG(isolate_, LeaveExternal()); + } + isolate_->SetCurrentVMState(previous_tag_); } diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h index d022340c10..91bc528f31 100644 --- a/deps/v8/src/x64/assembler-x64-inl.h +++ b/deps/v8/src/x64/assembler-x64-inl.h @@ -42,6 +42,9 @@ namespace internal { // Implementation of Assembler +static const byte kCallOpcode = 0xE8; + + void Assembler::emitl(uint32_t x) { Memory::uint32_at(pc_) = x; pc_ += sizeof(uint32_t); @@ -50,7 +53,7 @@ void Assembler::emitl(uint32_t x) { void Assembler::emitq(uint64_t x, RelocInfo::Mode rmode) { Memory::uint64_at(pc_) = x; - if (rmode != RelocInfo::NONE) { + if (!RelocInfo::IsNone(rmode)) { RecordRelocInfo(rmode, x); } pc_ += sizeof(uint64_t); @@ -83,6 +86,14 @@ void Assembler::emit_code_target(Handle<Code> target, } +void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) { + ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + ASSERT(isolate()->code_range()->exists()); + RecordRelocInfo(rmode); + emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start())); +} + + void Assembler::emit_rex_64(Register reg, Register rm_reg) { emit(0x48 | reg.high_bit() << 2 | rm_reg.high_bit()); } @@ -205,6 +216,12 @@ Handle<Object> Assembler::code_target_object_handle_at(Address pc) { return code_targets_[Memory::int32_at(pc)]; } + +Address Assembler::runtime_entry_at(Address pc) { + ASSERT(isolate()->code_range()->exists()); + return Memory::int32_at(pc) + isolate()->code_range()->start(); +} + // ----------------------------------------------------------------------------- // Implementation of RelocInfo @@ -214,25 +231,27 @@ void RelocInfo::apply(intptr_t delta) { // absolute code pointer inside code object moves with the code object. Memory::Address_at(pc_) += static_cast<int32_t>(delta); CPU::FlushICache(pc_, sizeof(Address)); - } else if (IsCodeTarget(rmode_)) { + } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) { Memory::int32_at(pc_) -= static_cast<int32_t>(delta); CPU::FlushICache(pc_, sizeof(int32_t)); + } else if (rmode_ == CODE_AGE_SEQUENCE) { + if (*pc_ == kCallOpcode) { + int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1); + *p -= static_cast<int32_t>(delta); // Relocate entry. 
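The new emit_runtime_entry()/runtime_entry_at() pair in the assembler-x64-inl.h hunk above stores a runtime entry as a 32-bit offset from the start of the isolate's code range instead of an absolute 64-bit address. A toy round-trip of that encoding (EncodeEntry, DecodeEntry and the example addresses are invented; the real code reads the offset back via Memory::int32_at):

#include <cassert>
#include <cstdint>

using Address = uintptr_t;

// Store an absolute entry address as a 32-bit offset from the code range base.
uint32_t EncodeEntry(Address entry, Address code_range_start) {
  return static_cast<uint32_t>(entry - code_range_start);
}

// Recover the absolute address by adding the base back.
Address DecodeEntry(uint32_t stored, Address code_range_start) {
  return code_range_start + stored;
}

int main() {
  Address base  = 0x7f2a00000000ULL;  // hypothetical code range start
  Address entry = base + 0x123456;    // hypothetical runtime entry inside the range
  uint32_t stored = EncodeEntry(entry, base);
  assert(DecodeEntry(stored, base) == entry);
  return 0;
}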
+ CPU::FlushICache(p, sizeof(uint32_t)); + } } } Address RelocInfo::target_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); - if (IsCodeTarget(rmode_)) { - return Assembler::target_address_at(pc_); - } else { - return Memory::Address_at(pc_); - } + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + return Assembler::target_address_at(pc_); } Address RelocInfo::target_address_address() { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE); return reinterpret_cast<Address>(pc_); @@ -249,17 +268,12 @@ int RelocInfo::target_address_size() { void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) { - ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY); - if (IsCodeTarget(rmode_)) { - Assembler::set_target_address_at(pc_, target); + ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)); + Assembler::set_target_address_at(pc_, target); + if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) { Object* target_code = Code::GetCodeFromTargetAddress(target); - if (mode == UPDATE_WRITE_BARRIER && host() != NULL) { - host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( - host(), this, HeapObject::cast(target_code)); - } - } else { - Memory::Address_at(pc_) = target; - CPU::FlushICache(pc_, sizeof(Address)); + host()->GetHeap()->incremental_marking()->RecordWriteIntoCode( + host(), this, HeapObject::cast(target_code)); } } @@ -305,6 +319,19 @@ void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) { } +Address RelocInfo::target_runtime_entry(Assembler* origin) { + ASSERT(IsRuntimeEntry(rmode_)); + return origin->runtime_entry_at(pc_); +} + + +void RelocInfo::set_target_runtime_entry(Address target, + WriteBarrierMode mode) { + ASSERT(IsRuntimeEntry(rmode_)); + if (target_address() != target) set_target_address(target, mode); +} + + Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() { ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL); Address address = Memory::Address_at(pc_); @@ -355,6 +382,21 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() { } +Code* RelocInfo::code_age_stub() { + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + ASSERT(*pc_ == kCallOpcode); + return Code::GetCodeFromTargetAddress( + Assembler::target_address_at(pc_ + 1)); +} + + +void RelocInfo::set_code_age_stub(Code* stub) { + ASSERT(*pc_ == kCallOpcode); + ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE); + Assembler::set_target_address_at(pc_ + 1, stub->instruction_start()); +} + + Address RelocInfo::call_address() { ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) || (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence())); @@ -408,6 +450,8 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { visitor->VisitExternalReference(this); CPU::FlushICache(pc_, sizeof(Address)); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + visitor->VisitCodeAgeSequence(this); #ifdef ENABLE_DEBUGGER_SUPPORT // TODO(isolates): Get a cached isolate below. 
} else if (((RelocInfo::IsJSReturn(mode) && @@ -417,7 +461,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) { Isolate::Current()->debug()->has_break_points()) { visitor->VisitDebugTarget(this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (RelocInfo::IsRuntimeEntry(mode)) { visitor->VisitRuntimeEntry(this); } } @@ -436,6 +480,8 @@ void RelocInfo::Visit(Heap* heap) { } else if (mode == RelocInfo::EXTERNAL_REFERENCE) { StaticVisitor::VisitExternalReference(this); CPU::FlushICache(pc_, sizeof(Address)); + } else if (RelocInfo::IsCodeAgeSequence(mode)) { + StaticVisitor::VisitCodeAgeSequence(heap, this); #ifdef ENABLE_DEBUGGER_SUPPORT } else if (heap->isolate()->debug()->has_break_points() && ((RelocInfo::IsJSReturn(mode) && @@ -444,7 +490,7 @@ void RelocInfo::Visit(Heap* heap) { IsPatchedDebugBreakSlotSequence()))) { StaticVisitor::VisitDebugTarget(heap, this); #endif - } else if (mode == RelocInfo::RUNTIME_ENTRY) { + } else if (RelocInfo::IsRuntimeEntry(mode)) { StaticVisitor::VisitRuntimeEntry(this); } } diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc index 862a735579..57d40f7946 100644 --- a/deps/v8/src/x64/assembler-x64.cc +++ b/deps/v8/src/x64/assembler-x64.cc @@ -43,7 +43,13 @@ namespace internal { bool CpuFeatures::initialized_ = false; #endif uint64_t CpuFeatures::supported_ = CpuFeatures::kDefaultCpuFeatures; -uint64_t CpuFeatures::found_by_runtime_probing_ = 0; +uint64_t CpuFeatures::found_by_runtime_probing_only_ = 0; + + +ExternalReference ExternalReference::cpu_features() { + ASSERT(CpuFeatures::initialized_); + return ExternalReference(&CpuFeatures::supported_); +} void CpuFeatures::Probe() { @@ -102,7 +108,7 @@ void CpuFeatures::Probe() { __ bind(&cpuid); __ movl(rax, Immediate(1)); supported_ = kDefaultCpuFeatures | (1 << CPUID); - { Scope fscope(CPUID); + { CpuFeatureScope fscope(&assm, CPUID); __ cpuid(); // Move the result from ecx:edx to rdi. __ movl(rdi, rdx); // Zero-extended to 64 bits. @@ -110,7 +116,7 @@ void CpuFeatures::Probe() { __ or_(rdi, rcx); // Get the sahf supported flag, from CPUID(0x80000001) - __ movq(rax, 0x80000001, RelocInfo::NONE); + __ movq(rax, 0x80000001, RelocInfo::NONE64); __ cpuid(); } supported_ = kDefaultCpuFeatures; @@ -137,12 +143,13 @@ void CpuFeatures::Probe() { typedef uint64_t (*F0)(); F0 probe = FUNCTION_CAST<F0>(reinterpret_cast<Address>(memory->address())); - supported_ = probe(); - found_by_runtime_probing_ = supported_; - found_by_runtime_probing_ &= ~kDefaultCpuFeatures; - uint64_t os_guarantees = OS::CpuFeaturesImpliedByPlatform(); - supported_ |= os_guarantees; - found_by_runtime_probing_ &= ~os_guarantees; + + uint64_t probed_features = probe(); + uint64_t platform_features = OS::CpuFeaturesImpliedByPlatform(); + supported_ = probed_features | platform_features; + found_by_runtime_probing_only_ + = probed_features & ~kDefaultCpuFeatures & ~platform_features; + // SSE2 and CMOV must be available on an X64 CPU. ASSERT(IsSupported(CPUID)); ASSERT(IsSupported(SSE2)); @@ -173,7 +180,7 @@ void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) { #endif // Patch the code. - patcher.masm()->movq(r10, target, RelocInfo::NONE); + patcher.masm()->movq(r10, target, RelocInfo::NONE64); patcher.masm()->call(r10); // Check that the size of the code generated is as expected. @@ -201,7 +208,8 @@ void RelocInfo::PatchCode(byte* instructions, int instruction_count) { // ----------------------------------------------------------------------------- // Register constants. 
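CpuFeatures::Probe() above now derives supported_ and the renamed found_by_runtime_probing_only_ from the probed and platform-implied feature sets. The toy bit masks below (feature values invented purely for illustration) spell out that set algebra:

#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kDefaultCpuFeatures = 0x3;  // e.g. SSE2 | CMOV, always assumed on x64
  const uint64_t probed_features     = 0xF;  // what runtime probing reported
  const uint64_t platform_features   = 0x4;  // what the OS guarantees regardless

  uint64_t supported = probed_features | platform_features;
  uint64_t found_by_runtime_probing_only =
      probed_features & ~kDefaultCpuFeatures & ~platform_features;

  assert(supported == 0xF);
  assert(found_by_runtime_probing_only == 0x8);  // only the feature nobody else vouches for
  return 0;
}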
-const int Register::kRegisterCodeByAllocationIndex[kNumAllocatableRegisters] = { +const int + Register::kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters] = { // rax, rbx, rdx, rcx, rdi, r8, r9, r11, r14, r15 0, 3, 2, 1, 7, 8, 9, 11, 14, 15 }; @@ -346,50 +354,20 @@ bool Operand::AddressUsesRegister(Register reg) const { static void InitCoverageLog(); #endif -Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) - : AssemblerBase(arg_isolate), +Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) + : AssemblerBase(isolate, buffer, buffer_size), code_targets_(100), - positions_recorder_(this), - emit_debug_code_(FLAG_debug_code), - predictable_code_size_(false) { - if (buffer == NULL) { - // Do our own buffer management. - if (buffer_size <= kMinimalBufferSize) { - buffer_size = kMinimalBufferSize; - - if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) { - buffer = isolate()->assembler_spare_buffer(); - isolate()->set_assembler_spare_buffer(NULL); - } - } - if (buffer == NULL) { - buffer_ = NewArray<byte>(buffer_size); - } else { - buffer_ = static_cast<byte*>(buffer); - } - buffer_size_ = buffer_size; - own_buffer_ = true; - } else { - // Use externally provided buffer instead. - ASSERT(buffer_size > 0); - buffer_ = static_cast<byte*>(buffer); - buffer_size_ = buffer_size; - own_buffer_ = false; - } - + positions_recorder_(this) { // Clear the buffer in debug mode unless it was provided by the // caller in which case we can't be sure it's okay to overwrite // existing code in it. #ifdef DEBUG if (own_buffer_) { - memset(buffer_, 0xCC, buffer_size); // int3 + memset(buffer_, 0xCC, buffer_size_); // int3 } #endif - // Set up buffer pointers. - ASSERT(buffer_ != NULL); - pc_ = buffer_; - reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); + reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); #ifdef GENERATED_CODE_COVERAGE @@ -398,19 +376,6 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) } -Assembler::~Assembler() { - if (own_buffer_) { - if (isolate() != NULL && - isolate()->assembler_spare_buffer() == NULL && - buffer_size_ == kMinimalBufferSize) { - isolate()->set_assembler_spare_buffer(buffer_); - } else { - DeleteArray(buffer_); - } - } -} - - void Assembler::GetCode(CodeDesc* desc) { // Finalize code (at this point overflow() may be true, but the gap ensures // that we are still not overlapping instructions and relocation info). @@ -876,6 +841,16 @@ void Assembler::call(Label* L) { } +void Assembler::call(Address entry, RelocInfo::Mode rmode) { + ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + positions_recorder()->WriteRecordedPositions(); + EnsureSpace ensure_space(this); + // 1110 1000 #32-bit disp. + emit(0xE8); + emit_runtime_entry(entry, rmode); +} + + void Assembler::call(Handle<Code> target, RelocInfo::Mode rmode, TypeFeedbackId ast_id) { @@ -1014,7 +989,7 @@ void Assembler::cmpb_al(Immediate imm8) { void Assembler::cpuid() { - ASSERT(CpuFeatures::IsEnabled(CPUID)); + ASSERT(IsEnabled(CPUID)); EnsureSpace ensure_space(this); emit(0x0F); emit(0xA2); @@ -1238,13 +1213,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) { // Determine whether we can use 1-byte offsets for backwards branches, // which have a max range of 128 bytes. - // We also need to check the predictable_code_size_ flag here, because - // on x64, when the full code generator recompiles code for debugging, some - // places need to be padded out to a certain size. 
The debugger is keeping - // track of how often it did this so that it can adjust return addresses on - // the stack, but if the size of jump instructions can also change, that's - // not enough and the calculated offsets would be incorrect. - if (is_int8(offs - short_size) && !predictable_code_size_) { + // We also need to check predictable_code_size() flag here, because on x64, + // when the full code generator recompiles code for debugging, some places + // need to be padded out to a certain size. The debugger is keeping track of + // how often it did this so that it can adjust return addresses on the + // stack, but if the size of jump instructions can also change, that's not + // enough and the calculated offsets would be incorrect. + if (is_int8(offs - short_size) && !predictable_code_size()) { // 0111 tttn #8-bit disp. emit(0x70 | cc); emit((offs - short_size) & 0xFF); @@ -1282,6 +1257,16 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) { } +void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) { + ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + EnsureSpace ensure_space(this); + ASSERT(is_uint4(cc)); + emit(0x0F); + emit(0x80 | cc); + emit_runtime_entry(entry, rmode); +} + + void Assembler::j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode) { @@ -1301,7 +1286,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) { if (L->is_bound()) { int offs = L->pos() - pc_offset() - 1; ASSERT(offs <= 0); - if (is_int8(offs - short_size) && !predictable_code_size_) { + if (is_int8(offs - short_size) && !predictable_code_size()) { // 1110 1011 #8-bit disp. emit(0xEB); emit((offs - short_size) & 0xFF); @@ -1344,6 +1329,15 @@ void Assembler::jmp(Handle<Code> target, RelocInfo::Mode rmode) { } +void Assembler::jmp(Address entry, RelocInfo::Mode rmode) { + ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + EnsureSpace ensure_space(this); + ASSERT(RelocInfo::IsRuntimeEntry(rmode)); + emit(0xE9); + emit_runtime_entry(entry, rmode); +} + + void Assembler::jmp(Register target) { EnsureSpace ensure_space(this); // Opcode FF/4 r64. @@ -1540,14 +1534,13 @@ void Assembler::movq(Register dst, void* value, RelocInfo::Mode rmode) { void Assembler::movq(Register dst, int64_t value, RelocInfo::Mode rmode) { // Non-relocatable values might not need a 64-bit representation. - if (rmode == RelocInfo::NONE) { - // Sadly, there is no zero or sign extending move for 8-bit immediates. - if (is_int32(value)) { - movq(dst, Immediate(static_cast<int32_t>(value))); - return; - } else if (is_uint32(value)) { + if (RelocInfo::IsNone(rmode)) { + if (is_uint32(value)) { movl(dst, Immediate(static_cast<int32_t>(value))); return; + } else if (is_int32(value)) { + movq(dst, Immediate(static_cast<int32_t>(value))); + return; } // Value cannot be represented by 32 bits, so do a full 64 bit immediate // value. @@ -1600,11 +1593,11 @@ void Assembler::movl(const Operand& dst, Label* src) { void Assembler::movq(Register dst, Handle<Object> value, RelocInfo::Mode mode) { // If there is no relocation info, emit the value of the handle efficiently // (possibly using less that 8 bytes for the value). - if (mode == RelocInfo::NONE) { + if (RelocInfo::IsNone(mode)) { // There is no possible reason to store a heap pointer without relocation // info, so it must be a smi. 
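The reordered checks in Assembler::movq(Register, int64_t) above now test is_uint32 before is_int32, so non-negative values that fit in 32 bits take the zero-extending movl form. A small sketch of the resulting selection (SelectMove and the MoveKind names are invented, and the encoding-size rationale — movl being shorter than the sign-extending movq with a 32-bit immediate — is an assumption rather than something the diff states):

#include <cstdint>

enum MoveKind { kMovlZeroExtend32, kMovqSignExtend32, kMovqFull64 };

// Mirrors the new ordering: prefer the 32-bit zero-extending form whenever the
// value is a valid uint32, use the sign-extending 32-bit immediate only for
// negative int32 values, and fall back to a full 64-bit immediate otherwise.
MoveKind SelectMove(int64_t value) {
  if (value >= 0 && value <= 0xFFFFFFFFLL) return kMovlZeroExtend32;  // is_uint32
  if (value >= INT32_MIN && value < 0)     return kMovqSignExtend32;  // is_int32
  return kMovqFull64;
}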
ASSERT(value->IsSmi()); - movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE); + movq(dst, reinterpret_cast<int64_t>(*value), RelocInfo::NONE64); } else { EnsureSpace ensure_space(this); ASSERT(value->IsHeapObject()); @@ -1688,6 +1681,15 @@ void Assembler::movzxwl(Register dst, const Operand& src) { } +void Assembler::movzxwl(Register dst, Register src) { + EnsureSpace ensure_space(this); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0xB7); + emit_modrm(dst, src); +} + + void Assembler::repmovsb() { EnsureSpace ensure_space(this); emit(0xF3); @@ -2246,7 +2248,7 @@ void Assembler::fistp_s(const Operand& adr) { void Assembler::fisttp_s(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); emit_optional_rex_32(adr); emit(0xDB); @@ -2255,7 +2257,7 @@ void Assembler::fisttp_s(const Operand& adr) { void Assembler::fisttp_d(const Operand& adr) { - ASSERT(CpuFeatures::IsEnabled(SSE3)); + ASSERT(IsEnabled(SSE3)); EnsureSpace ensure_space(this); emit_optional_rex_32(adr); emit(0xDD); @@ -2850,6 +2852,16 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) { } +void Assembler::addsd(XMMRegister dst, const Operand& src) { + EnsureSpace ensure_space(this); + emit(0xF2); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0x58); + emit_sse_operand(dst, src); +} + + void Assembler::mulsd(XMMRegister dst, XMMRegister src) { EnsureSpace ensure_space(this); emit(0xF2); @@ -2860,6 +2872,16 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) { } +void Assembler::mulsd(XMMRegister dst, const Operand& src) { + EnsureSpace ensure_space(this); + emit(0xF2); + emit_optional_rex_32(dst, src); + emit(0x0F); + emit(0x59); + emit_sse_operand(dst, src); +} + + void Assembler::subsd(XMMRegister dst, XMMRegister src) { EnsureSpace ensure_space(this); emit(0xF2); @@ -2951,7 +2973,7 @@ void Assembler::ucomisd(XMMRegister dst, const Operand& src) { void Assembler::roundsd(XMMRegister dst, XMMRegister src, Assembler::RoundingMode mode) { - ASSERT(CpuFeatures::IsEnabled(SSE4_1)); + ASSERT(IsEnabled(SSE4_1)); EnsureSpace ensure_space(this); emit(0x66); emit_optional_rex_32(dst, src); @@ -2974,6 +2996,15 @@ void Assembler::movmskpd(Register dst, XMMRegister src) { } +void Assembler::movmskps(Register dst, XMMRegister src) { + EnsureSpace ensure_space(this); + emit_optional_rex_32(dst, src); + emit(0x0f); + emit(0x50); + emit_sse_operand(dst, src); +} + + void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) { Register ireg = { reg.code() }; emit_operand(ireg, adr); @@ -3008,7 +3039,7 @@ void Assembler::dd(uint32_t data) { // Relocation information implementations. void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { - ASSERT(rmode != RelocInfo::NONE); + ASSERT(!RelocInfo::IsNone(rmode)); // Don't record external references unless the heap will be serialized. 
if (rmode == RelocInfo::EXTERNAL_REFERENCE) { #ifdef DEBUG @@ -3047,7 +3078,9 @@ void Assembler::RecordComment(const char* msg, bool force) { const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask | - 1 << RelocInfo::INTERNAL_REFERENCE; + 1 << RelocInfo::RUNTIME_ENTRY | + 1 << RelocInfo::INTERNAL_REFERENCE | + 1 << RelocInfo::CODE_AGE_SEQUENCE; bool RelocInfo::IsCodedSpecially() { diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h index e8b0be9bab..49a57e6929 100644 --- a/deps/v8/src/x64/assembler-x64.h +++ b/deps/v8/src/x64/assembler-x64.h @@ -95,21 +95,24 @@ struct Register { // r10 - fixed scratch register // r12 - smi constant register // r13 - root register + static const int kMaxNumAllocatableRegisters = 10; + static int NumAllocatableRegisters() { + return kMaxNumAllocatableRegisters; + } static const int kNumRegisters = 16; - static const int kNumAllocatableRegisters = 10; static int ToAllocationIndex(Register reg) { return kAllocationIndexByRegisterCode[reg.code()]; } static Register FromAllocationIndex(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); Register result = { kRegisterCodeByAllocationIndex[index] }; return result; } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "rax", "rbx", @@ -157,7 +160,7 @@ struct Register { int code_; private: - static const int kRegisterCodeByAllocationIndex[kNumAllocatableRegisters]; + static const int kRegisterCodeByAllocationIndex[kMaxNumAllocatableRegisters]; static const int kAllocationIndexByRegisterCode[kNumRegisters]; }; @@ -199,8 +202,11 @@ const Register no_reg = { kRegister_no_reg_Code }; struct XMMRegister { - static const int kNumRegisters = 16; - static const int kNumAllocatableRegisters = 15; + static const int kMaxNumRegisters = 16; + static const int kMaxNumAllocatableRegisters = 15; + static int NumAllocatableRegisters() { + return kMaxNumAllocatableRegisters; + } static int ToAllocationIndex(XMMRegister reg) { ASSERT(reg.code() != 0); @@ -208,13 +214,13 @@ struct XMMRegister { } static XMMRegister FromAllocationIndex(int index) { - ASSERT(0 <= index && index < kNumAllocatableRegisters); + ASSERT(0 <= index && index < kMaxNumAllocatableRegisters); XMMRegister result = { index + 1 }; return result; } static const char* AllocationIndexToString(int index) { - ASSERT(index >= 0 && index < kNumAllocatableRegisters); + ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters); const char* const names[] = { "xmm1", "xmm2", @@ -237,11 +243,11 @@ struct XMMRegister { static XMMRegister from_code(int code) { ASSERT(code >= 0); - ASSERT(code < kNumRegisters); + ASSERT(code < kMaxNumRegisters); XMMRegister r = { code }; return r; } - bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; } + bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; } bool is(XMMRegister reg) const { return code_ == reg.code_; } int code() const { ASSERT(is_valid()); @@ -436,10 +442,10 @@ class Operand BASE_EMBEDDED { // CpuFeatures keeps track of which features are supported by the target CPU. -// Supported features must be enabled by a Scope before use. +// Supported features must be enabled by a CpuFeatureScope before use. 
// Example: -// if (CpuFeatures::IsSupported(SSE3)) { -// CpuFeatures::Scope fscope(SSE3); +// if (assembler->IsSupported(SSE3)) { +// CpuFeatureScope fscope(assembler, SSE3); // // Generate SSE3 floating point code. // } else { // // Generate standard x87 or SSE2 floating point code. @@ -459,57 +465,19 @@ class CpuFeatures : public AllStatic { if (f == CMOV && !FLAG_enable_cmov) return false; if (f == RDTSC && !FLAG_enable_rdtsc) return false; if (f == SAHF && !FLAG_enable_sahf) return false; - return (supported_ & (V8_UINT64_C(1) << f)) != 0; + return (supported_ & (static_cast<uint64_t>(1) << f)) != 0; } -#ifdef DEBUG - // Check whether a feature is currently enabled. - static bool IsEnabled(CpuFeature f) { + static bool IsFoundByRuntimeProbingOnly(CpuFeature f) { ASSERT(initialized_); - Isolate* isolate = Isolate::UncheckedCurrent(); - if (isolate == NULL) { - // When no isolate is available, work as if we're running in - // release mode. - return IsSupported(f); - } - uint64_t enabled = isolate->enabled_cpu_features(); - return (enabled & (V8_UINT64_C(1) << f)) != 0; + return (found_by_runtime_probing_only_ & + (static_cast<uint64_t>(1) << f)) != 0; } -#endif - // Enable a specified feature within a scope. - class Scope BASE_EMBEDDED { -#ifdef DEBUG - - public: - explicit Scope(CpuFeature f) { - uint64_t mask = V8_UINT64_C(1) << f; - ASSERT(CpuFeatures::IsSupported(f)); - ASSERT(!Serializer::enabled() || - (CpuFeatures::found_by_runtime_probing_ & mask) == 0); - isolate_ = Isolate::UncheckedCurrent(); - old_enabled_ = 0; - if (isolate_ != NULL) { - old_enabled_ = isolate_->enabled_cpu_features(); - isolate_->set_enabled_cpu_features(old_enabled_ | mask); - } - } - ~Scope() { - ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_); - if (isolate_ != NULL) { - isolate_->set_enabled_cpu_features(old_enabled_); - } - } - - private: - Isolate* isolate_; - uint64_t old_enabled_; -#else - - public: - explicit Scope(CpuFeature f) {} -#endif - }; + static bool IsSafeForSnapshot(CpuFeature f) { + return (IsSupported(f) && + (!Serializer::enabled() || !IsFoundByRuntimeProbingOnly(f))); + } private: // Safe defaults include SSE2 and CMOV for X64. It is always available, if @@ -522,8 +490,9 @@ class CpuFeatures : public AllStatic { static bool initialized_; #endif static uint64_t supported_; - static uint64_t found_by_runtime_probing_; + static uint64_t found_by_runtime_probing_only_; + friend class ExternalReference; DISALLOW_COPY_AND_ASSIGN(CpuFeatures); }; @@ -556,15 +525,7 @@ class Assembler : public AssemblerBase { // is too small, a fatal error occurs. No deallocation of the buffer is done // upon destruction of the assembler. Assembler(Isolate* isolate, void* buffer, int buffer_size); - ~Assembler(); - - // Overrides the default provided by FLAG_debug_code. - void set_emit_debug_code(bool value) { emit_debug_code_ = value; } - - // Avoids using instructions that vary in size in unpredictable ways between - // the snapshot and the running VM. This is needed by the full compiler so - // that it can recompile code with debug support and fix the PC. - void set_predictable_code_size(bool value) { predictable_code_size_ = value; } + virtual ~Assembler() { } // GetCode emits any pending (non-emitted) code and fills the descriptor // desc. 
GetCode() is idempotent; it returns the same result if no other @@ -600,6 +561,7 @@ class Assembler : public AssemblerBase { } inline Handle<Object> code_target_object_handle_at(Address pc); + inline Address runtime_entry_at(Address pc); // Number of bytes taken up by the branch target in the code. static const int kSpecialTargetSize = 4; // Use 32-bit displacement. // Distance between the address of the code target in the call instruction @@ -734,6 +696,7 @@ class Assembler : public AssemblerBase { void movzxbl(Register dst, const Operand& src); void movzxwq(Register dst, const Operand& src); void movzxwl(Register dst, const Operand& src); + void movzxwl(Register dst, Register src); // Repeated moves. @@ -1021,6 +984,14 @@ class Assembler : public AssemblerBase { shift(dst, imm8, 0x1); } + void rorl(Register dst, Immediate imm8) { + shift_32(dst, imm8, 0x1); + } + + void rorl_cl(Register dst) { + shift_32(dst, 0x1); + } + // Shifts dst:src left by cl bits, affecting only dst. void shld(Register dst, Register src); @@ -1210,6 +1181,7 @@ class Assembler : public AssemblerBase { // Calls // Call near relative 32-bit displacement, relative to next instruction. void call(Label* L); + void call(Address entry, RelocInfo::Mode rmode); void call(Handle<Code> target, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, TypeFeedbackId ast_id = TypeFeedbackId::None()); @@ -1231,6 +1203,7 @@ class Assembler : public AssemblerBase { // Use a 32-bit signed displacement. // Unconditional jump to L void jmp(Label* L, Label::Distance distance = Label::kFar); + void jmp(Address entry, RelocInfo::Mode rmode); void jmp(Handle<Code> target, RelocInfo::Mode rmode); // Jump near absolute indirect (r64) @@ -1243,6 +1216,7 @@ class Assembler : public AssemblerBase { void j(Condition cc, Label* L, Label::Distance distance = Label::kFar); + void j(Condition cc, Address entry, RelocInfo::Mode rmode); void j(Condition cc, Handle<Code> target, RelocInfo::Mode rmode); // Floating-point operations @@ -1363,8 +1337,10 @@ class Assembler : public AssemblerBase { void cvtsd2siq(Register dst, XMMRegister src); void addsd(XMMRegister dst, XMMRegister src); + void addsd(XMMRegister dst, const Operand& src); void subsd(XMMRegister dst, XMMRegister src); void mulsd(XMMRegister dst, XMMRegister src); + void mulsd(XMMRegister dst, const Operand& src); void divsd(XMMRegister dst, XMMRegister src); void andpd(XMMRegister dst, XMMRegister src); @@ -1386,6 +1362,7 @@ class Assembler : public AssemblerBase { void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode); void movmskpd(Register dst, XMMRegister src); + void movmskps(Register dst, XMMRegister src); // The first argument is the reg field, the second argument is the r/m field. void emit_sse_operand(XMMRegister dst, XMMRegister src); @@ -1416,8 +1393,6 @@ class Assembler : public AssemblerBase { void db(uint8_t data); void dd(uint32_t data); - int pc_offset() const { return static_cast<int>(pc_ - buffer_); } - PositionsRecorder* positions_recorder() { return &positions_recorder_; } // Check if there is less than kGap bytes available in the buffer. @@ -1436,15 +1411,10 @@ class Assembler : public AssemblerBase { // Avoid overflows for displacements etc. 
static const int kMaximalBufferSize = 512*MB; - static const int kMinimalBufferSize = 4*KB; byte byte_at(int pos) { return buffer_[pos]; } void set_byte_at(int pos, byte value) { buffer_[pos] = value; } - protected: - bool emit_debug_code() const { return emit_debug_code_; } - bool predictable_code_size() const { return predictable_code_size_; } - private: byte* addr_at(int pos) { return buffer_ + pos; } uint32_t long_at(int pos) { @@ -1464,6 +1434,7 @@ class Assembler : public AssemblerBase { inline void emit_code_target(Handle<Code> target, RelocInfo::Mode rmode, TypeFeedbackId ast_id = TypeFeedbackId::None()); + inline void emit_runtime_entry(Address entry, RelocInfo::Mode rmode); void emit(Immediate x) { emitl(x.value_); } // Emits a REX prefix that encodes a 64-bit operand size and @@ -1632,24 +1603,12 @@ class Assembler : public AssemblerBase { friend class EnsureSpace; friend class RegExpMacroAssemblerX64; - // Code buffer: - // The buffer into which code and relocation info are generated. - byte* buffer_; - int buffer_size_; - // True if the assembler owns the buffer, false if buffer is external. - bool own_buffer_; - // code generation - byte* pc_; // the program counter; moves forward RelocInfoWriter reloc_info_writer; List< Handle<Code> > code_targets_; PositionsRecorder positions_recorder_; - - bool emit_debug_code_; - bool predictable_code_size_; - friend class PositionsRecorder; }; diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc index 9e4153a868..0b70c1a0c0 100644 --- a/deps/v8/src/x64/builtins-x64.cc +++ b/deps/v8/src/x64/builtins-x64.cc @@ -88,6 +88,33 @@ void Builtins::Generate_InRecompileQueue(MacroAssembler* masm) { } +void Builtins::Generate_InstallRecompiledCode(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Push a copy of the function onto the stack. + __ push(rdi); + // Push call kind information. + __ push(rcx); + + __ push(rdi); // Function is also the parameter to the runtime call. + __ CallRuntime(Runtime::kInstallRecompiledCode, 1); + + // Restore call kind information. + __ pop(rcx); + // Restore function. + __ pop(rdi); + + // Tear down internal frame. + } + + // Do a tail-call of the compiled function. + __ lea(rax, FieldOperand(rax, Code::kHeaderSize)); + __ jmp(rax); +} + + void Builtins::Generate_ParallelRecompile(MacroAssembler* masm) { { FrameScope scope(masm, StackFrame::INTERNAL); @@ -389,6 +416,10 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm, __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); __ j(above_equal, &exit); + // Symbols are "objects". + __ CmpInstanceType(rcx, SYMBOL_TYPE); + __ j(equal, &exit); + // Throw away the result of the constructor invocation and use the // on-stack receiver as the result. __ bind(&use_receiver); @@ -523,6 +554,10 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm, // Invoke the code. if (is_construct) { + // No type feedback cell is available + Handle<Object> undefined_sentinel( + masm->isolate()->factory()->undefined_value()); + __ Move(rbx, undefined_sentinel); // Expects rdi to hold function pointer. 
CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); __ CallStub(&stub); @@ -606,6 +641,65 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) { } +static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) { + // For now, we are relying on the fact that make_code_young doesn't do any + // garbage collection which allows us to save/restore the registers without + // worrying about which of them contain pointers. We also don't build an + // internal frame to make the code faster, since we shouldn't have to do stack + // crawls in MakeCodeYoung. This seems a bit fragile. + + // Re-execute the code that was patched back to the young age when + // the stub returns. + __ subq(Operand(rsp, 0), Immediate(5)); + __ Pushad(); +#ifdef _WIN64 + __ movq(rcx, Operand(rsp, kNumSafepointRegisters * kPointerSize)); +#else + __ movq(rdi, Operand(rsp, kNumSafepointRegisters * kPointerSize)); +#endif + { // NOLINT + FrameScope scope(masm, StackFrame::MANUAL); + __ PrepareCallCFunction(1); + __ CallCFunction( + ExternalReference::get_make_code_young_function(masm->isolate()), 1); + } + __ Popad(); + __ ret(0); +} + + +#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \ +void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} \ +void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \ + MacroAssembler* masm) { \ + GenerateMakeCodeYoungAgainCommon(masm); \ +} +CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR) +#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR + + +void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) { + // Enter an internal frame. + { + FrameScope scope(masm, StackFrame::INTERNAL); + + // Preserve registers across notification, this is important for compiled + // stubs that tail call the runtime on deopts passing their parameters in + // registers. + __ Pushad(); + __ CallRuntime(Runtime::kNotifyStubFailure, 0); + __ Popad(); + // Tear down internal frame. + } + + __ pop(MemOperand(rsp, 0)); // Ignore state offset + __ ret(0); // Return to IC Miss stub, continuation still on stack. +} + + static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, Deoptimizer::BailoutType type) { // Enter an internal frame. @@ -620,17 +714,17 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm, } // Get the full codegen state from the stack and untag it. - __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize)); + __ SmiToInteger32(r10, Operand(rsp, 1 * kPointerSize)); // Switch on the state. Label not_no_registers, not_tos_rax; - __ cmpq(rcx, Immediate(FullCodeGenerator::NO_REGISTERS)); + __ cmpq(r10, Immediate(FullCodeGenerator::NO_REGISTERS)); __ j(not_equal, ¬_no_registers, Label::kNear); __ ret(1 * kPointerSize); // Remove state. __ bind(¬_no_registers); __ movq(rax, Operand(rsp, 2 * kPointerSize)); - __ cmpq(rcx, Immediate(FullCodeGenerator::TOS_REG)); + __ cmpq(r10, Immediate(FullCodeGenerator::TOS_REG)); __ j(not_equal, ¬_tos_rax, Label::kNear); __ ret(2 * kPointerSize); // Remove state, rax. @@ -1024,12 +1118,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm, if (initial_capacity > 0) { size += FixedArray::SizeFor(initial_capacity); } - __ AllocateInNewSpace(size, - result, - scratch2, - scratch3, - gc_required, - TAG_OBJECT); + __ Allocate(size, result, scratch2, scratch3, gc_required, TAG_OBJECT); // Allocated the JSArray. Now initialize the fields except for the elements // array. 
@@ -1444,30 +1533,62 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) { // -- rsp[0] : return address // -- rsp[8] : last argument // ----------------------------------- - Label generic_constructor; - if (FLAG_debug_code) { // The array construct code is only set for the builtin and internal // Array functions which always have a map. + // Initial map for the builtin Array function should be a map. - __ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); + __ movq(rcx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset)); // Will both indicate a NULL and a Smi. STATIC_ASSERT(kSmiTag == 0); - Condition not_smi = NegateCondition(masm->CheckSmi(rbx)); + Condition not_smi = NegateCondition(masm->CheckSmi(rcx)); __ Check(not_smi, "Unexpected initial map for Array function"); - __ CmpObjectType(rbx, MAP_TYPE, rcx); + __ CmpObjectType(rcx, MAP_TYPE, rcx); __ Check(equal, "Unexpected initial map for Array function"); - } - // Run the native code for the Array function called as constructor. - ArrayNativeCode(masm, &generic_constructor); + if (FLAG_optimize_constructed_arrays) { + // We should either have undefined in ebx or a valid jsglobalpropertycell + Label okay_here; + Handle<Object> undefined_sentinel( + masm->isolate()->factory()->undefined_value()); + Handle<Map> global_property_cell_map( + masm->isolate()->heap()->global_property_cell_map()); + __ Cmp(rbx, undefined_sentinel); + __ j(equal, &okay_here); + __ Cmp(FieldOperand(rbx, 0), global_property_cell_map); + __ Assert(equal, "Expected property cell in register rbx"); + __ bind(&okay_here); + } + } - // Jump to the generic construct code in case the specialized code cannot - // handle the construction. - __ bind(&generic_constructor); - Handle<Code> generic_construct_stub = - masm->isolate()->builtins()->JSConstructStubGeneric(); - __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + if (FLAG_optimize_constructed_arrays) { + Label not_zero_case, not_one_case; + __ testq(rax, rax); + __ j(not_zero, ¬_zero_case); + ArrayNoArgumentConstructorStub no_argument_stub; + __ TailCallStub(&no_argument_stub); + + __ bind(¬_zero_case); + __ cmpq(rax, Immediate(1)); + __ j(greater, ¬_one_case); + ArraySingleArgumentConstructorStub single_argument_stub; + __ TailCallStub(&single_argument_stub); + + __ bind(¬_one_case); + ArrayNArgumentsConstructorStub n_argument_stub; + __ TailCallStub(&n_argument_stub); + } else { + Label generic_constructor; + // Run the native code for the Array function called as constructor. + ArrayNativeCode(masm, &generic_constructor); + + // Jump to the generic construct code in case the specialized code cannot + // handle the construction. + __ bind(&generic_constructor); + Handle<Code> generic_construct_stub = + masm->isolate()->builtins()->JSConstructStubGeneric(); + __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); + } } @@ -1520,12 +1641,12 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Allocate a JSValue and put the tagged pointer into rax. Label gc_required; - __ AllocateInNewSpace(JSValue::kSize, - rax, // Result. - rcx, // New allocation top (we ignore it). - no_reg, - &gc_required, - TAG_OBJECT); + __ Allocate(JSValue::kSize, + rax, // Result. + rcx, // New allocation top (we ignore it). + no_reg, + &gc_required, + TAG_OBJECT); // Set the map. 
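The FLAG_optimize_constructed_arrays path in Generate_ArrayConstructCode above is a three-way split on the argument count. The plain C++ below only mirrors that control flow; the stub names are reused as ordinary functions for illustration, which is of course not how the generated code tail-calls them.

  #include <cstdio>

  void NoArgumentStub()      { std::puts("empty array, default capacity"); }
  void SingleArgumentStub()  { std::puts("length or one-element array");   }
  void NArgumentsStub(int n) { std::printf("%d-element array\n", n);       }

  // Same three-way split the builtin emits: argc == 0, argc == 1, argc > 1.
  void ArrayConstructDispatch(int argc) {
    if (argc == 0) {
      NoArgumentStub();
    } else if (argc == 1) {
      SingleArgumentStub();
    } else {
      NArgumentsStub(argc);
    }
  }

  int main() {
    for (int argc : {0, 1, 3}) ArrayConstructDispatch(argc);
    return 0;
  }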
__ LoadGlobalFunctionInitialMap(rdi, rcx); @@ -1580,7 +1701,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) { // Load the empty string into rbx, remove the receiver from the // stack, and jump back to the case where the argument is a string. __ bind(&no_arguments); - __ LoadRoot(rbx, Heap::kEmptyStringRootIndex); + __ LoadRoot(rbx, Heap::kempty_stringRootIndex); __ pop(rcx); __ lea(rsp, Operand(rsp, kPointerSize)); __ push(rcx); diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc index f0f9c5d272..04c3e6837b 100644 --- a/deps/v8/src/x64/code-stubs-x64.cc +++ b/deps/v8/src/x64/code-stubs-x64.cc @@ -32,10 +32,85 @@ #include "bootstrapper.h" #include "code-stubs.h" #include "regexp-macro-assembler.h" +#include "stub-cache.h" +#include "runtime.h" namespace v8 { namespace internal { + +void FastCloneShallowObjectStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { rax, rbx, rcx, rdx }; + descriptor->register_param_count_ = 4; + descriptor->register_params_ = registers; + descriptor->stack_parameter_count_ = NULL; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry; +} + + +void KeyedLoadFastElementStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { rdx, rax }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure); +} + + +void TransitionElementsKindStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + static Register registers[] = { rax, rbx }; + descriptor->register_param_count_ = 2; + descriptor->register_params_ = registers; + descriptor->deoptimization_handler_ = + Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry; +} + + +static void InitializeArrayConstructorDescriptor(Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + // register state + // rdi -- constructor function + // rbx -- type info cell with elements kind + // rax -- number of arguments to the constructor function + static Register registers[] = { rdi, rbx }; + descriptor->register_param_count_ = 2; + // stack param count needs (constructor pointer, and single argument) + descriptor->stack_parameter_count_ = &rax; + descriptor->register_params_ = registers; + descriptor->extra_expression_stack_count_ = 1; + descriptor->deoptimization_handler_ = + FUNCTION_ADDR(ArrayConstructor_StubFailure); +} + + +void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + +void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor( + Isolate* isolate, + CodeStubInterfaceDescriptor* descriptor) { + InitializeArrayConstructorDescriptor(isolate, descriptor); +} + + #define __ ACCESS_MASM(masm) void ToNumberStub::Generate(MacroAssembler* masm) { @@ -65,7 +140,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) { Counters* counters = masm->isolate()->counters(); Label gc; - __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, 
rcx, &gc, TAG_OBJECT); + __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); __ IncrementCounter(counters->fast_new_closure_total(), 1); @@ -199,8 +274,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize, - rax, rbx, rcx, &gc, TAG_OBJECT); + __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize, + rax, rbx, rcx, &gc, TAG_OBJECT); // Get the function from the stack. __ movq(rcx, Operand(rsp, 1 * kPointerSize)); @@ -245,8 +320,8 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) { // Try to allocate the context in new space. Label gc; int length = slots_ + Context::MIN_CONTEXT_SLOTS; - __ AllocateInNewSpace(FixedArray::SizeFor(length), - rax, rbx, rcx, &gc, TAG_OBJECT); + __ Allocate(FixedArray::SizeFor(length), + rax, rbx, rcx, &gc, TAG_OBJECT); // Get the function from the stack. __ movq(rcx, Operand(rsp, 1 * kPointerSize)); @@ -304,6 +379,7 @@ static void GenerateFastCloneShallowArrayCommon( MacroAssembler* masm, int length, FastCloneShallowArrayStub::Mode mode, + AllocationSiteMode allocation_site_mode, Label* fail) { // Registers on entry: // @@ -317,11 +393,26 @@ static void GenerateFastCloneShallowArrayCommon( ? FixedDoubleArray::SizeFor(length) : FixedArray::SizeFor(length); } - int size = JSArray::kSize + elements_size; + int size = JSArray::kSize; + int allocation_info_start = size; + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + size += AllocationSiteInfo::kSize; + } + size += elements_size; // Allocate both the JS array and the elements array in one big // allocation. This avoids multiple limit checks. - __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT); + AllocationFlags flags = TAG_OBJECT; + if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) { + flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags); + } + __ Allocate(size, rax, rbx, rdx, fail, flags); + + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex); + __ movq(FieldOperand(rax, allocation_info_start), kScratchRegister); + __ movq(FieldOperand(rax, allocation_info_start + kPointerSize), rcx); + } // Copy the JS array part. for (int i = 0; i < JSArray::kSize; i += kPointerSize) { @@ -335,7 +426,11 @@ static void GenerateFastCloneShallowArrayCommon( // Get hold of the elements array of the boilerplate and setup the // elements pointer in the resulting object. __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset)); - __ lea(rdx, Operand(rax, JSArray::kSize)); + if (allocation_site_mode == TRACK_ALLOCATION_SITE) { + __ lea(rdx, Operand(rax, JSArray::kSize + AllocationSiteInfo::kSize)); + } else { + __ lea(rdx, Operand(rax, JSArray::kSize)); + } __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx); // Copy the elements array. 
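The single-allocation layout that GenerateFastCloneShallowArrayCommon builds above (JS array header, optional AllocationSiteInfo, then the elements backing store) reduces to a bit of offset arithmetic. The sketch below restates that arithmetic with placeholder constants; the real values come from JSArray::kSize, AllocationSiteInfo::kSize and FixedArray::SizeFor(length).

  #include <cstdio>

  // Placeholder sizes, not the real V8 constants.
  const int kJSArraySize = 32;
  const int kAllocationSiteInfoSize = 16;

  struct CloneLayout {
    int total_size;
    int allocation_info_start;  // -1 when no site info is emitted
    int elements_start;
  };

  // Mirrors the size/offset computation in the stub: header, optional
  // allocation-site slot, then elements, all carved out of one allocation.
  CloneLayout ComputeLayout(int elements_size, bool track_allocation_site) {
    CloneLayout layout;
    layout.allocation_info_start = track_allocation_site ? kJSArraySize : -1;
    layout.elements_start =
        kJSArraySize + (track_allocation_site ? kAllocationSiteInfoSize : 0);
    layout.total_size = layout.elements_start + elements_size;
    return layout;
  }

  int main() {
    CloneLayout l = ComputeLayout(64, true);
    std::printf("total=%d info@%d elements@%d\n",
                l.total_size, l.allocation_info_start, l.elements_start);
    return 0;
  }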
@@ -388,16 +483,18 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), factory->fixed_cow_array_map()); __ j(not_equal, &check_fast_elements); - GenerateFastCloneShallowArrayCommon(masm, 0, - COPY_ON_WRITE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS, + allocation_site_mode_, + &slow_case); __ ret(3 * kPointerSize); __ bind(&check_fast_elements); __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset), factory->fixed_array_map()); __ j(not_equal, &double_elements); - GenerateFastCloneShallowArrayCommon(masm, length_, - CLONE_ELEMENTS, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS, + allocation_site_mode_, + &slow_case); __ ret(3 * kPointerSize); __ bind(&double_elements); @@ -427,7 +524,9 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { __ pop(rcx); } - GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case); + GenerateFastCloneShallowArrayCommon(masm, length_, mode, + allocation_site_mode_, + &slow_case); __ ret(3 * kPointerSize); __ bind(&slow_case); @@ -435,49 +534,6 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) { } -void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { - // Stack layout on entry: - // - // [rsp + kPointerSize]: object literal flags. - // [rsp + (2 * kPointerSize)]: constant properties. - // [rsp + (3 * kPointerSize)]: literal index. - // [rsp + (4 * kPointerSize)]: literals array. - - // Load boilerplate object into ecx and check if we need to create a - // boilerplate. - Label slow_case; - __ movq(rcx, Operand(rsp, 4 * kPointerSize)); - __ movq(rax, Operand(rsp, 3 * kPointerSize)); - SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2); - __ movq(rcx, - FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize)); - __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex); - __ j(equal, &slow_case); - - // Check that the boilerplate contains only fast properties and we can - // statically determine the instance size. - int size = JSObject::kHeaderSize + length_ * kPointerSize; - __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset)); - __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset)); - __ cmpq(rax, Immediate(size >> kPointerSizeLog2)); - __ j(not_equal, &slow_case); - - // Allocate the JS object and copy header together with all in-object - // properties from the boilerplate. - __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT); - for (int i = 0; i < size; i += kPointerSize) { - __ movq(rbx, FieldOperand(rcx, i)); - __ movq(FieldOperand(rax, i), rbx); - } - - // Return and remove the on-stack parameters. - __ ret(4 * kPointerSize); - - __ bind(&slow_case); - __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1); -} - - // The stub expects its argument on the stack and returns its result in tos_: // zero for false, and a non-zero value for true. void ToBooleanStub::Generate(MacroAssembler* masm) { @@ -637,6 +693,10 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) { class FloatingPointHelper : public AllStatic { public: + enum ConvertUndefined { + CONVERT_UNDEFINED_TO_ZERO, + BAILOUT_ON_UNDEFINED + }; // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. // If the operands are not both numbers, jump to not_numbers. // Leaves rdx and rax unchanged. SmiOperands assumes both are smis. 
@@ -672,7 +732,8 @@ class FloatingPointHelper : public AllStatic { Register scratch2, Register scratch3, Label* on_success, - Label* on_not_smis); + Label* on_not_smis, + ConvertUndefined convert_undefined); }; @@ -752,8 +813,8 @@ void UnaryOpStub::Generate(MacroAssembler* masm) { case UnaryOpIC::SMI: GenerateSmiStub(masm); break; - case UnaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); + case UnaryOpIC::NUMBER: + GenerateNumberStub(masm); break; case UnaryOpIC::GENERIC: GenerateGenericStub(masm); @@ -834,13 +895,13 @@ void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm, // TODO(svenpanne): Use virtual functions instead of switch. -void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStub(MacroAssembler* masm) { switch (op_) { case Token::SUB: - GenerateHeapNumberStubSub(masm); + GenerateNumberStubSub(masm); break; case Token::BIT_NOT: - GenerateHeapNumberStubBitNot(masm); + GenerateNumberStubBitNot(masm); break; default: UNREACHABLE(); @@ -848,7 +909,7 @@ void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { +void UnaryOpStub::GenerateNumberStubSub(MacroAssembler* masm) { Label non_smi, slow, call_builtin; GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear); __ bind(&non_smi); @@ -860,7 +921,7 @@ void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) { } -void UnaryOpStub::GenerateHeapNumberStubBitNot( +void UnaryOpStub::GenerateNumberStubBitNot( MacroAssembler* masm) { Label non_smi, slow; GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear); @@ -997,16 +1058,15 @@ void UnaryOpStub::PrintName(StringStream* stream) { } +void BinaryOpStub::Initialize() {} + + void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { __ pop(rcx); // Save return address. __ push(rdx); __ push(rax); // Left and right arguments are now on top. - // Push this stub's key. Although the operation and the type info are - // encoded into the key, the encoding is opaque, so push them too. __ Push(Smi::FromInt(MinorKey())); - __ Push(Smi::FromInt(op_)); - __ Push(Smi::FromInt(operands_type_)); __ push(rcx); // Push return address. @@ -1015,69 +1075,16 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), - 5, + 3, 1); } -void BinaryOpStub::Generate(MacroAssembler* masm) { - // Explicitly allow generation of nested stubs. It is safe here because - // generation code does not use any raw pointers. - AllowStubCallsScope allow_stub_calls(masm, true); - - switch (operands_type_) { - case BinaryOpIC::UNINITIALIZED: - GenerateTypeTransition(masm); - break; - case BinaryOpIC::SMI: - GenerateSmiStub(masm); - break; - case BinaryOpIC::INT32: - UNREACHABLE(); - // The int32 case is identical to the Smi case. We avoid creating this - // ic state on x64. 
- break; - case BinaryOpIC::HEAP_NUMBER: - GenerateHeapNumberStub(masm); - break; - case BinaryOpIC::ODDBALL: - GenerateOddballStub(masm); - break; - case BinaryOpIC::BOTH_STRING: - GenerateBothStringStub(masm); - break; - case BinaryOpIC::STRING: - GenerateStringStub(masm); - break; - case BinaryOpIC::GENERIC: - GenerateGeneric(masm); - break; - default: - UNREACHABLE(); - } -} - - -void BinaryOpStub::PrintName(StringStream* stream) { - const char* op_name = Token::Name(op_); - const char* overwrite_name; - switch (mode_) { - case NO_OVERWRITE: overwrite_name = "Alloc"; break; - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; - default: overwrite_name = "UnknownOverwrite"; break; - } - stream->Add("BinaryOpStub_%s_%s_%s", - op_name, - overwrite_name, - BinaryOpIC::GetName(operands_type_)); -} - - -void BinaryOpStub::GenerateSmiCode( +static void BinaryOpStub_GenerateSmiCode( MacroAssembler* masm, Label* slow, - SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { + BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, + Token::Value op) { // Arguments to BinaryOpStub are in rdx and rax. const Register left = rdx; @@ -1086,9 +1093,9 @@ void BinaryOpStub::GenerateSmiCode( // We only generate heapnumber answers for overflowing calculations // for the four basic arithmetic operations and logical right shift by 0. bool generate_inline_heapnumber_results = - (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) && - (op_ == Token::ADD || op_ == Token::SUB || - op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR); + (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && + (op == Token::ADD || op == Token::SUB || + op == Token::MUL || op == Token::DIV || op == Token::SHR); // Smi check of both operands. If op is BIT_OR, the check is delayed // until after the OR operation. @@ -1096,7 +1103,7 @@ void BinaryOpStub::GenerateSmiCode( Label use_fp_on_smis; Label fail; - if (op_ != Token::BIT_OR) { + if (op != Token::BIT_OR) { Comment smi_check_comment(masm, "-- Smi check arguments"); __ JumpIfNotBothSmi(left, right, ¬_smis); } @@ -1105,7 +1112,7 @@ void BinaryOpStub::GenerateSmiCode( __ bind(&smi_values); // Perform the operation. Comment perform_smi(masm, "-- Perform smi operation"); - switch (op_) { + switch (op) { case Token::ADD: ASSERT(right.is(rax)); __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. @@ -1177,7 +1184,7 @@ void BinaryOpStub::GenerateSmiCode( // operations on known smis (e.g., if the result of the operation // overflowed the smi range). __ bind(&use_fp_on_smis); - if (op_ == Token::DIV || op_ == Token::MOD) { + if (op == Token::DIV || op == Token::MOD) { // Restore left and right to rdx and rax. __ movq(rdx, rcx); __ movq(rax, rbx); @@ -1186,12 +1193,12 @@ void BinaryOpStub::GenerateSmiCode( if (generate_inline_heapnumber_results) { __ AllocateHeapNumber(rcx, rbx, slow); Comment perform_float(masm, "-- Perform float operation on smis"); - if (op_ == Token::SHR) { + if (op == Token::SHR) { __ SmiToInteger32(left, left); __ cvtqsi2sd(xmm0, left); } else { FloatingPointHelper::LoadSSE2SmiOperands(masm); - switch (op_) { + switch (op) { case Token::ADD: __ addsd(xmm0, xmm1); break; case Token::SUB: __ subsd(xmm0, xmm1); break; case Token::MUL: __ mulsd(xmm0, xmm1); break; @@ -1214,31 +1221,50 @@ void BinaryOpStub::GenerateSmiCode( // values that could be smi. 
__ bind(¬_smis); Comment done_comment(masm, "-- Enter non-smi code"); + FloatingPointHelper::ConvertUndefined convert_undefined = + FloatingPointHelper::BAILOUT_ON_UNDEFINED; + // This list must be in sync with BinaryOpPatch() behavior in ic.cc. + if (op == Token::BIT_AND || + op == Token::BIT_OR || + op == Token::BIT_XOR || + op == Token::SAR || + op == Token::SHL || + op == Token::SHR) { + convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO; + } FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx, - &smi_values, &fail); + &smi_values, &fail, convert_undefined); __ jmp(&smi_values); __ bind(&fail); } -void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm, - Label* allocation_failure, - Label* non_numeric_failure) { - switch (op_) { +static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Label* alloc_failure, + OverwriteMode mode); + + +static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, + Label* allocation_failure, + Label* non_numeric_failure, + Token::Value op, + OverwriteMode mode) { + switch (op) { case Token::ADD: case Token::SUB: case Token::MUL: case Token::DIV: { FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); - switch (op_) { + switch (op) { case Token::ADD: __ addsd(xmm0, xmm1); break; case Token::SUB: __ subsd(xmm0, xmm1); break; case Token::MUL: __ mulsd(xmm0, xmm1); break; case Token::DIV: __ divsd(xmm0, xmm1); break; default: UNREACHABLE(); } - GenerateHeapResultAllocation(masm, allocation_failure); + BinaryOpStub_GenerateHeapResultAllocation( + masm, allocation_failure, mode); __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); __ ret(0); break; @@ -1259,7 +1285,7 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm, __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, heap_number_map); - switch (op_) { + switch (op) { case Token::BIT_OR: __ orl(rax, rcx); break; case Token::BIT_AND: __ andl(rax, rcx); break; case Token::BIT_XOR: __ xorl(rax, rcx); break; @@ -1283,19 +1309,15 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm, // Logical shift right can produce an unsigned int32 that is not // an int32, and so is not in the smi range. Allocate a heap number // in that case. - if (op_ == Token::SHR) { + if (op == Token::SHR) { __ bind(&non_smi_shr_result); Label allocation_failed; __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). // Allocate heap number in new space. // Not using AllocateHeapNumber macro in order to reuse // already loaded heap_number_map. - __ AllocateInNewSpace(HeapNumber::kSize, - rax, - rdx, - no_reg, - &allocation_failed, - TAG_OBJECT); + __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, + TAG_OBJECT); // Set the map. __ AssertRootValue(heap_number_map, Heap::kHeapNumberMapRootIndex, @@ -1320,12 +1342,12 @@ void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm, // No fall-through from this generated code. 
if (FLAG_debug_code) { __ Abort("Unexpected fall-through in " - "BinaryStub::GenerateFloatingPointCode."); + "BinaryStub_GenerateFloatingPointCode."); } } -void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) { +void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { ASSERT(op_ == Token::ADD); Label left_not_string, call_runtime; @@ -1356,58 +1378,17 @@ void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) { } -void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) { - GenerateRegisterArgsPush(masm); - switch (op_) { - case Token::ADD: - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); - break; - case Token::SUB: - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); - break; - case Token::MUL: - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); - break; - case Token::DIV: - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); - break; - case Token::MOD: - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); - break; - case Token::BIT_OR: - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); - break; - case Token::BIT_AND: - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); - break; - case Token::BIT_XOR: - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); - break; - case Token::SAR: - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); - break; - case Token::SHL: - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); - break; - case Token::SHR: - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); - break; - default: - UNREACHABLE(); - } -} - - void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { Label call_runtime; if (result_type_ == BinaryOpIC::UNINITIALIZED || result_type_ == BinaryOpIC::SMI) { // Only allow smi results. - GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_); } else { // Allow heap number result and don't make a transition if a heap number // cannot be allocated. - GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); } // Code falls through if the result is not returned as either a smi or heap @@ -1416,24 +1397,22 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { if (call_runtime.is_linked()) { __ bind(&call_runtime); - GenerateCallRuntimeCode(masm); + GenerateRegisterArgsPush(masm); + GenerateCallRuntime(masm); } } -void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) { - ASSERT(operands_type_ == BinaryOpIC::STRING); - ASSERT(op_ == Token::ADD); - GenerateStringAddCode(masm); - // Try to add arguments as strings, otherwise, transition to the generic - // BinaryOpIC type. - GenerateTypeTransition(masm); +void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { + // The int32 case is identical to the Smi case. We avoid creating this + // ic state on x64. + UNREACHABLE(); } void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { Label call_runtime; - ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING); + ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); ASSERT(op_ == Token::ADD); // If both arguments are strings, call the string add stub. // Otherwise, do a transition. @@ -1467,7 +1446,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { if (op_ == Token::ADD) { // Handle string addition here, because it is the only operation // that does not do a ToNumber conversion on the operands. - GenerateStringAddCode(masm); + GenerateAddStrings(masm); } // Convert oddball arguments to numbers. 
@@ -1490,43 +1469,83 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { } __ bind(&done); - GenerateHeapNumberStub(masm); + GenerateNumberStub(masm); +} + + +static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm, + Register input, + Label* fail) { + Label ok; + __ JumpIfSmi(input, &ok, Label::kNear); + Register heap_number_map = r8; + Register scratch1 = r9; + Register scratch2 = r10; + // HeapNumbers containing 32bit integer values are also allowed. + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); + __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map); + __ j(not_equal, fail); + __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset)); + // Convert, convert back, and compare the two doubles' bits. + __ cvttsd2siq(scratch2, xmm0); + __ cvtlsi2sd(xmm1, scratch2); + __ movq(scratch1, xmm0); + __ movq(scratch2, xmm1); + __ cmpq(scratch1, scratch2); + __ j(not_equal, fail); + __ bind(&ok); } -void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { +void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { Label gc_required, not_number; - GenerateFloatingPointCode(masm, &gc_required, ¬_number); + + // It could be that only SMIs have been seen at either the left + // or the right operand. For precise type feedback, patch the IC + // again if this changes. + if (left_type_ == BinaryOpIC::SMI) { + BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); + } + if (right_type_ == BinaryOpIC::SMI) { + BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); + } + + BinaryOpStub_GenerateFloatingPointCode( + masm, &gc_required, ¬_number, op_, mode_); __ bind(¬_number); GenerateTypeTransition(masm); __ bind(&gc_required); - GenerateCallRuntimeCode(masm); + GenerateRegisterArgsPush(masm); + GenerateCallRuntime(masm); } void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { Label call_runtime, call_string_add_or_runtime; - GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); + BinaryOpStub_GenerateSmiCode( + masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); - GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime); + BinaryOpStub_GenerateFloatingPointCode( + masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); __ bind(&call_string_add_or_runtime); if (op_ == Token::ADD) { - GenerateStringAddCode(masm); + GenerateAddStrings(masm); } __ bind(&call_runtime); - GenerateCallRuntimeCode(masm); + GenerateRegisterArgsPush(masm); + GenerateCallRuntime(masm); } -void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, - Label* alloc_failure) { +static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, + Label* alloc_failure, + OverwriteMode mode) { Label skip_allocation; - OverwriteMode mode = mode_; switch (mode) { case OVERWRITE_LEFT: { // If the argument in rdx is already an object, we skip the @@ -2022,17 +2041,21 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, Register scratch2, Register scratch3, Label* on_success, - Label* on_not_smis) { + Label* on_not_smis, + ConvertUndefined convert_undefined) { Register heap_number_map = scratch3; Register smi_result = scratch1; - Label done; + Label done, maybe_undefined_first, maybe_undefined_second, first_done; __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); Label first_smi; __ JumpIfSmi(first, &first_smi, Label::kNear); __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, on_not_smis); + __ j(not_equal, + (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) + ? 
&maybe_undefined_first + : on_not_smis); // Convert HeapNumber to smi if possible. __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); __ movq(scratch2, xmm0); @@ -2045,11 +2068,15 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, __ j(not_equal, on_not_smis); __ Integer32ToSmi(first, smi_result); + __ bind(&first_done); __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); __ bind(&first_smi); __ AssertNotSmi(second); __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); - __ j(not_equal, on_not_smis); + __ j(not_equal, + (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) + ? &maybe_undefined_second + : on_not_smis); // Convert second to smi, if possible. __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); __ movq(scratch2, xmm0); @@ -2062,8 +2089,25 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, if (on_success != NULL) { __ jmp(on_success); } else { - __ bind(&done); + __ jmp(&done); + } + + __ bind(&maybe_undefined_first); + __ CompareRoot(first, Heap::kUndefinedValueRootIndex); + __ j(not_equal, on_not_smis); + __ xor_(first, first); + __ jmp(&first_done); + + __ bind(&maybe_undefined_second); + __ CompareRoot(second, Heap::kUndefinedValueRootIndex); + __ j(not_equal, on_not_smis); + __ xor_(second, second); + if (on_success != NULL) { + __ jmp(on_success); } + // Else: fall through. + + __ bind(&done); } @@ -2144,7 +2188,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { Label continue_sqrt, continue_rsqrt, not_plus_half; // Test for 0.5. // Load double_scratch with 0.5. - __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE); + __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE64); __ movq(double_scratch, scratch); // Already ruled out NaNs for exponent. __ ucomisd(double_scratch, double_exponent); @@ -2154,7 +2198,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). // According to IEEE-754, double-precision -Infinity has the highest // 12 bits set and the lowest 52 bits cleared. - __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE); + __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64); __ movq(double_scratch, scratch); __ ucomisd(double_scratch, double_base); // Comparing -Infinity with NaN results in "unordered", which sets the @@ -2186,7 +2230,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). // According to IEEE-754, double-precision -Infinity has the highest // 12 bits set and the lowest 52 bits cleared. 
- __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE); + __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE64); __ movq(double_scratch, scratch); __ ucomisd(double_scratch, double_base); // Comparing -Infinity with NaN results in "unordered", which sets the @@ -2229,7 +2273,7 @@ void MathPowStub::Generate(MacroAssembler* masm) { // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1 __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X) __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X) - __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X) + __ faddp(1); // 2^(X-rnd(X)), rnd(X) // FSCALE calculates st(0) * 2^st(1) __ fscale(); // 2^X, rnd(X) __ fstp(1); @@ -2331,6 +2375,160 @@ void MathPowStub::Generate(MacroAssembler* masm) { } +void ArrayLengthStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + __ Cmp(rax, masm->isolate()->factory()->length_string()); + __ j(not_equal, &miss); + receiver = rdx; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- rax : receiver + // -- rcx : name + // -- rsp[0] : return address + // ----------------------------------- + receiver = rax; + } + + StubCompiler::GenerateLoadArrayLength(masm, receiver, r8, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void FunctionPrototypeStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + __ Cmp(rax, masm->isolate()->factory()->prototype_string()); + __ j(not_equal, &miss); + receiver = rdx; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- rax : receiver + // -- rcx : name + // -- rsp[0] : return address + // ----------------------------------- + receiver = rax; + } + + StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StringLengthStub::Generate(MacroAssembler* masm) { + Label miss; + Register receiver; + if (kind() == Code::KEYED_LOAD_IC) { + // ----------- S t a t e ------------- + // -- rax : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + __ Cmp(rax, masm->isolate()->factory()->length_string()); + __ j(not_equal, &miss); + receiver = rdx; + } else { + ASSERT(kind() == Code::LOAD_IC); + // ----------- S t a t e ------------- + // -- rax : receiver + // -- rcx : name + // -- rsp[0] : return address + // ----------------------------------- + receiver = rax; + } + + StubCompiler::GenerateLoadStringLength(masm, receiver, r8, r9, &miss, + support_wrapper_); + __ bind(&miss); + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void StoreArrayLengthStub::Generate(MacroAssembler* masm) { + // ----------- S t a t e ------------- + // -- rax : value + // -- rcx : key + // -- rdx : receiver + // -- rsp[0] : return address + // ----------------------------------- + // + // This accepts as a receiver anything JSArray::SetElementsLength accepts + // (currently anything except for external arrays which means anything with + // elements of FixedArray type). 
Value must be a number, but only smis are + // accepted as the most common case. + + Label miss; + + Register receiver = rdx; + Register value = rax; + Register scratch = rbx; + if (kind() == Code::KEYED_STORE_IC) { + __ Cmp(rcx, masm->isolate()->factory()->length_string()); + __ j(not_equal, &miss); + } + + // Check that the receiver isn't a smi. + __ JumpIfSmi(receiver, &miss); + + // Check that the object is a JS array. + __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); + __ j(not_equal, &miss); + + // Check that elements are FixedArray. + // We rely on StoreIC_ArrayLength below to deal with all types of + // fast elements (including COW). + __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset)); + __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch); + __ j(not_equal, &miss); + + // Check that the array has fast properties, otherwise the length + // property might have been redefined. + __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset)); + __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset), + Heap::kHashTableMapRootIndex); + __ j(equal, &miss); + + // Check that value is a smi. + __ JumpIfNotSmi(value, &miss); + + // Prepare tail call to StoreIC_ArrayLength. + __ pop(scratch); + __ push(receiver); + __ push(value); + __ push(scratch); // return address + + ExternalReference ref = + ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); + __ TailCallExternalReference(ref, 2, 1); + + __ bind(&miss); + + StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); +} + + +void LoadFieldStub::Generate(MacroAssembler* masm) { + StubCompiler::DoGenerateFastPropertyLoad(masm, rax, reg_, inobject_, index_); + __ ret(0); +} + + void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { // The key is in rdx and the parameter count is in rax. @@ -2770,6 +2968,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ JumpIfSmi(rax, &runtime); __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister); __ j(not_equal, &runtime); + // Check that the RegExp has been compiled (data contains a fixed array). __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset)); if (FLAG_debug_code) { @@ -2790,149 +2989,121 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Check that the number of captures fit in the static offsets vector buffer. __ SmiToInteger32(rdx, FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset)); - // Calculate number of capture registers (number_of_captures + 1) * 2. - __ leal(rdx, Operand(rdx, rdx, times_1, 2)); - // Check that the static offsets vector buffer is large enough. - __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize)); + // Check (number_of_captures + 1) * 2 <= offsets vector size + // Or number_of_captures <= offsets vector size / 2 - 1 + STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); + __ cmpl(rdx, Immediate(Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1)); __ j(above, &runtime); - // rax: RegExp data (FixedArray) - // rdx: Number of capture registers - // Check that the second argument is a string. - __ movq(rdi, Operand(rsp, kSubjectOffset)); - __ JumpIfSmi(rdi, &runtime); - Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx); - __ j(NegateCondition(is_string), &runtime); - - // rdi: Subject string. - // rax: RegExp data (FixedArray). - // rdx: Number of capture registers. - // Check that the third argument is a positive smi less than the string - // length. A negative value will be greater (unsigned comparison). 
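StoreArrayLengthStub's guard chain above (key is "length", receiver is a non-smi JSArray with FixedArray elements and non-dictionary properties, value is a smi) is a conjunction of cheap checks before the actual length update is delegated to StoreIC_ArrayLength. The toy predicates below only restate that conjunction; none of the fields correspond to real V8 object layout.

  #include <cstdio>

  struct Receiver {
    bool is_smi;
    bool is_js_array;
    bool elements_are_fixed_array;   // rules out external/dictionary elements
    bool properties_are_dictionary;  // length may have been redefined
  };

  // Returns true where the stub proceeds to the tail call; any false
  // condition corresponds to a jump to the miss label.
  bool CanUseFastArrayLengthStore(const Receiver& r, bool value_is_smi) {
    return !r.is_smi &&
           r.is_js_array &&
           r.elements_are_fixed_array &&
           !r.properties_are_dictionary &&
           value_is_smi;  // only the common smi-length case is handled here
  }

  int main() {
    Receiver fast = {false, true, true, false};
    Receiver dict = {false, true, true, true};
    std::printf("%d %d\n",
                CanUseFastArrayLengthStore(fast, true),   // 1: tail-call IC
                CanUseFastArrayLengthStore(dict, true));  // 0: miss
    return 0;
  }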
- __ movq(rbx, Operand(rsp, kPreviousIndexOffset)); - __ JumpIfNotSmi(rbx, &runtime); - __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset)); - __ j(above_equal, &runtime); - - // rax: RegExp data (FixedArray) - // rdx: Number of capture registers - // Check that the fourth object is a JSArray object. - __ movq(rdi, Operand(rsp, kLastMatchInfoOffset)); - __ JumpIfSmi(rdi, &runtime); - __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister); - __ j(not_equal, &runtime); - // Check that the JSArray is in fast case. - __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset)); - __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset)); - __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset), - Heap::kFixedArrayMapRootIndex); - __ j(not_equal, &runtime); - // Check that the last match info has space for the capture registers and the - // additional information. Ensure no overflow in add. - STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); - __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset)); - __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead)); - __ cmpl(rdx, rdi); - __ j(greater, &runtime); - // Reset offset for possibly sliced string. __ Set(r14, 0); - // rax: RegExp data (FixedArray) - // Check the representation and encoding of the subject string. - Label seq_ascii_string, seq_two_byte_string, check_code; __ movq(rdi, Operand(rsp, kSubjectOffset)); - // Make a copy of the original subject string. - __ movq(r15, rdi); + __ JumpIfSmi(rdi, &runtime); + __ movq(r15, rdi); // Make a copy of the original subject string. __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset)); __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); - // First check for flat two byte string. + // rax: RegExp data (FixedArray) + // rdi: subject string + // r15: subject string + // Handle subject string according to its encoding and representation: + // (1) Sequential two byte? If yes, go to (9). + // (2) Sequential one byte? If yes, go to (6). + // (3) Anything but sequential or cons? If yes, go to (7). + // (4) Cons string. If the string is flat, replace subject with first string. + // Otherwise bailout. + // (5a) Is subject sequential two byte? If yes, go to (9). + // (5b) Is subject external? If yes, go to (8). + // (6) One byte sequential. Load regexp code for one byte. + // (E) Carry on. + /// [...] + + // Deferred code at the end of the stub: + // (7) Not a long external string? If yes, go to (10). + // (8) External string. Make it, offset-wise, look like a sequential string. + // (8a) Is the external string one byte? If yes, go to (6). + // (9) Two byte sequential. Load regexp code for one byte. Go to (E). + // (10) Short external string or not a string? If yes, bail out to runtime. + // (11) Sliced string. Replace subject with parent. Go to (5a). + + Label seq_one_byte_string /* 6 */, seq_two_byte_string /* 9 */, + external_string /* 8 */, check_underlying /* 5a */, + not_seq_nor_cons /* 7 */, check_code /* E */, + not_long_external /* 10 */; + + // (1) Sequential two byte? If yes, go to (9). __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask | kShortExternalStringMask)); STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string, Label::kNear); - // Any other flat string must be a flat ASCII string. None of the following - // string type tests will succeed if subject is not a string or a short - // external string. 
+ __ j(zero, &seq_two_byte_string); // Go to (9). + + // (2) Sequential one byte? If yes, go to (6). + // Any other sequential string must be one byte. __ andb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask | kShortExternalStringMask)); - __ j(zero, &seq_ascii_string, Label::kNear); - - // rbx: whether subject is a string and if yes, its string representation - // Check for flat cons string or sliced string. - // A flat cons string is a cons string where the second part is the empty - // string. In that case the subject string is just the first part of the cons - // string. Also in this case the first part of the cons string is known to be - // a sequential string or an external string. - // In the case of a sliced string its offset has to be taken into account. - Label cons_string, external_string, check_encoding; + __ j(zero, &seq_one_byte_string, Label::kNear); // Go to (6). + + // (3) Anything but sequential or cons? If yes, go to (7). + // We check whether the subject string is a cons, since sequential strings + // have already been covered. STATIC_ASSERT(kConsStringTag < kExternalStringTag); STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); __ cmpq(rbx, Immediate(kExternalStringTag)); - __ j(less, &cons_string, Label::kNear); - __ j(equal, &external_string); + __ j(greater_equal, ¬_seq_nor_cons); // Go to (7). - // Catch non-string subject or short external string. - STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); - __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask)); - __ j(not_zero, &runtime); - - // String is sliced. - __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset)); - __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset)); - // r14: slice offset - // r15: original subject string - // rdi: parent string - __ jmp(&check_encoding, Label::kNear); - // String is a cons string, check whether it is flat. - __ bind(&cons_string); + // (4) Cons string. Check that it's flat. + // Replace subject with first string and reload instance type. __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset), - Heap::kEmptyStringRootIndex); + Heap::kempty_stringRootIndex); __ j(not_equal, &runtime); __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset)); - // rdi: first part of cons string or parent of sliced string. - // rbx: map of first part of cons string or map of parent of sliced string. - // Is first part of cons or parent of slice a flat two byte string? - __ bind(&check_encoding); + __ bind(&check_underlying); __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset)); - __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset), - Immediate(kStringRepresentationMask | kStringEncodingMask)); + __ movq(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); + + // (5a) Is subject sequential two byte? If yes, go to (9). + __ testb(rbx, Immediate(kStringRepresentationMask | kStringEncodingMask)); STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0); - __ j(zero, &seq_two_byte_string, Label::kNear); - // Any other flat string must be sequential ASCII or external. - __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset), - Immediate(kStringRepresentationMask)); - __ j(not_zero, &external_string); - - __ bind(&seq_ascii_string); - // rdi: subject string (sequential ASCII) + __ j(zero, &seq_two_byte_string); // Go to (9). + // (5b) Is subject external? If yes, go to (8). 
+ __ testb(rbx, Immediate(kStringRepresentationMask)); + // The underlying external string is never a short external string. + STATIC_CHECK(ExternalString::kMaxShortLength < ConsString::kMinLength); + STATIC_CHECK(ExternalString::kMaxShortLength < SlicedString::kMinLength); + __ j(not_zero, &external_string); // Go to (8) + + // (6) One byte sequential. Load regexp code for one byte. + __ bind(&seq_one_byte_string); // rax: RegExp data (FixedArray) __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset)); - __ Set(rcx, 1); // Type is ASCII. - __ jmp(&check_code, Label::kNear); - - __ bind(&seq_two_byte_string); - // rdi: subject string (flat two-byte) - // rax: RegExp data (FixedArray) - __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset)); - __ Set(rcx, 0); // Type is two byte. + __ Set(rcx, 1); // Type is one byte. + // (E) Carry on. String handling is done. __ bind(&check_code); + // r11: irregexp code // Check that the irregexp code has been generated for the actual string // encoding. If it has, the field contains a code object otherwise it contains // smi (code flushing support) __ JumpIfSmi(r11, &runtime); - // rdi: subject string + // rdi: sequential subject string (or look-alike, external string) + // r15: original subject string // rcx: encoding of subject string (1 if ASCII, 0 if two_byte); // r11: code // Load used arguments before starting to push arguments for call to native // RegExp code to avoid handling changing stack height. - __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset)); + // We have to use r15 instead of rdi to load the length because rdi might + // have been only made to look like a sequential string when it actually + // is an external string. + __ movq(rbx, Operand(rsp, kPreviousIndexOffset)); + __ JumpIfNotSmi(rbx, &runtime); + __ SmiCompare(rbx, FieldOperand(r15, String::kLengthOffset)); + __ j(above_equal, &runtime); + __ SmiToInteger64(rbx, rbx); // rdi: subject string // rbx: previous index @@ -3022,8 +3193,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // r15: original subject string __ testb(rcx, rcx); // Last use of rcx as encoding of subject string. __ j(zero, &setup_two_byte, Label::kNear); - __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize)); - __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize)); + __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize)); + __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize)); __ jmp(&setup_rest, Label::kNear); __ bind(&setup_two_byte); __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize)); @@ -3071,9 +3242,23 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ leal(rdx, Operand(rax, rax, times_1, 2)); // rdx: Number of capture registers - // Load last_match_info which is still known to be a fast case JSArray. - __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); - __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset)); + // Check that the fourth object is a JSArray object. + __ movq(r15, Operand(rsp, kLastMatchInfoOffset)); + __ JumpIfSmi(r15, &runtime); + __ CmpObjectType(r15, JS_ARRAY_TYPE, kScratchRegister); + __ j(not_equal, &runtime); + // Check that the JSArray is in fast case. 
+ __ movq(rbx, FieldOperand(r15, JSArray::kElementsOffset)); + __ movq(rax, FieldOperand(rbx, HeapObject::kMapOffset)); + __ CompareRoot(rax, Heap::kFixedArrayMapRootIndex); + __ j(not_equal, &runtime); + // Check that the last match info has space for the capture registers and the + // additional information. Ensure no overflow in add. + STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset); + __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset)); + __ subl(rax, Immediate(RegExpImpl::kLastMatchOverhead)); + __ cmpl(rdx, rax); + __ j(greater, &runtime); // rbx: last_match_info backing store (FixedArray) // rdx: number of capture registers @@ -3084,12 +3269,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { // Store last subject and last input. __ movq(rax, Operand(rsp, kSubjectOffset)); __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax); + __ movq(rcx, rax); __ RecordWriteField(rbx, RegExpImpl::kLastSubjectOffset, rax, rdi, kDontSaveFPRegs); - __ movq(rax, Operand(rsp, kSubjectOffset)); + __ movq(rax, rcx); __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax); __ RecordWriteField(rbx, RegExpImpl::kLastInputOffset, @@ -3123,7 +3309,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&done); // Return last match info. - __ movq(rax, Operand(rsp, kLastMatchInfoOffset)); + __ movq(rax, r15); __ ret(4 * kPointerSize); __ bind(&exception); @@ -3149,9 +3335,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { __ bind(&termination_exception); __ ThrowUncatchable(rax); - // External string. Short external strings have already been ruled out. - // rdi: subject string (expected to be external) - // rbx: scratch + // Do the runtime call to execute the regexp. + __ bind(&runtime); + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + + // Deferred code for string handling. + // (7) Not a long external string? If yes, go to (10). + __ bind(¬_seq_nor_cons); + // Compare flags are still set from (3). + __ j(greater, ¬_long_external, Label::kNear); // Go to (10). + + // (8) External string. Short external strings have been ruled out. __ bind(&external_string); __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset)); __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset)); @@ -3163,16 +3357,33 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { } __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string. - STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); STATIC_ASSERT(kTwoByteStringTag == 0); + // (8a) Is the external string one byte? If yes, go to (6). __ testb(rbx, Immediate(kStringEncodingMask)); - __ j(not_zero, &seq_ascii_string); - __ jmp(&seq_two_byte_string); + __ j(not_zero, &seq_one_byte_string); // Goto (6). - // Do the runtime call to execute the regexp. - __ bind(&runtime); - __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); + // rdi: subject string (flat two-byte) + // rax: RegExp data (FixedArray) + // (9) Two byte sequential. Load regexp code for one byte. Go to (E). + __ bind(&seq_two_byte_string); + __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset)); + __ Set(rcx, 0); // Type is two byte. + __ jmp(&check_code); // Go to (E). + + // (10) Not a string or a short external string? If yes, bail out to runtime. 
+ __ bind(¬_long_external); + // Catch non-string subject or short external string. + STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0); + __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask)); + __ j(not_zero, &runtime); + + // (11) Sliced string. Replace subject with parent. Go to (5a). + // Load offset into r14 and replace subject string with parent. + __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset)); + __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset)); + __ jmp(&check_underlying); #endif // V8_INTERPRETED_REGEXP } @@ -3378,30 +3589,59 @@ static int NegativeComparisonResult(Condition cc) { } -void CompareStub::Generate(MacroAssembler* masm) { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); +static void CheckInputType(MacroAssembler* masm, + Register input, + CompareIC::State expected, + Label* fail) { + Label ok; + if (expected == CompareIC::SMI) { + __ JumpIfNotSmi(input, fail); + } else if (expected == CompareIC::NUMBER) { + __ JumpIfSmi(input, &ok); + __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL); + __ j(not_equal, fail); + } + // We could be strict about internalized/non-internalized here, but as long as + // hydrogen doesn't care, the stub doesn't have to care either. + __ bind(&ok); +} + + +static void BranchIfNotInternalizedString(MacroAssembler* masm, + Label* label, + Register object, + Register scratch) { + __ JumpIfSmi(object, label); + __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset)); + __ movzxbq(scratch, + FieldOperand(scratch, Map::kInstanceTypeOffset)); + // Ensure that no non-strings have the internalized bit set. + STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsInternalizedMask); + STATIC_ASSERT(kInternalizedTag != 0); + __ testb(scratch, Immediate(kIsInternalizedMask)); + __ j(zero, label); +} + +void ICCompareStub::GenerateGeneric(MacroAssembler* masm) { Label check_unequal_objects, done; + Condition cc = GetCondition(); Factory* factory = masm->isolate()->factory(); - // Compare two smis if required. - if (include_smi_compare_) { - Label non_smi, smi_done; - __ JumpIfNotBothSmi(rax, rdx, &non_smi); - __ subq(rdx, rax); - __ j(no_overflow, &smi_done); - __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here. - __ bind(&smi_done); - __ movq(rax, rdx); - __ ret(0); - __ bind(&non_smi); - } else if (FLAG_debug_code) { - Label ok; - __ JumpIfNotSmi(rdx, &ok); - __ JumpIfNotSmi(rax, &ok); - __ Abort("CompareStub: smi operands"); - __ bind(&ok); - } + Label miss; + CheckInputType(masm, rdx, left_, &miss); + CheckInputType(masm, rax, right_, &miss); + + // Compare two smis. + Label non_smi, smi_done; + __ JumpIfNotBothSmi(rax, rdx, &non_smi); + __ subq(rdx, rax); + __ j(no_overflow, &smi_done); + __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here. + __ bind(&smi_done); + __ movq(rax, rdx); + __ ret(0); + __ bind(&non_smi); // The compare stub returns a positive, negative, or zero 64-bit integer // value in rax, corresponding to result of comparing the two inputs. @@ -3414,66 +3654,58 @@ void CompareStub::Generate(MacroAssembler* masm) { __ cmpq(rax, rdx); __ j(not_equal, ¬_identical, Label::kNear); - if (cc_ != equal) { + if (cc != equal) { // Check for undefined. undefined OP undefined is false even though // undefined == undefined. 
Label check_for_nan; __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); __ j(not_equal, &check_for_nan, Label::kNear); - __ Set(rax, NegativeComparisonResult(cc_)); + __ Set(rax, NegativeComparisonResult(cc)); __ ret(0); __ bind(&check_for_nan); } // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), // so we do the second best thing - test it ourselves. - // Note: if cc_ != equal, never_nan_nan_ is not used. - // We cannot set rax to EQUAL until just before return because - // rax must be unchanged on jump to not_identical. - if (never_nan_nan_ && (cc_ == equal)) { - __ Set(rax, EQUAL); - __ ret(0); - } else { - Label heap_number; - // If it's not a heap number, then return equal for (in)equality operator. - __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), - factory->heap_number_map()); - __ j(equal, &heap_number, Label::kNear); - if (cc_ != equal) { - // Call runtime on identical objects. Otherwise return equal. - __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); - __ j(above_equal, ¬_identical, Label::kNear); - } - __ Set(rax, EQUAL); - __ ret(0); + Label heap_number; + // If it's not a heap number, then return equal for (in)equality operator. + __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), + factory->heap_number_map()); + __ j(equal, &heap_number, Label::kNear); + if (cc != equal) { + // Call runtime on identical objects. Otherwise return equal. + __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx); + __ j(above_equal, ¬_identical, Label::kNear); + } + __ Set(rax, EQUAL); + __ ret(0); - __ bind(&heap_number); - // It is a heap number, so return equal if it's not NaN. - // For NaN, return 1 for every condition except greater and - // greater-equal. Return -1 for them, so the comparison yields - // false for all conditions except not-equal. - __ Set(rax, EQUAL); - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); - __ ucomisd(xmm0, xmm0); - __ setcc(parity_even, rax); - // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs. - if (cc_ == greater_equal || cc_ == greater) { - __ neg(rax); - } - __ ret(0); + __ bind(&heap_number); + // It is a heap number, so return equal if it's not NaN. + // For NaN, return 1 for every condition except greater and + // greater-equal. Return -1 for them, so the comparison yields + // false for all conditions except not-equal. + __ Set(rax, EQUAL); + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); + __ ucomisd(xmm0, xmm0); + __ setcc(parity_even, rax); + // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs. + if (cc == greater_equal || cc == greater) { + __ neg(rax); } + __ ret(0); __ bind(¬_identical); } - if (cc_ == equal) { // Both strict and non-strict. + if (cc == equal) { // Both strict and non-strict. Label slow; // Fallthrough label. // If we're doing a strict equality comparison, we don't have to do // type conversion, so we generate code to do fast comparison for objects // and oddballs. Non-smi numbers and strings still go through the usual // slow-case code. - if (strict_) { + if (strict()) { // If either is a Smi (we know that not both are), then they can only // be equal if the other is a HeapNumber. If so, use the slow case. { @@ -3525,46 +3757,46 @@ void CompareStub::Generate(MacroAssembler* masm) { } // Generate the number comparison code. 
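CheckInputType above only guards the operand states the compare IC has actually recorded; anything else jumps to the miss label so the IC can be re-patched with a more general state. A minimal stand-alone version of that guard, using a toy value type rather than V8's tagged values:

  #include <cstdio>

  enum class State { kSmi, kNumber, kGeneric };

  struct Value {
    bool is_smi;
    bool is_heap_number;
  };

  // Returns false where the stub would jump to the miss label.
  bool PassesInputCheck(const Value& v, State expected) {
    switch (expected) {
      case State::kSmi:     return v.is_smi;
      case State::kNumber:  return v.is_smi || v.is_heap_number;
      case State::kGeneric: return true;  // no check emitted
    }
    return false;
  }

  int main() {
    Value heap_number = {false, true};
    std::printf("%d %d\n",
                PassesInputCheck(heap_number, State::kSmi),      // 0: miss
                PassesInputCheck(heap_number, State::kNumber));  // 1: ok
    return 0;
  }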
- if (include_number_compare_) { - Label non_number_comparison; - Label unordered; - FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison); - __ xorl(rax, rax); - __ xorl(rcx, rcx); - __ ucomisd(xmm0, xmm1); - - // Don't base result on EFLAGS when a NaN is involved. - __ j(parity_even, &unordered, Label::kNear); - // Return a result of -1, 0, or 1, based on EFLAGS. - __ setcc(above, rax); - __ setcc(below, rcx); - __ subq(rax, rcx); - __ ret(0); + Label non_number_comparison; + Label unordered; + FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison); + __ xorl(rax, rax); + __ xorl(rcx, rcx); + __ ucomisd(xmm0, xmm1); - // If one of the numbers was NaN, then the result is always false. - // The cc is never not-equal. - __ bind(&unordered); - ASSERT(cc_ != not_equal); - if (cc_ == less || cc_ == less_equal) { - __ Set(rax, 1); - } else { - __ Set(rax, -1); - } - __ ret(0); + // Don't base result on EFLAGS when a NaN is involved. + __ j(parity_even, &unordered, Label::kNear); + // Return a result of -1, 0, or 1, based on EFLAGS. + __ setcc(above, rax); + __ setcc(below, rcx); + __ subq(rax, rcx); + __ ret(0); - // The number comparison code did not provide a valid result. - __ bind(&non_number_comparison); + // If one of the numbers was NaN, then the result is always false. + // The cc is never not-equal. + __ bind(&unordered); + ASSERT(cc != not_equal); + if (cc == less || cc == less_equal) { + __ Set(rax, 1); + } else { + __ Set(rax, -1); } + __ ret(0); - // Fast negative check for symbol-to-symbol equality. - Label check_for_strings; - if (cc_ == equal) { - BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister); - BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister); + // The number comparison code did not provide a valid result. + __ bind(&non_number_comparison); - // We've already checked for object identity, so if both operands - // are symbols they aren't equal. Register eax (not rax) already holds a - // non-zero value, which indicates not equal, so just return. + // Fast negative check for internalized-to-internalized equality. + Label check_for_strings; + if (cc == equal) { + BranchIfNotInternalizedString( + masm, &check_for_strings, rax, kScratchRegister); + BranchIfNotInternalizedString( + masm, &check_for_strings, rdx, kScratchRegister); + + // We've already checked for object identity, so if both operands are + // internalized strings they aren't equal. Register eax (not rax) already + // holds a non-zero value, which indicates not equal, so just return. __ ret(0); } @@ -3574,7 +3806,7 @@ void CompareStub::Generate(MacroAssembler* masm) { rdx, rax, rcx, rbx, &check_unequal_objects); // Inline comparison of ASCII strings. - if (cc_ == equal) { + if (cc == equal) { StringCompareStub::GenerateFlatAsciiStringEquals(masm, rdx, rax, @@ -3595,7 +3827,7 @@ void CompareStub::Generate(MacroAssembler* masm) { #endif __ bind(&check_unequal_objects); - if (cc_ == equal && !strict_) { + if (cc == equal && !strict()) { // Not strict equality. Objects are unequal if // they are both JSObjects and not undetectable, // and their pointers are different. @@ -3635,11 +3867,11 @@ void CompareStub::Generate(MacroAssembler* masm) { // Figure out which native to call and setup the arguments. Builtins::JavaScript builtin; - if (cc_ == equal) { - builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS; + if (cc == equal) { + builtin = strict() ? 
Builtins::STRICT_EQUALS : Builtins::EQUALS; } else { builtin = Builtins::COMPARE; - __ Push(Smi::FromInt(NegativeComparisonResult(cc_))); + __ Push(Smi::FromInt(NegativeComparisonResult(cc))); } // Restore return address on the stack. @@ -3648,22 +3880,9 @@ void CompareStub::Generate(MacroAssembler* masm) { // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) // tagged as a small integer. __ InvokeBuiltin(builtin, JUMP_FUNCTION); -} - -void CompareStub::BranchIfNonSymbol(MacroAssembler* masm, - Label* label, - Register object, - Register scratch) { - __ JumpIfSmi(object, label); - __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset)); - __ movzxbq(scratch, - FieldOperand(scratch, Map::kInstanceTypeOffset)); - // Ensure that no non-strings have the symbol bit set. - STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask); - STATIC_ASSERT(kSymbolTag != 0); - __ testb(scratch, Immediate(kIsSymbolMask)); - __ j(zero, label); + __ bind(&miss); + GenerateMiss(masm); } @@ -3677,12 +3896,13 @@ void InterruptStub::Generate(MacroAssembler* masm) { } -static void GenerateRecordCallTarget(MacroAssembler* masm) { +static void GenerateRecordCallTargetNoArray(MacroAssembler* masm) { // Cache the called function in a global property cell. Cache states // are uninitialized, monomorphic (indicated by a JSFunction), and // megamorphic. // rbx : cache cell for call target // rdi : the function to call + ASSERT(!FLAG_optimize_constructed_arrays); Isolate* isolate = masm->isolate(); Label initialize, done; @@ -3715,6 +3935,79 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { } +static void GenerateRecordCallTarget(MacroAssembler* masm) { + // Cache the called function in a global property cell. Cache states + // are uninitialized, monomorphic (indicated by a JSFunction), and + // megamorphic. + // rbx : cache cell for call target + // rdi : the function to call + ASSERT(FLAG_optimize_constructed_arrays); + Isolate* isolate = masm->isolate(); + Label initialize, done, miss, megamorphic, not_array_function; + + // Load the cache state into rcx. + __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset)); + + // A monomorphic cache hit or an already megamorphic state: invoke the + // function without changing the state. + __ cmpq(rcx, rdi); + __ j(equal, &done); + __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate)); + __ j(equal, &done); + + // Special handling of the Array() function, which caches not only the + // monomorphic Array function but the initial ElementsKind with special + // sentinels + Handle<Object> terminal_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + LAST_FAST_ELEMENTS_KIND); + __ Cmp(rcx, terminal_kind_sentinel); + __ j(not_equal, &miss); + // Make sure the function is the Array() function + __ LoadArrayFunction(rcx); + __ cmpq(rdi, rcx); + __ j(not_equal, &megamorphic); + __ jmp(&done); + + __ bind(&miss); + + // A monomorphic miss (i.e, here the cache is not uninitialized) goes + // megamorphic. + __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate)); + __ j(equal, &initialize); + // MegamorphicSentinel is an immortal immovable object (undefined) so no + // write-barrier is needed. + __ bind(&megamorphic); + __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), + TypeFeedbackCells::MegamorphicSentinel(isolate)); + __ jmp(&done, Label::kNear); + + // An uninitialized cache is patched with the function or sentinel to + // indicate the ElementsKind if function is the Array constructor. 
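// Illustrative sketch (not part of this patch): the cache cell in rbx moves
// through a small set of states - uninitialized, monomorphic (the JSFunction
// itself, or an ElementsKind sentinel when the target is the Array constructor),
// and megamorphic, which is terminal. A hypothetical C++ model of that rule:
enum class CallTargetCacheState { kUninitialized, kMonomorphic, kMegamorphic };

static CallTargetCacheState RecordCallTargetState(CallTargetCacheState state,
                                                  bool same_target_as_cached) {
  if (state == CallTargetCacheState::kUninitialized) return CallTargetCacheState::kMonomorphic;
  if (state == CallTargetCacheState::kMegamorphic) return state;  // never patched again
  return same_target_as_cached ? state : CallTargetCacheState::kMegamorphic;
}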
+ __ bind(&initialize); + // Make sure the function is the Array() function + __ LoadArrayFunction(rcx); + __ cmpq(rdi, rcx); + __ j(not_equal, &not_array_function); + + // The target function is the Array constructor, install a sentinel value in + // the constructor's type info cell that will track the initial ElementsKind + // that should be used for the array when its constructed. + Handle<Object> initial_kind_sentinel = + TypeFeedbackCells::MonomorphicArraySentinel(isolate, + GetInitialFastElementsKind()); + __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), + initial_kind_sentinel); + __ jmp(&done); + + __ bind(&not_array_function); + __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi); + // No need for a write barrier here - cells are rescanned. + + __ bind(&done); +} + + void CallFunctionStub::Generate(MacroAssembler* masm) { // rbx : cache cell for call target // rdi : the function to call @@ -3746,7 +4039,11 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Fast-case: Just invoke the function. @@ -3821,14 +4118,20 @@ void CallConstructStub::Generate(MacroAssembler* masm) { __ j(not_equal, &slow); if (RecordCallTarget()) { - GenerateRecordCallTarget(masm); + if (FLAG_optimize_constructed_arrays) { + GenerateRecordCallTarget(masm); + } else { + GenerateRecordCallTargetNoArray(masm); + } } // Jump to the function-specific construct stub. - __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); - __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset)); - __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize)); - __ jmp(rbx); + Register jmp_reg = FLAG_optimize_constructed_arrays ? rcx : rbx; + __ movq(jmp_reg, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset)); + __ movq(jmp_reg, FieldOperand(jmp_reg, + SharedFunctionInfo::kConstructStubOffset)); + __ lea(jmp_reg, FieldOperand(jmp_reg, Code::kHeaderSize)); + __ jmp(jmp_reg); // rdi: called object // rax: number of arguments @@ -3865,23 +4168,36 @@ bool CEntryStub::IsPregenerated() { } -void CodeStub::GenerateStubsAheadOfTime() { - CEntryStub::GenerateAheadOfTime(); - StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(); +void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { + CEntryStub::GenerateAheadOfTime(isolate); + StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); // It is important that the store buffer overflow stubs are generated first.
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(); + RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); } -void CodeStub::GenerateFPStubs() { +void CodeStub::GenerateFPStubs(Isolate* isolate) { } -void CEntryStub::GenerateAheadOfTime() { +void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { CEntryStub stub(1, kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); + stub.GetCode(isolate)->set_is_pregenerated(true); CEntryStub save_doubles(1, kSaveFPRegs); - save_doubles.GetCode()->set_is_pregenerated(true); + save_doubles.GetCode(isolate)->set_is_pregenerated(true); +} + + +static void JumpIfOOM(MacroAssembler* masm, + Register value, + Register scratch, + Label* oom_label) { + __ movq(scratch, value); + STATIC_ASSERT(Failure::OUT_OF_MEMORY_EXCEPTION == 3); + STATIC_ASSERT(kFailureTag == 3); + __ and_(scratch, Immediate(0xf)); + __ cmpq(scratch, Immediate(0xf)); + __ j(equal, oom_label); } @@ -3920,8 +4236,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ movq(rdi, rax); #endif __ movq(kScratchRegister, - FUNCTION_ADDR(Runtime::PerformGC), - RelocInfo::RUNTIME_ENTRY); + ExternalReference::perform_gc_function(masm->isolate())); __ call(kScratchRegister); } @@ -3999,9 +4314,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm, __ j(zero, &retry, Label::kNear); // Special handling of out of memory exceptions. - __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE); - __ cmpq(rax, kScratchRegister); - __ j(equal, throw_out_of_memory_exception); + JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception); // Retrieve the pending exception and clear the variable. ExternalReference pending_exception_address( @@ -4079,7 +4392,7 @@ void CEntryStub::Generate(MacroAssembler* masm) { // Do full GC and retry runtime call one final time. Failure* failure = Failure::InternalError(); - __ movq(rax, failure, RelocInfo::NONE); + __ movq(rax, failure, RelocInfo::NONE64); GenerateCore(masm, &throw_normal_exception, &throw_termination_exception, @@ -4098,7 +4411,10 @@ void CEntryStub::Generate(MacroAssembler* masm) { // Set pending exception and rax to out of memory exception. ExternalReference pending_exception(Isolate::kPendingExceptionAddress, isolate); - __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE); + Label already_have_failure; + JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure); + __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64); + __ bind(&already_have_failure); __ Store(pending_exception, rax); // Fall through to the next label. @@ -4126,7 +4442,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { // Cannot use smi-register for loading yet. __ movq(kScratchRegister, reinterpret_cast<uint64_t>(Smi::FromInt(marker)), - RelocInfo::NONE); + RelocInfo::NONE64); __ push(kScratchRegister); // context slot __ push(kScratchRegister); // function slot // Save callee-saved registers (X64/Win64 calling conventions). @@ -4181,7 +4497,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { ExternalReference pending_exception(Isolate::kPendingExceptionAddress, isolate); __ Store(pending_exception, rax); - __ movq(rax, Failure::Exception(), RelocInfo::NONE); + __ movq(rax, Failure::Exception(), RelocInfo::NONE64); __ jmp(&exit); // Invoke: Link this frame into the handler chain. 
There's only one @@ -4421,44 +4737,6 @@ Register InstanceofStub::left() { return no_reg; } Register InstanceofStub::right() { return no_reg; } -int CompareStub::MinorKey() { - // Encode the three parameters in a unique 16 bit value. To avoid duplicate - // stubs the never NaN NaN condition is only taken into account if the - // condition is equals. - ASSERT(static_cast<unsigned>(cc_) < (1 << 12)); - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - return ConditionField::encode(static_cast<unsigned>(cc_)) - | RegisterField::encode(false) // lhs_ and rhs_ are not used - | StrictField::encode(strict_) - | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false) - | IncludeNumberCompareField::encode(include_number_compare_) - | IncludeSmiCompareField::encode(include_smi_compare_); -} - - -// Unfortunately you have to run without snapshots to see most of these -// names in the profile since most compare stubs end up in the snapshot. -void CompareStub::PrintName(StringStream* stream) { - ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg)); - const char* cc_name; - switch (cc_) { - case less: cc_name = "LT"; break; - case greater: cc_name = "GT"; break; - case less_equal: cc_name = "LE"; break; - case greater_equal: cc_name = "GE"; break; - case equal: cc_name = "EQ"; break; - case not_equal: cc_name = "NE"; break; - default: cc_name = "UnknownCondition"; break; - } - bool is_equality = cc_ == equal || cc_ == not_equal; - stream->Add("CompareStub_%s", cc_name); - if (strict_ && is_equality) stream->Add("_STRICT"); - if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN"); - if (!include_number_compare_) stream->Add("_NO_NUMBER"); - if (!include_smi_compare_) stream->Add("_NO_SMI"); -} - - // ------------------------------------------------------------------------- // StringCharCodeAtGenerator @@ -4559,7 +4837,7 @@ void StringCharCodeAtGenerator::GenerateSlow( void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { // Fast case of Heap::LookupSingleCharacterStringFromCode. __ JumpIfNotSmi(code_, &slow_case_); - __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode)); + __ SmiCompare(code_, Smi::FromInt(String::kMaxOneByteCharCode)); __ j(above, &slow_case_); __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); @@ -4591,23 +4869,6 @@ void StringCharFromCodeGenerator::GenerateSlow( } -// ------------------------------------------------------------------------- -// StringCharAtGenerator - -void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) { - char_code_at_generator_.GenerateFast(masm); - char_from_code_generator_.GenerateFast(masm); -} - - -void StringCharAtGenerator::GenerateSlow( - MacroAssembler* masm, - const RuntimeCallHelper& call_helper) { - char_code_at_generator_.GenerateSlow(masm, call_helper); - char_from_code_generator_.GenerateSlow(masm, call_helper); -} - - void StringAddStub::Generate(MacroAssembler* masm) { Label call_runtime, call_builtin; Builtins::JavaScript builtin_id = Builtins::ADD; @@ -4686,8 +4947,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { // Look at the length of the result of adding the two strings. STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); __ SmiAdd(rbx, rbx, rcx); - // Use the symbol table when adding two one character strings, as it - // helps later optimizations to return a symbol here. + // Use the string table when adding two one character strings, as it + // helps later optimizations to return an internalized string here. 
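// Illustrative sketch (not part of this patch): for a one-character plus
// one-character add, the stub packs both bytes into a single value (char 1 in
// byte 0, char 2 in byte 1) and probes the string table with it, so an already
// internalized two-character string can be returned instead of a fresh cons.
#include <cstdint>

static uint32_t PackTwoOneByteChars(uint8_t c1, uint8_t c2) {
  return static_cast<uint32_t>(c1) | (static_cast<uint32_t>(c2) << 8);
}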
__ SmiCompare(rbx, Smi::FromInt(2)); __ j(not_equal, &longer_than_two); @@ -4696,13 +4957,13 @@ void StringAddStub::Generate(MacroAssembler* masm) { &call_runtime); // Get the two characters forming the sub string. - __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize)); - __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize)); + __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize)); + __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize)); - // Try to lookup two character string in symbol table. If it is not found + // Try to lookup two character string in string table. If it is not found // just allocate a new one. Label make_two_character_string, make_flat_ascii_string; - StringHelper::GenerateTwoCharacterSymbolTableProbe( + StringHelper::GenerateTwoCharacterStringTableProbe( masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string); __ IncrementCounter(counters->string_add_native(), 1); __ ret(2 * kPointerSize); @@ -4713,11 +4974,11 @@ void StringAddStub::Generate(MacroAssembler* masm) { // rbx - first byte: first character // rbx - second byte: *maybe* second character // Make sure that the second byte of rbx contains the second character. - __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize)); + __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize)); __ shll(rcx, Immediate(kBitsPerByte)); __ orl(rbx, rcx); // Write both characters to the new string. - __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx); + __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx); __ IncrementCounter(counters->string_add_native(), 1); __ ret(2 * kPointerSize); @@ -4740,7 +5001,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { Label non_ascii, allocated, ascii_data; __ movl(rcx, r8); __ and_(rcx, r9); - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ testl(rcx, Immediate(kStringEncodingMask)); __ j(zero, &non_ascii); @@ -4766,9 +5027,9 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ testb(rcx, Immediate(kAsciiDataHintMask)); __ j(not_zero, &ascii_data); __ xor_(r8, r9); - STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0); - __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag)); - __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag)); + STATIC_ASSERT(kOneByteStringTag != 0 && kAsciiDataHintTag != 0); + __ andb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag)); + __ cmpb(r8, Immediate(kOneByteStringTag | kAsciiDataHintTag)); __ j(equal, &ascii_data); // Allocate a two byte cons string. __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime); @@ -4799,8 +5060,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset)); __ jmp(&first_prepared, Label::kNear); __ bind(&first_is_sequential); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize)); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize)); __ bind(&first_prepared); // Check whether both strings have same encoding. 
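// Illustrative sketch (not part of this patch), assuming the V8 constants used
// above are in scope: the STATIC_ASSERTs guarantee that the one-byte tag is the
// set bit inside kStringEncodingMask, so a single mask test on the instance type
// tells the two sequential encodings apart.
#include <cstdint>

static bool IsOneByteInstanceType(uint32_t instance_type) {
  return (instance_type & kStringEncodingMask) != 0;  // zero means two-byte
}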
@@ -4820,8 +5081,8 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset)); __ jmp(&second_prepared, Label::kNear); __ bind(&second_is_sequential); - STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); - __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize)); + STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize); + __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize)); __ bind(&second_prepared); Label non_ascii_string_add_flat_result; @@ -4837,7 +5098,7 @@ void StringAddStub::Generate(MacroAssembler* masm) { __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime); // rax: result string // Locate first character of result. - __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize)); + __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize)); // rcx: first char of first string // rbx: first character of result // r14: length of first string @@ -5001,7 +5262,7 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm, __ bind(&done); } -void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, +void StringHelper::GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -5013,7 +5274,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, Register scratch = scratch3; // Make sure that both characters are not digits as such strings has a - // different hash algorithm. Don't try to look for these in the symbol table. + // different hash algorithm. Don't try to look for these in the string table. Label not_array_index; __ leal(scratch, Operand(c1, -'0')); __ cmpl(scratch, Immediate(static_cast<int>('9' - '0'))); @@ -5037,14 +5298,14 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string. - // Load the symbol table. - Register symbol_table = c2; - __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex); + // Load the string table. + Register string_table = c2; + __ LoadRoot(string_table, Heap::kStringTableRootIndex); - // Calculate capacity mask from the symbol table capacity. + // Calculate capacity mask from the string table capacity. Register mask = scratch2; __ SmiToInteger32(mask, - FieldOperand(symbol_table, SymbolTable::kCapacityOffset)); + FieldOperand(string_table, StringTable::kCapacityOffset)); __ decl(mask); Register map = scratch4; @@ -5052,31 +5313,31 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Registers // chars: two character string, char 1 in byte 0 and char 2 in byte 1. // hash: hash of two character string (32-bit int) - // symbol_table: symbol table + // string_table: string table // mask: capacity mask (32-bit int) // map: - // scratch: - - // Perform a number of probes in the symbol table. + // Perform a number of probes in the string table. static const int kProbes = 4; - Label found_in_symbol_table; + Label found_in_string_table; Label next_probe[kProbes]; Register candidate = scratch; // Scratch register contains candidate. for (int i = 0; i < kProbes; i++) { - // Calculate entry in symbol table. + // Calculate entry in string table. 
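// Illustrative sketch (not part of this patch) of the entry calculation that
// follows: each probe adds a per-round offset to the hash, masks the sum with
// the capacity mask, and uses the result directly (StringTable::kEntrySize is 1).
#include <cstdint>

static uint32_t StringTableSlot(uint32_t hash, uint32_t probe_offset, uint32_t capacity_mask) {
  return (hash + probe_offset) & capacity_mask;
}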
__ movl(scratch, hash); if (i > 0) { - __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i))); + __ addl(scratch, Immediate(StringTable::GetProbeOffset(i))); } __ andl(scratch, mask); - // Load the entry from the symbol table. - STATIC_ASSERT(SymbolTable::kEntrySize == 1); + // Load the entry from the string table. + STATIC_ASSERT(StringTable::kEntrySize == 1); __ movq(candidate, - FieldOperand(symbol_table, + FieldOperand(string_table, scratch, times_pointer_size, - SymbolTable::kElementsStartOffset)); + StringTable::kElementsStartOffset)); // If entry is undefined no string with this hash can be found. Label is_string; @@ -5089,7 +5350,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, if (FLAG_debug_code) { __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex); __ cmpq(kScratchRegister, candidate); - __ Assert(equal, "oddball in symbol table is not undefined or the hole"); + __ Assert(equal, "oddball in string table is not undefined or the hole"); } __ jmp(&next_probe[i]); @@ -5110,10 +5371,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, temp, temp, &next_probe[i]); // Check if the two characters match. - __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize)); + __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize)); __ andl(temp, Immediate(0x0000ffff)); __ cmpl(chars, temp); - __ j(equal, &found_in_symbol_table); + __ j(equal, &found_in_string_table); __ bind(&next_probe[i]); } @@ -5122,7 +5383,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, // Scratch register contains result when we fall through to here. Register result = candidate; - __ bind(&found_in_symbol_table); + __ bind(&found_in_string_table); if (!result.is(rax)) { __ movq(rax, result); } @@ -5228,6 +5489,11 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ IncrementCounter(counters->sub_string_native(), 1); __ ret(kArgumentsSize); __ bind(&not_original_string); + + Label single_char; + __ SmiCompare(rcx, Smi::FromInt(1)); + __ j(equal, &single_char); + __ SmiToInteger32(rcx, rcx); // rax: string @@ -5248,7 +5514,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Cons string. Check whether it is flat, then fetch first part. // Flat cons strings have an empty second part. __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset), - Heap::kEmptyStringRootIndex); + Heap::kempty_stringRootIndex); __ j(not_equal, &runtime); __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset)); // Update instance type. @@ -5288,7 +5554,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { // string's encoding is wrong because we always have to recheck encoding of // the newly created string's parent anyways due to externalized strings. Label two_byte_slice, set_slice_header; - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ testb(rbx, Immediate(kStringEncodingMask)); __ j(zero, &two_byte_slice, Label::kNear); @@ -5328,11 +5594,11 @@ void SubStringStub::Generate(MacroAssembler* masm) { __ j(not_zero, &runtime); __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset)); // Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize); + STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); __ bind(&sequential_string); - STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0); + STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); __ testb(rbx, Immediate(kStringEncodingMask)); __ j(zero, &two_byte_sequential); @@ -5345,10 +5611,10 @@ void SubStringStub::Generate(MacroAssembler* masm) { { // Locate character of sub string start. SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1); __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, - SeqAsciiString::kHeaderSize - kHeapObjectTag)); + SeqOneByteString::kHeaderSize - kHeapObjectTag)); } // Locate first character of result. - __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize)); + __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize)); // rax: result string // rcx: result length @@ -5370,7 +5636,7 @@ void SubStringStub::Generate(MacroAssembler* masm) { { // Locate character of sub string start. SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2); __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale, - SeqAsciiString::kHeaderSize - kHeapObjectTag)); + SeqOneByteString::kHeaderSize - kHeapObjectTag)); } // Locate first character of result. __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize)); @@ -5388,6 +5654,17 @@ void SubStringStub::Generate(MacroAssembler* masm) { // Just jump to runtime to create the sub string. __ bind(&runtime); __ TailCallRuntime(Runtime::kSubString, 3, 1); + + __ bind(&single_char); + // rax: string + // rbx: instance type + // rcx: sub string length (smi) + // rdx: from index (smi) + StringCharAtGenerator generator( + rax, rdx, rcx, rax, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); + generator.GenerateFast(masm); + __ ret(kArgumentsSize); + generator.SkipSlow(masm, &runtime); } @@ -5475,16 +5752,22 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, // Compare lengths (precomputed). __ bind(&compare_lengths); __ SmiTest(length_difference); - __ j(not_zero, &result_not_equal, Label::kNear); + Label length_not_equal; + __ j(not_zero, &length_not_equal, Label::kNear); // Result is EQUAL. __ Move(rax, Smi::FromInt(EQUAL)); __ ret(0); Label result_greater; + Label result_less; + __ bind(&length_not_equal); + __ j(greater, &result_greater, Label::kNear); + __ jmp(&result_less, Label::kNear); __ bind(&result_not_equal); // Unequal comparison of left to right, either character or length. - __ j(greater, &result_greater, Label::kNear); + __ j(above, &result_greater, Label::kNear); + __ bind(&result_less); // Result is LESS. __ Move(rax, Smi::FromInt(LESS)); @@ -5510,9 +5793,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( // doesn't need an additional compare. 
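// Illustrative sketch (not part of this patch): the flat one-byte comparison
// above walks the common prefix first; only when every character matches does
// the precomputed length difference decide the result.
static int CompareFlatOneByte(const unsigned char* left, int left_length,
                              const unsigned char* right, int right_length) {
  int min_length = left_length < right_length ? left_length : right_length;
  for (int i = 0; i < min_length; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;  // character decides
  }
  if (left_length == right_length) return 0;                      // EQUAL
  return left_length < right_length ? -1 : 1;                     // length decides
}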
__ SmiToInteger32(length, length); __ lea(left, - FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize)); + FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize)); __ lea(right, - FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize)); + FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize)); __ neg(length); Register index = length; // index = -length; @@ -5568,7 +5851,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) { void ICCompareStub::GenerateSmis(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SMIS); + ASSERT(state_ == CompareIC::SMI); Label miss; __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear); @@ -5580,7 +5863,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { __ subq(rdx, rax); __ j(no_overflow, &done, Label::kNear); // Correct sign of result in case of overflow. - __ SmiNot(rdx, rdx); + __ not_(rdx); __ bind(&done); __ movq(rax, rdx); } @@ -5591,24 +5874,42 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) { } -void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::HEAP_NUMBERS); +void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::NUMBER); Label generic_stub; Label unordered, maybe_undefined1, maybe_undefined2; Label miss; - Condition either_smi = masm->CheckEitherSmi(rax, rdx); - __ j(either_smi, &generic_stub, Label::kNear); - __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx); + if (left_ == CompareIC::SMI) { + __ JumpIfNotSmi(rdx, &miss); + } + if (right_ == CompareIC::SMI) { + __ JumpIfNotSmi(rax, &miss); + } + + // Load left and right operand. + Label done, left, left_smi, right_smi; + __ JumpIfSmi(rax, &right_smi, Label::kNear); + __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL); __ j(not_equal, &maybe_undefined1, Label::kNear); - __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx); + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); + __ jmp(&left, Label::kNear); + __ bind(&right_smi); + __ SmiToInteger32(rcx, rax); // Can't clobber rax yet. + __ cvtlsi2sd(xmm1, rcx); + + __ bind(&left); + __ JumpIfSmi(rdx, &left_smi, Label::kNear); + __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL); __ j(not_equal, &maybe_undefined2, Label::kNear); - - // Load left and right operand __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset)); - __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); + __ jmp(&done); + __ bind(&left_smi); + __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet. 
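// Illustrative sketch (not part of this patch): in the NUMBER state both
// operands end up as doubles in xmm registers - a smi operand is untagged to a
// 32-bit integer and converted (cvtlsi2sd), a heap number is loaded from its
// value field. Hypothetical scalar model:
#include <cstdint>

static double NumberOperandAsDouble(bool is_smi, int32_t smi_value, double heap_number_value) {
  return is_smi ? static_cast<double>(smi_value) : heap_number_value;
}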
+ __ cvtlsi2sd(xmm0, rcx); + __ bind(&done); // Compare operands __ ucomisd(xmm0, xmm1); @@ -5624,14 +5925,16 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { __ ret(0); __ bind(&unordered); - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS); __ bind(&generic_stub); - __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET); + ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, + CompareIC::GENERIC); + __ jmp(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); __ bind(&maybe_undefined1); if (Token::IsOrderedRelationalCompareOp(op_)) { __ Cmp(rax, masm->isolate()->factory()->undefined_value()); __ j(not_equal, &miss); + __ JumpIfSmi(rdx, &unordered); __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx); __ j(not_equal, &maybe_undefined2, Label::kNear); __ jmp(&unordered); @@ -5648,8 +5951,8 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { } -void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::SYMBOLS); +void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::INTERNALIZED_STRING); ASSERT(GetCondition() == equal); // Registers containing left and right operands respectively. @@ -5663,17 +5966,72 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { Condition cond = masm->CheckEitherSmi(left, right, tmp1); __ j(cond, &miss, Label::kNear); - // Check that both operands are symbols. + // Check that both operands are internalized strings. __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset)); __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset)); __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ and_(tmp1, tmp2); - __ testb(tmp1, Immediate(kIsSymbolMask)); + __ testb(tmp1, Immediate(kIsInternalizedMask)); __ j(zero, &miss, Label::kNear); - // Symbols are compared by identity. + // Internalized strings are compared by identity. + Label done; + __ cmpq(left, right); + // Make sure rax is non-zero. At this point input operands are + // guaranteed to be non-zero. + ASSERT(right.is(rax)); + __ j(not_equal, &done, Label::kNear); + STATIC_ASSERT(EQUAL == 0); + STATIC_ASSERT(kSmiTag == 0); + __ Move(rax, Smi::FromInt(EQUAL)); + __ bind(&done); + __ ret(0); + + __ bind(&miss); + GenerateMiss(masm); +} + + +void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { + ASSERT(state_ == CompareIC::UNIQUE_NAME); + ASSERT(GetCondition() == equal); + + // Registers containing left and right operands respectively. + Register left = rdx; + Register right = rax; + Register tmp1 = rcx; + Register tmp2 = rbx; + + // Check that both operands are heap objects. + Label miss; + Condition cond = masm->CheckEitherSmi(left, right, tmp1); + __ j(cond, &miss, Label::kNear); + + // Check that both operands are unique names. This leaves the instance + // types loaded in tmp1 and tmp2. 
+ STATIC_ASSERT(kInternalizedTag != 0); + __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset)); + __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset)); + __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset)); + __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset)); + + Label succeed1; + __ testb(tmp1, Immediate(kIsInternalizedMask)); + __ j(not_zero, &succeed1, Label::kNear); + __ cmpb(tmp1, Immediate(static_cast<uint8_t>(SYMBOL_TYPE))); + __ j(not_equal, &miss, Label::kNear); + __ bind(&succeed1); + + Label succeed2; + __ testb(tmp2, Immediate(kIsInternalizedMask)); + __ j(not_zero, &succeed2, Label::kNear); + __ cmpb(tmp2, Immediate(static_cast<uint8_t>(SYMBOL_TYPE))); + __ j(not_equal, &miss, Label::kNear); + __ bind(&succeed2); + + // Unique names are compared by identity. Label done; __ cmpq(left, right); // Make sure rax is non-zero. At this point input operands are @@ -5692,7 +6050,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) { void ICCompareStub::GenerateStrings(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::STRINGS); + ASSERT(state_ == CompareIC::STRING); Label miss; bool equality = Token::IsEqualityOp(op_); @@ -5732,13 +6090,13 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { // Handle not identical strings. __ bind(&not_same); - // Check that both strings are symbols. If they are, we're done + // Check that both strings are internalized strings. If they are, we're done // because we already know they are not identical. if (equality) { Label do_compare; - STATIC_ASSERT(kSymbolTag != 0); + STATIC_ASSERT(kInternalizedTag != 0); __ and_(tmp1, tmp2); - __ testb(tmp1, Immediate(kIsSymbolMask)); + __ testb(tmp1, Immediate(kIsInternalizedMask)); __ j(zero, &do_compare, Label::kNear); // Make sure rax is non-zero. At this point input operands are // guaranteed to be non-zero. @@ -5778,7 +6136,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { void ICCompareStub::GenerateObjects(MacroAssembler* masm) { - ASSERT(state_ == CompareIC::OBJECTS); + ASSERT(state_ == CompareIC::OBJECT); Label miss; Condition either_smi = masm->CheckEitherSmi(rdx, rax); __ j(either_smi, &miss, Label::kNear); @@ -5842,12 +6200,13 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { } -void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register properties, - Handle<String> name, - Register r0) { +void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register properties, + Handle<Name> name, + Register r0) { + ASSERT(name->IsUniqueName()); // If names of slots in range from 1 to kProbes - 1 for the hash value are // not equal to the name and kProbes-th slot is not used (its name is the // undefined value), it guarantees the hash table doesn't contain the @@ -5861,10 +6220,10 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset)); __ decl(index); __ and_(index, - Immediate(name->Hash() + StringDictionary::GetProbeOffset(i))); + Immediate(name->Hash() + NameDictionary::GetProbeOffset(i))); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
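// Illustrative sketch (not part of this patch): every probe above masks
// (hash + probe offset) with (capacity - 1) and then scales by
// NameDictionary::kEntrySize == 3, because each entry spans three slots
// (key, value, property details).
#include <cstdint>

static int NameDictionaryEntryOffset(uint32_t hash, uint32_t probe_offset, uint32_t capacity) {
  uint32_t bucket = (hash + probe_offset) & (capacity - 1);  // capacity is a power of two
  return static_cast<int>(bucket * 3);                       // lea(index, [index + index*2])
}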
Register entity_name = r0; @@ -5878,27 +6237,27 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, __ j(equal, done); // Stop if found the property. - __ Cmp(entity_name, Handle<String>(name)); + __ Cmp(entity_name, Handle<Name>(name)); __ j(equal, miss); - Label the_hole; + Label good; // Check for the hole and skip. __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex); - __ j(equal, &the_hole, Label::kNear); + __ j(equal, &good, Label::kNear); - // Check if the entry name is not a symbol. + // Check if the entry name is not a unique name. __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset)); __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset), - Immediate(kIsSymbolMask)); - __ j(zero, miss); + Immediate(kIsInternalizedMask)); + __ j(not_zero, &good, Label::kNear); + __ cmpb(FieldOperand(entity_name, Map::kInstanceTypeOffset), + Immediate(static_cast<uint8_t>(SYMBOL_TYPE))); + __ j(not_equal, miss); - __ bind(&the_hole); + __ bind(&good); } - StringDictionaryLookupStub stub(properties, - r0, - r0, - StringDictionaryLookupStub::NEGATIVE_LOOKUP); + NameDictionaryLookupStub stub(properties, r0, r0, NEGATIVE_LOOKUP); __ Push(Handle<Object>(name)); __ push(Immediate(name->Hash())); __ CallStub(&stub); @@ -5908,38 +6267,38 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, } -// Probe the string dictionary in the |elements| register. Jump to the +// Probe the name dictionary in the |elements| register. Jump to the // |done| label if a property with the given name is found leaving the // index into the dictionary in |r1|. Jump to the |miss| label // otherwise. -void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, - Label* miss, - Label* done, - Register elements, - Register name, - Register r0, - Register r1) { +void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, + Label* miss, + Label* done, + Register elements, + Register name, + Register r0, + Register r1) { ASSERT(!elements.is(r0)); ASSERT(!elements.is(r1)); ASSERT(!name.is(r0)); ASSERT(!name.is(r1)); - __ AssertString(name); + __ AssertName(name); __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset)); __ decl(r0); for (int i = 0; i < kInlinedProbes; i++) { // Compute the masked index: (hash + i + i * i) & mask. - __ movl(r1, FieldOperand(name, String::kHashFieldOffset)); - __ shrl(r1, Immediate(String::kHashShift)); + __ movl(r1, FieldOperand(name, Name::kHashFieldOffset)); + __ shrl(r1, Immediate(Name::kHashShift)); if (i > 0) { - __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i))); + __ addl(r1, Immediate(NameDictionary::GetProbeOffset(i))); } __ and_(r1, r0); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ lea(r1, Operand(r1, r1, times_2, 0)); // r1 = r1 * 3 // Check if the key is identical to the name. 
@@ -5948,13 +6307,10 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, __ j(equal, done); } - StringDictionaryLookupStub stub(elements, - r0, - r1, - POSITIVE_LOOKUP); + NameDictionaryLookupStub stub(elements, r0, r1, POSITIVE_LOOKUP); __ push(name); - __ movl(r0, FieldOperand(name, String::kHashFieldOffset)); - __ shrl(r0, Immediate(String::kHashShift)); + __ movl(r0, FieldOperand(name, Name::kHashFieldOffset)); + __ shrl(r0, Immediate(Name::kHashShift)); __ push(r0); __ CallStub(&stub); @@ -5964,7 +6320,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, } -void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { +void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { // This stub overrides SometimesSetsUpAFrame() to return false. That means // we cannot call anything that could cause a GC from this stub. // Stack frame on entry: @@ -5972,7 +6328,7 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // esp[1 * kPointerSize]: key's hash. // esp[2 * kPointerSize]: key. // Registers: - // dictionary_: StringDictionary to probe. + // dictionary_: NameDictionary to probe. // result_: used as scratch. // index_: will hold an index of entry if lookup is successful. // might alias with result_. @@ -5996,12 +6352,12 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { // Compute the masked index: (hash + i + i * i) & mask. __ movq(scratch, Operand(rsp, 2 * kPointerSize)); if (i > 0) { - __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i))); + __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i))); } __ and_(scratch, Operand(rsp, 0)); // Scale the index by multiplying by the entry size. - ASSERT(StringDictionary::kEntrySize == 3); + ASSERT(NameDictionary::kEntrySize == 3); __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. // Having undefined at this place means the name is not contained. @@ -6018,15 +6374,20 @@ void StringDictionaryLookupStub::Generate(MacroAssembler* masm) { __ j(equal, &in_dictionary); if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { - // If we hit a non symbol key during negative lookup - // we have to bailout as this key might be equal to the + // If we hit a key that is not a unique name during negative + // lookup we have to bailout as this key might be equal to the // key we are looking for. - // Check if the entry name is not a symbol. + // Check if the entry name is not a unique name. 
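// Illustrative sketch (not part of this patch), assuming the V8 constants used
// above are in scope: a "unique name" is either an internalized string or a
// Symbol, so the check that follows only bails out of the negative lookup when
// it finds a key that is neither.
#include <cstdint>

static bool IsUniqueNameInstanceType(uint8_t instance_type) {
  return (instance_type & kIsInternalizedMask) != 0 || instance_type == SYMBOL_TYPE;
}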
+ Label cont; __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset), - Immediate(kIsSymbolMask)); - __ j(zero, &maybe_in_dictionary); + Immediate(kIsInternalizedMask)); + __ j(not_zero, &cont); + __ cmpb(FieldOperand(scratch, Map::kInstanceTypeOffset), + Immediate(static_cast<uint8_t>(SYMBOL_TYPE))); + __ j(not_equal, &maybe_in_dictionary); + __ bind(&cont); } } @@ -6116,15 +6477,16 @@ bool RecordWriteStub::IsPregenerated() { } -void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() { +void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( + Isolate* isolate) { StoreBufferOverflowStub stub1(kDontSaveFPRegs); - stub1.GetCode()->set_is_pregenerated(true); + stub1.GetCode(isolate)->set_is_pregenerated(true); StoreBufferOverflowStub stub2(kSaveFPRegs); - stub2.GetCode()->set_is_pregenerated(true); + stub2.GetCode(isolate)->set_is_pregenerated(true); } -void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { +void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) { for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime; !entry->object.is(no_reg); entry++) { @@ -6133,7 +6495,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() { entry->address, entry->action, kDontSaveFPRegs); - stub.GetCode()->set_is_pregenerated(true); + stub.GetCode(isolate)->set_is_pregenerated(true); } } @@ -6239,13 +6601,8 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) { ASSERT(!address.is(arg1)); __ Move(address, regs_.address()); __ Move(arg1, regs_.object()); - if (mode == INCREMENTAL_COMPACTION) { - // TODO(gc) Can we just set address arg2 in the beginning? - __ Move(arg2, address); - } else { - ASSERT(mode == INCREMENTAL); - __ movq(arg2, Operand(address, 0)); - } + // TODO(gc) Can we just set address arg2 in the beginning? + __ Move(arg2, address); __ LoadAddress(arg3, ExternalReference::isolate_address()); int argument_count = 3; @@ -6434,6 +6791,21 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { } +void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { + ASSERT(!Serializer::enabled()); + CEntryStub ces(1, kSaveFPRegs); + __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); + int parameter_count_offset = + StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; + __ movq(rbx, MemOperand(rbp, parameter_count_offset)); + masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); + __ pop(rcx); + __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, + extra_expression_stack_count_ * kPointerSize)); + __ jmp(rcx); // Return to IC Miss stub, continuation still on stack. +} + + void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { if (entry_hook_ != NULL) { ProfileEntryHookStub stub; @@ -6480,7 +6852,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { #endif // Call the entry hook function. - __ movq(rax, &entry_hook_, RelocInfo::NONE); + __ movq(rax, &entry_hook_, RelocInfo::NONE64); __ movq(rax, Operand(rax, 0)); AllowExternalCallThatCantCauseGC scope(masm); diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h index 6a1a18f830..5dd4064a57 100644 --- a/deps/v8/src/x64/code-stubs-x64.h +++ b/deps/v8/src/x64/code-stubs-x64.h @@ -37,7 +37,7 @@ namespace internal { // Compute a transcendental math function natively, or call the // TranscendentalCache runtime function. 
-class TranscendentalCacheStub: public CodeStub { +class TranscendentalCacheStub: public PlatformCodeStub { public: enum ArgumentType { TAGGED = 0, @@ -60,7 +60,7 @@ class TranscendentalCacheStub: public CodeStub { }; -class StoreBufferOverflowStub: public CodeStub { +class StoreBufferOverflowStub: public PlatformCodeStub { public: explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp) : save_doubles_(save_fp) { } @@ -68,7 +68,7 @@ class StoreBufferOverflowStub: public CodeStub { void Generate(MacroAssembler* masm); virtual bool IsPregenerated() { return true; } - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } private: @@ -79,14 +79,7 @@ class StoreBufferOverflowStub: public CodeStub { }; -// Flag that indicates how to generate code for the stub GenericBinaryOpStub. -enum GenericBinaryFlags { - NO_GENERIC_BINARY_FLAGS = 0, - NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub. -}; - - -class UnaryOpStub: public CodeStub { +class UnaryOpStub: public PlatformCodeStub { public: UnaryOpStub(Token::Value op, UnaryOverwriteMode mode, @@ -134,9 +127,9 @@ class UnaryOpStub: public CodeStub { Label* non_smi, Label::Distance non_smi_near); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateHeapNumberStubSub(MacroAssembler* masm); - void GenerateHeapNumberStubBitNot(MacroAssembler* masm); + void GenerateNumberStub(MacroAssembler* masm); + void GenerateNumberStubSub(MacroAssembler* masm); + void GenerateNumberStubBitNot(MacroAssembler* masm); void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow); void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow); @@ -157,95 +150,6 @@ class UnaryOpStub: public CodeStub { }; -class BinaryOpStub: public CodeStub { - public: - BinaryOpStub(Token::Value op, OverwriteMode mode) - : op_(op), - mode_(mode), - operands_type_(BinaryOpIC::UNINITIALIZED), - result_type_(BinaryOpIC::UNINITIALIZED) { - ASSERT(OpBits::is_valid(Token::NUM_TOKENS)); - } - - BinaryOpStub( - int key, - BinaryOpIC::TypeInfo operands_type, - BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED) - : op_(OpBits::decode(key)), - mode_(ModeBits::decode(key)), - operands_type_(operands_type), - result_type_(result_type) { } - - private: - enum SmiCodeGenerateHeapNumberResults { - ALLOW_HEAPNUMBER_RESULTS, - NO_HEAPNUMBER_RESULTS - }; - - Token::Value op_; - OverwriteMode mode_; - - // Operand type information determined at runtime. - BinaryOpIC::TypeInfo operands_type_; - BinaryOpIC::TypeInfo result_type_; - - virtual void PrintName(StringStream* stream); - - // Minor key encoding in 15 bits RRRTTTOOOOOOOMM. 
- class ModeBits: public BitField<OverwriteMode, 0, 2> {}; - class OpBits: public BitField<Token::Value, 2, 7> {}; - class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {}; - class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {}; - - Major MajorKey() { return BinaryOp; } - int MinorKey() { - return OpBits::encode(op_) - | ModeBits::encode(mode_) - | OperandTypeInfoBits::encode(operands_type_) - | ResultTypeInfoBits::encode(result_type_); - } - - void Generate(MacroAssembler* masm); - void GenerateGeneric(MacroAssembler* masm); - void GenerateSmiCode(MacroAssembler* masm, - Label* slow, - SmiCodeGenerateHeapNumberResults heapnumber_results); - void GenerateFloatingPointCode(MacroAssembler* masm, - Label* allocation_failure, - Label* non_numeric_failure); - void GenerateStringAddCode(MacroAssembler* masm); - void GenerateCallRuntimeCode(MacroAssembler* masm); - void GenerateLoadArguments(MacroAssembler* masm); - void GenerateReturn(MacroAssembler* masm); - void GenerateUninitializedStub(MacroAssembler* masm); - void GenerateSmiStub(MacroAssembler* masm); - void GenerateInt32Stub(MacroAssembler* masm); - void GenerateHeapNumberStub(MacroAssembler* masm); - void GenerateOddballStub(MacroAssembler* masm); - void GenerateStringStub(MacroAssembler* masm); - void GenerateBothStringStub(MacroAssembler* masm); - void GenerateGenericStub(MacroAssembler* masm); - - void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure); - void GenerateRegisterArgsPush(MacroAssembler* masm); - void GenerateTypeTransition(MacroAssembler* masm); - void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm); - - virtual int GetCodeKind() { return Code::BINARY_OP_IC; } - - virtual InlineCacheState GetICState() { - return BinaryOpIC::ToState(operands_type_); - } - - virtual void FinishCode(Handle<Code> code) { - code->set_binary_op_type(operands_type_); - code->set_binary_op_result_type(result_type_); - } - - friend class CodeGenerator; -}; - - class StringHelper : public AllStatic { public: // Generate code for copying characters using a simple loop. This should only @@ -268,11 +172,11 @@ class StringHelper : public AllStatic { bool ascii); - // Probe the symbol table for a two character string. If the string is + // Probe the string table for a two character string. If the string is // not found by probing a jump to the label not_found is performed. This jump - // does not guarantee that the string is not in the symbol table. If the + // does not guarantee that the string is not in the string table. If the // string is found the code falls through with the string in register rax. 
- static void GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm, + static void GenerateTwoCharacterStringTableProbe(MacroAssembler* masm, Register c1, Register c2, Register scratch1, @@ -312,7 +216,7 @@ enum StringAddFlags { }; -class StringAddStub: public CodeStub { +class StringAddStub: public PlatformCodeStub { public: explicit StringAddStub(StringAddFlags flags) : flags_(flags) {} @@ -334,7 +238,7 @@ class StringAddStub: public CodeStub { }; -class SubStringStub: public CodeStub { +class SubStringStub: public PlatformCodeStub { public: SubStringStub() {} @@ -346,7 +250,7 @@ class SubStringStub: public CodeStub { }; -class StringCompareStub: public CodeStub { +class StringCompareStub: public PlatformCodeStub { public: StringCompareStub() {} @@ -383,7 +287,7 @@ class StringCompareStub: public CodeStub { }; -class NumberToStringStub: public CodeStub { +class NumberToStringStub: public PlatformCodeStub { public: NumberToStringStub() { } @@ -412,14 +316,14 @@ class NumberToStringStub: public CodeStub { }; -class StringDictionaryLookupStub: public CodeStub { +class NameDictionaryLookupStub: public PlatformCodeStub { public: enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP }; - StringDictionaryLookupStub(Register dictionary, - Register result, - Register index, - LookupMode mode) + NameDictionaryLookupStub(Register dictionary, + Register result, + Register index, + LookupMode mode) : dictionary_(dictionary), result_(result), index_(index), mode_(mode) { } void Generate(MacroAssembler* masm); @@ -428,7 +332,7 @@ class StringDictionaryLookupStub: public CodeStub { Label* miss, Label* done, Register properties, - Handle<String> name, + Handle<Name> name, Register r0); static void GeneratePositiveLookup(MacroAssembler* masm, @@ -446,14 +350,14 @@ class StringDictionaryLookupStub: public CodeStub { static const int kTotalProbes = 20; static const int kCapacityOffset = - StringDictionary::kHeaderSize + - StringDictionary::kCapacityIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kCapacityIndex * kPointerSize; static const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; - Major MajorKey() { return StringDictionaryLookup; } + Major MajorKey() { return NameDictionaryLookup; } int MinorKey() { return DictionaryBits::encode(dictionary_.code()) | @@ -474,7 +378,7 @@ class StringDictionaryLookupStub: public CodeStub { }; -class RecordWriteStub: public CodeStub { +class RecordWriteStub: public PlatformCodeStub { public: RecordWriteStub(Register object, Register value, @@ -498,7 +402,7 @@ class RecordWriteStub: public CodeStub { }; virtual bool IsPregenerated(); - static void GenerateFixedRegStubsAheadOfTime(); + static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate); virtual bool SometimesSetsUpAFrame() { return false; } static const byte kTwoByteNopInstruction = 0x3c; // Cmpb al, #imm8. 
@@ -657,7 +561,7 @@ class RecordWriteStub: public CodeStub { Register GetRegThatIsNotRcxOr(Register r1, Register r2, Register r3) { - for (int i = 0; i < Register::kNumAllocatableRegisters; i++) { + for (int i = 0; i < Register::NumAllocatableRegisters(); i++) { Register candidate = Register::FromAllocationIndex(i); if (candidate.is(rcx)) continue; if (candidate.is(r1)) continue; diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc index 2924810c1e..fa8b44a419 100644 --- a/deps/v8/src/x64/codegen-x64.cc +++ b/deps/v8/src/x64/codegen-x64.cc @@ -91,7 +91,38 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) { CodeDesc desc; masm.GetCode(&desc); - ASSERT(desc.reloc_size == 0); + ASSERT(!RelocInfo::RequiresRelocation(desc)); + + CPU::FlushICache(buffer, actual_size); + OS::ProtectCode(buffer, actual_size); + return FUNCTION_CAST<UnaryMathFunction>(buffer); +} + + +UnaryMathFunction CreateExpFunction() { + if (!FLAG_fast_math) return &exp; + size_t actual_size; + byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true)); + if (buffer == NULL) return &exp; + ExternalReference::InitializeMathExpData(); + + MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); + // xmm0: raw double input. + XMMRegister input = xmm0; + XMMRegister result = xmm1; + __ push(rax); + __ push(rbx); + + MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx); + + __ pop(rbx); + __ pop(rax); + __ movsd(xmm0, result); + __ Ret(); + + CodeDesc desc; + masm.GetCode(&desc); + ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); OS::ProtectCode(buffer, actual_size); @@ -115,7 +146,7 @@ UnaryMathFunction CreateSqrtFunction() { CodeDesc desc; masm.GetCode(&desc); - ASSERT(desc.reloc_size == 0); + ASSERT(!RelocInfo::RequiresRelocation(desc)); CPU::FlushICache(buffer, actual_size); OS::ProtectCode(buffer, actual_size); @@ -182,7 +213,7 @@ ModuloFunction CreateModuloFunction() { __ j(zero, &valid_result); __ fstp(0); // Drop result in st(0). int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000); - __ movq(rcx, kNaNValue, RelocInfo::NONE); + __ movq(rcx, kNaNValue, RelocInfo::NONE64); __ movq(Operand(rsp, kPointerSize), rcx); __ movsd(xmm0, Operand(rsp, kPointerSize)); __ jmp(&return_result); @@ -221,7 +252,8 @@ ModuloFunction CreateModuloFunction() { #define __ ACCESS_MASM(masm) void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( - MacroAssembler* masm) { + MacroAssembler* masm, AllocationSiteMode mode, + Label* allocation_site_info_found) { // ----------- S t a t e ------------- // -- rax : value // -- rbx : target map @@ -229,6 +261,12 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- + if (mode == TRACK_ALLOCATION_SITE) { + ASSERT(allocation_site_info_found != NULL); + __ TestJSArrayForAllocationSiteInfo(rdx, rdi); + __ j(equal, allocation_site_info_found); + } + // Set transitioned map. 
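// Illustrative sketch (not part of this patch), assuming the AllocationSiteMode
// enum used above is in scope: with TRACK_ALLOCATION_SITE the transition
// generators first test whether the array is followed by allocation site info
// and bail out to the supplied label, so the site can record the ElementsKind
// transition before the map is changed.
static bool ShouldBailOutToAllocationSite(AllocationSiteMode mode,
                                          bool has_allocation_site_info) {
  return mode == TRACK_ALLOCATION_SITE && has_allocation_site_info;
}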
__ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx); __ RecordWriteField(rdx, @@ -242,7 +280,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( void ElementsTransitionGenerator::GenerateSmiToDouble( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- rax : value // -- rbx : target map @@ -253,6 +291,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( // The fail label is not actually used since we do not allocate. Label allocated, new_backing_store, only_change_map, done; + if (mode == TRACK_ALLOCATION_SITE) { + __ TestJSArrayForAllocationSiteInfo(rdx, rdi); + __ j(equal, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -297,7 +340,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize); Label loop, entry, convert_hole; - __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE); + __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64); // r15: the-hole NaN __ jmp(&entry); @@ -364,7 +407,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble( void ElementsTransitionGenerator::GenerateDoubleToObject( - MacroAssembler* masm, Label* fail) { + MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { // ----------- S t a t e ------------- // -- rax : value // -- rbx : target map @@ -374,6 +417,11 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( // ----------------------------------- Label loop, entry, convert_hole, gc_required, only_change_map; + if (mode == TRACK_ALLOCATION_SITE) { + __ TestJSArrayForAllocationSiteInfo(rdx, rdi); + __ j(equal, fail); + } + // Check for empty arrays, which only require a map transition and no changes // to the backing store. __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset)); @@ -395,7 +443,7 @@ void ElementsTransitionGenerator::GenerateDoubleToObject( __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14); // Prepare for conversion loop. - __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE); + __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64); __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex); // rsi: the-hole NaN // rdi: pointer to the-hole @@ -506,7 +554,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, // the string. __ bind(&cons_string); __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset), - Heap::kEmptyStringRootIndex); + Heap::kempty_stringRootIndex); __ j(not_equal, call_runtime); __ movq(string, FieldOperand(string, ConsString::kFirstOffset)); @@ -551,7 +599,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, // Dispatch on the encoding: ASCII or two-byte. 
Label ascii; __ bind(&seq_string); - STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0); + STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); __ testb(result, Immediate(kStringEncodingMask)); __ j(not_zero, &ascii, Label::kNear); @@ -571,12 +619,167 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, __ movzxbl(result, FieldOperand(string, index, times_1, - SeqAsciiString::kHeaderSize)); + SeqOneByteString::kHeaderSize)); + __ bind(&done); +} + + +void SeqStringSetCharGenerator::Generate(MacroAssembler* masm, + String::Encoding encoding, + Register string, + Register index, + Register value) { + if (FLAG_debug_code) { + __ Check(masm->CheckSmi(index), "Non-smi index"); + __ Check(masm->CheckSmi(value), "Non-smi value"); + + __ SmiCompare(index, FieldOperand(string, String::kLengthOffset)); + __ Check(less, "Index is too large"); + + __ SmiCompare(index, Smi::FromInt(0)); + __ Check(greater_equal, "Index is negative"); + + __ push(value); + __ movq(value, FieldOperand(string, HeapObject::kMapOffset)); + __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset)); + + __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask)); + static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; + static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; + __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING + ? one_byte_seq_type : two_byte_seq_type)); + __ Check(equal, "Unexpected string type"); + __ pop(value); + } + + __ SmiToInteger32(value, value); + __ SmiToInteger32(index, index); + if (encoding == String::ONE_BYTE_ENCODING) { + __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize), + value); + } else { + __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize), + value); + } +} + + +void MathExpGenerator::EmitMathExp(MacroAssembler* masm, + XMMRegister input, + XMMRegister result, + XMMRegister double_scratch, + Register temp1, + Register temp2) { + ASSERT(!input.is(result)); + ASSERT(!input.is(double_scratch)); + ASSERT(!result.is(double_scratch)); + ASSERT(!temp1.is(temp2)); + ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); + + Label done; + + __ movq(kScratchRegister, ExternalReference::math_exp_constants(0)); + __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize)); + __ xorpd(result, result); + __ ucomisd(double_scratch, input); + __ j(above_equal, &done); + __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize)); + __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize)); + __ j(above_equal, &done); + __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize)); + __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize)); + __ mulsd(double_scratch, input); + __ addsd(double_scratch, result); + __ movq(temp2, double_scratch); + __ subsd(double_scratch, result); + __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize)); + __ lea(temp1, Operand(temp2, 0x1ff800)); + __ and_(temp2, Immediate(0x7ff)); + __ shr(temp1, Immediate(11)); + __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize)); + __ movq(kScratchRegister, ExternalReference::math_exp_log_table()); + __ shl(temp1, Immediate(52)); + __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0)); + __ movq(kScratchRegister, ExternalReference::math_exp_constants(0)); + __ subsd(double_scratch, input); + __ movsd(input, double_scratch); + __ subsd(result, double_scratch); + __ 
mulsd(input, double_scratch); + __ mulsd(result, input); + __ movq(input, temp1); + __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize)); + __ subsd(result, double_scratch); + __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize)); + __ mulsd(result, input); + __ bind(&done); } #undef __ + +static const int kNoCodeAgeSequenceLength = 6; + +static byte* GetNoCodeAgeSequence(uint32_t* length) { + static bool initialized = false; + static byte sequence[kNoCodeAgeSequenceLength]; + *length = kNoCodeAgeSequenceLength; + if (!initialized) { + // The sequence of instructions that is patched out for aging code is the + // following boilerplate stack-building prologue that is found both in + // FUNCTION and OPTIMIZED_FUNCTION code: + CodePatcher patcher(sequence, kNoCodeAgeSequenceLength); + patcher.masm()->push(rbp); + patcher.masm()->movq(rbp, rsp); + patcher.masm()->push(rsi); + patcher.masm()->push(rdi); + initialized = true; + } + return sequence; +} + + +bool Code::IsYoungSequence(byte* sequence) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + bool result = (!memcmp(sequence, young_sequence, young_length)); + ASSERT(result || *sequence == kCallOpcode); + return result; +} + + +void Code::GetCodeAgeAndParity(byte* sequence, Age* age, + MarkingParity* parity) { + if (IsYoungSequence(sequence)) { + *age = kNoAge; + *parity = NO_MARKING_PARITY; + } else { + sequence++; // Skip the kCallOpcode byte + Address target_address = sequence + *reinterpret_cast<int*>(sequence) + + Assembler::kCallTargetAddressOffset; + Code* stub = GetCodeFromTargetAddress(target_address); + GetCodeAgeAndParity(stub, age, parity); + } +} + + +void Code::PatchPlatformCodeAge(byte* sequence, + Code::Age age, + MarkingParity parity) { + uint32_t young_length; + byte* young_sequence = GetNoCodeAgeSequence(&young_length); + if (age == kNoAge) { + memcpy(sequence, young_sequence, young_length); + CPU::FlushICache(sequence, young_length); + } else { + Code* stub = GetCodeAgeStub(age, parity); + CodePatcher patcher(sequence, young_length); + patcher.masm()->call(stub->instruction_start()); + patcher.masm()->nop(); + } +} + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h index 2e80751033..3a7646bd1b 100644 --- a/deps/v8/src/x64/codegen-x64.h +++ b/deps/v8/src/x64/codegen-x64.h @@ -39,12 +39,15 @@ class CompilationInfo; enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; - // ------------------------------------------------------------------------- // CodeGenerator class CodeGenerator: public AstVisitor { public: + CodeGenerator() { + InitializeAstVisitor(); + } + static bool MakeCode(CompilationInfo* info); // Printing of AST, etc. as requested by flags. 
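The code-aging helpers added above hinge on one invariant: a freshly compiled function starts with the fixed six-byte prologue produced by GetNoCodeAgeSequence, and aging replaces exactly those bytes with a call into an age-specific stub. Recognizing and un-aging a function therefore reduces to a byte comparison and a byte copy; a minimal sketch of that logic (helper names are illustrative, not V8's):

#include <cstddef>
#include <cstring>

// True while the function still carries the original "young" prologue bytes;
// false once those bytes have been overwritten with a call to an age stub.
bool IsYoungPrologue(const unsigned char* prologue,
                     const unsigned char* young_reference,
                     std::size_t length) {
  return std::memcmp(prologue, young_reference, length) == 0;
}

// Restore the original prologue, making the code "young" again. A real
// implementation must also flush the instruction cache for the patched range.
void MakePrologueYoung(unsigned char* prologue,
                       const unsigned char* young_reference,
                       std::size_t length) {
  std::memcpy(prologue, young_reference, length);
}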
@@ -64,6 +67,8 @@ class CodeGenerator: public AstVisitor { int pos, bool right_here = false); + DEFINE_AST_VISITOR_SUBCLASS_MEMBERS(); + private: DISALLOW_COPY_AND_ASSIGN(CodeGenerator); }; @@ -84,6 +89,20 @@ class StringCharLoadGenerator : public AllStatic { DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator); }; + +class MathExpGenerator : public AllStatic { + public: + static void EmitMathExp(MacroAssembler* masm, + XMMRegister input, + XMMRegister result, + XMMRegister double_scratch, + Register temp1, + Register temp2); + + private: + DISALLOW_COPY_AND_ASSIGN(MathExpGenerator); +}; + } } // namespace v8::internal #endif // V8_X64_CODEGEN_X64_H_ diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc index a3fe8f9cf7..0bb02dbe7c 100644 --- a/deps/v8/src/x64/deoptimizer-x64.cc +++ b/deps/v8/src/x64/deoptimizer-x64.cc @@ -46,11 +46,14 @@ int Deoptimizer::patch_size() { } -void Deoptimizer::DeoptimizeFunction(JSFunction* function) { - HandleScope scope; +void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList( + JSFunction* function) { + Isolate* isolate = function->GetIsolate(); + HandleScope scope(isolate); AssertNoAllocation no_allocation; - if (!function->IsOptimized()) return; + ASSERT(function->IsOptimized()); + ASSERT(function->FunctionsInFunctionListShareSameCode()); // The optimized code is going to be patched, so we cannot use it // any more. Play safe and reset the whole cache. @@ -82,7 +85,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { // There is room enough to write a long call instruction because we pad // LLazyBailout instructions with nops if necessary. CodePatcher patcher(call_address, Assembler::kCallInstructionLength); - patcher.masm()->Call(GetDeoptimizationEntry(i, LAZY), RelocInfo::NONE); + patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY), + RelocInfo::NONE64); ASSERT(prev_call_address == NULL || call_address >= prev_call_address + patch_size()); ASSERT(call_address + patch_size() <= code->instruction_end()); @@ -91,8 +95,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { #endif } - Isolate* isolate = code->GetIsolate(); - // Add the deoptimizing code to the list. DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); DeoptimizerData* data = isolate->deoptimizer_data(); @@ -116,8 +118,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) { static const byte kJnsInstruction = 0x79; static const byte kJnsOffset = 0x1f; -static const byte kJaeInstruction = 0x73; -static const byte kJaeOffset = 0x07; static const byte kCallInstruction = 0xe8; static const byte kNopByteOne = 0x66; static const byte kNopByteTwo = 0x90; @@ -129,31 +129,26 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code, Address call_target_address = pc_after - kIntSize; ASSERT_EQ(check_code->entry(), Assembler::target_address_at(call_target_address)); - // The stack check code matches the pattern: + // The back edge bookkeeping code matches the pattern: // - // cmp rsp, <limit> - // jae ok + // add <profiling_counter>, <-delta> + // jns ok // call <stack guard> // test rax, <loop nesting depth> // ok: ... 
// // We will patch away the branch so the code is: // - // cmp rsp, <limit> ;; Not changed + // add <profiling_counter>, <-delta> ;; Not changed // nop // nop // call <on-stack replacment> // test rax, <loop nesting depth> // ok: // - if (FLAG_count_based_interrupts) { - ASSERT_EQ(kJnsInstruction, *(call_target_address - 3)); - ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); - } else { - ASSERT_EQ(kJaeInstruction, *(call_target_address - 3)); - ASSERT_EQ(kJaeOffset, *(call_target_address - 2)); - } - ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); + ASSERT_EQ(kJnsInstruction, *(call_target_address - 3)); + ASSERT_EQ(kJnsOffset, *(call_target_address - 2)); + ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); *(call_target_address - 3) = kNopByteOne; *(call_target_address - 2) = kNopByteTwo; Assembler::set_target_address_at(call_target_address, @@ -176,13 +171,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code, ASSERT_EQ(kNopByteOne, *(call_target_address - 3)); ASSERT_EQ(kNopByteTwo, *(call_target_address - 2)); ASSERT_EQ(kCallInstruction, *(call_target_address - 1)); - if (FLAG_count_based_interrupts) { - *(call_target_address - 3) = kJnsInstruction; - *(call_target_address - 2) = kJnsOffset; - } else { - *(call_target_address - 3) = kJaeInstruction; - *(call_target_address - 2) = kJaeOffset; - } + *(call_target_address - 3) = kJnsInstruction; + *(call_target_address - 2) = kJnsOffset; Assembler::set_target_address_at(call_target_address, check_code->entry()); @@ -211,7 +201,7 @@ static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) { void Deoptimizer::DoComputeOsrOutputFrame() { DeoptimizationInputData* data = DeoptimizationInputData::cast( - optimized_code_->deoptimization_data()); + compiled_code_->deoptimization_data()); unsigned ast_id = data->OsrAstId()->value(); // TODO(kasperl): This should not be the bailout_id_. It should be // the ast id. Confusing. @@ -248,7 +238,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned input_frame_size = input_->GetFrameSize(); ASSERT(fixed_size + height_in_bytes == input_frame_size); - unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize; + unsigned stack_slot_size = compiled_code_->stack_slots() * kPointerSize; unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value(); unsigned outgoing_size = outgoing_height * kPointerSize; unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size; @@ -340,7 +330,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() { unsigned pc_offset = data->OsrPcOffset()->value(); intptr_t pc = reinterpret_cast<intptr_t>( - optimized_code_->entry() + pc_offset); + compiled_code_->entry() + pc_offset); output_[0]->SetPc(pc); } Code* continuation = @@ -358,346 +348,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() { } -void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator, - int frame_index) { - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. 
- FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR); - - // Arguments adaptor can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - intptr_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; caller's fp\n", - fp_value, output_offset, value); - } - - // A marker value is used in place of the context. - output_offset -= kPointerSize; - intptr_t context = reinterpret_cast<intptr_t>( - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); - output_frame->SetFrameSlot(output_offset, context); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; context (adaptor sentinel)\n", - top_address + output_offset, output_offset, context); - } - - // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(function); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; function\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. 
- output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - ASSERT(0 == output_offset); - - Builtins* builtins = isolate_->builtins(); - Code* adaptor_trampoline = - builtins->builtin(Builtins::kArgumentsAdaptorTrampoline); - intptr_t pc_value = reinterpret_cast<intptr_t>( - adaptor_trampoline->instruction_start() + - isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value()); - output_frame->SetPc(pc_value); -} - - -void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator, - int frame_index) { - Builtins* builtins = isolate_->builtins(); - Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric); - JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next())); - unsigned height = iterator->Next(); - unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { - PrintF(" translating construct stub => height=%d\n", height_in_bytes); - } - - unsigned fixed_frame_size = 7 * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, function); - output_frame->SetFrameType(StackFrame::CONSTRUCT); - - // Construct stub can not be topmost or bottommost. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous - // frame's top and this frame's size. - intptr_t top_address; - top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - // Compute the incoming parameter translation. - int parameter_count = height; - unsigned output_offset = output_frame_size; - for (int i = 0; i < parameter_count; ++i) { - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. 
- output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; function (construct sentinel)\n", - top_address + output_offset, output_offset, value); - } - - // The output frame reflects a JSConstructStubGeneric frame. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(construct_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Number of incoming arguments. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; argc (%d)\n", - top_address + output_offset, output_offset, value, height - 1); - } - - // The newly allocated object was passed as receiver in the artificial - // constructor stub environment created by HEnvironment::CopyForInlining(). - output_offset -= kPointerSize; - value = output_frame->GetFrameSlot(output_frame_size - kPointerSize); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" - V8PRIxPTR " ; allocated receiver\n", - top_address + output_offset, output_offset, value); - } - - ASSERT(0 == output_offset); - - intptr_t pc = reinterpret_cast<intptr_t>( - construct_stub->instruction_start() + - isolate_->heap()->construct_stub_deopt_pc_offset()->value()); - output_frame->SetPc(pc); -} - - -void Deoptimizer::DoComputeAccessorStubFrame(TranslationIterator* iterator, - int frame_index, - bool is_setter_stub_frame) { - JSFunction* accessor = JSFunction::cast(ComputeLiteral(iterator->Next())); - // The receiver (and the implicit return value, if any) are expected in - // registers by the LoadIC/StoreIC, so they don't belong to the output stack - // frame. This means that we have to use a height of 0. - unsigned height = 0; - unsigned height_in_bytes = height * kPointerSize; - const char* kind = is_setter_stub_frame ? "setter" : "getter"; - if (FLAG_trace_deopt) { - PrintF(" translating %s stub => height=%u\n", kind, height_in_bytes); - } - - // We need 1 stack entry for the return address + 4 stack entries from - // StackFrame::INTERNAL (FP, context, frame type, code object, see - // MacroAssembler::EnterFrame). For a setter stub frame we need one additional - // entry for the implicit return value, see - // StoreStubCompiler::CompileStoreViaSetter. - unsigned fixed_frame_entries = 1 + 4 + (is_setter_stub_frame ? 1 : 0); - unsigned fixed_frame_size = fixed_frame_entries * kPointerSize; - unsigned output_frame_size = height_in_bytes + fixed_frame_size; - - // Allocate and store the output frame description. - FrameDescription* output_frame = - new(output_frame_size) FrameDescription(output_frame_size, accessor); - output_frame->SetFrameType(StackFrame::INTERNAL); - - // A frame for an accessor stub can not be the topmost or bottommost one. - ASSERT(frame_index > 0 && frame_index < output_count_ - 1); - ASSERT(output_[frame_index] == NULL); - output_[frame_index] = output_frame; - - // The top address of the frame is computed from the previous frame's top and - // this frame's size. 
- intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size; - output_frame->SetTop(top_address); - - unsigned output_offset = output_frame_size; - - // Read caller's PC from the previous frame. - output_offset -= kPointerSize; - intptr_t callers_pc = output_[frame_index - 1]->GetPc(); - output_frame->SetFrameSlot(output_offset, callers_pc); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's pc\n", - top_address + output_offset, output_offset, callers_pc); - } - - // Read caller's FP from the previous frame, and set this frame's FP. - output_offset -= kPointerSize; - intptr_t value = output_[frame_index - 1]->GetFp(); - output_frame->SetFrameSlot(output_offset, value); - intptr_t fp_value = top_address + output_offset; - output_frame->SetFp(fp_value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; caller's fp\n", - fp_value, output_offset, value); - } - - // The context can be gotten from the previous frame. - output_offset -= kPointerSize; - value = output_[frame_index - 1]->GetContext(); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; context\n", - top_address + output_offset, output_offset, value); - } - - // A marker value is used in place of the function. - output_offset -= kPointerSize; - value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::INTERNAL)); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; function (%s sentinel)\n", - top_address + output_offset, output_offset, value, kind); - } - - // Get Code object from accessor stub. - output_offset -= kPointerSize; - Builtins::Name name = is_setter_stub_frame ? - Builtins::kStoreIC_Setter_ForDeopt : - Builtins::kLoadIC_Getter_ForDeopt; - Code* accessor_stub = isolate_->builtins()->builtin(name); - value = reinterpret_cast<intptr_t>(accessor_stub); - output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { - PrintF(" 0x%08" V8PRIxPTR ": [top + %u] <- 0x%08" V8PRIxPTR - " ; code object\n", - top_address + output_offset, output_offset, value); - } - - // Skip receiver. - Translation::Opcode opcode = - static_cast<Translation::Opcode>(iterator->Next()); - iterator->Skip(Translation::NumberOfOperandsFor(opcode)); - - if (is_setter_stub_frame) { - // The implicit return value was part of the artificial setter stub - // environment. - output_offset -= kPointerSize; - DoTranslateCommand(iterator, frame_index, output_offset); - } - - ASSERT(0 == output_offset); - - Smi* offset = is_setter_stub_frame ? 
- isolate_->heap()->setter_stub_deopt_pc_offset() : - isolate_->heap()->getter_stub_deopt_pc_offset(); - intptr_t pc = reinterpret_cast<intptr_t>( - accessor_stub->instruction_start() + offset->value()); - output_frame->SetPc(pc); -} - - void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, int frame_index) { BailoutId node_id = BailoutId(iterator->Next()); @@ -712,7 +362,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, } unsigned height = iterator->Next(); unsigned height_in_bytes = height * kPointerSize; - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" translating "); function->PrintName(); PrintF(" => node=%d, height=%d\n", node_id.ToInt(), height_in_bytes); @@ -776,7 +426,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, value = output_[frame_index - 1]->GetPc(); } output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; caller's pc\n", top_address + output_offset, output_offset, value); @@ -798,7 +448,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, ASSERT(!is_bottommost || input_->GetRegister(rbp.code()) == fp_value); output_frame->SetFp(fp_value); if (is_topmost) output_frame->SetRegister(rbp.code(), fp_value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; caller's fp\n", fp_value, output_offset, value); @@ -817,7 +467,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, output_frame->SetFrameSlot(output_offset, value); output_frame->SetContext(value); if (is_topmost) output_frame->SetRegister(rsi.code(), value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR "; context\n", top_address + output_offset, output_offset, value); @@ -831,7 +481,7 @@ void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator, // input frame. 
ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value); output_frame->SetFrameSlot(output_offset, value); - if (FLAG_trace_deopt) { + if (trace_) { PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR "; function\n", top_address + output_offset, output_offset, value); @@ -878,7 +528,7 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } input_->SetRegister(rsp.code(), reinterpret_cast<intptr_t>(frame->sp())); input_->SetRegister(rbp.code(), reinterpret_cast<intptr_t>(frame->fp())); - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { input_->SetDoubleRegister(i, 0.0); } @@ -889,6 +539,27 @@ void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { } +void Deoptimizer::SetPlatformCompiledStubRegisters( + FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { + intptr_t handler = + reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_); + int params = descriptor->register_param_count_; + if (descriptor->stack_parameter_count_ != NULL) { + params++; + } + output_frame->SetRegister(rax.code(), params); + output_frame->SetRegister(rbx.code(), handler); +} + + +void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { + for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { + double double_value = input_->GetDoubleRegister(i); + output_frame->SetDoubleRegister(i, double_value); + } +} + + #define __ masm()-> void Deoptimizer::EntryGenerator::Generate() { @@ -898,10 +569,10 @@ void Deoptimizer::EntryGenerator::Generate() { const int kNumberOfRegisters = Register::kNumRegisters; const int kDoubleRegsSize = kDoubleSize * - XMMRegister::kNumAllocatableRegisters; + XMMRegister::NumAllocatableRegisters(); __ subq(rsp, Immediate(kDoubleRegsSize)); - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); int offset = i * kDoubleSize; __ movsd(Operand(rsp, offset), xmm_reg); @@ -990,7 +661,7 @@ void Deoptimizer::EntryGenerator::Generate() { // Fill in the double input registers. int double_regs_offset = FrameDescription::double_registers_offset(); - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { int dst_offset = i * kDoubleSize + double_regs_offset; __ pop(Operand(rbx, dst_offset)); } @@ -1011,10 +682,13 @@ void Deoptimizer::EntryGenerator::Generate() { // limit and copy the contents of the activation frame to the input // frame description. __ lea(rdx, Operand(rbx, FrameDescription::frame_content_offset())); + Label pop_loop_header; + __ jmp(&pop_loop_header); Label pop_loop; __ bind(&pop_loop); __ pop(Operand(rdx, 0)); __ addq(rdx, Immediate(sizeof(intptr_t))); + __ bind(&pop_loop_header); __ cmpq(rcx, rsp); __ j(not_equal, &pop_loop); @@ -1031,32 +705,34 @@ void Deoptimizer::EntryGenerator::Generate() { __ pop(rax); // Replace the current frame with the output frames. - Label outer_push_loop, inner_push_loop; + Label outer_push_loop, inner_push_loop, + outer_loop_header, inner_loop_header; // Outer loop state: rax = current FrameDescription**, rdx = one past the // last FrameDescription**. 
__ movl(rdx, Operand(rax, Deoptimizer::output_count_offset())); __ movq(rax, Operand(rax, Deoptimizer::output_offset())); __ lea(rdx, Operand(rax, rdx, times_8, 0)); + __ jmp(&outer_loop_header); __ bind(&outer_push_loop); // Inner loop state: rbx = current FrameDescription*, rcx = loop index. __ movq(rbx, Operand(rax, 0)); __ movq(rcx, Operand(rbx, FrameDescription::frame_size_offset())); + __ jmp(&inner_loop_header); __ bind(&inner_push_loop); __ subq(rcx, Immediate(sizeof(intptr_t))); __ push(Operand(rbx, rcx, times_1, FrameDescription::frame_content_offset())); + __ bind(&inner_loop_header); __ testq(rcx, rcx); __ j(not_zero, &inner_push_loop); __ addq(rax, Immediate(kPointerSize)); + __ bind(&outer_loop_header); __ cmpq(rax, rdx); __ j(below, &outer_push_loop); - // In case of OSR, we have to restore the XMM registers. - if (type() == OSR) { - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; ++i) { - XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); - int src_offset = i * kDoubleSize + double_regs_offset; - __ movsd(xmm_reg, Operand(rbx, src_offset)); - } + for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); ++i) { + XMMRegister xmm_reg = XMMRegister::FromAllocationIndex(i); + int src_offset = i * kDoubleSize + double_regs_offset; + __ movsd(xmm_reg, Operand(rbx, src_offset)); } // Push state, pc, and continuation from the last output frame. diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc index c8606c40b2..fb0914d7d0 100644 --- a/deps/v8/src/x64/disasm-x64.cc +++ b/deps/v8/src/x64/disasm-x64.cc @@ -1244,6 +1244,13 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) { AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop)); current += PrintRightXMMOperand(current); + } else if (opcode == 0x50) { + // movmskps reg, xmm + int mod, regop, rm; + get_modrm(*current, &mod, ®op, &rm); + AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop)); + current += PrintRightXMMOperand(current); + } else if ((opcode & 0xF0) == 0x80) { // Jcc: Conditional jump (branch). current = data + JumpConditional(data); @@ -1724,6 +1731,11 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer, data += F6F7Instruction(data); break; + case 0x3C: + AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1)); + data +=2; + break; + default: UnimplementedInstruction(); data += 1; diff --git a/deps/v8/src/x64/frames-x64.cc b/deps/v8/src/x64/frames-x64.cc index 6c58bc9e0c..1d6adfdc21 100644 --- a/deps/v8/src/x64/frames-x64.cc +++ b/deps/v8/src/x64/frames-x64.cc @@ -29,6 +29,9 @@ #if defined(V8_TARGET_ARCH_X64) +#include "assembler.h" +#include "assembler-x64.h" +#include "assembler-x64-inl.h" #include "frames-inl.h" namespace v8 { @@ -40,6 +43,10 @@ Address ExitFrame::ComputeStackPointer(Address fp) { } +Register StubFailureTrampolineFrame::fp_register() { return rbp; } +Register StubFailureTrampolineFrame::context_register() { return rsi; } + + } } // namespace v8::internal #endif // V8_TARGET_ARCH_X64 diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h index 3e3d63d62b..53c3459063 100644 --- a/deps/v8/src/x64/frames-x64.h +++ b/deps/v8/src/x64/frames-x64.h @@ -85,20 +85,6 @@ class ExitFrameConstants : public AllStatic { }; -class StandardFrameConstants : public AllStatic { - public: - // Fixed part of the frame consists of return address, caller fp, - // context and function. 
- static const int kFixedFrameSize = 4 * kPointerSize; - static const int kExpressionsOffset = -3 * kPointerSize; - static const int kMarkerOffset = -2 * kPointerSize; - static const int kContextOffset = -1 * kPointerSize; - static const int kCallerFPOffset = 0 * kPointerSize; - static const int kCallerPCOffset = +1 * kPointerSize; - static const int kCallerSPOffset = +2 * kPointerSize; -}; - - class JavaScriptFrameConstants : public AllStatic { public: // FP-relative. @@ -114,14 +100,30 @@ class JavaScriptFrameConstants : public AllStatic { class ArgumentsAdaptorFrameConstants : public AllStatic { public: + // FP-relative. static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset; + static const int kFrameSize = StandardFrameConstants::kFixedFrameSize + kPointerSize; }; +class ConstructFrameConstants : public AllStatic { + public: + // FP-relative. + static const int kImplicitReceiverOffset = -5 * kPointerSize; + static const int kConstructorOffset = kMinInt; + static const int kLengthOffset = -4 * kPointerSize; + static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; + + static const int kFrameSize = + StandardFrameConstants::kFixedFrameSize + 3 * kPointerSize; +}; + + class InternalFrameConstants : public AllStatic { public: + // FP-relative. static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset; }; diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc index 475fb9de34..8ff12df361 100644 --- a/deps/v8/src/x64/full-codegen-x64.cc +++ b/deps/v8/src/x64/full-codegen-x64.cc @@ -119,7 +119,7 @@ void FullCodeGenerator::Generate() { handler_table_ = isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED); profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell( - Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget))); + Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); SetFunctionPosition(function()); Comment cmnt(masm_, "[ function compiled by full code generator"); @@ -127,7 +127,7 @@ void FullCodeGenerator::Generate() { #ifdef DEBUG if (strlen(FLAG_stop_at) > 0 && - info->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { + info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { __ int3(); } #endif @@ -152,6 +152,7 @@ void FullCodeGenerator::Generate() { // the frame (that is done below). FrameScope frame_scope(masm_, StackFrame::MANUAL); + info->set_prologue_offset(masm_->pc_offset()); __ push(rbp); // Caller's frame pointer. __ movq(rbp, rsp); __ push(rsi); // Callee's context. 
@@ -318,40 +319,33 @@ void FullCodeGenerator::EmitProfilingCounterReset() { __ movq(rbx, profiling_counter_, RelocInfo::EMBEDDED_OBJECT); __ movq(kScratchRegister, reinterpret_cast<uint64_t>(Smi::FromInt(reset_value)), - RelocInfo::NONE); + RelocInfo::NONE64); __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), kScratchRegister); } -void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, - Label* back_edge_target) { - Comment cmnt(masm_, "[ Stack check"); +void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, + Label* back_edge_target) { + Comment cmnt(masm_, "[ Back edge bookkeeping"); Label ok; - if (FLAG_count_based_interrupts) { - int weight = 1; - if (FLAG_weighted_back_edges) { - ASSERT(back_edge_target->is_bound()); - int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); - weight = Min(kMaxBackEdgeWeight, - Max(1, distance / kBackEdgeDistanceUnit)); - } - EmitProfilingCounterDecrement(weight); - __ j(positive, &ok, Label::kNear); - InterruptStub stub; - __ CallStub(&stub); - } else { - __ CompareRoot(rsp, Heap::kStackLimitRootIndex); - __ j(above_equal, &ok, Label::kNear); - StackCheckStub stub; - __ CallStub(&stub); + int weight = 1; + if (FLAG_weighted_back_edges) { + ASSERT(back_edge_target->is_bound()); + int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target); + weight = Min(kMaxBackEdgeWeight, + Max(1, distance / kBackEdgeDistanceUnit)); } + EmitProfilingCounterDecrement(weight); + __ j(positive, &ok, Label::kNear); + InterruptStub stub; + __ CallStub(&stub); // Record a mapping of this PC offset to the OSR id. This is used to find // the AST id from the unoptimized code in order to use it as a key into // the deoptimization input data found in the optimized code. - RecordStackCheck(stmt->OsrEntryId()); + RecordBackEdge(stmt->OsrEntryId()); // Loop stack checks can be patched to perform on-stack replacement. In // order to decide whether or not to perform OSR we embed the loop depth @@ -360,9 +354,7 @@ void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt, ASSERT(loop_depth() > 0); __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker))); - if (FLAG_count_based_interrupts) { - EmitProfilingCounterReset(); - } + EmitProfilingCounterReset(); __ bind(&ok); PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); @@ -759,8 +751,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { - // The variable in the declaration always resides in the current function - // context. + // The variable in the declaration always resides in the current context. ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); if (generate_debug_code_) { // Check that we're not inside a with or catch context. 
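With the EmitBackEdgeBookkeeping rewrite above, counter-based interrupts are the only mode: each loop back edge decrements a per-function profiling counter by a weight proportional to the loop body's code size and calls the interrupt stub once the counter drops to zero or below, then resets it. A small sketch of that bookkeeping, with invented names and an assumed reset mechanism rather than V8's actual constants:

#include <algorithm>

struct ProfilingCounter {
  int value;
  int reset_value;
};

// Called on every loop back edge. Bigger loop bodies drain the counter
// faster, so the interrupt fires at a roughly code-size-proportional rate
// (mirrors EmitProfilingCounterDecrement plus the "j(positive, &ok)" above).
void OnBackEdge(ProfilingCounter* counter,
                int bytes_since_loop_header,
                int max_weight,
                int distance_unit,
                void (*interrupt)()) {
  int weight = std::min(max_weight,
                        std::max(1, bytes_since_loop_header / distance_unit));
  counter->value -= weight;
  if (counter->value <= 0) {
    interrupt();                             // InterruptStub in the real code
    counter->value = counter->reset_value;   // EmitProfilingCounterReset
  }
}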
@@ -891,33 +882,32 @@ void FullCodeGenerator::VisitFunctionDeclaration( void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { - VariableProxy* proxy = declaration->proxy(); - Variable* variable = proxy->var(); - Handle<JSModule> instance = declaration->module()->interface()->Instance(); - ASSERT(!instance.is_null()); + Variable* variable = declaration->proxy()->var(); + ASSERT(variable->location() == Variable::CONTEXT); + ASSERT(variable->interface()->IsFrozen()); - switch (variable->location()) { - case Variable::UNALLOCATED: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - globals_->Add(variable->name(), zone()); - globals_->Add(instance, zone()); - Visit(declaration->module()); - break; - } + Comment cmnt(masm_, "[ ModuleDeclaration"); + EmitDebugCheckDeclarationContext(variable); - case Variable::CONTEXT: { - Comment cmnt(masm_, "[ ModuleDeclaration"); - EmitDebugCheckDeclarationContext(variable); - __ Move(ContextOperand(rsi, variable->index()), instance); - Visit(declaration->module()); - break; - } + // Load instance object. + __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope())); + __ movq(rax, ContextOperand(rax, variable->interface()->Index())); + __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX)); - case Variable::PARAMETER: - case Variable::LOCAL: - case Variable::LOOKUP: - UNREACHABLE(); - } + // Assign it. + __ movq(ContextOperand(rsi, variable->index()), rax); + // We know that we have written a module, which is not a smi. + __ RecordWriteContextSlot(rsi, + Context::SlotOffset(variable->index()), + rax, + rcx, + kDontSaveFPRegs, + EMIT_REMEMBERED_SET, + OMIT_SMI_CHECK); + PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); + + // Traverse into body. + Visit(declaration->module()); } @@ -959,6 +949,14 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { } +void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { + // Call the runtime to declare the modules. + __ Push(descriptions); + __ CallRuntime(Runtime::kDeclareModules, 1); + // Return value is ignored. +} + + void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { Comment cmnt(masm_, "[ SwitchStatement"); Breakable nested_statement(this, stmt); @@ -1009,7 +1007,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { // Record position before stub call for type feedback. SetSourcePosition(clause->position()); - Handle<Code> ic = CompareIC::GetUninitialized(Token::EQ_STRICT); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT); CallIC(ic, RelocInfo::CODE_TARGET, clause->CompareId()); patch_site.EmitPatchInfo(); @@ -1137,7 +1135,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { Handle<JSGlobalPropertyCell> cell = isolate()->factory()->NewJSGlobalPropertyCell( Handle<Object>( - Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker))); + Smi::FromInt(TypeFeedbackCells::kForInFastCaseMarker), + isolate())); RecordTypeFeedbackCell(stmt->ForInFeedbackId(), cell); __ LoadHeapObject(rbx, cell); __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), @@ -1214,7 +1213,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { __ bind(loop_statement.continue_label()); __ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1)); - EmitStackCheck(stmt, &loop); + EmitBackEdgeBookkeeping(stmt, &loop); __ jmp(&loop); // Remove the pointers stored on the stack. 
@@ -1368,9 +1367,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, } else if (var->mode() == DYNAMIC_LOCAL) { Variable* local = var->local_if_not_shadowed(); __ movq(rax, ContextSlotOperandCheckExtensions(local, slow)); - if (local->mode() == CONST || - local->mode() == CONST_HARMONY || - local->mode() == LET) { + if (local->mode() == LET || + local->mode() == CONST || + local->mode() == CONST_HARMONY) { __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); __ j(not_equal, done); if (local->mode() == CONST) { @@ -1515,7 +1514,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -1553,24 +1552,34 @@ void FullCodeGenerator::EmitAccessor(Expression* expression) { void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { Comment cmnt(masm_, "[ ObjectLiteral"); Handle<FixedArray> constant_properties = expr->constant_properties(); - __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); - __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset)); - __ Push(Smi::FromInt(expr->literal_index())); - __ Push(constant_properties); int flags = expr->fast_elements() ? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags; flags |= expr->has_function() ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; - __ Push(Smi::FromInt(flags)); int properties_count = constant_properties->length() / 2; if (expr->depth() > 1) { + __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); + __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset)); + __ Push(Smi::FromInt(expr->literal_index())); + __ Push(constant_properties); + __ Push(Smi::FromInt(flags)); __ CallRuntime(Runtime::kCreateObjectLiteral, 4); - } else if (flags != ObjectLiteral::kFastElements || + } else if (Serializer::enabled() || flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); + __ push(FieldOperand(rdi, JSFunction::kLiteralsOffset)); + __ Push(Smi::FromInt(expr->literal_index())); + __ Push(constant_properties); + __ Push(Smi::FromInt(flags)); __ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4); } else { + __ movq(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); + __ movq(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset)); + __ Move(rbx, Smi::FromInt(expr->literal_index())); + __ Move(rcx, constant_properties); + __ Move(rdx, Smi::FromInt(flags)); FastCloneShallowObjectStub stub(properties_count); __ CallStub(&stub); } @@ -1602,7 +1611,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { ASSERT(!CompileTimeValue::IsCompileTimeValue(value)); // Fall through. case ObjectLiteral::Property::COMPUTED: - if (key->handle()->IsSymbol()) { + if (key->handle()->IsInternalizedString()) { if (property->emit_store()) { VisitForAccumulatorValue(value); __ Move(rcx, key->handle()); @@ -1617,8 +1626,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { } break; } - // Fall through. - case ObjectLiteral::Property::PROTOTYPE: __ push(Operand(rsp, 0)); // Duplicate receiver. 
VisitForStackValue(key); VisitForStackValue(value); @@ -1629,6 +1636,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { __ Drop(3); } break; + case ObjectLiteral::Property::PROTOTYPE: + __ push(Operand(rsp, 0)); // Duplicate receiver. + VisitForStackValue(value); + if (property->emit_store()) { + __ CallRuntime(Runtime::kSetPrototype, 2); + } else { + __ Drop(2); + } + break; case ObjectLiteral::Property::GETTER: accessor_table.lookup(key)->second->getter = value; break; @@ -1691,6 +1707,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1); FastCloneShallowArrayStub stub( FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, + DONT_TRACK_ALLOCATION_SITE, length); __ CallStub(&stub); } else if (expr->depth() > 1) { @@ -1700,12 +1717,19 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { } else { ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || FLAG_smi_only_arrays); + FastCloneShallowArrayStub::Mode mode = + FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; + AllocationSiteMode allocation_site_mode = FLAG_track_allocation_sites + ? TRACK_ALLOCATION_SITE : DONT_TRACK_ALLOCATION_SITE; + // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot // change, so it's possible to specialize the stub in advance. - FastCloneShallowArrayStub::Mode mode = has_constant_fast_elements - ? FastCloneShallowArrayStub::CLONE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); + if (has_constant_fast_elements) { + mode = FastCloneShallowArrayStub::CLONE_ELEMENTS; + allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; + } + + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); __ CallStub(&stub); } @@ -1909,7 +1933,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, __ bind(&stub_call); __ movq(rax, rcx); BinaryOpStub stub(op, mode); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); __ jmp(&done, Label::kNear); @@ -1959,7 +1983,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, __ pop(rdx); BinaryOpStub stub(op, mode); JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->BinaryOperationFeedbackId()); patch_site.EmitPatchInfo(); context()->Plug(rax); @@ -1967,7 +1991,7 @@ void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, void FullCodeGenerator::EmitAssignment(Expression* expr) { - // Invalid left-hand sides are rewritten to have a 'throw + // Invalid left-hand sides are rewritten by the parser to have a 'throw // ReferenceError' on the left-hand side. if (!expr->IsValidLeftHandSide()) { VisitForEffect(expr); @@ -2300,7 +2324,7 @@ void FullCodeGenerator::VisitCall(Call* expr) { VariableProxy* proxy = callee->AsVariableProxy(); Property* property = callee->AsProperty(); - if (proxy != NULL && proxy->var()->is_possibly_eval()) { + if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) { // In a call to eval, we first call %ResolvePossiblyDirectEval to // resolve the function we need to call and the receiver of the call. // Then we call the resolved function using the given arguments. 
@@ -2440,7 +2464,7 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { __ Move(rbx, cell); CallConstructStub stub(RECORD_CALL_TARGET); - __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL); + __ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); PrepareForBailoutForId(expr->ReturnId(), TOS_REG); context()->Plug(rax); } @@ -2595,7 +2619,7 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( __ CompareRoot(rcx, Heap::kHashTableMapRootIndex); __ j(equal, if_false); - // Look for valueOf symbol in the descriptor array, and indicate false if + // Look for valueOf string in the descriptor array, and indicate false if // found. Since we omit an enumeration index check, if it is added via a // transition that shares its descriptor array, this is a false positive. Label entry, loop, done; @@ -2617,11 +2641,11 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( // Calculate location of the first key name. __ addq(rbx, Immediate(DescriptorArray::kFirstOffset)); // Loop through all the keys in the descriptor array. If one of these is the - // symbol valueOf the result is false. + // internalized string "valueOf" the result is false. __ jmp(&entry); __ bind(&loop); __ movq(rdx, FieldOperand(rbx, 0)); - __ Cmp(rdx, FACTORY->value_of_symbol()); + __ Cmp(rdx, FACTORY->value_of_string()); __ j(equal, if_false); __ addq(rbx, Immediate(DescriptorArray::kDescriptorSize * kPointerSize)); __ bind(&entry); @@ -2654,6 +2678,28 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( } +void FullCodeGenerator::EmitIsSymbol(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT(args->length() == 1); + + VisitForAccumulatorValue(args->at(0)); + + Label materialize_true, materialize_false; + Label* if_true = NULL; + Label* if_false = NULL; + Label* fall_through = NULL; + context()->PrepareTest(&materialize_true, &materialize_false, + &if_true, &if_false, &fall_through); + + __ JumpIfSmi(rax, if_false); + __ CmpObjectType(rax, SYMBOL_TYPE, rbx); + PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); + Split(equal, if_true, if_false, fall_through); + + context()->Plug(if_true, if_false); +} + + void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { ZoneList<Expression*>* args = expr->arguments(); ASSERT(args->length() == 1); @@ -2856,12 +2902,12 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { // Functions have class 'Function'. __ bind(&function); - __ Move(rax, isolate()->factory()->function_class_symbol()); + __ Move(rax, isolate()->factory()->function_class_string()); __ jmp(&done); // Objects with a non-function constructor have class 'Object'. __ bind(&non_function_constructor); - __ Move(rax, isolate()->factory()->Object_symbol()); + __ Move(rax, isolate()->factory()->Object_string()); __ jmp(&done); // Non-JS objects have class null. 
@@ -3012,7 +3058,8 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } else { if (index->value() < JSDate::kFirstUncachedField) { ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); - __ movq(scratch, stamp); + Operand stamp_operand = __ ExternalOperand(stamp); + __ movq(scratch, stamp_operand); __ cmpq(scratch, FieldOperand(object, JSDate::kCacheStampOffset)); __ j(not_equal, &runtime, Label::kNear); __ movq(result, FieldOperand(object, JSDate::kValueOffset + @@ -3023,10 +3070,10 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { __ PrepareCallCFunction(2); #ifdef _WIN64 __ movq(rcx, object); - __ movq(rdx, index, RelocInfo::NONE); + __ movq(rdx, index, RelocInfo::NONE64); #else __ movq(rdi, object); - __ movq(rsi, index, RelocInfo::NONE); + __ movq(rsi, index, RelocInfo::NONE64); #endif __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); @@ -3040,6 +3087,38 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { } +void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(rcx); + __ pop(rbx); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::ONE_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx); + context()->Plug(rax); +} + + +void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { + ZoneList<Expression*>* args = expr->arguments(); + ASSERT_EQ(3, args->length()); + + VisitForStackValue(args->at(1)); // index + VisitForStackValue(args->at(2)); // value + __ pop(rcx); + __ pop(rbx); + VisitForAccumulatorValue(args->at(0)); // string + + static const String::Encoding encoding = String::TWO_BYTE_ENCODING; + SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx); + context()->Plug(rax); +} + + void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { // Load the arguments on the stack and call the runtime function. ZoneList<Expression*>* args = expr->arguments(); @@ -3189,7 +3268,7 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { __ bind(&index_out_of_range); // When the index is out of range, the spec requires us to return // the empty string. - __ LoadRoot(result, Heap::kEmptyStringRootIndex); + __ LoadRoot(result, Heap::kempty_stringRootIndex); __ jmp(&done); __ bind(&need_conversion); @@ -3515,7 +3594,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ movq(array_length, FieldOperand(array, JSArray::kLengthOffset)); __ SmiCompare(array_length, Smi::FromInt(0)); __ j(not_zero, &non_trivial_array); - __ LoadRoot(rax, Heap::kEmptyStringRootIndex); + __ LoadRoot(rax, Heap::kempty_stringRootIndex); __ jmp(&return_result); // Save the array length on the stack. 
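The one-byte fast path for Array.join shown above first walks the array to confirm that every element is a flat one-byte string and to total up the result length, adding the separator length times (array_length - 1), before allocating the destination string in one pass. Stripped of the overflow and representation checks, the arithmetic amounts to the following sketch (not the stub itself):

#include <cstddef>
#include <string>
#include <vector>

// Length of parts joined with separator: the sum of the part lengths plus one
// separator between each adjacent pair (overflow checks elided).
std::size_t JoinedLength(const std::vector<std::string>& parts,
                         const std::string& separator) {
  if (parts.empty()) return 0;
  std::size_t total = separator.size() * (parts.size() - 1);
  for (const std::string& part : parts) total += part.size();
  return total;
}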
@@ -3551,10 +3630,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); __ andb(scratch, Immediate( kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask)); - __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag)); + __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag)); __ j(not_equal, &bailout); __ AddSmiField(string_length, - FieldOperand(string, SeqAsciiString::kLengthOffset)); + FieldOperand(string, SeqOneByteString::kLengthOffset)); __ j(overflow, &bailout); __ incl(index); __ cmpl(index, array_length); @@ -3590,7 +3669,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset)); __ andb(scratch, Immediate( kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask)); - __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag)); + __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag)); __ j(not_equal, &bailout); // Live registers: @@ -3601,7 +3680,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { // Add (separator length times (array_length - 1)) to string_length. __ SmiToInteger32(scratch, - FieldOperand(string, SeqAsciiString::kLengthOffset)); + FieldOperand(string, SeqOneByteString::kLengthOffset)); __ decl(index); __ imull(scratch, index); __ j(overflow, &bailout); @@ -3614,10 +3693,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ AllocateAsciiString(result_pos, string_length, scratch, index, string, &bailout); __ movq(result_operand, result_pos); - __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize)); + __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize)); __ movq(string, separator_operand); - __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset), + __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset), Smi::FromInt(1)); __ j(equal, &one_char_separator); __ j(greater, &long_separator); @@ -3643,7 +3722,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiToInteger32(string_length, FieldOperand(string, String::kLengthOffset)); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(result_pos, string, string_length); __ incl(index); __ bind(&loop_1_condition); @@ -3661,7 +3740,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ bind(&one_char_separator); // Get the separator ASCII character value. // Register "string" holds the separator. 
- __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize)); + __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize)); __ Set(index, 0); // Jump into the loop after the code that copies the separator, so the first // element is not preceded by a separator @@ -3687,7 +3766,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiToInteger32(string_length, FieldOperand(string, String::kLengthOffset)); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(result_pos, string, string_length); __ incl(index); __ cmpl(index, array_length_operand); @@ -3712,7 +3791,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiToInteger32(scratch, FieldOperand(string, String::kLengthOffset)); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ movq(separator_operand, string); // Jump into the loop after the code that copies the separator, so the first @@ -3738,7 +3817,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { __ SmiToInteger32(string_length, FieldOperand(string, String::kLengthOffset)); __ lea(string, - FieldOperand(string, SeqAsciiString::kHeaderSize)); + FieldOperand(string, SeqOneByteString::kHeaderSize)); __ CopyBytes(result_pos, string, string_length); __ incq(index); __ j(not_equal, &loop_3); // Loop while (index < 0). @@ -3941,7 +4020,7 @@ void FullCodeGenerator::EmitUnaryOperation(UnaryOperation* expr, // accumulator register rax. VisitForAccumulatorValue(expr->expression()); SetSourcePosition(expr->position()); - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, + CallIC(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, expr->UnaryOperationFeedbackId()); context()->Plug(rax); } @@ -4056,14 +4135,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { SetSourcePosition(expr->position()); // Call stub for +1/-1. + __ movq(rdx, rax); + __ Move(rax, Smi::FromInt(1)); BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE); - if (expr->op() == Token::INC) { - __ Move(rdx, Smi::FromInt(1)); - } else { - __ movq(rdx, rax); - __ Move(rax, Smi::FromInt(1)); - } - CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId()); + CallIC(stub.GetCode(isolate()), + RelocInfo::CODE_TARGET, + expr->CountBinOpFeedbackId()); patch_site.EmitPatchInfo(); __ bind(&done); @@ -4181,12 +4258,12 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, } PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); - if (check->Equals(isolate()->heap()->number_symbol())) { + if (check->Equals(isolate()->heap()->number_string())) { __ JumpIfSmi(rax, if_true); __ movq(rax, FieldOperand(rax, HeapObject::kMapOffset)); __ CompareRoot(rax, Heap::kHeapNumberMapRootIndex); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->string_symbol())) { + } else if (check->Equals(isolate()->heap()->string_string())) { __ JumpIfSmi(rax, if_false); // Check for undetectable objects => false. 
__ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdx); @@ -4194,16 +4271,16 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ testb(FieldOperand(rdx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); Split(zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->boolean_symbol())) { + } else if (check->Equals(isolate()->heap()->boolean_string())) { __ CompareRoot(rax, Heap::kTrueValueRootIndex); __ j(equal, if_true); __ CompareRoot(rax, Heap::kFalseValueRootIndex); Split(equal, if_true, if_false, fall_through); } else if (FLAG_harmony_typeof && - check->Equals(isolate()->heap()->null_symbol())) { + check->Equals(isolate()->heap()->null_string())) { __ CompareRoot(rax, Heap::kNullValueRootIndex); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->undefined_symbol())) { + } else if (check->Equals(isolate()->heap()->undefined_string())) { __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); __ j(equal, if_true); __ JumpIfSmi(rax, if_false); @@ -4212,19 +4289,23 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, __ testb(FieldOperand(rdx, Map::kBitFieldOffset), Immediate(1 << Map::kIsUndetectable)); Split(not_zero, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->function_symbol())) { + } else if (check->Equals(isolate()->heap()->function_string())) { __ JumpIfSmi(rax, if_false); STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ CmpObjectType(rax, JS_FUNCTION_TYPE, rdx); __ j(equal, if_true); __ CmpInstanceType(rdx, JS_FUNCTION_PROXY_TYPE); Split(equal, if_true, if_false, fall_through); - } else if (check->Equals(isolate()->heap()->object_symbol())) { + } else if (check->Equals(isolate()->heap()->object_string())) { __ JumpIfSmi(rax, if_false); if (!FLAG_harmony_typeof) { __ CompareRoot(rax, Heap::kNullValueRootIndex); __ j(equal, if_true); } + if (FLAG_harmony_symbols) { + __ CmpObjectType(rax, SYMBOL_TYPE, rdx); + __ j(equal, if_true); + } __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx); __ j(below, if_false); __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); @@ -4281,29 +4362,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { default: { VisitForAccumulatorValue(expr->right()); - Condition cc = no_condition; - switch (op) { - case Token::EQ_STRICT: - case Token::EQ: - cc = equal; - break; - case Token::LT: - cc = less; - break; - case Token::GT: - cc = greater; - break; - case Token::LTE: - cc = less_equal; - break; - case Token::GTE: - cc = greater_equal; - break; - case Token::IN: - case Token::INSTANCEOF: - default: - UNREACHABLE(); - } + Condition cc = CompareIC::ComputeCondition(op); __ pop(rdx); bool inline_smi_code = ShouldInlineSmiCase(op); @@ -4320,7 +4379,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { // Record position and call the compare IC. 
SetSourcePosition(expr->position()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallIC(ic, RelocInfo::CODE_TARGET, expr->CompareOperationFeedbackId()); patch_site.EmitPatchInfo(); diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc index efa07a80b5..0a9ceaa865 100644 --- a/deps/v8/src/x64/ic-x64.cc +++ b/deps/v8/src/x64/ic-x64.cc @@ -60,11 +60,11 @@ static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, // Generated code falls through if the receiver is a regular non-global // JS object with slow properties and no interceptors. -static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, - Register receiver, - Register r0, - Register r1, - Label* miss) { +static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm, + Register receiver, + Register r0, + Register r1, + Label* miss) { // Register usage: // receiver: holds the receiver on entry and is unchanged. // r0: used to hold receiver instance type. @@ -101,8 +101,8 @@ static void GenerateStringDictionaryReceiverCheck(MacroAssembler* masm, // Helper function used to load a property from a dictionary backing storage. // This function may return false negatives, so miss_label // must always call a backup property load that is complete. -// This function is safe to call if name is not a symbol, and will jump to -// the miss_label in that case. +// This function is safe to call if name is not an internalized string, +// and will jump to the miss_label in that case. // The generated code assumes that the receiver has slow properties, // is not a global object and does not have interceptors. static void GenerateDictionaryLoad(MacroAssembler* masm, @@ -127,21 +127,21 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label done; // Probe the dictionary. - StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - r0, - r1); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss_label, + &done, + elements, + name, + r0, + r1); // If probing finds an entry in the dictionary, r1 contains the // index into the dictionary. Check that the value is a normal // property. __ bind(&done); const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; __ Test(Operand(elements, r1, times_pointer_size, kDetailsOffset - kHeapObjectTag), @@ -160,8 +160,8 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, // storage. This function may fail to store a property even though it // is in the dictionary, so code at miss_label must always call a // backup property store that is complete. This function is safe to -// call if name is not a symbol, and will jump to the miss_label in -// that case. The generated code assumes that the receiver has slow +// call if name is not an internalized string, and will jump to the miss_label +// in that case. The generated code assumes that the receiver has slow // properties, is not a global object and does not have interceptors. static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label, @@ -184,21 +184,21 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label done; // Probe the dictionary. 
- StringDictionaryLookupStub::GeneratePositiveLookup(masm, - miss_label, - &done, - elements, - name, - scratch0, - scratch1); + NameDictionaryLookupStub::GeneratePositiveLookup(masm, + miss_label, + &done, + elements, + name, + scratch0, + scratch1); // If probing finds an entry in the dictionary, scratch0 contains the // index into the dictionary. Check that the value is a normal // property that is not read only. __ bind(&done); const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; const int kTypeAndReadOnlyMask = (PropertyDetails::TypeField::kMask | @@ -224,49 +224,6 @@ static void GenerateDictionaryStore(MacroAssembler* masm, } -void LoadIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadArrayLength(masm, rax, rdx, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateStringLength(MacroAssembler* masm, bool support_wrappers) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadStringLength(masm, rax, rdx, rbx, &miss, - support_wrappers); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - -void LoadIC::GenerateFunctionPrototype(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - StubCompiler::GenerateLoadFunctionPrototype(masm, rax, rdx, rbx, &miss); - __ bind(&miss); - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); -} - - // Checks the receiver for special cases (value type, slow case bits). // Falls through for regular JS object. static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, @@ -356,31 +313,37 @@ static void GenerateFastArrayLoad(MacroAssembler* masm, } -// Checks whether a key is an array index string or a symbol string. -// Falls through if the key is a symbol. -static void GenerateKeyStringCheck(MacroAssembler* masm, - Register key, - Register map, - Register hash, - Label* index_string, - Label* not_symbol) { +// Checks whether a key is an array index string or a unique name. +// Falls through if the key is a unique name. +static void GenerateKeyNameCheck(MacroAssembler* masm, + Register key, + Register map, + Register hash, + Label* index_string, + Label* not_unique) { // Register use: // key - holds the key and is unchanged. Assumed to be non-smi. // Scratch registers: // map - used to hold the map of the key. // hash - used to hold the hash of the key. - __ CmpObjectType(key, FIRST_NONSTRING_TYPE, map); - __ j(above_equal, not_symbol); + Label unique; + __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map); + __ j(above, not_unique); + STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); + __ j(equal, &unique); + // Is the string an array index, with cached numeric value? 
- __ movl(hash, FieldOperand(key, String::kHashFieldOffset)); - __ testl(hash, Immediate(String::kContainsCachedArrayIndexMask)); + __ movl(hash, FieldOperand(key, Name::kHashFieldOffset)); + __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask)); __ j(zero, index_string); // The value in hash is used at jump target. - // Is the string a symbol? - STATIC_ASSERT(kSymbolTag != 0); + // Is the string internalized? + STATIC_ASSERT(kInternalizedTag != 0); __ testb(FieldOperand(map, Map::kInstanceTypeOffset), - Immediate(kIsSymbolMask)); - __ j(zero, not_symbol); + Immediate(kIsInternalizedMask)); + __ j(zero, not_unique); + + __ bind(&unique); } @@ -391,11 +354,11 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Label slow, check_string, index_smi, index_string, property_array_property; + Label slow, check_name, index_smi, index_name, property_array_property; Label probe_dictionary, check_number_dictionary; // Check that the key is a smi. - __ JumpIfNotSmi(rax, &check_string); + __ JumpIfNotSmi(rax, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below // where a numeric string is converted to a smi. @@ -440,8 +403,8 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ IncrementCounter(counters->keyed_load_generic_slow(), 1); GenerateRuntimeGetProperty(masm); - __ bind(&check_string); - GenerateKeyStringCheck(masm, rax, rcx, rbx, &index_string, &slow); + __ bind(&check_name); + GenerateKeyNameCheck(masm, rax, rcx, rbx, &index_name, &slow); GenerateKeyedLoadReceiverCheck( masm, rdx, rcx, Map::kHasNamedInterceptor, &slow); @@ -464,7 +427,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask); __ and_(rcx, Immediate(mask)); - // Load the key (consisting of map and symbol) from the cache and + // Load the key (consisting of map and internalized string) from the cache and // check for match. 
Label load_in_object_property; static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; @@ -542,7 +505,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { __ IncrementCounter(counters->keyed_load_generic_symbol(), 1); __ ret(0); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(rbx, rax); __ jmp(&index_smi); } @@ -576,7 +539,7 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) { char_at_generator.GenerateSlow(masm, call_helper); __ bind(&miss); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -619,7 +582,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { 1); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -709,7 +672,9 @@ static void KeyedStoreGenerateGenericHelper( rbx, rdi, slow); - ElementsTransitionGenerator::GenerateSmiToDouble(masm, slow); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ jmp(&fast_double_without_map_check); @@ -720,7 +685,9 @@ static void KeyedStoreGenerateGenericHelper( rbx, rdi, slow); - ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm); + mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode, + slow); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ jmp(&finish_object_store); @@ -734,7 +701,8 @@ static void KeyedStoreGenerateGenericHelper( rbx, rdi, slow); - ElementsTransitionGenerator::GenerateDoubleToObject(masm, slow); + mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow); __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); __ jmp(&finish_object_store); } @@ -938,7 +906,7 @@ void CallICBase::GenerateNormal(MacroAssembler* masm, int argc) { // Get the receiver of the function from the stack. __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); - GenerateStringDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss); + GenerateNameDictionaryReceiverCheck(masm, rdx, rax, rbx, &miss); // rax: elements // Search the dictionary placing the result in rdi. @@ -1058,11 +1026,11 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize)); Label do_call, slow_call, slow_load; - Label check_number_dictionary, check_string, lookup_monomorphic_cache; - Label index_smi, index_string; + Label check_number_dictionary, check_name, lookup_monomorphic_cache; + Label index_smi, index_name; // Check that the key is a smi. - __ JumpIfNotSmi(rcx, &check_string); + __ JumpIfNotSmi(rcx, &check_name); __ bind(&index_smi); // Now the key is known to be a smi. This place is also jumped to from below @@ -1110,10 +1078,10 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ movq(rdi, rax); __ jmp(&do_call); - __ bind(&check_string); - GenerateKeyStringCheck(masm, rcx, rax, rbx, &index_string, &slow_call); + __ bind(&check_name); + GenerateKeyNameCheck(masm, rcx, rax, rbx, &index_name, &slow_call); - // The key is known to be a symbol. + // The key is known to be a unique name. // If the receiver is a regular JS object with slow properties then do // a quick inline probe of the receiver's dictionary. // Otherwise do the monomorphic cache probe. 
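GenerateKeyNameCheck above classifies a non-smi key three ways: a name whose hash field still carries a cached array index is converted back to a numeric index, an internalized (unique) name takes the dictionary or stub-cache path, and anything else falls through to the slow case. A small C++ sketch of that decision, with a placeholder bit layout:

#include <cstdint>

struct NameSketch {
  uint32_t hash_field;
  bool is_internalized;
  static constexpr uint32_t kContainsCachedArrayIndexMask = 1u << 0;  // placeholder bit
  static constexpr uint32_t kArrayIndexShift = 2;                     // placeholder layout
};

enum class KeyKind { kArrayIndex, kUniqueName, kSlow };

inline KeyKind ClassifyKey(const NameSketch& key, uint32_t* index_out) {
  // A zero result means the hash field contains a cached array index
  // (testl + j(zero, index_name) in the generated code).
  if ((key.hash_field & NameSketch::kContainsCachedArrayIndexMask) == 0) {
    *index_out = key.hash_field >> NameSketch::kArrayIndexShift;  // IndexFromHash
    return KeyKind::kArrayIndex;
  }
  if (key.is_internalized) return KeyKind::kUniqueName;  // testb kIsInternalizedMask
  return KeyKind::kSlow;                                 // jump to not_unique / slow
}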
@@ -1140,14 +1108,14 @@ void KeyedCallIC::GenerateMegamorphic(MacroAssembler* masm, int argc) { __ bind(&slow_call); // This branch is taken if: // - the receiver requires boxing or access check, - // - the key is neither smi nor symbol, + // - the key is neither smi nor a unique name, // - the value loaded is not a function, // - there is hope that the runtime will create a monomorphic call stub // that will get fetched next time. __ IncrementCounter(counters->keyed_call_generic_slow(), 1); GenerateMiss(masm, argc); - __ bind(&index_string); + __ bind(&index_name); __ IndexFromHash(rbx, rcx); // Now jump to the place where smi keys are handled. __ jmp(&index_smi); @@ -1165,10 +1133,10 @@ void KeyedCallIC::GenerateNormal(MacroAssembler* masm, int argc) { // rsp[(argc + 1) * 8] : argument 0 = receiver // ----------------------------------- - // Check if the name is a string. + // Check if the name is really a name. Label miss; __ JumpIfSmi(rcx, &miss); - Condition cond = masm->IsObjectStringType(rcx, rax, rax); + Condition cond = masm->IsObjectNameType(rcx, rax, rax); __ j(NegateCondition(cond), &miss); CallICBase::GenerateNormal(masm, argc); __ bind(&miss); @@ -1278,7 +1246,7 @@ void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) { __ movq(rax, unmapped_location); __ Ret(); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -1317,7 +1285,7 @@ void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) { INLINE_SMI_CHECK); __ Ret(); __ bind(&slow); - GenerateMiss(masm, false); + GenerateMiss(masm, MISS); } @@ -1359,12 +1327,13 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { // ----------------------------------- // Probe the stub cache. - Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, MONOMORPHIC); - Isolate::Current()->stub_cache()->GenerateProbe(masm, flags, rax, rcx, rbx, - rdx); + Code::Flags flags = Code::ComputeFlags( + Code::STUB, MONOMORPHIC, Code::kNoExtraICState, + Code::NORMAL, Code::LOAD_IC); + Isolate::Current()->stub_cache()->GenerateProbe( + masm, flags, rax, rcx, rbx, rdx); - // Cache miss: Jump to runtime. - StubCompiler::GenerateLoadMiss(masm, Code::LOAD_IC); + GenerateMiss(masm); } @@ -1376,7 +1345,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) { // ----------------------------------- Label miss; - GenerateStringDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss); + GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss); // rdx: elements // Search the dictionary placing the result in rax. @@ -1411,7 +1380,7 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) { } -void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ----------- S t a t e ------------- // -- rax : key // -- rdx : receiver @@ -1427,7 +1396,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { __ push(rbx); // return address // Perform tail call to the entry. - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? 
ExternalReference(IC_Utility(kKeyedLoadIC_MissForceGeneric), masm->isolate()) : ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate()); @@ -1493,65 +1462,6 @@ void StoreIC::GenerateMiss(MacroAssembler* masm) { } -void StoreIC::GenerateArrayLength(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - // - // This accepts as a receiver anything JSArray::SetElementsLength accepts - // (currently anything except for external arrays which means anything with - // elements of FixedArray type). Value must be a number, but only smis are - // accepted as the most common case. - - Label miss; - - Register receiver = rdx; - Register value = rax; - Register scratch = rbx; - - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, &miss); - - // Check that the object is a JS array. - __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch); - __ j(not_equal, &miss); - - // Check that elements are FixedArray. - // We rely on StoreIC_ArrayLength below to deal with all types of - // fast elements (including COW). - __ movq(scratch, FieldOperand(receiver, JSArray::kElementsOffset)); - __ CmpObjectType(scratch, FIXED_ARRAY_TYPE, scratch); - __ j(not_equal, &miss); - - // Check that the array has fast properties, otherwise the length - // property might have been redefined. - __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset)); - __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset), - Heap::kHashTableMapRootIndex); - __ j(equal, &miss); - - // Check that value is a smi. - __ JumpIfNotSmi(value, &miss); - - // Prepare tail call to StoreIC_ArrayLength. - __ pop(scratch); - __ push(receiver); - __ push(value); - __ push(scratch); // return address - - ExternalReference ref = - ExternalReference(IC_Utility(kStoreIC_ArrayLength), masm->isolate()); - __ TailCallExternalReference(ref, 2, 1); - - __ bind(&miss); - - GenerateMiss(masm); -} - - void StoreIC::GenerateNormal(MacroAssembler* masm) { // ----------- S t a t e ------------- // -- rax : value @@ -1562,7 +1472,7 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) { Label miss; - GenerateStringDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss); + GenerateNameDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss); GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9); Counters* counters = masm->isolate()->counters(); @@ -1638,7 +1548,7 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { } -void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { +void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, ICMissMode miss_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key @@ -1653,7 +1563,7 @@ void KeyedStoreIC::GenerateMiss(MacroAssembler* masm, bool force_generic) { __ push(rbx); // return address // Do tail-call to runtime routine. - ExternalReference ref = force_generic + ExternalReference ref = miss_mode == MISS_FORCE_GENERIC ? ExternalReference(IC_Utility(kKeyedStoreIC_MissForceGeneric), masm->isolate()) : ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); @@ -1670,7 +1580,9 @@ void KeyedStoreIC::GenerateTransitionElementsSmiToDouble(MacroAssembler* masm) { // Must return the modified receiver in eax. 
if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateSmiToDouble(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_SMI_ELEMENTS, + FAST_DOUBLE_ELEMENTS); + ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, &fail); __ movq(rax, rdx); __ Ret(); __ bind(&fail); @@ -1693,7 +1605,9 @@ void KeyedStoreIC::GenerateTransitionElementsDoubleToObject( // Must return the modified receiver in eax. if (!FLAG_trace_elements_transitions) { Label fail; - ElementsTransitionGenerator::GenerateDoubleToObject(masm, &fail); + AllocationSiteMode mode = AllocationSiteInfo::GetMode(FAST_DOUBLE_ELEMENTS, + FAST_ELEMENTS); + ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, &fail); __ movq(rax, rdx); __ Ret(); __ bind(&fail); @@ -1729,7 +1643,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) { } -static bool HasInlinedSmiCode(Address address) { +bool CompareIC::HasInlinedSmiCode(Address address) { // The address of the instruction following the call. Address test_instruction_address = address + Assembler::kCallTargetAddressOffset; @@ -1740,39 +1654,6 @@ static bool HasInlinedSmiCode(Address address) { } -void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) { - HandleScope scope; - Handle<Code> rewritten; - State previous_state = GetState(); - - State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y); - if (state == GENERIC) { - CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS); - rewritten = stub.GetCode(); - } else { - ICCompareStub stub(op_, state); - if (state == KNOWN_OBJECTS) { - stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map())); - } - rewritten = stub.GetCode(); - } - set_target(*rewritten); - -#ifdef DEBUG - if (FLAG_trace_ic) { - PrintF("[CompareIC (%s->%s)#%s]\n", - GetStateName(previous_state), - GetStateName(state), - Token::Name(op_)); - } -#endif - - // Activate inlined smi code. - if (previous_state == UNINITIALIZED) { - PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK); - } -} - void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { // The address of the instruction following the call. Address test_instruction_address = diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc index b461e62903..b9bd30bb16 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.cc +++ b/deps/v8/src/x64/lithium-codegen-x64.cc @@ -1,4 +1,4 @@ -// Copyright 2012 the V8 project authors. All rights reserved. +// Copyright 2013 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: @@ -88,7 +88,14 @@ void LCodeGen::FinishCode(Handle<Code> code) { ASSERT(is_done()); code->set_stack_slots(GetStackSlotCount()); code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); + if (FLAG_weak_embedded_maps_in_optimized_code) { + RegisterDependentCodeForEmbeddedMaps(code); + } PopulateDeoptimizationData(code); + for (int i = 0 ; i < prototype_maps_.length(); i++) { + prototype_maps_.at(i)->AddDependentCode( + DependentCode::kPrototypeCheckGroup, code); + } } @@ -119,46 +126,61 @@ void LCodeGen::Comment(const char* format, ...) 
{ bool LCodeGen::GeneratePrologue() { ASSERT(is_generating()); - ProfileEntryHookStub::MaybeCallEntryHook(masm_); + if (info()->IsOptimizing()) { + ProfileEntryHookStub::MaybeCallEntryHook(masm_); #ifdef DEBUG - if (strlen(FLAG_stop_at) > 0 && - info_->function()->name()->IsEqualTo(CStrVector(FLAG_stop_at))) { - __ int3(); - } + if (strlen(FLAG_stop_at) > 0 && + info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { + __ int3(); + } #endif - // Strict mode functions need to replace the receiver with undefined - // when called as functions (without an explicit receiver - // object). rcx is zero for method calls and non-zero for function - // calls. - if (!info_->is_classic_mode() || info_->is_native()) { - Label ok; - __ testq(rcx, rcx); - __ j(zero, &ok, Label::kNear); - // +1 for return address. - int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; - __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); - __ movq(Operand(rsp, receiver_offset), kScratchRegister); - __ bind(&ok); + // Strict mode functions need to replace the receiver with undefined + // when called as functions (without an explicit receiver + // object). rcx is zero for method calls and non-zero for function + // calls. + if (!info_->is_classic_mode() || info_->is_native()) { + Label ok; + __ testq(rcx, rcx); + __ j(zero, &ok, Label::kNear); + // +1 for return address. + int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize; + __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex); + __ movq(Operand(rsp, receiver_offset), kScratchRegister); + __ bind(&ok); + } } - __ push(rbp); // Caller's frame pointer. - __ movq(rbp, rsp); - __ push(rsi); // Callee's context. - __ push(rdi); // Callee's JS function. + info()->set_prologue_offset(masm_->pc_offset()); + if (NeedsEagerFrame()) { + ASSERT(!frame_is_built_); + frame_is_built_ = true; + __ push(rbp); // Caller's frame pointer. + __ movq(rbp, rsp); + __ push(rsi); // Callee's context. + if (info()->IsStub()) { + __ Push(Smi::FromInt(StackFrame::STUB)); + } else { + __ push(rdi); // Callee's JS function. + } + } // Reserve space for the stack slots needed by the code. int slots = GetStackSlotCount(); if (slots > 0) { if (FLAG_debug_code) { + __ subq(rsp, Immediate(slots * kPointerSize)); + __ push(rax); __ Set(rax, slots); - __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE); + __ movq(kScratchRegister, kSlotsZapValue, RelocInfo::NONE64); Label loop; __ bind(&loop); - __ push(kScratchRegister); + __ movq(MemOperand(rsp, rax, times_pointer_size, 0), + kScratchRegister); __ decl(rax); __ j(not_zero, &loop); + __ pop(rax); } else { __ subq(rsp, Immediate(slots * kPointerSize)); #ifdef _MSC_VER @@ -173,10 +195,23 @@ bool LCodeGen::GeneratePrologue() { } #endif } + + if (info()->saves_caller_doubles()) { + Comment(";;; Save clobbered callee double registers"); + int count = 0; + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + while (!save_iterator.Done()) { + __ movsd(MemOperand(rsp, count * kDoubleSize), + XMMRegister::FromAllocationIndex(save_iterator.Current())); + save_iterator.Advance(); + count++; + } + } } // Possibly allocate a local context. - int heap_slots = scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; + int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; if (heap_slots > 0) { Comment(";;; Allocate local context"); // Argument to NewContext is the function, which is still in rdi. 
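The reworked prologue above can now spill the callee-clobbered double registers by walking a BitVector of allocated registers and giving each a consecutive stack slot. A compact sketch of that loop; the register count, slot size, and spill callback are assumptions standing in for the movsd stores:

#include <bitset>
#include <cstddef>
#include <functional>

constexpr std::size_t kNumDoubleRegisters = 16;  // xmm0..xmm15 on x64
constexpr std::size_t kDoubleSize = 8;

inline void SaveClobberedDoubles(
    const std::bitset<kNumDoubleRegisters>& allocated,
    const std::function<void(std::size_t reg, std::size_t offset)>& spill) {
  std::size_t count = 0;
  for (std::size_t reg = 0; reg < kNumDoubleRegisters; ++reg) {
    if (!allocated.test(reg)) continue;   // BitVector::Iterator skips clear bits
    spill(reg, count * kDoubleSize);      // movsd [rsp + count * kDoubleSize], xmm<reg>
    ++count;
  }
}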
@@ -212,7 +247,7 @@ bool LCodeGen::GeneratePrologue() { } // Trace the call. - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { __ CallRuntime(Runtime::kTraceEnter, 0); } return !is_aborted(); @@ -232,7 +267,30 @@ bool LCodeGen::GenerateBody() { } if (emit_instructions) { - Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + if (FLAG_code_comments) { + HValue* hydrogen = instr->hydrogen_value(); + if (hydrogen != NULL) { + if (hydrogen->IsChange()) { + HValue* changed_value = HChange::cast(hydrogen)->value(); + int use_id = 0; + const char* use_mnemo = "dead"; + if (hydrogen->UseCount() >= 1) { + HValue* use_value = hydrogen->uses().value(); + use_id = use_value->id(); + use_mnemo = use_value->Mnemonic(); + } + Comment(";;; @%d: %s. <of #%d %s for #%d %s>", + current_instruction_, instr->Mnemonic(), + changed_value->id(), changed_value->Mnemonic(), + use_id, use_mnemo); + } else { + Comment(";;; @%d: %s. <#%d>", current_instruction_, + instr->Mnemonic(), hydrogen->id()); + } + } else { + Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic()); + } + } instr->CompileToNative(this); } } @@ -242,9 +300,64 @@ bool LCodeGen::GenerateBody() { bool LCodeGen::GenerateJumpTable() { + Label needs_frame_not_call; + Label needs_frame_is_call; for (int i = 0; i < jump_table_.length(); i++) { __ bind(&jump_table_[i].label); - __ Jump(jump_table_[i].address, RelocInfo::RUNTIME_ENTRY); + Address entry = jump_table_[i].address; + bool is_lazy_deopt = jump_table_[i].is_lazy_deopt; + Deoptimizer::BailoutType type = + is_lazy_deopt ? Deoptimizer::LAZY : Deoptimizer::EAGER; + int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); + if (id == Deoptimizer::kNotDeoptimizationEntry) { + Comment(";;; jump table entry %d.", i); + } else { + Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); + } + if (jump_table_[i].needs_frame) { + __ movq(kScratchRegister, ExternalReference::ForDeoptEntry(entry)); + if (is_lazy_deopt) { + if (needs_frame_is_call.is_bound()) { + __ jmp(&needs_frame_is_call); + } else { + __ bind(&needs_frame_is_call); + __ push(rbp); + __ movq(rbp, rsp); + __ push(rsi); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. + ASSERT(info()->IsStub()); + __ Move(rsi, Smi::FromInt(StackFrame::STUB)); + __ push(rsi); + __ movq(rsi, MemOperand(rsp, kPointerSize)); + __ call(kScratchRegister); + } + } else { + if (needs_frame_not_call.is_bound()) { + __ jmp(&needs_frame_not_call); + } else { + __ bind(&needs_frame_not_call); + __ push(rbp); + __ movq(rbp, rsp); + __ push(rsi); + // This variant of deopt can only be used with stubs. Since we don't + // have a function pointer to install in the stack frame that we're + // building, install a special marker there instead. 
+ ASSERT(info()->IsStub()); + __ Move(rsi, Smi::FromInt(StackFrame::STUB)); + __ push(rsi); + __ movq(rsi, MemOperand(rsp, kPointerSize)); + __ jmp(kScratchRegister); + } + } + } else { + if (is_lazy_deopt) { + __ call(entry, RelocInfo::RUNTIME_ENTRY); + } else { + __ jmp(entry, RelocInfo::RUNTIME_ENTRY); + } + } } return !is_aborted(); } @@ -256,10 +369,32 @@ bool LCodeGen::GenerateDeferredCode() { for (int i = 0; !is_aborted() && i < deferred_.length(); i++) { LDeferredCode* code = deferred_[i]; __ bind(code->entry()); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred build frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(!frame_is_built_); + ASSERT(info()->IsStub()); + frame_is_built_ = true; + // Build the frame in such a way that esi isn't trashed. + __ push(rbp); // Caller's frame pointer. + __ push(Operand(rbp, StandardFrameConstants::kContextOffset)); + __ Push(Smi::FromInt(StackFrame::STUB)); + __ lea(rbp, Operand(rsp, 2 * kPointerSize)); + } Comment(";;; Deferred code @%d: %s.", code->instruction_index(), code->instr()->Mnemonic()); code->Generate(); + if (NeedsDeferredFrame()) { + Comment(";;; Deferred destroy frame", + code->instruction_index(), + code->instr()->Mnemonic()); + ASSERT(frame_is_built_); + frame_is_built_ = false; + __ movq(rsp, rbp); + __ pop(rbp); + } __ jmp(code->exit()); } } @@ -314,8 +449,6 @@ bool LCodeGen::IsTaggedConstant(LConstantOperand* op) const { int LCodeGen::ToInteger32(LConstantOperand* op) const { HConstant* constant = chunk_->LookupConstant(op); - ASSERT(chunk_->LookupLiteralRepresentation(op).IsInteger32()); - ASSERT(constant->HasInteger32Value()); return constant->Integer32Value(); } @@ -338,22 +471,14 @@ Operand LCodeGen::ToOperand(LOperand* op) const { // Does not handle registers. In X64 assembler, plain registers are not // representable as an Operand. ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot()); - int index = op->index(); - if (index >= 0) { - // Local or spill slot. Skip the frame pointer, function, and - // context in the fixed part of the frame. - return Operand(rbp, -(index + 3) * kPointerSize); - } else { - // Incoming parameter. Skip the return address. - return Operand(rbp, -(index - 1) * kPointerSize); - } + return Operand(rbp, StackSlotOffset(op->index())); } void LCodeGen::WriteTranslation(LEnvironment* environment, Translation* translation, - int* arguments_index, - int* arguments_count) { + int* pushed_arguments_index, + int* pushed_arguments_count) { if (environment == NULL) return; // The translation includes one command per value in the environment. @@ -365,14 +490,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, // arguments index points to the first element of a sequence of tagged // values on the stack that represent the arguments. This needs to be // kept in sync with the LArgumentsElements implementation. - *arguments_index = -environment->parameter_count(); - *arguments_count = environment->parameter_count(); + *pushed_arguments_index = -environment->parameter_count(); + *pushed_arguments_count = environment->parameter_count(); WriteTranslation(environment->outer(), translation, - arguments_index, - arguments_count); - int closure_id = *info()->closure() != *environment->closure() + pushed_arguments_index, + pushed_arguments_count); + bool has_closure_id = !info()->closure().is_null() && + *info()->closure() != *environment->closure(); + int closure_id = has_closure_id ? 
DefineDeoptimizationLiteral(environment->closure()) : Translation::kSelfLiteralId; @@ -396,16 +523,26 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, case ARGUMENTS_ADAPTOR: translation->BeginArgumentsAdaptorFrame(closure_id, translation_size); break; + case STUB: + translation->BeginCompiledStubFrame(); + break; } // Inlined frames which push their arguments cause the index to be - // bumped and a new stack area to be used for materialization. - if (environment->entry() != NULL && - environment->entry()->arguments_pushed()) { - *arguments_index = *arguments_index < 0 - ? GetStackSlotCount() - : *arguments_index + *arguments_count; - *arguments_count = environment->entry()->arguments_count() + 1; + // bumped and another stack area to be used for materialization, + // otherwise actual argument values are unknown for inlined frames. + bool arguments_known = true; + int arguments_index = *pushed_arguments_index; + int arguments_count = *pushed_arguments_count; + if (environment->entry() != NULL) { + arguments_known = environment->entry()->arguments_pushed(); + arguments_index = arguments_index < 0 + ? GetStackSlotCount() : arguments_index + arguments_count; + arguments_count = environment->entry()->arguments_count() + 1; + if (environment->entry()->arguments_pushed()) { + *pushed_arguments_index = arguments_index; + *pushed_arguments_count = arguments_count; + } } for (int i = 0; i < translation_size; ++i) { @@ -420,8 +557,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_registers()[value->index()], environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } else if ( value->IsDoubleRegister() && environment->spilled_double_registers()[value->index()] != NULL) { @@ -431,8 +569,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, environment->spilled_double_registers()[value->index()], false, false, - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -440,8 +579,9 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, value, environment->HasTaggedValueAt(i), environment->HasUint32ValueAt(i), - *arguments_index, - *arguments_count); + arguments_known, + arguments_index, + arguments_count); } } @@ -450,13 +590,15 @@ void LCodeGen::AddToTranslation(Translation* translation, LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count) { if (op == NULL) { // TODO(twuerthinger): Introduce marker operands to indicate that this value // is not present and must be reconstructed from the deoptimizer. Currently // this is only used for the arguments object. - translation->StoreArgumentsObject(arguments_index, arguments_count); + translation->StoreArgumentsObject( + arguments_known, arguments_index, arguments_count); } else if (op->IsStackSlot()) { if (is_tagged) { translation->StoreStackSlot(op->index()); @@ -586,22 +728,76 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) { RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); ASSERT(environment->HasBeenRegistered()); int id = environment->deoptimization_index(); - Address entry = Deoptimizer::GetDeoptimizationEntry(id, Deoptimizer::EAGER); + ASSERT(info()->IsOptimizing() || info()->IsStub()); + Deoptimizer::BailoutType bailout_type = info()->IsStub() + ? 
Deoptimizer::LAZY + : Deoptimizer::EAGER; + Address entry = + Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); if (entry == NULL) { Abort("bailout was not prepared"); return; } - if (cc == no_condition) { - __ Jump(entry, RelocInfo::RUNTIME_ENTRY); + ASSERT(FLAG_deopt_every_n_times == 0); // Not yet implemented on x64. + + if (FLAG_trap_on_deopt) { + Label done; + if (cc != no_condition) { + __ j(NegateCondition(cc), &done, Label::kNear); + } + __ int3(); + __ bind(&done); + } + + ASSERT(info()->IsStub() || frame_is_built_); + bool needs_lazy_deopt = info()->IsStub(); + if (cc == no_condition && frame_is_built_) { + if (needs_lazy_deopt) { + __ call(entry, RelocInfo::RUNTIME_ENTRY); + } else { + __ jmp(entry, RelocInfo::RUNTIME_ENTRY); + } } else { // We often have several deopts to the same entry, reuse the last // jump entry if this is the case. if (jump_table_.is_empty() || - jump_table_.last().address != entry) { - jump_table_.Add(JumpTableEntry(entry), zone()); + jump_table_.last().address != entry || + jump_table_.last().needs_frame != !frame_is_built_ || + jump_table_.last().is_lazy_deopt != needs_lazy_deopt) { + JumpTableEntry table_entry(entry, !frame_is_built_, needs_lazy_deopt); + jump_table_.Add(table_entry, zone()); + } + if (cc == no_condition) { + __ jmp(&jump_table_.last().label); + } else { + __ j(cc, &jump_table_.last().label); } - __ j(cc, &jump_table_.last().label); + } +} + + +void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) { + ZoneList<Handle<Map> > maps(1, zone()); + int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT); + for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) { + RelocInfo::Mode mode = it.rinfo()->rmode(); + if (mode == RelocInfo::EMBEDDED_OBJECT && + it.rinfo()->target_object()->IsMap()) { + Handle<Map> map(Map::cast(it.rinfo()->target_object())); + if (map->CanTransition()) { + maps.Add(map, zone()); + } + } + } +#ifdef VERIFY_HEAP + // This disables verification of weak embedded maps after full GC. + // AddDependentCode can cause a GC, which would observe the state where + // this code is not yet in the depended code lists of the embedded maps. 
+ NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps; +#endif + for (int i = 0; i < maps.length(); i++) { + maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code); } } @@ -612,7 +808,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { Handle<DeoptimizationInputData> data = factory()->NewDeoptimizationInputData(length, TENURED); - Handle<ByteArray> translations = translations_.CreateByteArray(); + Handle<ByteArray> translations = + translations_.CreateByteArray(isolate()->factory()); data->SetTranslationByteArray(*translations); data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_)); @@ -771,38 +968,38 @@ void LCodeGen::DoCallStub(LCallStub* instr) { switch (instr->hydrogen()->major_key()) { case CodeStub::RegExpConstructResult: { RegExpConstructResultStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::RegExpExec: { RegExpExecStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::SubString: { SubStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::NumberToString: { NumberToStringStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringAdd: { StringAddStub stub(NO_STRING_ADD_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::StringCompare: { StringCompareStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } case CodeStub::TranscendentalCache: { TranscendentalCacheStub stub(instr->transcendental_type(), TranscendentalCacheStub::TAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); break; } default: @@ -896,6 +1093,17 @@ void LCodeGen::DoModI(LModI* instr) { // Slow case, using idiv instruction. __ bind(&slow); + + // Check for (kMinInt % -1). + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + Label left_not_min_int; + __ cmpl(left_reg, Immediate(kMinInt)); + __ j(not_zero, &left_not_min_int, Label::kNear); + __ cmpl(right_reg, Immediate(-1)); + DeoptimizeIf(zero, instr->environment()); + __ bind(&left_not_min_int); + } + // Sign extend eax to edx. // (We are using only the low 32 bits of the values.) __ cdq(); @@ -1002,7 +1210,7 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { __ neg(reg1); DeoptimizeIf(zero, instr->environment()); } - __ movq(reg2, multiplier, RelocInfo::NONE); + __ movq(reg2, multiplier, RelocInfo::NONE64); // Result just fit in r64, because it's int32 * uint32. 
__ imul(reg2, reg1); @@ -1013,6 +1221,43 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { void LCodeGen::DoDivI(LDivI* instr) { + if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) { + Register dividend = ToRegister(instr->left()); + int32_t divisor = + HConstant::cast(instr->hydrogen()->right())->Integer32Value(); + int32_t test_value = 0; + int32_t power = 0; + + if (divisor > 0) { + test_value = divisor - 1; + power = WhichPowerOf2(divisor); + } else { + // Check for (0 / -x) that will produce negative zero. + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + __ testl(dividend, dividend); + DeoptimizeIf(zero, instr->environment()); + } + // Check for (kMinInt / -1). + if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + __ cmpl(dividend, Immediate(kMinInt)); + DeoptimizeIf(zero, instr->environment()); + } + test_value = - divisor - 1; + power = WhichPowerOf2(-divisor); + } + + if (test_value != 0) { + // Deoptimize if remainder is not 0. + __ testl(dividend, Immediate(test_value)); + DeoptimizeIf(not_zero, instr->environment()); + __ sarl(dividend, Immediate(power)); + } + + if (divisor < 0) __ negl(dividend); + + return; + } + LOperand* right = instr->right(); ASSERT(ToRegister(instr->result()).is(rax)); ASSERT(ToRegister(instr->left()).is(rax)); @@ -1023,13 +1268,13 @@ void LCodeGen::DoDivI(LDivI* instr) { // Check for x / 0. Register right_reg = ToRegister(right); - if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) { + if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { __ testl(right_reg, right_reg); DeoptimizeIf(zero, instr->environment()); } // Check for (0 / -x) that will produce negative zero. - if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { + if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) { Label left_not_zero; __ testl(left_reg, left_reg); __ j(not_zero, &left_not_zero, Label::kNear); @@ -1038,8 +1283,8 @@ void LCodeGen::DoDivI(LDivI* instr) { __ bind(&left_not_zero); } - // Check for (-kMinInt / -1). - if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { + // Check for (kMinInt / -1). + if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) { Label left_not_min_int; __ cmpl(left_reg, Immediate(kMinInt)); __ j(not_zero, &left_not_min_int, Label::kNear); @@ -1052,9 +1297,19 @@ void LCodeGen::DoDivI(LDivI* instr) { __ cdq(); __ idivl(right_reg); - // Deoptimize if remainder is not 0. - __ testl(rdx, rdx); - DeoptimizeIf(not_zero, instr->environment()); + if (!instr->is_flooring()) { + // Deoptimize if remainder is not 0. 
+ __ testl(rdx, rdx); + DeoptimizeIf(not_zero, instr->environment()); + } else { + Label done; + __ testl(rdx, rdx); + __ j(zero, &done, Label::kNear); + __ xorl(rdx, right_reg); + __ sarl(rdx, Immediate(31)); + __ addl(rax, rdx); + __ bind(&done); + } } @@ -1210,6 +1465,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) { ASSERT(ToRegister(right).is(rcx)); switch (instr->op()) { + case Token::ROR: + __ rorl_cl(ToRegister(left)); + break; case Token::SAR: __ sarl_cl(ToRegister(left)); break; @@ -1231,6 +1489,11 @@ void LCodeGen::DoShiftI(LShiftI* instr) { int value = ToInteger32(LConstantOperand::cast(right)); uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); switch (instr->op()) { + case Token::ROR: + if (shift_count != 0) { + __ rorl(ToRegister(left), Immediate(shift_count)); + } + break; case Token::SAR: if (shift_count != 0) { __ sarl(ToRegister(left), Immediate(shift_count)); @@ -1381,7 +1644,8 @@ void LCodeGen::DoDateField(LDateField* instr) { } else { if (index->value() < JSDate::kFirstUncachedField) { ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); - __ movq(kScratchRegister, stamp); + Operand stamp_operand = __ ExternalOperand(stamp); + __ movq(kScratchRegister, stamp_operand); __ cmpq(kScratchRegister, FieldOperand(object, JSDate::kCacheStampOffset)); __ j(not_equal, &runtime, Label::kNear); @@ -1393,10 +1657,10 @@ void LCodeGen::DoDateField(LDateField* instr) { __ PrepareCallCFunction(2); #ifdef _WIN64 __ movq(rcx, object); - __ movq(rdx, index, RelocInfo::NONE); + __ movq(rdx, index, RelocInfo::NONE64); #else __ movq(rdi, object); - __ movq(rsi, index, RelocInfo::NONE); + __ movq(rsi, index, RelocInfo::NONE64); #endif __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); @@ -1405,6 +1669,15 @@ void LCodeGen::DoDateField(LDateField* instr) { } +void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { + SeqStringSetCharGenerator::Generate(masm(), + instr->encoding(), + ToRegister(instr->string()), + ToRegister(instr->index()), + ToRegister(instr->value())); +} + + void LCodeGen::DoBitNotI(LBitNotI* instr) { LOperand* input = instr->value(); ASSERT(input->Equals(instr->result())); @@ -1457,17 +1730,17 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { if (right->IsConstantOperand()) { Immediate right_imm = Immediate(ToInteger32(LConstantOperand::cast(right))); - __ cmpq(left_reg, right_imm); + __ cmpl(left_reg, right_imm); __ j(condition, &return_left, Label::kNear); __ movq(left_reg, right_imm); } else if (right->IsRegister()) { Register right_reg = ToRegister(right); - __ cmpq(left_reg, right_reg); + __ cmpl(left_reg, right_reg); __ j(condition, &return_left, Label::kNear); __ movq(left_reg, right_reg); } else { Operand right_op = ToOperand(right); - __ cmpq(left_reg, right_op); + __ cmpl(left_reg, right_op); __ j(condition, &return_left, Label::kNear); __ movq(left_reg, right_op); } @@ -1527,6 +1800,7 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { break; case Token::DIV: __ divsd(left, right); + __ movaps(left, left); break; case Token::MOD: __ PrepareCallCFunction(2); @@ -1550,7 +1824,7 @@ void LCodeGen::DoArithmeticT(LArithmeticT* instr) { ASSERT(ToRegister(instr->result()).is(rax)); BinaryOpStub stub(instr->op(), NO_OVERWRITE); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ nop(); // Signals no inlined code. 
} @@ -1944,7 +2218,7 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { int true_block = chunk_->LookupDestination(instr->true_block_id()); int false_block = chunk_->LookupDestination(instr->false_block_id()); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); Condition condition = TokenToCondition(op, false); @@ -2028,7 +2302,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ JumpIfSmi(input, is_false); - if (class_name->IsEqualTo(CStrVector("Function"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) { // Assuming the following assertions, we can use the same compares to test // for both being a function type and being in the object type range. STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); @@ -2059,7 +2333,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, // Objects with a non-function constructor have class 'Object'. __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister); - if (class_name->IsEqualTo(CStrVector("Object"))) { + if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { __ j(not_equal, is_true); } else { __ j(not_equal, is_false); @@ -2070,13 +2344,13 @@ void LCodeGen::EmitClassOfTest(Label* is_true, __ movq(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset)); __ movq(temp, FieldOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); - // The class name we are testing against is a symbol because it's a literal. - // The name in the constructor is a symbol because of the way the context is - // booted. This routine isn't expected to work for random API-created + // The class name we are testing against is internalized since it's a literal. + // The name in the constructor is internalized because of the way the context + // is booted. This routine isn't expected to work for random API-created // classes and it doesn't have to because you can't access it with natives - // syntax. Since both sides are symbols it is sufficient to use an identity - // comparison. - ASSERT(class_name->IsSymbol()); + // syntax. Since both sides are internalized it is sufficient to use an + // identity comparison. + ASSERT(class_name->IsInternalizedString()); __ Cmp(temp, class_name); // End with the answer in the z flag. } @@ -2114,7 +2388,7 @@ void LCodeGen::DoInstanceOf(LInstanceOf* instr) { InstanceofStub stub(InstanceofStub::kNoFlags); __ push(ToRegister(instr->left())); __ push(ToRegister(instr->right())); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); Label true_value, done; __ testq(rax, rax); __ j(zero, &true_value, Label::kNear); @@ -2213,7 +2487,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, // safepoint with two arguments because stub is going to // remove the third argument from the stack before jumping // to instanceof builtin on the slow path. 
- CallCodeGeneric(stub.GetCode(), + CallCodeGeneric(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr, RECORD_SAFEPOINT_WITH_REGISTERS, @@ -2237,10 +2511,18 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, } +void LCodeGen::DoInstanceSize(LInstanceSize* instr) { + Register object = ToRegister(instr->object()); + Register result = ToRegister(instr->result()); + __ movq(result, FieldOperand(object, HeapObject::kMapOffset)); + __ movzxbq(result, FieldOperand(result, Map::kInstanceSizeOffset)); +} + + void LCodeGen::DoCmpT(LCmpT* instr) { Token::Value op = instr->op(); - Handle<Code> ic = CompareIC::GetUninitialized(op); + Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); CallCode(ic, RelocInfo::CODE_TARGET, instr); Condition condition = TokenToCondition(op, false); @@ -2256,15 +2538,39 @@ void LCodeGen::DoCmpT(LCmpT* instr) { void LCodeGen::DoReturn(LReturn* instr) { - if (FLAG_trace) { + if (FLAG_trace && info()->IsOptimizing()) { // Preserve the return value on the stack and rely on the runtime // call to return the value in the same register. __ push(rax); __ CallRuntime(Runtime::kTraceExit, 1); } - __ movq(rsp, rbp); - __ pop(rbp); - __ Ret((GetParameterCount() + 1) * kPointerSize, rcx); + if (info()->saves_caller_doubles()) { + ASSERT(NeedsEagerFrame()); + BitVector* doubles = chunk()->allocated_double_registers(); + BitVector::Iterator save_iterator(doubles); + int count = 0; + while (!save_iterator.Done()) { + __ movsd(XMMRegister::FromAllocationIndex(save_iterator.Current()), + MemOperand(rsp, count * kDoubleSize)); + save_iterator.Advance(); + count++; + } + } + if (NeedsEagerFrame()) { + __ movq(rsp, rbp); + __ pop(rbp); + } + if (instr->has_constant_parameter_count()) { + __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize, + rcx); + } else { + Register reg = ToRegister(instr->parameter_count()); + Register return_addr_reg = reg.is(rcx) ? rbx : rcx; + __ pop(return_addr_reg); + __ shl(reg, Immediate(kPointerSizeLog2)); + __ addq(rsp, reg); + __ jmp(return_addr_reg); + } } @@ -2612,15 +2918,16 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { } -void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { - Register result = ToRegister(instr->result()); +void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); LOperand* key = instr->key(); if (!key->IsConstantOperand()) { Register key_reg = ToRegister(key); - // Even though the HLoad/StoreKeyedFastElement instructions force the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. + // Even though the HLoad/StoreKeyed (in this case) instructions force + // the input representation for the key to be an integer, the input + // gets replaced during bound check elimination with the index argument + // to the bounds check, which can be tagged, so that case must be + // handled here, too. if (instr->hydrogen()->key()->representation().IsTagged()) { __ SmiToInteger64(key_reg, key_reg); } else if (instr->hydrogen()->IsDehoisted()) { @@ -2629,35 +2936,68 @@ void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) { __ movsxlq(key_reg, key_reg); } } + Operand operand(BuildFastArrayOperand( + instr->elements(), + key, + elements_kind, + 0, + instr->additional_index())); - // Load the result. 
- __ movq(result, - BuildFastArrayOperand(instr->elements(), - key, - FAST_ELEMENTS, - FixedArray::kHeaderSize - kHeapObjectTag, - instr->additional_index())); - - // Check for the hole value. - if (instr->hydrogen()->RequiresHoleCheck()) { - if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { - Condition smi = __ CheckSmi(result); - DeoptimizeIf(NegateCondition(smi), instr->environment()); - } else { - __ CompareRoot(result, Heap::kTheHoleValueRootIndex); - DeoptimizeIf(equal, instr->environment()); + if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { + XMMRegister result(ToDoubleRegister(instr->result())); + __ movss(result, operand); + __ cvtss2sd(result, result); + } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { + __ movsd(ToDoubleRegister(instr->result()), operand); + } else { + Register result(ToRegister(instr->result())); + switch (elements_kind) { + case EXTERNAL_BYTE_ELEMENTS: + __ movsxbq(result, operand); + break; + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: + case EXTERNAL_PIXEL_ELEMENTS: + __ movzxbq(result, operand); + break; + case EXTERNAL_SHORT_ELEMENTS: + __ movsxwq(result, operand); + break; + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: + __ movzxwq(result, operand); + break; + case EXTERNAL_INT_ELEMENTS: + __ movsxlq(result, operand); + break; + case EXTERNAL_UNSIGNED_INT_ELEMENTS: + __ movl(result, operand); + if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { + __ testl(result, result); + DeoptimizeIf(negative, instr->environment()); + } + break; + case EXTERNAL_FLOAT_ELEMENTS: + case EXTERNAL_DOUBLE_ELEMENTS: + case FAST_ELEMENTS: + case FAST_SMI_ELEMENTS: + case FAST_DOUBLE_ELEMENTS: + case FAST_HOLEY_ELEMENTS: + case FAST_HOLEY_SMI_ELEMENTS: + case FAST_HOLEY_DOUBLE_ELEMENTS: + case DICTIONARY_ELEMENTS: + case NON_STRICT_ARGUMENTS_ELEMENTS: + UNREACHABLE(); + break; } } } -void LCodeGen::DoLoadKeyedFastDoubleElement( - LLoadKeyedFastDoubleElement* instr) { +void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { XMMRegister result(ToDoubleRegister(instr->result())); LOperand* key = instr->key(); if (!key->IsConstantOperand()) { Register key_reg = ToRegister(key); - // Even though the HLoad/StoreKeyedFastElement instructions force the input + // Even though the HLoad/StoreKeyed instructions force the input // representation for the key to be an integer, the input gets replaced // during bound check elimination with the index argument to the bounds // check, which can be tagged, so that case must be handled here, too. @@ -2693,6 +3033,57 @@ void LCodeGen::DoLoadKeyedFastDoubleElement( } +void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { + Register result = ToRegister(instr->result()); + LOperand* key = instr->key(); + if (!key->IsConstantOperand()) { + Register key_reg = ToRegister(key); + // Even though the HLoad/StoreKeyedFastElement instructions force + // the input representation for the key to be an integer, the input + // gets replaced during bound check elimination with the index + // argument to the bounds check, which can be tagged, so that + // case must be handled here, too. + if (instr->hydrogen()->key()->representation().IsTagged()) { + __ SmiToInteger64(key_reg, key_reg); + } else if (instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + __ movsxlq(key_reg, key_reg); + } + } + + // Load the result. 
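[Note] The external-array switch above chooses a sign- or zero-extending load per element kind, and the unsigned 32-bit case additionally deoptimizes when the value cannot be represented as a signed 32-bit result. A hedged sketch of the same widening rules in plain C++ (names illustrative):

#include <cstdint>

// movsxbq / movsxwq / movsxlq correspond to the sign extensions, movzxbq and
// movzxwq to the zero extensions; movl zero-extends a uint32 element, which
// only fits an int32-typed result while its top bit is clear.
int64_t SignExtend8(uint8_t raw)   { return static_cast<int8_t>(raw); }
int64_t ZeroExtend8(uint8_t raw)   { return raw; }
int64_t SignExtend16(uint16_t raw) { return static_cast<int16_t>(raw); }
int64_t ZeroExtend16(uint16_t raw) { return raw; }
int64_t SignExtend32(uint32_t raw) { return static_cast<int32_t>(raw); }
bool Uint32FitsInt32(uint32_t raw) { return (raw & 0x80000000u) == 0; }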
+ __ movq(result, + BuildFastArrayOperand(instr->elements(), + key, + FAST_ELEMENTS, + FixedArray::kHeaderSize - kHeapObjectTag, + instr->additional_index())); + + // Check for the hole value. + if (instr->hydrogen()->RequiresHoleCheck()) { + if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { + Condition smi = __ CheckSmi(result); + DeoptimizeIf(NegateCondition(smi), instr->environment()); + } else { + __ CompareRoot(result, Heap::kTheHoleValueRootIndex); + DeoptimizeIf(equal, instr->environment()); + } + } +} + + +void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { + if (instr->is_external()) { + DoLoadKeyedExternalArray(instr); + } else if (instr->hydrogen()->representation().IsDouble()) { + DoLoadKeyedFixedDoubleArray(instr); + } else { + DoLoadKeyedFixedArray(instr); + } +} + + Operand LCodeGen::BuildFastArrayOperand( LOperand* elements_pointer, LOperand* key, @@ -2719,80 +3110,6 @@ Operand LCodeGen::BuildFastArrayOperand( } -void LCodeGen::DoLoadKeyedSpecializedArrayElement( - LLoadKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); - LOperand* key = instr->key(); - if (!key->IsConstantOperand()) { - Register key_reg = ToRegister(key); - // Even though the HLoad/StoreKeyedFastElement instructions force the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsTagged()) { - __ SmiToInteger64(key_reg, key_reg); - } else if (instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(key_reg, key_reg); - } - } - Operand operand(BuildFastArrayOperand( - instr->external_pointer(), - key, - elements_kind, - 0, - instr->additional_index())); - - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { - XMMRegister result(ToDoubleRegister(instr->result())); - __ movss(result, operand); - __ cvtss2sd(result, result); - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - __ movsd(ToDoubleRegister(instr->result()), operand); - } else { - Register result(ToRegister(instr->result())); - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ movsxbq(result, operand); - break; - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - case EXTERNAL_PIXEL_ELEMENTS: - __ movzxbq(result, operand); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ movsxwq(result, operand); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ movzxwq(result, operand); - break; - case EXTERNAL_INT_ELEMENTS: - __ movsxlq(result, operand); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ movl(result, operand); - if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { - __ testl(result, result); - DeoptimizeIf(negative, instr->environment()); - } - break; - case EXTERNAL_FLOAT_ELEMENTS: - case EXTERNAL_DOUBLE_ELEMENTS: - case FAST_ELEMENTS: - case FAST_SMI_ELEMENTS: - case FAST_DOUBLE_ELEMENTS: - case FAST_HOLEY_ELEMENTS: - case FAST_HOLEY_SMI_ELEMENTS: - case FAST_HOLEY_DOUBLE_ELEMENTS: - case DICTIONARY_ELEMENTS: - case NON_STRICT_ARGUMENTS_ELEMENTS: - UNREACHABLE(); - break; - } - } -} - - void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(rdx)); ASSERT(ToRegister(instr->key()).is(rax)); @@ -3165,7 +3482,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { XMMRegister input_reg = 
ToDoubleRegister(instr->value()); if (CpuFeatures::IsSupported(SSE4_1)) { - CpuFeatures::Scope scope(SSE4_1); + CpuFeatureScope scope(masm(), SSE4_1); if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { // Deoptimize if minus zero. __ movq(output_reg, input_reg); @@ -3178,7 +3495,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) { DeoptimizeIf(equal, instr->environment()); } else { Label negative_sign, done; - // Deoptimize on negative inputs. + // Deoptimize on unordered. __ xorps(xmm_scratch, xmm_scratch); // Zero the register. __ ucomisd(input_reg, xmm_scratch); DeoptimizeIf(parity_even, instr->environment()); @@ -3222,45 +3539,59 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) { const XMMRegister xmm_scratch = xmm0; Register output_reg = ToRegister(instr->result()); XMMRegister input_reg = ToDoubleRegister(instr->value()); + static int64_t one_half = V8_INT64_C(0x3FE0000000000000); // 0.5 + static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000); // -0.5 - Label done; - // xmm_scratch = 0.5 - __ movq(kScratchRegister, V8_INT64_C(0x3FE0000000000000), RelocInfo::NONE); + Label done, round_to_zero, below_one_half, do_not_compensate, restore; + __ movq(kScratchRegister, one_half, RelocInfo::NONE64); __ movq(xmm_scratch, kScratchRegister); - Label below_half; __ ucomisd(xmm_scratch, input_reg); - // If input_reg is NaN, this doesn't jump. - __ j(above, &below_half, Label::kNear); - // input = input + 0.5 - // This addition might give a result that isn't the correct for - // rounding, due to loss of precision, but only for a number that's - // so big that the conversion below will overflow anyway. + __ j(above, &below_one_half); + + // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). __ addsd(xmm_scratch, input_reg); - // Compute Math.floor(input). - // Use truncating instruction (OK because input is positive). __ cvttsd2si(output_reg, xmm_scratch); // Overflow is signalled with minint. __ cmpl(output_reg, Immediate(0x80000000)); + __ RecordComment("D2I conversion overflow"); + DeoptimizeIf(equal, instr->environment()); + __ jmp(&done); + + __ bind(&below_one_half); + __ movq(kScratchRegister, minus_one_half, RelocInfo::NONE64); + __ movq(xmm_scratch, kScratchRegister); + __ ucomisd(xmm_scratch, input_reg); + __ j(below_equal, &round_to_zero); + + // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then + // compare and compensate. + __ movq(kScratchRegister, input_reg); // Back up input_reg. + __ subsd(input_reg, xmm_scratch); + __ cvttsd2si(output_reg, input_reg); + // Catch minint due to overflow, and to prevent overflow when compensating. + __ cmpl(output_reg, Immediate(0x80000000)); + __ RecordComment("D2I conversion overflow"); DeoptimizeIf(equal, instr->environment()); + + __ cvtlsi2sd(xmm_scratch, output_reg); + __ ucomisd(input_reg, xmm_scratch); + __ j(equal, &restore, Label::kNear); + __ subl(output_reg, Immediate(1)); + // No overflow because we already ruled out minint. + __ bind(&restore); + __ movq(input_reg, kScratchRegister); // Restore input_reg. __ jmp(&done); - __ bind(&below_half); + __ bind(&round_to_zero); + // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if + // we can ignore the difference between a result of -0 and +0. if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { - // Bailout if negative (including -0). 
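[Note] The rewritten DoMathRound above splits the input into three ranges so that cvttsd2si, which truncates toward zero, can be used safely in each of them. A minimal sketch of the resulting rounding rule, which matches JS Math.round with ties rounding toward +infinity (plain C++, helper name illustrative):

#include <cmath>

double RoundJsStyle(double x) {
  if (x >= 0.5) return std::floor(x + 0.5);    // truncation of x + 0.5 equals floor here
  if (x > -0.5) return std::copysign(0.0, x);  // the int32 path returns 0 and deopts on -0 when that matters
  return std::floor(x + 0.5);                  // truncate x + 0.5, then compensate by one if it rounded up
}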
__ movq(output_reg, input_reg); __ testq(output_reg, output_reg); + __ RecordComment("Minus zero"); DeoptimizeIf(negative, instr->environment()); - } else { - // Bailout if below -0.5, otherwise round to (positive) zero, even - // if negative. - // xmm_scrach = -0.5 - __ movq(kScratchRegister, V8_INT64_C(0xBFE0000000000000), RelocInfo::NONE); - __ movq(xmm_scratch, kScratchRegister); - __ ucomisd(input_reg, xmm_scratch); - DeoptimizeIf(below, instr->environment()); } - __ xorl(output_reg, output_reg); - + __ Set(output_reg, 0); __ bind(&done); } @@ -3283,7 +3614,7 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) { Label done, sqrt; // Check base for -Infinity. According to IEEE-754, double-precision // -Infinity has the highest 12 bits set and the lowest 52 bits cleared. - __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE); + __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE64); __ movq(xmm_scratch, kScratchRegister); __ ucomisd(xmm_scratch, input_reg); // Comparing -Infinity with NaN results in "unordered", which sets the @@ -3388,8 +3719,7 @@ void LCodeGen::DoRandom(LRandom* instr) { // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16) // Only operate on the lower 32 bit of rax. - __ movl(rdx, rax); - __ andl(rdx, Immediate(0xFFFF)); + __ movzxwl(rdx, rax); __ imull(rdx, rdx, Immediate(18273)); __ shrl(rax, Immediate(16)); __ addl(rax, rdx); @@ -3397,8 +3727,7 @@ void LCodeGen::DoRandom(LRandom* instr) { __ movl(FieldOperand(rbx, ByteArray::kHeaderSize), rax); // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16) - __ movl(rdx, rcx); - __ andl(rdx, Immediate(0xFFFF)); + __ movzxwl(rdx, rcx); __ imull(rdx, rdx, Immediate(36969)); __ shrl(rcx, Immediate(16)); __ addl(rcx, rdx); @@ -3414,10 +3743,10 @@ void LCodeGen::DoRandom(LRandom* instr) { // Convert 32 random bits in rax to 0.(32 random bits) in a double // by computing: // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)). - __ movl(rcx, Immediate(0x49800000)); // 1.0 x 2^20 as single. 
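[Note] The replacement lines just below swap the single-precision constant for the double 0x4130000000000000 (1.0 x 2^20): the 32 random bits are dropped into the low mantissa bits of 2^20 and the bias is subtracted again, yielding a uniform double in [0, 1). A sketch of the bit trick in plain C++ (helper name illustrative):

#include <cstdint>
#include <cstring>

double RandomBitsToDouble(uint32_t random_bits) {
  // 1.(20 zero bits)(32 random bits) x 2^20, i.e. 2^20 + random_bits * 2^-32.
  uint64_t bits = 0x4130000000000000ULL | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d - 1048576.0;  // subtract 1.0 x 2^20, leaving random_bits * 2^-32
}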
- __ movd(xmm2, rcx); + __ movq(rcx, V8_INT64_C(0x4130000000000000), + RelocInfo::NONE64); // 1.0 x 2^20 as double + __ movq(xmm2, rcx); __ movd(xmm1, rax); - __ cvtss2sd(xmm2, xmm2); __ xorps(xmm1, xmm2); __ subsd(xmm1, xmm2); } @@ -3431,11 +3760,21 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) { } +void LCodeGen::DoMathExp(LMathExp* instr) { + XMMRegister input = ToDoubleRegister(instr->value()); + XMMRegister result = ToDoubleRegister(instr->result()); + Register temp1 = ToRegister(instr->temp1()); + Register temp2 = ToRegister(instr->temp2()); + + MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2); +} + + void LCodeGen::DoMathLog(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); TranscendentalCacheStub stub(TranscendentalCache::LOG, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3443,7 +3782,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); TranscendentalCacheStub stub(TranscendentalCache::TAN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3451,7 +3790,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); TranscendentalCacheStub stub(TranscendentalCache::COS, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3459,7 +3798,7 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) { ASSERT(ToDoubleRegister(instr->result()).is(xmm1)); TranscendentalCacheStub stub(TranscendentalCache::SIN, TranscendentalCacheStub::UNTAGGED); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -3551,7 +3890,7 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { int arity = instr->arity(); CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset)); } @@ -3582,9 +3921,27 @@ void LCodeGen::DoCallNew(LCallNew* instr) { ASSERT(ToRegister(instr->constructor()).is(rdi)); ASSERT(ToRegister(instr->result()).is(rax)); + __ Set(rax, instr->arity()); + if (FLAG_optimize_constructed_arrays) { + // No cell in ebx for construct type feedback in optimized code + Handle<Object> undefined_value(isolate()->factory()->undefined_value()); + __ Move(rbx, undefined_value); + } CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); + CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); +} + + +void LCodeGen::DoCallNewArray(LCallNewArray* instr) { + ASSERT(ToRegister(instr->constructor()).is(rdi)); + ASSERT(ToRegister(instr->result()).is(rax)); + ASSERT(FLAG_optimize_constructed_arrays); + __ Set(rax, instr->arity()); - CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); + __ Move(rbx, instr->hydrogen()->property_cell()); + Handle<Code> array_construct_code = + isolate()->builtins()->ArrayConstructCode(); + CallCode(array_construct_code, RelocInfo::CONSTRUCT_CALL, instr); } @@ -3593,6 +3950,13 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) { } +void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* 
instr) { + Register result = ToRegister(instr->result()); + Register base = ToRegister(instr->base_object()); + __ lea(result, Operand(base, instr->offset())); +} + + void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { Register object = ToRegister(instr->object()); Register value = ToRegister(instr->value()); @@ -3665,16 +4029,57 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { } -void LCodeGen::DoStoreKeyedSpecializedArrayElement( - LStoreKeyedSpecializedArrayElement* instr) { +void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { + if (instr->hydrogen()->skip_check()) return; + + if (instr->length()->IsRegister()) { + Register reg = ToRegister(instr->length()); + if (!instr->hydrogen()->length()->representation().IsTagged()) { + __ AssertZeroExtended(reg); + } + if (instr->index()->IsConstantOperand()) { + int constant_index = + ToInteger32(LConstantOperand::cast(instr->index())); + if (instr->hydrogen()->length()->representation().IsTagged()) { + __ Cmp(reg, Smi::FromInt(constant_index)); + } else { + __ cmpq(reg, Immediate(constant_index)); + } + } else { + Register reg2 = ToRegister(instr->index()); + if (!instr->hydrogen()->index()->representation().IsTagged()) { + __ AssertZeroExtended(reg2); + } + __ cmpq(reg, reg2); + } + } else { + Operand length = ToOperand(instr->length()); + if (instr->index()->IsConstantOperand()) { + int constant_index = + ToInteger32(LConstantOperand::cast(instr->index())); + if (instr->hydrogen()->length()->representation().IsTagged()) { + __ Cmp(length, Smi::FromInt(constant_index)); + } else { + __ cmpq(length, Immediate(constant_index)); + } + } else { + __ cmpq(length, ToRegister(instr->index())); + } + } + DeoptimizeIf(below_equal, instr->environment()); +} + + +void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { ElementsKind elements_kind = instr->elements_kind(); LOperand* key = instr->key(); if (!key->IsConstantOperand()) { Register key_reg = ToRegister(key); - // Even though the HLoad/StoreKeyedFastElement instructions force the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. + // Even though the HLoad/StoreKeyedFastElement instructions force + // the input representation for the key to be an integer, the input + // gets replaced during bound check elimination with the index + // argument to the bounds check, which can be tagged, so that case + // must be handled here, too. 
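[Note] The relocated DoBoundsCheck above compares length against index and deoptimizes on below_equal, so only indices in [0, length) survive the unsigned comparison. In plain C++ terms (illustrative, not V8 API):

#include <cstdint>

// The generated code deoptimizes when length <= index in the unsigned sense,
// which is the negation of this predicate.
bool BoundsCheckPasses(uint64_t length, uint64_t index) {
  return length > index;
}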
if (instr->hydrogen()->key()->representation().IsTagged()) { __ SmiToInteger64(key_reg, key_reg); } else if (instr->hydrogen()->IsDehoisted()) { @@ -3684,7 +4089,7 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( } } Operand operand(BuildFastArrayOperand( - instr->external_pointer(), + instr->elements(), key, elements_kind, 0, @@ -3729,76 +4134,60 @@ void LCodeGen::DoStoreKeyedSpecializedArrayElement( } -void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand) { - if (value->representation().IsTagged() && !value->type().IsSmi()) { - Condition cc; - if (operand->IsRegister()) { - cc = masm()->CheckSmi(ToRegister(operand)); - } else { - cc = masm()->CheckSmi(ToOperand(operand)); +void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { + XMMRegister value = ToDoubleRegister(instr->value()); + LOperand* key = instr->key(); + if (!key->IsConstantOperand()) { + Register key_reg = ToRegister(key); + // Even though the HLoad/StoreKeyedFastElement instructions force + // the input representation for the key to be an integer, the + // input gets replaced during bound check elimination with the index + // argument to the bounds check, which can be tagged, so that case + // must be handled here, too. + if (instr->hydrogen()->key()->representation().IsTagged()) { + __ SmiToInteger64(key_reg, key_reg); + } else if (instr->hydrogen()->IsDehoisted()) { + // Sign extend key because it could be a 32 bit negative value + // and the dehoisted address computation happens in 64 bits + __ movsxlq(key_reg, key_reg); } - DeoptimizeIf(NegateCondition(cc), environment); } -} + if (instr->NeedsCanonicalization()) { + Label have_value; -void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->length(), - instr->length()); - DeoptIfTaggedButNotSmi(instr->environment(), - instr->hydrogen()->index(), - instr->index()); - if (instr->length()->IsRegister()) { - Register reg = ToRegister(instr->length()); - if (!instr->hydrogen()->length()->representation().IsTagged()) { - __ AssertZeroExtended(reg); - } - if (instr->index()->IsConstantOperand()) { - int constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); - if (instr->hydrogen()->length()->representation().IsTagged()) { - __ Cmp(reg, Smi::FromInt(constant_index)); - } else { - __ cmpq(reg, Immediate(constant_index)); - } - } else { - Register reg2 = ToRegister(instr->index()); - if (!instr->hydrogen()->index()->representation().IsTagged()) { - __ AssertZeroExtended(reg2); - } - __ cmpq(reg, reg2); - } - } else { - Operand length = ToOperand(instr->length()); - if (instr->index()->IsConstantOperand()) { - int constant_index = - ToInteger32(LConstantOperand::cast(instr->index())); - if (instr->hydrogen()->length()->representation().IsTagged()) { - __ Cmp(length, Smi::FromInt(constant_index)); - } else { - __ cmpq(length, Immediate(constant_index)); - } - } else { - __ cmpq(length, ToRegister(instr->index())); - } + __ ucomisd(value, value); + __ j(parity_odd, &have_value); // NaN. 
+ + __ Set(kScratchRegister, BitCast<uint64_t>( + FixedDoubleArray::canonical_not_the_hole_nan_as_double())); + __ movq(value, kScratchRegister); + + __ bind(&have_value); } - DeoptimizeIf(below_equal, instr->environment()); + + Operand double_store_operand = BuildFastArrayOperand( + instr->elements(), + key, + FAST_DOUBLE_ELEMENTS, + FixedDoubleArray::kHeaderSize - kHeapObjectTag, + instr->additional_index()); + + __ movsd(double_store_operand, value); } -void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { +void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { Register value = ToRegister(instr->value()); - Register elements = ToRegister(instr->object()); + Register elements = ToRegister(instr->elements()); LOperand* key = instr->key(); if (!key->IsConstantOperand()) { Register key_reg = ToRegister(key); - // Even though the HLoad/StoreKeyedFastElement instructions force the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. + // Even though the HLoad/StoreKeyedFastElement instructions force + // the input representation for the key to be an integer, the + // input gets replaced during bound check elimination with the index + // argument to the bounds check, which can be tagged, so that case + // must be handled here, too. if (instr->hydrogen()->key()->representation().IsTagged()) { __ SmiToInteger64(key_reg, key_reg); } else if (instr->hydrogen()->IsDehoisted()) { @@ -3809,7 +4198,7 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { } Operand operand = - BuildFastArrayOperand(instr->object(), + BuildFastArrayOperand(instr->elements(), key, FAST_ELEMENTS, FixedArray::kHeaderSize - kHeapObjectTag, @@ -3836,48 +4225,17 @@ void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) { } -void LCodeGen::DoStoreKeyedFastDoubleElement( - LStoreKeyedFastDoubleElement* instr) { - XMMRegister value = ToDoubleRegister(instr->value()); - LOperand* key = instr->key(); - if (!key->IsConstantOperand()) { - Register key_reg = ToRegister(key); - // Even though the HLoad/StoreKeyedFastElement instructions force the input - // representation for the key to be an integer, the input gets replaced - // during bound check elimination with the index argument to the bounds - // check, which can be tagged, so that case must be handled here, too. - if (instr->hydrogen()->key()->representation().IsTagged()) { - __ SmiToInteger64(key_reg, key_reg); - } else if (instr->hydrogen()->IsDehoisted()) { - // Sign extend key because it could be a 32 bit negative value - // and the dehoisted address computation happens in 64 bits - __ movsxlq(key_reg, key_reg); - } - } - - if (instr->NeedsCanonicalization()) { - Label have_value; - - __ ucomisd(value, value); - __ j(parity_odd, &have_value); // NaN. 
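[Note] The NeedsCanonicalization block above replaces any NaN being stored into a fast double array with one fixed quiet-NaN bit pattern, so an arbitrary NaN can never alias the bit pattern reserved for the hole. A hedged sketch in plain C++ (the bit constant is a parameter because the real value comes from FixedDoubleArray::canonical_not_the_hole_nan_as_double()):

#include <cstdint>
#include <cstring>

double CanonicalizeNaN(double value, uint64_t canonical_nan_bits) {
  if (value == value) return value;  // ordinary number (including +/-0): store as-is
  double canonical;
  std::memcpy(&canonical, &canonical_nan_bits, sizeof(canonical));
  return canonical;                  // any NaN becomes the one canonical NaN
}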
- - __ Set(kScratchRegister, BitCast<uint64_t>( - FixedDoubleArray::canonical_not_the_hole_nan_as_double())); - __ movq(value, kScratchRegister); - - __ bind(&have_value); +void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) { + if (instr->is_external()) { + DoStoreKeyedExternalArray(instr); + } else if (instr->hydrogen()->value()->representation().IsDouble()) { + DoStoreKeyedFixedDoubleArray(instr); + } else { + DoStoreKeyedFixedArray(instr); } - - Operand double_store_operand = BuildFastArrayOperand( - instr->elements(), - key, - FAST_DOUBLE_ELEMENTS, - FixedDoubleArray::kHeaderSize - kHeapObjectTag, - instr->additional_index()); - - __ movsd(double_store_operand, value); } + void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { ASSERT(ToRegister(instr->object()).is(rdx)); ASSERT(ToRegister(instr->key()).is(rcx)); @@ -3892,28 +4250,40 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { Register object_reg = ToRegister(instr->object()); - Register new_map_reg = ToRegister(instr->new_map_temp()); Handle<Map> from_map = instr->original_map(); Handle<Map> to_map = instr->transitioned_map(); - ElementsKind from_kind = from_map->elements_kind(); - ElementsKind to_kind = to_map->elements_kind(); + ElementsKind from_kind = instr->from_kind(); + ElementsKind to_kind = instr->to_kind(); Label not_applicable; __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map); __ j(not_equal, ¬_applicable); - __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); if (IsSimpleMapChangeTransition(from_kind, to_kind)) { + Register new_map_reg = ToRegister(instr->new_map_temp()); + __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); __ movq(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg); // Write barrier. 
ASSERT_NE(instr->temp(), NULL); __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg, ToRegister(instr->temp()), kDontSaveFPRegs); + } else if (FLAG_compiled_transitions) { + PushSafepointRegistersScope scope(this); + if (!object_reg.is(rax)) { + __ movq(rax, object_reg); + } + __ Move(rbx, to_map); + TransitionElementsKindStub stub(from_kind, to_kind); + __ CallStub(&stub); + RecordSafepointWithRegisters( + instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); } else if (IsFastSmiElementsKind(from_kind) && - IsFastDoubleElementsKind(to_kind)) { + IsFastDoubleElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(rdx)); + Register new_map_reg = ToRegister(instr->new_map_temp()); ASSERT(new_map_reg.is(rbx)); + __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); __ movq(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(), RelocInfo::CODE_TARGET, instr); @@ -3921,7 +4291,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { IsFastObjectElementsKind(to_kind)) { Register fixed_object_reg = ToRegister(instr->temp()); ASSERT(fixed_object_reg.is(rdx)); + Register new_map_reg = ToRegister(instr->new_map_temp()); ASSERT(new_map_reg.is(rbx)); + __ movq(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT); __ movq(fixed_object_reg, object_reg); CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(), RelocInfo::CODE_TARGET, instr); @@ -3932,11 +4304,19 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { } +void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { + Register object = ToRegister(instr->object()); + Register temp = ToRegister(instr->temp()); + __ TestJSArrayForAllocationSiteInfo(object, temp); + DeoptimizeIf(equal, instr->environment()); +} + + void LCodeGen::DoStringAdd(LStringAdd* instr) { EmitPushTaggedOperand(instr->left()); EmitPushTaggedOperand(instr->right()); StringAddStub stub(NO_STRING_CHECK_IN_STUB); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } @@ -4011,7 +4391,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { Register result = ToRegister(instr->result()); ASSERT(!char_code.is(result)); - __ cmpl(char_code, Immediate(String::kMaxAsciiCharCode)); + __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode)); __ j(above, deferred->entry()); __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); __ movq(result, FieldOperand(result, @@ -4158,6 +4538,36 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { Register reg = ToRegister(instr->result()); Register tmp = ToRegister(instr->temp()); + bool convert_hole = false; + HValue* change_input = instr->hydrogen()->value(); + if (change_input->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(change_input); + convert_hole = load->UsesMustHandleHole(); + } + + Label no_special_nan_handling; + Label done; + if (convert_hole) { + XMMRegister input_reg = ToDoubleRegister(instr->value()); + __ ucomisd(input_reg, input_reg); + __ j(parity_odd, &no_special_nan_handling); + __ subq(rsp, Immediate(kDoubleSize)); + __ movsd(MemOperand(rsp, 0), input_reg); + __ cmpl(MemOperand(rsp, sizeof(kHoleNanLower32)), + Immediate(kHoleNanUpper32)); + Label canonicalize; + __ j(not_equal, &canonicalize); + __ addq(rsp, Immediate(kDoubleSize)); + __ Move(reg, factory()->the_hole_value()); + __ jmp(&done); + __ bind(&canonicalize); + __ addq(rsp, 
Immediate(kDoubleSize)); + __ Set(kScratchRegister, BitCast<uint64_t>( + FixedDoubleArray::canonical_not_the_hole_nan_as_double())); + __ movq(input_reg, kScratchRegister); + } + + __ bind(&no_special_nan_handling); DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); if (FLAG_inline_new) { __ AllocateHeapNumber(reg, tmp, deferred->entry()); @@ -4166,6 +4576,8 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { } __ bind(deferred->exit()); __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg); + + __ bind(&done); } @@ -4211,43 +4623,58 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, XMMRegister result_reg, bool deoptimize_on_undefined, bool deoptimize_on_minus_zero, - LEnvironment* env) { + LEnvironment* env, + NumberUntagDMode mode) { Label load_smi, done; - // Smi check. - __ JumpIfSmi(input_reg, &load_smi, Label::kNear); + if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { + // Smi check. + __ JumpIfSmi(input_reg, &load_smi, Label::kNear); - // Heap number map check. - __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), - Heap::kHeapNumberMapRootIndex); - if (deoptimize_on_undefined) { - DeoptimizeIf(not_equal, env); - } else { - Label heap_number; - __ j(equal, &heap_number, Label::kNear); + // Heap number map check. + __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), + Heap::kHeapNumberMapRootIndex); + if (deoptimize_on_undefined) { + DeoptimizeIf(not_equal, env); + } else { + Label heap_number; + __ j(equal, &heap_number, Label::kNear); - __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); - DeoptimizeIf(not_equal, env); + __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex); + DeoptimizeIf(not_equal, env); - // Convert undefined to NaN. Compute NaN as 0/0. - __ xorps(result_reg, result_reg); - __ divsd(result_reg, result_reg); - __ jmp(&done, Label::kNear); + // Convert undefined to NaN. Compute NaN as 0/0. + __ xorps(result_reg, result_reg); + __ divsd(result_reg, result_reg); + __ jmp(&done, Label::kNear); - __ bind(&heap_number); - } - // Heap number to XMM conversion. - __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); - if (deoptimize_on_minus_zero) { - XMMRegister xmm_scratch = xmm0; - __ xorps(xmm_scratch, xmm_scratch); - __ ucomisd(xmm_scratch, result_reg); - __ j(not_equal, &done, Label::kNear); - __ movmskpd(kScratchRegister, result_reg); - __ testq(kScratchRegister, Immediate(1)); - DeoptimizeIf(not_zero, env); + __ bind(&heap_number); + } + // Heap number to XMM conversion. 
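[Note] The hole handling added to DoNumberTagD above spills the double and compares only its upper 32 bits against kHoleNanUpper32, since the hole in a fast double array is one specific NaN identified by that word. A sketch of the same test in plain C++ (the constant is a parameter because its value is defined by V8):

#include <cstdint>
#include <cstring>

bool IsHoleNaN(double value, uint32_t hole_nan_upper32) {
  if (value == value) return false;  // ordinary numbers are never the hole
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == hole_nan_upper32;
}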
+ __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset)); + if (deoptimize_on_minus_zero) { + XMMRegister xmm_scratch = xmm0; + __ xorps(xmm_scratch, xmm_scratch); + __ ucomisd(xmm_scratch, result_reg); + __ j(not_equal, &done, Label::kNear); + __ movmskpd(kScratchRegister, result_reg); + __ testq(kScratchRegister, Immediate(1)); + DeoptimizeIf(not_zero, env); + } + __ jmp(&done, Label::kNear); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) { + __ testq(input_reg, Immediate(kSmiTagMask)); + DeoptimizeIf(not_equal, env); + } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) { + __ testq(input_reg, Immediate(kSmiTagMask)); + __ j(zero, &load_smi); + __ Set(kScratchRegister, BitCast<uint64_t>( + FixedDoubleArray::hole_nan_as_double())); + __ movq(result_reg, kScratchRegister); + __ jmp(&done, Label::kNear); + } else { + ASSERT(mode == NUMBER_CANDIDATE_IS_SMI); } - __ jmp(&done, Label::kNear); // Smi to XMM conversion __ bind(&load_smi); @@ -4336,10 +4763,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { Register input_reg = ToRegister(input); XMMRegister result_reg = ToDoubleRegister(result); + NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED; + HValue* value = instr->hydrogen()->value(); + if (value->type().IsSmi()) { + if (value->IsLoadKeyed()) { + HLoadKeyed* load = HLoadKeyed::cast(value); + if (load->UsesMustHandleHole()) { + if (load->hole_mode() == ALLOW_RETURN_HOLE) { + mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE; + } else { + mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE; + } + } else { + mode = NUMBER_CANDIDATE_IS_SMI; + } + } + } + EmitNumberUntagD(input_reg, result_reg, instr->hydrogen()->deoptimize_on_undefined(), instr->hydrogen()->deoptimize_on_minus_zero(), - instr->environment()); + instr->environment(), + mode); } @@ -4356,7 +4801,9 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { // Performs a truncating conversion of a floating point number as used by // the JS bitwise operations. __ cvttsd2siq(result_reg, input_reg); - __ movq(kScratchRegister, V8_INT64_C(0x8000000000000000), RelocInfo::NONE); + __ movq(kScratchRegister, + V8_INT64_C(0x8000000000000000), + RelocInfo::NONE64); __ cmpq(result_reg, kScratchRegister); DeoptimizeIf(equal, instr->environment()); } else { @@ -4461,10 +4908,10 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) { void LCodeGen::DoCheckMapCommon(Register reg, Handle<Map> map, CompareMapMode mode, - LEnvironment* env) { + LInstruction* instr) { Label success; __ CompareMap(reg, map, &success, mode); - DeoptimizeIf(not_equal, env); + DeoptimizeIf(not_equal, instr->environment()); __ bind(&success); } @@ -4482,7 +4929,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { __ j(equal, &success); } Handle<Map> map = map_set->last(); - DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr->environment()); + DoCheckMapCommon(reg, map, REQUIRE_EXACT_MAP, instr); __ bind(&success); } @@ -4537,27 +4984,25 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) { + ASSERT(instr->temp()->Equals(instr->result())); Register reg = ToRegister(instr->temp()); - Handle<JSObject> holder = instr->holder(); - Handle<JSObject> current_prototype = instr->prototype(); + ZoneList<Handle<JSObject> >* prototypes = instr->prototypes(); + ZoneList<Handle<Map> >* maps = instr->maps(); - // Load prototype object. - __ LoadHeapObject(reg, current_prototype); + ASSERT(prototypes->length() == maps->length()); - // Check prototype maps up to the holder. 
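[Note] In the DoDoubleToI path above, cvttsd2siq produces the integer-indefinite value 0x8000000000000000 for NaN or out-of-range input, so comparing the result against exactly that constant is how the overflow deopt is triggered. A hedged sketch of the rule in plain C++ (helper name illustrative):

#include <cmath>
#include <cstdint>

bool TruncateDoubleToInt64(double value, int64_t* out) {
  if (std::isnan(value) ||
      value >= 9223372036854775808.0 ||   // 2^63 and above overflow
      value < -9223372036854775808.0) {   // anything below -2^63 overflows
    *out = INT64_MIN;                     // what cvttsd2siq reports on failure
    return false;                         // the generated code deoptimizes here
  }
  *out = static_cast<int64_t>(value);
  return true;
}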
- while (!current_prototype.is_identical_to(holder)) { - DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); - current_prototype = - Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype())); - // Load next prototype object. - __ LoadHeapObject(reg, current_prototype); + if (instr->hydrogen()->CanOmitPrototypeChecks()) { + for (int i = 0; i < maps->length(); i++) { + prototype_maps_.Add(maps->at(i), info()->zone()); + } + __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1)); + } else { + for (int i = 0; i < prototypes->length(); i++) { + __ LoadHeapObject(reg, prototypes->at(i)); + DoCheckMapCommon(reg, maps->at(i), ALLOW_ELEMENT_TRANSITION_MAPS, instr); + } } - - // Check the holder map. - DoCheckMapCommon(reg, Handle<Map>(current_prototype->map()), - ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment()); } @@ -4588,12 +5033,8 @@ void LCodeGen::DoAllocateObject(LAllocateObject* instr) { // the constructor's prototype changes, but instance size and property // counts remain unchanged (if slack tracking finished). ASSERT(!constructor->shared()->IsInobjectSlackTrackingInProgress()); - __ AllocateInNewSpace(instance_size, - result, - no_reg, - scratch, - deferred->entry(), - TAG_OBJECT); + __ Allocate(instance_size, result, no_reg, scratch, deferred->entry(), + TAG_OBJECT); __ bind(deferred->exit()); if (FLAG_debug_code) { @@ -4658,10 +5099,66 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) { } +void LCodeGen::DoAllocate(LAllocate* instr) { + class DeferredAllocate: public LDeferredCode { + public: + DeferredAllocate(LCodeGen* codegen, LAllocate* instr) + : LDeferredCode(codegen), instr_(instr) { } + virtual void Generate() { codegen()->DoDeferredAllocate(instr_); } + virtual LInstruction* instr() { return instr_; } + private: + LAllocate* instr_; + }; + + DeferredAllocate* deferred = + new(zone()) DeferredAllocate(this, instr); + + Register result = ToRegister(instr->result()); + Register temp = ToRegister(instr->temp()); + + // Allocate memory for the object. + AllocationFlags flags = TAG_OBJECT; + if (instr->hydrogen()->MustAllocateDoubleAligned()) { + flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT); + } + if (instr->size()->IsConstantOperand()) { + int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); + if (instr->hydrogen()->CanAllocateInOldPointerSpace()) { + flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE); + } + __ Allocate(size, result, temp, no_reg, deferred->entry(), flags); + } else { + Register size = ToRegister(instr->size()); + __ AllocateInNewSpace(size, result, temp, no_reg, deferred->entry(), flags); + } + + __ bind(deferred->exit()); +} + + +void LCodeGen::DoDeferredAllocate(LAllocate* instr) { + Register size = ToRegister(instr->size()); + Register result = ToRegister(instr->result()); + + // TODO(3095996): Get rid of this. For now, we need to make the + // result register contain a valid pointer because it is already + // contained in the register pointer map. 
+ __ Set(result, 0); + + PushSafepointRegistersScope scope(this); + __ Integer32ToSmi(size, size); + __ push(size); + CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr); + __ StoreToSafepointRegisterSlot(result, rax); +} + + void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { Handle<FixedArray> literals(instr->environment()->closure()->literals()); ElementsKind boilerplate_elements_kind = instr->hydrogen()->boilerplate_elements_kind(); + AllocationSiteMode allocation_site_mode = + instr->hydrogen()->allocation_site_mode(); // Deopt if the array literal boilerplate ElementsKind is of a type different // than the expected one. The check isn't necessary if the boilerplate has @@ -4692,8 +5189,8 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { ASSERT(instr->hydrogen()->depth() == 1); FastCloneShallowArrayStub::Mode mode = FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else if (instr->hydrogen()->depth() > 1) { CallRuntime(Runtime::kCreateArrayLiteral, 3, instr); } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) { @@ -4701,10 +5198,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { } else { FastCloneShallowArrayStub::Mode mode = boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS - ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS - : FastCloneShallowArrayStub::CLONE_ELEMENTS; - FastCloneShallowArrayStub stub(mode, length); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS + : FastCloneShallowArrayStub::CLONE_ELEMENTS; + FastCloneShallowArrayStub stub(mode, allocation_site_mode, length); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -4712,10 +5209,14 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) { void LCodeGen::EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset) { + int* offset, + AllocationSiteMode mode) { ASSERT(!source.is(rcx)); ASSERT(!result.is(rcx)); + bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE && + object->map()->CanTrackAllocationSite(); + // Only elements backing stores for non-COW arrays need to be copied. Handle<FixedArrayBase> elements(object->elements()); bool has_elements = elements->length() > 0 && @@ -4725,8 +5226,13 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // this object and its backing store. int object_offset = *offset; int object_size = object->map()->instance_size(); - int elements_offset = *offset + object_size; int elements_size = has_elements ? elements->Size() : 0; + int elements_offset = *offset + object_size; + if (create_allocation_site_info) { + elements_offset += AllocationSiteInfo::kSize; + *offset += AllocationSiteInfo::kSize; + } + *offset += object_size + elements_size; // Copy object header. @@ -4745,22 +5251,31 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, // Copy in-object properties. 
for (int i = 0; i < inobject_properties; i++) { int total_offset = object_offset + object->GetInObjectPropertyOffset(i); - Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i)); + Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i), + isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ lea(rcx, Operand(result, *offset)); __ movq(FieldOperand(result, total_offset), rcx); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value)); __ movq(FieldOperand(result, total_offset), rcx); } else { - __ movq(rcx, value, RelocInfo::NONE); + __ movq(rcx, value, RelocInfo::NONE64); __ movq(FieldOperand(result, total_offset), rcx); } } + // Build Allocation Site Info if desired + if (create_allocation_site_info) { + __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex); + __ movq(FieldOperand(result, object_size), kScratchRegister); + __ movq(FieldOperand(result, object_size + kPointerSize), source); + } + if (has_elements) { // Copy elements backing store header. __ LoadHeapObject(source, elements); @@ -4778,25 +5293,26 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object, int64_t value = double_array->get_representation(i); int total_offset = elements_offset + FixedDoubleArray::OffsetOfElementAt(i); - __ movq(rcx, value, RelocInfo::NONE); + __ movq(rcx, value, RelocInfo::NONE64); __ movq(FieldOperand(result, total_offset), rcx); } } else if (elements->IsFixedArray()) { Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements); for (int i = 0; i < elements_length; i++) { int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i); - Handle<Object> value(fast_elements->get(i)); + Handle<Object> value(fast_elements->get(i), isolate()); if (value->IsJSObject()) { Handle<JSObject> value_object = Handle<JSObject>::cast(value); __ lea(rcx, Operand(result, *offset)); __ movq(FieldOperand(result, total_offset), rcx); __ LoadHeapObject(source, value_object); - EmitDeepCopy(value_object, result, source, offset); + EmitDeepCopy(value_object, result, source, offset, + DONT_TRACK_ALLOCATION_SITE); } else if (value->IsHeapObject()) { __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value)); __ movq(FieldOperand(result, total_offset), rcx); } else { - __ movq(rcx, value, RelocInfo::NONE); + __ movq(rcx, value, RelocInfo::NONE64); __ movq(FieldOperand(result, total_offset), rcx); } } @@ -4831,7 +5347,7 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { // Allocate all objects that are part of the literal in one big // allocation. This avoids multiple limit checks. 
Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -4841,7 +5357,8 @@ void LCodeGen::DoFastLiteral(LFastLiteral* instr) { __ bind(&allocated); int offset = 0; __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate()); - EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset); + EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset, + instr->hydrogen()->allocation_site_mode()); ASSERT_EQ(size, offset); } @@ -4851,28 +5368,36 @@ void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) { Handle<FixedArray> constant_properties = instr->hydrogen()->constant_properties(); - // Set up the parameters to the stub/runtime call. - __ PushHeapObject(literals); - __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); - __ Push(constant_properties); int flags = instr->hydrogen()->fast_elements() ? ObjectLiteral::kFastElements : ObjectLiteral::kNoFlags; flags |= instr->hydrogen()->has_function() ? ObjectLiteral::kHasFunction : ObjectLiteral::kNoFlags; - __ Push(Smi::FromInt(flags)); - // Pick the right runtime function or stub to call. + // Set up the parameters to the stub/runtime call and pick the right + // runtime function or stub to call. int properties_count = constant_properties->length() / 2; if (instr->hydrogen()->depth() > 1) { + __ PushHeapObject(literals); + __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); + __ Push(constant_properties); + __ Push(Smi::FromInt(flags)); CallRuntime(Runtime::kCreateObjectLiteral, 4, instr); } else if (flags != ObjectLiteral::kFastElements || properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { + __ PushHeapObject(literals); + __ Push(Smi::FromInt(instr->hydrogen()->literal_index())); + __ Push(constant_properties); + __ Push(Smi::FromInt(flags)); CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr); } else { + __ LoadHeapObject(rax, literals); + __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index())); + __ Move(rcx, constant_properties); + __ Move(rdx, Smi::FromInt(flags)); FastCloneShallowObjectStub stub(properties_count); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } } @@ -4909,7 +5434,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { __ bind(&materialized); int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; Label allocated, runtime_allocate; - __ AllocateInNewSpace(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); + __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT); __ jmp(&allocated); __ bind(&runtime_allocate); @@ -4942,7 +5467,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { if (!pretenure && shared_info->num_literals() == 0) { FastNewClosureStub stub(shared_info->language_mode()); __ Push(shared_info); - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); } else { __ push(rsi); __ Push(shared_info); @@ -4998,14 +5523,14 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Register input, Handle<String> type_name) { Condition final_branch_condition = no_condition; - if (type_name->Equals(heap()->number_symbol())) { + if (type_name->Equals(heap()->number_string())) { __ JumpIfSmi(input, true_label); __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset), Heap::kHeapNumberMapRootIndex); 
final_branch_condition = equal; - } else if (type_name->Equals(heap()->string_symbol())) { + } else if (type_name->Equals(heap()->string_string())) { __ JumpIfSmi(input, false_label); __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input); __ j(above_equal, false_label); @@ -5013,17 +5538,17 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Immediate(1 << Map::kIsUndetectable)); final_branch_condition = zero; - } else if (type_name->Equals(heap()->boolean_symbol())) { + } else if (type_name->Equals(heap()->boolean_string())) { __ CompareRoot(input, Heap::kTrueValueRootIndex); __ j(equal, true_label); __ CompareRoot(input, Heap::kFalseValueRootIndex); final_branch_condition = equal; - } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_symbol())) { + } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) { __ CompareRoot(input, Heap::kNullValueRootIndex); final_branch_condition = equal; - } else if (type_name->Equals(heap()->undefined_symbol())) { + } else if (type_name->Equals(heap()->undefined_string())) { __ CompareRoot(input, Heap::kUndefinedValueRootIndex); __ j(equal, true_label); __ JumpIfSmi(input, false_label); @@ -5033,7 +5558,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Immediate(1 << Map::kIsUndetectable)); final_branch_condition = not_zero; - } else if (type_name->Equals(heap()->function_symbol())) { + } else if (type_name->Equals(heap()->function_string())) { STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); __ JumpIfSmi(input, false_label); __ CmpObjectType(input, JS_FUNCTION_TYPE, input); @@ -5041,13 +5566,19 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE); final_branch_condition = equal; - } else if (type_name->Equals(heap()->object_symbol())) { + } else if (type_name->Equals(heap()->object_string())) { __ JumpIfSmi(input, false_label); if (!FLAG_harmony_typeof) { __ CompareRoot(input, Heap::kNullValueRootIndex); __ j(equal, true_label); } - __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); + if (FLAG_harmony_symbols) { + __ CmpObjectType(input, SYMBOL_TYPE, input); + __ j(equal, true_label); + __ CmpInstanceType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); + } else { + __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input); + } __ j(below, false_label); __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); __ j(above, false_label); @@ -5093,6 +5624,7 @@ void LCodeGen::EmitIsConstructCall(Register temp) { void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { + if (info()->IsStub()) return; // Ensure that we have enough space after the previous lazy-bailout // instruction for patching the code here. int current_pc = masm()->pc_offset(); @@ -5118,6 +5650,11 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) { } +void LCodeGen::DoDummyUse(LDummyUse* instr) { + // Nothing to see here, move on! 
+} + + void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) { LOperand* obj = instr->object(); LOperand* key = instr->key(); @@ -5182,7 +5719,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { __ CompareRoot(rsp, Heap::kStackLimitRootIndex); __ j(above_equal, &done, Label::kNear); StackCheckStub stub; - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); last_lazy_deopt_pc_ = masm()->pc_offset(); __ bind(&done); diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h index 65b3980163..5809296375 100644 --- a/deps/v8/src/x64/lithium-codegen-x64.h +++ b/deps/v8/src/x64/lithium-codegen-x64.h @@ -56,6 +56,7 @@ class LCodeGen BASE_EMBEDDED { deoptimizations_(4, info->zone()), jump_table_(4, info->zone()), deoptimization_literals_(8, info->zone()), + prototype_maps_(0, info->zone()), inlined_function_count_(0), scope_(info->scope()), status_(UNUSED), @@ -63,6 +64,7 @@ class LCodeGen BASE_EMBEDDED { deferred_(8, info->zone()), osr_pc_offset_(-1), last_lazy_deopt_pc_(0), + frame_is_built_(false), safepoints_(info->zone()), resolver_(this), expected_safepoint_kind_(Safepoint::kSimple) { @@ -77,6 +79,15 @@ class LCodeGen BASE_EMBEDDED { Heap* heap() const { return isolate()->heap(); } Zone* zone() const { return zone_; } + bool NeedsEagerFrame() const { + return GetStackSlotCount() > 0 || + info()->is_non_deferred_calling() || + !info()->IsStub(); + } + bool NeedsDeferredFrame() const { + return !NeedsEagerFrame() && info()->is_deferred_calling(); + } + // Support for converting LOperands to assembler types. Register ToRegister(LOperand* op) const; XMMRegister ToDoubleRegister(LOperand* op) const; @@ -106,11 +117,12 @@ class LCodeGen BASE_EMBEDDED { void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr); void DoDeferredStringCharFromCode(LStringCharFromCode* instr); void DoDeferredAllocateObject(LAllocateObject* instr); + void DoDeferredAllocate(LAllocate* instr); void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, Label* map_check); void DoCheckMapCommon(Register reg, Handle<Map> map, - CompareMapMode mode, LEnvironment* env); + CompareMapMode mode, LInstruction* instr); // Parallel move support. 
void DoParallelMove(LParallelMove* move); @@ -158,7 +170,6 @@ class LCodeGen BASE_EMBEDDED { Register scratch); int GetStackSlotCount() const { return chunk()->spill_slot_count(); } - int GetParameterCount() const { return scope()->num_parameters(); } void Abort(const char* reason); void Comment(const char* format, ...); @@ -229,8 +240,10 @@ class LCodeGen BASE_EMBEDDED { LOperand* op, bool is_tagged, bool is_uint32, + bool arguments_known, int arguments_index, int arguments_count); + void RegisterDependentCodeForEmbeddedMaps(Handle<Code> code); void PopulateDeoptimizationData(Handle<Code> code); int DefineDeoptimizationLiteral(Handle<Object> literal); @@ -272,16 +285,13 @@ class LCodeGen BASE_EMBEDDED { static Condition TokenToCondition(Token::Value op, bool is_unsigned); void EmitGoto(int block); void EmitBranch(int left_block, int right_block, Condition cc); - void EmitNumberUntagD(Register input, - XMMRegister result, - bool deoptimize_on_undefined, - bool deoptimize_on_minus_zero, - LEnvironment* env); - - - void DeoptIfTaggedButNotSmi(LEnvironment* environment, - HValue* value, - LOperand* operand); + void EmitNumberUntagD( + Register input, + XMMRegister result, + bool deoptimize_on_undefined, + bool deoptimize_on_minus_zero, + LEnvironment* env, + NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED); // Emits optimized code for typeof x == "y". Modifies input register. // Returns the condition on which a final split to @@ -324,17 +334,28 @@ class LCodeGen BASE_EMBEDDED { void EmitDeepCopy(Handle<JSObject> object, Register result, Register source, - int* offset); + int* offset, + AllocationSiteMode mode); struct JumpTableEntry { - explicit inline JumpTableEntry(Address entry) + inline JumpTableEntry(Address entry, bool frame, bool is_lazy) : label(), - address(entry) { } + address(entry), + needs_frame(frame), + is_lazy_deopt(is_lazy) { } Label label; Address address; + bool needs_frame; + bool is_lazy_deopt; }; void EnsureSpaceForLazyDeopt(int space_needed); + void DoLoadKeyedExternalArray(LLoadKeyed* instr); + void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr); + void DoLoadKeyedFixedArray(LLoadKeyed* instr); + void DoStoreKeyedExternalArray(LStoreKeyed* instr); + void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr); + void DoStoreKeyedFixedArray(LStoreKeyed* instr); Zone* zone_; LPlatformChunk* const chunk_; @@ -347,6 +368,7 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LEnvironment*> deoptimizations_; ZoneList<JumpTableEntry> jump_table_; ZoneList<Handle<Object> > deoptimization_literals_; + ZoneList<Handle<Map> > prototype_maps_; int inlined_function_count_; Scope* const scope_; Status status_; @@ -354,6 +376,7 @@ class LCodeGen BASE_EMBEDDED { ZoneList<LDeferredCode*> deferred_; int osr_pc_offset_; int last_lazy_deopt_pc_; + bool frame_is_built_; // Builder that keeps track of safepoints in the code. The table // itself is emitted at the end of the generated code. 
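The NeedsEagerFrame()/NeedsDeferredFrame() predicates added in the hunk above decide whether the generated code sets up its stack frame in the prologue or only lazily from deferred code. A minimal, illustrative C++ sketch of that decision follows; the struct and field names are stand-ins for the CompilationInfo accessors, not V8 API.

#include <cassert>

// Illustrative sketch only: mirrors the logic of NeedsEagerFrame() and
// NeedsDeferredFrame() from the header hunk above.
struct FrameInfo {
  int stack_slots;            // stands in for GetStackSlotCount()
  bool is_stub;               // stands in for info()->IsStub()
  bool non_deferred_calling;  // stands in for info()->is_non_deferred_calling()
  bool deferred_calling;      // stands in for info()->is_deferred_calling()

  // A normal optimized function, or any code with spill slots or calls
  // outside deferred code, builds its frame eagerly in the prologue.
  bool NeedsEagerFrame() const {
    return stack_slots > 0 || non_deferred_calling || !is_stub;
  }
  // A stub that only calls from deferred code builds the frame on demand.
  bool NeedsDeferredFrame() const {
    return !NeedsEagerFrame() && deferred_calling;
  }
};

int main() {
  FrameInfo stub{/*stack_slots=*/0, /*is_stub=*/true,
                 /*non_deferred_calling=*/false, /*deferred_calling=*/true};
  assert(!stub.NeedsEagerFrame() && stub.NeedsDeferredFrame());

  FrameInfo function{/*stack_slots=*/4, /*is_stub=*/false,
                     /*non_deferred_calling=*/true, /*deferred_calling=*/false};
  assert(function.NeedsEagerFrame() && !function.NeedsDeferredFrame());
  return 0;
}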
@@ -368,6 +391,7 @@ class LCodeGen BASE_EMBEDDED { public: explicit PushSafepointRegistersScope(LCodeGen* codegen) : codegen_(codegen) { + ASSERT(codegen_->info()->is_calling()); ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple); codegen_->masm_->PushSafepointRegisters(); codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters; diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc index 43fb8b9ba0..16248ee179 100644 --- a/deps/v8/src/x64/lithium-x64.cc +++ b/deps/v8/src/x64/lithium-x64.cc @@ -44,10 +44,10 @@ LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE) #undef DEFINE_COMPILE LOsrEntry::LOsrEntry() { - for (int i = 0; i < Register::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < Register::NumAllocatableRegisters(); ++i) { register_spills_[i] = NULL; } - for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; ++i) { + for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { double_register_spills_[i] = NULL; } } @@ -114,7 +114,11 @@ void LInstruction::PrintDataTo(StringStream* stream) { stream->Add("= "); for (int i = 0; i < InputCount(); i++) { if (i > 0) stream->Add(" "); - InputAt(i)->PrintTo(stream); + if (InputAt(i) == NULL) { + stream->Add("NULL"); + } else { + InputAt(i)->PrintTo(stream); + } } } @@ -179,6 +183,7 @@ const char* LArithmeticT::Mnemonic() const { case Token::BIT_AND: return "bit-and-t"; case Token::BIT_OR: return "bit-or-t"; case Token::BIT_XOR: return "bit-xor-t"; + case Token::ROR: return "ror-t"; case Token::SHL: return "sal-t"; case Token::SAR: return "sar-t"; case Token::SHR: return "shr-t"; @@ -287,6 +292,13 @@ void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) { } +void LInnerAllocatedObject::PrintDataTo(StringStream* stream) { + stream->Add(" = "); + base_object()->PrintTo(stream); + stream->Add(" + %d", offset()); +} + + void LCallConstantFunction::PrintDataTo(StringStream* stream) { stream->Add("#%d / ", arity()); } @@ -298,6 +310,11 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) { } +void LMathExp::PrintDataTo(StringStream* stream) { + value()->PrintTo(stream); +} + + void LLoadContextSlot::PrintDataTo(StringStream* stream) { context()->PrintTo(stream); stream->Add("[%d]", slot_index()); @@ -347,6 +364,17 @@ void LCallNew::PrintDataTo(StringStream* stream) { } +void LCallNewArray::PrintDataTo(StringStream* stream) { + stream->Add("= "); + constructor()->PrintTo(stream); + stream->Add(" #%d / ", arity()); + ASSERT(hydrogen()->property_cell()->value()->IsSmi()); + ElementsKind kind = static_cast<ElementsKind>( + Smi::cast(hydrogen()->property_cell()->value())->value()); + stream->Add(" (%s) ", ElementsKindToString(kind)); +} + + void LAccessArgumentsAt::PrintDataTo(StringStream* stream) { arguments()->PrintTo(stream); @@ -394,20 +422,27 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) { } -void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) { - object()->PrintTo(stream); +void LLoadKeyed::PrintDataTo(StringStream* stream) { + elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); - value()->PrintTo(stream); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d]", additional_index()); + } else { + stream->Add("]"); + } } -void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) { +void LStoreKeyed::PrintDataTo(StringStream* stream) { elements()->PrintTo(stream); stream->Add("["); key()->PrintTo(stream); - stream->Add("] <- "); + if (hydrogen()->IsDehoisted()) { + stream->Add(" + %d] <-", 
additional_index()); + } else { + stream->Add("] <- "); + } value()->PrintTo(stream); } @@ -606,6 +641,8 @@ LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) { LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr, HInstruction* hinstr, CanDeoptimize can_deoptimize) { + info()->MarkAsNonDeferredCalling(); + #ifdef DEBUG instr->VerifyCall(); #endif @@ -646,8 +683,12 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) { LUnallocated* LChunkBuilder::TempRegister() { LUnallocated* operand = new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER); - operand->set_virtual_register(allocator_->GetVirtualRegister()); - if (!allocator_->AllocationOk()) Abort("Not enough virtual registers."); + int vreg = allocator_->GetVirtualRegister(); + if (!allocator_->AllocationOk()) { + Abort("Out of virtual registers while trying to allocate temp register."); + return NULL; + } + operand->set_virtual_register(vreg); return operand; } @@ -671,6 +712,11 @@ LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) { } +LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) { + return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value()))); +} + + LInstruction* LChunkBuilder::DoSoftDeoptimize(HSoftDeoptimize* instr) { return AssignEnvironment(new(zone()) LDeoptimize); } @@ -903,7 +949,7 @@ LInstruction* LChunkBuilder::DoBranch(HBranch* instr) { if (value->EmitAtUses()) { ASSERT(value->IsConstant()); ASSERT(!value->representation().IsDouble()); - HBasicBlock* successor = HConstant::cast(value)->ToBoolean() + HBasicBlock* successor = HConstant::cast(value)->BooleanValue() ? instr->FirstSuccessor() : instr->SecondSuccessor(); return new(zone()) LGoto(successor->block_id()); @@ -955,6 +1001,12 @@ LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal( } +LInstruction* LChunkBuilder::DoInstanceSize(HInstanceSize* instr) { + LOperand* object = UseRegisterAtStart(instr->object()); + return DefineAsRegister(new(zone()) LInstanceSize(object)); +} + + LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) { LOperand* receiver = UseRegister(instr->receiver()); LOperand* function = UseRegisterAtStart(instr->function()); @@ -983,6 +1035,15 @@ LInstruction* LChunkBuilder::DoPushArgument(HPushArgument* instr) { } +LInstruction* LChunkBuilder::DoInnerAllocatedObject( + HInnerAllocatedObject* inner_object) { + LOperand* base_object = UseRegisterAtStart(inner_object->base_object()); + LInnerAllocatedObject* result = + new(zone()) LInnerAllocatedObject(base_object); + return DefineAsRegister(result); +} + + LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { return instr->HasNoUses() ? NULL @@ -991,7 +1052,14 @@ LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) { LInstruction* LChunkBuilder::DoContext(HContext* instr) { - return instr->HasNoUses() ? NULL : DefineAsRegister(new(zone()) LContext); + // If there is a non-return use, the context must be allocated in a register. 
+ for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) { + if (!it.value()->IsReturn()) { + return DefineAsRegister(new(zone()) LContext); + } + } + + return NULL; } @@ -1038,6 +1106,14 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) { LOperand* input = UseFixedDouble(instr->value(), xmm1); LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input); return MarkAsCall(DefineFixedDouble(result, xmm1), instr); + } else if (op == kMathExp) { + ASSERT(instr->representation().IsDouble()); + ASSERT(instr->value()->representation().IsDouble()); + LOperand* value = UseTempRegister(instr->value()); + LOperand* temp1 = TempRegister(); + LOperand* temp2 = TempRegister(); + LMathExp* result = new(zone()) LMathExp(value, temp1, temp2); + return DefineAsRegister(result); } else { LOperand* input = UseRegisterAtStart(instr->value()); LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input); @@ -1095,6 +1171,15 @@ LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) { } +LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) { + ASSERT(FLAG_optimize_constructed_arrays); + LOperand* constructor = UseFixed(instr->constructor(), rdi); + argument_count_ -= instr->argument_count(); + LCallNewArray* result = new(zone()) LCallNewArray(constructor); + return MarkAsCall(DefineFixed(result, rax), instr); +} + + LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) { LOperand* function = UseFixed(instr->function(), rdi); argument_count_ -= instr->argument_count(); @@ -1109,6 +1194,11 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) { } +LInstruction* LChunkBuilder::DoRor(HRor* instr) { + return DoShift(Token::ROR, instr); +} + + LInstruction* LChunkBuilder::DoShr(HShr* instr) { return DoShift(Token::SHR, instr); } @@ -1159,6 +1249,13 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) { if (instr->representation().IsDouble()) { return DoArithmeticD(Token::DIV, instr); } else if (instr->representation().IsInteger32()) { + if (instr->HasPowerOf2Divisor()) { + ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero)); + LOperand* value = UseRegisterAtStart(instr->left()); + LDivI* div = + new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL); + return AssignEnvironment(DefineSameAsFirst(div)); + } // The temporary operand is necessary to ensure that right is not allocated // into rdx. LOperand* temp = FixedTemp(rdx); @@ -1193,12 +1290,31 @@ HValue* LChunkBuilder::SimplifiedDivisorForMathFloorOfDiv(HValue* divisor) { return constant_val->CopyToRepresentation(Representation::Integer32(), divisor->block()->zone()); } + // A value with an integer representation does not need to be transformed. + if (divisor->representation().IsInteger32()) { + return divisor; + // A change from an integer32 can be replaced by the integer32 value. + } else if (divisor->IsChange() && + HChange::cast(divisor)->from().IsInteger32()) { + return HChange::cast(divisor)->value(); + } return NULL; } LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) { HValue* right = instr->right(); + if (!right->IsConstant()) { + ASSERT(right->representation().IsInteger32()); + // The temporary operand is necessary to ensure that right is not allocated + // into rdx. 
+ LOperand* temp = FixedTemp(rdx); + LOperand* dividend = UseFixed(instr->left(), rax); + LOperand* divisor = UseRegister(instr->right()); + LDivI* flooring_div = new(zone()) LDivI(dividend, divisor, temp); + return AssignEnvironment(DefineFixed(flooring_div, rax)); + } + ASSERT(right->IsConstant() && HConstant::cast(right)->HasInteger32Value()); LOperand* divisor = chunk_->DefineConstantOperand(HConstant::cast(right)); int32_t divisor_si = HConstant::cast(right)->Integer32Value(); @@ -1392,7 +1508,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) { LInstruction* LChunkBuilder::DoCompareIDAndBranch( HCompareIDAndBranch* instr) { - Representation r = instr->GetInputRepresentation(); + Representation r = instr->representation(); if (r.IsInteger32()) { ASSERT(instr->left()->representation().IsInteger32()); ASSERT(instr->right()->representation().IsInteger32()); @@ -1555,6 +1671,28 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) { } +LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) { + LOperand* string = UseRegister(instr->string()); + LOperand* index = UseRegister(instr->index()); + ASSERT(rcx.is_byte_register()); + LOperand* value = UseFixed(instr->value(), rcx); + LSeqStringSetChar* result = + new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value); + return DefineSameAsFirst(result); +} + + +LInstruction* LChunkBuilder::DoNumericConstraint(HNumericConstraint* instr) { + return NULL; +} + + +LInstruction* LChunkBuilder::DoInductionVariableAnnotation( + HInductionVariableAnnotation* instr) { + return NULL; +} + + LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { LOperand* value = UseRegisterOrConstantAtStart(instr->index()); LOperand* length = Use(instr->length()); @@ -1562,6 +1700,13 @@ LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) { } +LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation( + HBoundsCheckBaseIndexInformation* instr) { + UNREACHABLE(); + return NULL; +} + + LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) { // The control instruction marking the end of a block that completed // abruptly (e.g., threw an exception). There is nothing specific to do. @@ -1591,8 +1736,12 @@ LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) { LInstruction* LChunkBuilder::DoChange(HChange* instr) { Representation from = instr->from(); Representation to = instr->to(); + // Only mark conversions that might need to allocate as calling rather than + // all changes. This makes simple, non-allocating conversion not have to force + // building a stack frame. 
if (from.IsTagged()) { if (to.IsDouble()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LNumberUntagD* res = new(zone()) LNumberUntagD(value); return AssignEnvironment(DefineAsRegister(res)); @@ -1610,6 +1759,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { } } else if (from.IsDouble()) { if (to.IsTagged()) { + info()->MarkAsDeferredCalling(); LOperand* value = UseRegister(instr->value()); LOperand* temp = TempRegister(); @@ -1623,6 +1773,7 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) { return AssignEnvironment(DefineAsRegister(new(zone()) LDoubleToI(value))); } } else if (from.IsInteger32()) { + info()->MarkAsDeferredCalling(); if (to.IsTagged()) { HValue* val = instr->value(); LOperand* value = UseRegister(val); @@ -1667,9 +1818,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) { LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) { - LOperand* temp = TempRegister(); + LUnallocated* temp = TempRegister(); LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp); - return AssignEnvironment(result); + return AssignEnvironment(Define(result, temp)); } @@ -1679,6 +1830,12 @@ LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) { } +LInstruction* LChunkBuilder::DoCheckSmiOrInt32(HCheckSmiOrInt32* instr) { + LOperand* value = UseRegisterAtStart(instr->value()); + return AssignEnvironment(new(zone()) LCheckSmi(value)); +} + + LInstruction* LChunkBuilder::DoCheckFunction(HCheckFunction* instr) { LOperand* value = UseRegisterAtStart(instr->value()); return AssignEnvironment(new(zone()) LCheckFunction(value)); @@ -1712,7 +1869,9 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) { LInstruction* LChunkBuilder::DoReturn(HReturn* instr) { - return new(zone()) LReturn(UseFixed(instr->value(), rax)); + LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count()); + return new(zone()) LReturn(UseFixed(instr->value(), rax), + parameter_count); } @@ -1843,63 +2002,37 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer( } -LInstruction* LChunkBuilder::DoLoadKeyedFastElement( - HLoadKeyedFastElement* instr) { - ASSERT(instr->representation().IsTagged()); +LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) { ASSERT(instr->key()->representation().IsInteger32() || instr->key()->representation().IsTagged()); - LOperand* obj = UseRegisterAtStart(instr->object()); - bool clobbers_key = instr->key()->representation().IsTagged(); - LOperand* key = clobbers_key - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastElement* result = - new(zone()) LLoadKeyedFastElement(obj, key); - if (instr->RequiresHoleCheck()) AssignEnvironment(result); - return DefineAsRegister(result); -} - - -LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement( - HLoadKeyedFastDoubleElement* instr) { - ASSERT(instr->representation().IsDouble()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* elements = UseRegisterAtStart(instr->elements()); + ElementsKind elements_kind = instr->elements_kind(); bool clobbers_key = instr->key()->representation().IsTagged(); LOperand* key = clobbers_key ? 
UseTempRegister(instr->key()) : UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedFastDoubleElement* result = - new(zone()) LLoadKeyedFastDoubleElement(elements, key); - return AssignEnvironment(DefineAsRegister(result)); -} + LLoadKeyed* result = NULL; + if (!instr->is_external()) { + LOperand* obj = UseRegisterAtStart(instr->elements()); + result = new(zone()) LLoadKeyed(obj, key); + } else { + ASSERT( + (instr->representation().IsInteger32() && + (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && + (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || + (instr->representation().IsDouble() && + ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + LOperand* external_pointer = UseRegister(instr->elements()); + result = new(zone()) LLoadKeyed(external_pointer, key); + } -LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement( - HLoadKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); - ASSERT( - (instr->representation().IsInteger32() && - (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && - (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || - (instr->representation().IsDouble() && - ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - LOperand* external_pointer = UseRegister(instr->external_pointer()); - bool clobbers_key = instr->key()->representation().IsTagged(); - LOperand* key = clobbers_key - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - LLoadKeyedSpecializedArrayElement* result = - new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key); - LInstruction* load_instr = DefineAsRegister(result); + DefineAsRegister(result); + bool can_deoptimize = instr->RequiresHoleCheck() || + (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS); // An unsigned int array load might overflow and cause a deopt, make sure it // has an environment. - return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ? - AssignEnvironment(load_instr) : load_instr; + return can_deoptimize ? AssignEnvironment(result) : result; } @@ -1912,71 +2045,52 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) { } -LInstruction* LChunkBuilder::DoStoreKeyedFastElement( - HStoreKeyedFastElement* instr) { - bool needs_write_barrier = instr->NeedsWriteBarrier(); - ASSERT(instr->value()->representation().IsTagged()); - ASSERT(instr->object()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* obj = UseTempRegister(instr->object()); - LOperand* val = needs_write_barrier - ? UseTempRegister(instr->value()) - : UseRegisterAtStart(instr->value()); - bool clobbers_key = needs_write_barrier || - instr->key()->representation().IsTagged(); - LOperand* key = clobbers_key - ? 
UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyedFastElement(obj, key, val); -} - - -LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement( - HStoreKeyedFastDoubleElement* instr) { - ASSERT(instr->value()->representation().IsDouble()); - ASSERT(instr->elements()->representation().IsTagged()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* elements = UseRegisterAtStart(instr->elements()); - LOperand* val = UseTempRegister(instr->value()); +LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) { + ElementsKind elements_kind = instr->elements_kind(); bool clobbers_key = instr->key()->representation().IsTagged(); - LOperand* key = clobbers_key - ? UseTempRegister(instr->key()) - : UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val); -} + if (!instr->is_external()) { + ASSERT(instr->elements()->representation().IsTagged()); + bool needs_write_barrier = instr->NeedsWriteBarrier(); + LOperand* object = NULL; + LOperand* key = NULL; + LOperand* val = NULL; + + if (instr->value()->representation().IsDouble()) { + object = UseRegisterAtStart(instr->elements()); + val = UseTempRegister(instr->value()); + key = clobbers_key ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + } else { + ASSERT(instr->value()->representation().IsTagged()); + object = UseTempRegister(instr->elements()); + val = needs_write_barrier ? UseTempRegister(instr->value()) + : UseRegisterAtStart(instr->value()); + key = (clobbers_key || needs_write_barrier) + ? UseTempRegister(instr->key()) + : UseRegisterOrConstantAtStart(instr->key()); + } + + return new(zone()) LStoreKeyed(object, key, val); + } -LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement( - HStoreKeyedSpecializedArrayElement* instr) { - ElementsKind elements_kind = instr->elements_kind(); ASSERT( (instr->value()->representation().IsInteger32() && (elements_kind != EXTERNAL_FLOAT_ELEMENTS) && (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) || (instr->value()->representation().IsDouble() && ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) || - (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); - ASSERT(instr->external_pointer()->representation().IsExternal()); - ASSERT(instr->key()->representation().IsInteger32() || - instr->key()->representation().IsTagged()); - - LOperand* external_pointer = UseRegister(instr->external_pointer()); + (elements_kind == EXTERNAL_DOUBLE_ELEMENTS)))); + ASSERT(instr->elements()->representation().IsExternal()); bool val_is_temp_register = elements_kind == EXTERNAL_PIXEL_ELEMENTS || elements_kind == EXTERNAL_FLOAT_ELEMENTS; - LOperand* val = val_is_temp_register - ? UseTempRegister(instr->value()) + LOperand* val = val_is_temp_register ? UseTempRegister(instr->value()) : UseRegister(instr->value()); - bool clobbers_key = instr->key()->representation().IsTagged(); - LOperand* key = clobbers_key - ? UseTempRegister(instr->key()) + LOperand* key = clobbers_key ? 
UseTempRegister(instr->key()) : UseRegisterOrConstantAtStart(instr->key()); - return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer, - key, val); + LOperand* external_pointer = UseRegister(instr->elements()); + return new(zone()) LStoreKeyed(external_pointer, key, val); } @@ -1997,15 +2111,18 @@ LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) { LInstruction* LChunkBuilder::DoTransitionElementsKind( HTransitionElementsKind* instr) { - ElementsKind from_kind = instr->original_map()->elements_kind(); - ElementsKind to_kind = instr->transitioned_map()->elements_kind(); - if (IsSimpleMapChangeTransition(from_kind, to_kind)) { + LOperand* object = UseRegister(instr->object()); + if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) { LOperand* object = UseRegister(instr->object()); LOperand* new_map_reg = TempRegister(); LOperand* temp_reg = TempRegister(); LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(object, new_map_reg, temp_reg); - return DefineSameAsFirst(result); + return result; + } else if (FLAG_compiled_transitions) { + LTransitionElementsKind* result = + new(zone()) LTransitionElementsKind(object, NULL, NULL); + return AssignPointerMap(result); } else { LOperand* object = UseFixed(instr->object(), rax); LOperand* fixed_object_reg = FixedTemp(rdx); @@ -2014,11 +2131,21 @@ LInstruction* LChunkBuilder::DoTransitionElementsKind( new(zone()) LTransitionElementsKind(object, new_map_reg, fixed_object_reg); - return MarkAsCall(DefineFixed(result, rax), instr); + return MarkAsCall(result, instr); } } +LInstruction* LChunkBuilder::DoTrapAllocationMemento( + HTrapAllocationMemento* instr) { + LOperand* object = UseRegister(instr->object()); + LOperand* temp = TempRegister(); + LTrapAllocationMemento* result = + new(zone()) LTrapAllocationMemento(object, temp); + return AssignEnvironment(result); +} + + LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) { bool needs_write_barrier = instr->NeedsWriteBarrier(); bool needs_write_barrier_for_map = !instr->transition().is_null() && @@ -2087,11 +2214,21 @@ LInstruction* LChunkBuilder::DoStringLength(HStringLength* instr) { LInstruction* LChunkBuilder::DoAllocateObject(HAllocateObject* instr) { + info()->MarkAsDeferredCalling(); LAllocateObject* result = new(zone()) LAllocateObject(TempRegister()); return AssignPointerMap(DefineAsRegister(result)); } +LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) { + info()->MarkAsDeferredCalling(); + LOperand* size = UseTempRegister(instr->size()); + LOperand* temp = TempRegister(); + LAllocate* result = new(zone()) LAllocate(size, temp); + return AssignPointerMap(DefineAsRegister(result)); +} + + LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) { return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr); } @@ -2134,8 +2271,17 @@ LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) { LInstruction* LChunkBuilder::DoParameter(HParameter* instr) { - int spill_index = chunk()->GetParameterStackSlot(instr->index()); - return DefineAsSpilled(new(zone()) LParameter, spill_index); + LParameter* result = new(zone()) LParameter; + if (instr->kind() == HParameter::STACK_PARAMETER) { + int spill_index = chunk()->GetParameterStackSlot(instr->index()); + return DefineAsSpilled(result, spill_index); + } else { + ASSERT(info()->IsStub()); + CodeStubInterfaceDescriptor* descriptor = + info()->code_stub()->GetInterfaceDescriptor(info()->isolate()); + Register reg = 
descriptor->register_params_[instr->index()]; + return DefineFixed(result, reg); + } } @@ -2203,7 +2349,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { env->set_ast_id(instr->ast_id()); env->Drop(instr->pop_count()); - for (int i = 0; i < instr->values()->length(); ++i) { + for (int i = instr->values()->length() - 1; i >= 0; --i) { HValue* value = instr->values()->at(i); if (instr->HasAssignedIndexAt(i)) { env->Bind(instr->GetAssignedIndexAt(i), value); @@ -2231,6 +2377,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) { LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) { + info()->MarkAsDeferredCalling(); if (instr->is_function_entry()) { return MarkAsCall(new(zone()) LStackCheck, instr); } else { @@ -2247,8 +2394,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) { instr->arguments_count(), instr->function(), undefined, - instr->call_kind(), - instr->inlining_kind()); + instr->inlining_kind(), + instr->undefined_receiver()); if (instr->arguments_var() != NULL) { inner->Bind(instr->arguments_var(), graph()->GetArgumentsObject()); } diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h index 6cf4af661f..9e3e836ed4 100644 --- a/deps/v8/src/x64/lithium-x64.h +++ b/deps/v8/src/x64/lithium-x64.h @@ -49,6 +49,7 @@ class LCodeGen; #define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \ V(AccessArgumentsAt) \ V(AddI) \ + V(Allocate) \ V(AllocateObject) \ V(ApplyArguments) \ V(ArgumentsElements) \ @@ -67,6 +68,7 @@ class LCodeGen; V(CallKnownGlobal) \ V(CallNamed) \ V(CallNew) \ + V(CallNewArray) \ V(CallRuntime) \ V(CallStub) \ V(CheckFunction) \ @@ -93,6 +95,7 @@ class LCodeGen; V(Deoptimize) \ V(DivI) \ V(DoubleToI) \ + V(DummyUse) \ V(ElementsKind) \ V(FastLiteral) \ V(FixedArrayBaseLength) \ @@ -107,6 +110,7 @@ class LCodeGen; V(In) \ V(InstanceOf) \ V(InstanceOfKnownGlobal) \ + V(InstanceSize) \ V(InstructionGap) \ V(Integer32ToDouble) \ V(Uint32ToDouble) \ @@ -126,13 +130,12 @@ class LCodeGen; V(LoadFunctionPrototype) \ V(LoadGlobalCell) \ V(LoadGlobalGeneric) \ - V(LoadKeyedFastDoubleElement) \ - V(LoadKeyedFastElement) \ + V(LoadKeyed) \ V(LoadKeyedGeneric) \ - V(LoadKeyedSpecializedArrayElement) \ V(LoadNamedField) \ V(LoadNamedFieldPolymorphic) \ V(LoadNamedGeneric) \ + V(MathExp) \ V(MathFloorOfDiv) \ V(MathMinMax) \ V(ModI) \ @@ -150,6 +153,7 @@ class LCodeGen; V(Random) \ V(RegExpLiteral) \ V(Return) \ + V(SeqStringSetChar) \ V(ShiftI) \ V(SmiTag) \ V(SmiUntag) \ @@ -157,10 +161,8 @@ class LCodeGen; V(StoreContextSlot) \ V(StoreGlobalCell) \ V(StoreGlobalGeneric) \ - V(StoreKeyedFastDoubleElement) \ - V(StoreKeyedFastElement) \ + V(StoreKeyed) \ V(StoreKeyedGeneric) \ - V(StoreKeyedSpecializedArrayElement) \ V(StoreNamedField) \ V(StoreNamedGeneric) \ V(StringAdd) \ @@ -174,6 +176,7 @@ class LCodeGen; V(Throw) \ V(ToFastProperties) \ V(TransitionElementsKind) \ + V(TrapAllocationMemento) \ V(Typeof) \ V(TypeofIsAndBranch) \ V(UnaryMathOperation) \ @@ -185,7 +188,8 @@ class LCodeGen; V(LoadFieldByIndex) \ V(DateField) \ V(WrapReceiver) \ - V(Drop) + V(Drop) \ + V(InnerAllocatedObject) #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \ @@ -254,6 +258,11 @@ class LInstruction: public ZoneObject { void MarkAsCall() { is_call_ = true; } + // Interface to the register allocator and iterators. 
+ bool ClobbersTemps() const { return is_call_; } + bool ClobbersRegisters() const { return is_call_; } + bool ClobbersDoubleRegisters() const { return is_call_; } + virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { } // Interface to the register allocator and iterators. @@ -401,6 +410,15 @@ class LLazyBailout: public LTemplateInstruction<0, 0, 0> { }; +class LDummyUse: public LTemplateInstruction<1, 1, 0> { + public: + explicit LDummyUse(LOperand* value) { + inputs_[0] = value; + } + DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use") +}; + + class LDeoptimize: public LTemplateInstruction<0, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize") @@ -570,6 +588,8 @@ class LDivI: public LTemplateInstruction<1, 2, 1> { LOperand* right() { return inputs_[1]; } LOperand* temp() { return temps_[0]; } + bool is_flooring() { return hydrogen_value()->IsMathFloorOfDiv(); } + DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i") DECLARE_HYDROGEN_ACCESSOR(Div) }; @@ -624,7 +644,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> { Token::Value op() const { return hydrogen()->token(); } bool is_double() const { - return hydrogen()->GetInputRepresentation().IsDouble(); + return hydrogen()->representation().IsDouble(); } virtual void PrintDataTo(StringStream* stream); @@ -647,6 +667,25 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> { }; +class LMathExp: public LTemplateInstruction<1, 1, 2> { + public: + LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) { + inputs_[0] = value; + temps_[0] = temp1; + temps_[1] = temp2; + ExternalReference::InitializeMathExpData(); + } + + LOperand* value() { return inputs_[0]; } + LOperand* temp1() { return temps_[0]; } + LOperand* temp2() { return temps_[1]; } + + DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp") + + virtual void PrintDataTo(StringStream* stream); +}; + + class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> { public: LCmpObjectEqAndBranch(LOperand* left, LOperand* right) { @@ -918,6 +957,19 @@ class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> { }; +class LInstanceSize: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInstanceSize(LOperand* object) { + inputs_[0] = object; + } + + LOperand* object() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(InstanceSize, "instance-size") + DECLARE_HYDROGEN_ACCESSOR(InstanceSize) +}; + + class LBoundsCheck: public LTemplateInstruction<0, 2, 0> { public: LBoundsCheck(LOperand* index, LOperand* length) { @@ -1139,6 +1191,30 @@ class LDateField: public LTemplateInstruction<1, 1, 0> { }; +class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> { + public: + LSeqStringSetChar(String::Encoding encoding, + LOperand* string, + LOperand* index, + LOperand* value) : encoding_(encoding) { + inputs_[0] = string; + inputs_[1] = index; + inputs_[2] = value; + } + + String::Encoding encoding() { return encoding_; } + LOperand* string() { return inputs_[0]; } + LOperand* index() { return inputs_[1]; } + LOperand* value() { return inputs_[2]; } + + DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char") + DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar) + + private: + String::Encoding encoding_; +}; + + class LThrow: public LTemplateInstruction<0, 1, 0> { public: explicit LThrow(LOperand* value) { @@ -1263,14 +1339,24 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> { }; -class LReturn: public LTemplateInstruction<0, 1, 0> { +class LReturn: public LTemplateInstruction<0, 2, 0> { public: - 
explicit LReturn(LOperand* value) { + explicit LReturn(LOperand* value, LOperand* parameter_count) { inputs_[0] = value; + inputs_[1] = parameter_count; } LOperand* value() { return inputs_[0]; } + bool has_constant_parameter_count() { + return parameter_count()->IsConstantOperand(); + } + LConstantOperand* constant_parameter_count() { + ASSERT(has_constant_parameter_count()); + return LConstantOperand::cast(parameter_count()); + } + LOperand* parameter_count() { return inputs_[1]; } + DECLARE_CONCRETE_INSTRUCTION(Return, "return") }; @@ -1353,56 +1439,26 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> { }; -class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> { +class LLoadKeyed: public LTemplateInstruction<1, 2, 0> { public: - LLoadKeyedFastElement(LOperand* elements, LOperand* key) { + LLoadKeyed(LOperand* elements, LOperand* key) { inputs_[0] = elements; inputs_[1] = key; } - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement) + DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed") + DECLARE_HYDROGEN_ACCESSOR(LoadKeyed) - LOperand* elements() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) { - inputs_[0] = elements; - inputs_[1] = key; + bool is_external() const { + return hydrogen()->is_external(); } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement, - "load-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement) - LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } + virtual void PrintDataTo(StringStream* stream); uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> { - public: - LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) { - inputs_[0] = external_pointer; - inputs_[1] = key; - } - - DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement, - "load-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement) - - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - uint32_t additional_index() const { return hydrogen()->index_offset(); } }; @@ -1539,6 +1595,22 @@ class LDrop: public LTemplateInstruction<0, 0, 0> { }; +class LInnerAllocatedObject: public LTemplateInstruction<1, 1, 0> { + public: + explicit LInnerAllocatedObject(LOperand* base_object) { + inputs_[0] = base_object; + } + + LOperand* base_object() { return inputs_[0]; } + int offset() { return hydrogen()->offset(); } + + virtual void PrintDataTo(StringStream* stream); + + DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "sub-allocated-object") + DECLARE_HYDROGEN_ACCESSOR(InnerAllocatedObject) +}; + + class LThisFunction: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function") @@ -1549,6 +1621,7 @@ class LThisFunction: public LTemplateInstruction<1, 0, 0> { class LContext: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(Context, "context") + DECLARE_HYDROGEN_ACCESSOR(Context) }; @@ -1703,6 +1776,23 @@ class LCallNew: public 
LTemplateInstruction<1, 1, 0> { }; +class LCallNewArray: public LTemplateInstruction<1, 1, 0> { + public: + explicit LCallNewArray(LOperand* constructor) { + inputs_[0] = constructor; + } + + LOperand* constructor() { return inputs_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array") + DECLARE_HYDROGEN_ACCESSOR(CallNewArray) + + virtual void PrintDataTo(StringStream* stream); + + int arity() const { return hydrogen()->argument_count() - 1; } +}; + + class LCallRuntime: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime") @@ -1776,6 +1866,7 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 1> { LOperand* temp() { return temps_[0]; } DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d") + DECLARE_HYDROGEN_ACCESSOR(Change) }; @@ -1899,76 +1990,29 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> { }; -class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> { +class LStoreKeyed: public LTemplateInstruction<0, 3, 0> { public: - LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) { + LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) { inputs_[0] = object; inputs_[1] = key; inputs_[2] = value; } - LOperand* object() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement, - "store-keyed-fast-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement) - - virtual void PrintDataTo(StringStream* stream); - - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - -class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedFastDoubleElement(LOperand* elements, - LOperand* key, - LOperand* value) { - inputs_[0] = elements; - inputs_[1] = key; - inputs_[2] = value; - } - + bool is_external() const { return hydrogen()->is_external(); } LOperand* elements() { return inputs_[0]; } LOperand* key() { return inputs_[1]; } LOperand* value() { return inputs_[2]; } + ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement, - "store-keyed-fast-double-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement) + DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed") + DECLARE_HYDROGEN_ACCESSOR(StoreKeyed) virtual void PrintDataTo(StringStream* stream); - bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); } uint32_t additional_index() const { return hydrogen()->index_offset(); } }; -class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> { - public: - LStoreKeyedSpecializedArrayElement(LOperand* external_pointer, - LOperand* key, - LOperand* value) { - inputs_[0] = external_pointer; - inputs_[1] = key; - inputs_[2] = value; - } - - LOperand* external_pointer() { return inputs_[0]; } - LOperand* key() { return inputs_[1]; } - LOperand* value() { return inputs_[2]; } - - DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement, - "store-keyed-specialized-array-element") - DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement) - - ElementsKind elements_kind() const { return hydrogen()->elements_kind(); } - uint32_t additional_index() const { return hydrogen()->index_offset(); } -}; - - class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> { public: LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) { @@ -2012,6 +2056,24 @@ class 
LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> { Handle<Map> original_map() { return hydrogen()->original_map(); } Handle<Map> transitioned_map() { return hydrogen()->transitioned_map(); } + ElementsKind from_kind() { return hydrogen()->from_kind(); } + ElementsKind to_kind() { return hydrogen()->to_kind(); } +}; + + +class LTrapAllocationMemento : public LTemplateInstruction<0, 1, 1> { + public: + LTrapAllocationMemento(LOperand* object, + LOperand* temp) { + inputs_[0] = object; + temps_[0] = temp; + } + + LOperand* object() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, + "trap-allocation-memento") }; @@ -2110,7 +2172,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> { }; -class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> { +class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> { public: explicit LCheckPrototypeMaps(LOperand* temp) { temps_[0] = temp; @@ -2121,8 +2183,10 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> { DECLARE_CONCRETE_INSTRUCTION(CheckPrototypeMaps, "check-prototype-maps") DECLARE_HYDROGEN_ACCESSOR(CheckPrototypeMaps) - Handle<JSObject> prototype() const { return hydrogen()->prototype(); } - Handle<JSObject> holder() const { return hydrogen()->holder(); } + ZoneList<Handle<JSObject> >* prototypes() const { + return hydrogen()->prototypes(); + } + ZoneList<Handle<Map> >* maps() const { return hydrogen()->maps(); } }; @@ -2202,6 +2266,21 @@ class LAllocateObject: public LTemplateInstruction<1, 0, 1> { }; +class LAllocate: public LTemplateInstruction<1, 1, 1> { + public: + LAllocate(LOperand* size, LOperand* temp) { + inputs_[0] = size; + temps_[0] = temp; + } + + LOperand* size() { return inputs_[0]; } + LOperand* temp() { return temps_[0]; } + + DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate") + DECLARE_HYDROGEN_ACCESSOR(Allocate) +}; + + class LFastLiteral: public LTemplateInstruction<1, 0, 0> { public: DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal") @@ -2327,8 +2406,9 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> { // slot, i.e., that must also be restored to the spill slot on OSR entry. // NULL if the register has no assigned spill slot. Indexed by allocation // index. 
- LOperand* register_spills_[Register::kNumAllocatableRegisters]; - LOperand* double_register_spills_[DoubleRegister::kNumAllocatableRegisters]; + LOperand* register_spills_[Register::kMaxNumAllocatableRegisters]; + LOperand* double_register_spills_[ + DoubleRegister::kMaxNumAllocatableRegisters]; }; diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc index 77506741a3..9ecf66c5ec 100644 --- a/deps/v8/src/x64/macro-assembler-x64.cc +++ b/deps/v8/src/x64/macro-assembler-x64.cc @@ -162,7 +162,7 @@ void MacroAssembler::PushAddress(ExternalReference source) { int64_t address = reinterpret_cast<int64_t>(source.address()); if (is_int32(address) && !Serializer::enabled()) { if (emit_debug_code()) { - movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(kScratchRegister, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); } push(Immediate(static_cast<int32_t>(address))); return; @@ -287,7 +287,7 @@ void MacroAssembler::InNewSpace(Register object, ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask()))); intptr_t new_space_start = reinterpret_cast<intptr_t>(HEAP->NewSpaceStart()); - movq(kScratchRegister, -new_space_start, RelocInfo::NONE); + movq(kScratchRegister, -new_space_start, RelocInfo::NONE64); if (scratch.is(object)) { addq(scratch, kScratchRegister); } else { @@ -342,8 +342,8 @@ void MacroAssembler::RecordWriteField( // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); + movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); } } @@ -376,8 +376,8 @@ void MacroAssembler::RecordWriteArray(Register object, // Clobber clobbered input registers when running with the debug-code flag // turned on to provoke errors. if (emit_debug_code()) { - movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); + movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); } } @@ -442,8 +442,8 @@ void MacroAssembler::RecordWrite(Register object, // Clobber clobbered registers when running with the debug-code flag // turned on to provoke errors. 
if (emit_debug_code()) { - movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE); - movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); + movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); + movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE64); } } @@ -524,11 +524,11 @@ void MacroAssembler::Abort(const char* msg) { } #endif push(rax); - movq(kScratchRegister, p0, RelocInfo::NONE); + movq(kScratchRegister, p0, RelocInfo::NONE64); push(kScratchRegister); movq(kScratchRegister, reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))), - RelocInfo::NONE); + RelocInfo::NONE64); push(kScratchRegister); if (!has_frame_) { @@ -546,13 +546,14 @@ void MacroAssembler::Abort(const char* msg) { void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs - Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); + Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); } void MacroAssembler::TailCallStub(CodeStub* stub) { - ASSERT(allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe()); - Jump(stub->GetCode(), RelocInfo::CODE_TARGET); + ASSERT(allow_stub_calls_ || + stub->CompilingCallsToThisStubIsGCSafe(isolate())); + Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); } @@ -564,7 +565,7 @@ void MacroAssembler::StubReturn(int argc) { bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { if (!has_frame_ && stub->SometimesSetsUpAFrame()) return false; - return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(); + return allow_stub_calls_ || stub->CompilingCallsToThisStubIsGCSafe(isolate()); } @@ -701,13 +702,13 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, Factory* factory = isolate()->factory(); ExternalReference next_address = - ExternalReference::handle_scope_next_address(); + ExternalReference::handle_scope_next_address(isolate()); const int kNextOffset = 0; const int kLimitOffset = Offset( - ExternalReference::handle_scope_limit_address(), + ExternalReference::handle_scope_limit_address(isolate()), next_address); const int kLevelOffset = Offset( - ExternalReference::handle_scope_level_address(), + ExternalReference::handle_scope_level_address(isolate()), next_address); ExternalReference scheduled_exception_address = ExternalReference::scheduled_exception_address(isolate()); @@ -720,11 +721,28 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address, movq(prev_next_address_reg, Operand(base_reg, kNextOffset)); movq(prev_limit_reg, Operand(base_reg, kLimitOffset)); addl(Operand(base_reg, kLevelOffset), Immediate(1)); + + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0); + CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0); + PopSafepointRegisters(); + } + // Call the api function! movq(rax, reinterpret_cast<int64_t>(function_address), - RelocInfo::RUNTIME_ENTRY); + RelocInfo::EXTERNAL_REFERENCE); call(rax); + if (FLAG_log_timer_events) { + FrameScope frame(this, StackFrame::MANUAL); + PushSafepointRegisters(); + PrepareCallCFunction(0); + CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0); + PopSafepointRegisters(); + } + #if defined(_WIN64) && !defined(__MINGW64__) // rax keeps a pointer to v8::Handle, unpack it. 
movq(rax, Operand(rax, 0)); @@ -817,7 +835,7 @@ void MacroAssembler::JumpToExternalReference(const ExternalReference& ext, // Set the entry point and jump to the C entry runtime stub. LoadAddress(rbx, ext); CEntryStub ces(result_size); - jmp(ces.GetCode(), RelocInfo::CODE_TARGET); + jmp(ces.GetCode(isolate()), RelocInfo::CODE_TARGET); } @@ -881,9 +899,9 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, } // R12 to r15 are callee save on all platforms. if (fp_mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); - subq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + CpuFeatureScope scope(this, SSE2); + subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); + for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movsd(Operand(rsp, i * kDoubleSize), reg); } @@ -896,12 +914,12 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion2, Register exclusion3) { if (fp_mode == kSaveFPRegs) { - CpuFeatures::Scope scope(SSE2); - for (int i = 0; i < XMMRegister::kNumRegisters; i++) { + CpuFeatureScope scope(this, SSE2); + for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) { XMMRegister reg = XMMRegister::from_code(i); movsd(reg, Operand(rsp, i * kDoubleSize)); } - addq(rsp, Immediate(kDoubleSize * XMMRegister::kNumRegisters)); + addq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters)); } for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) { Register reg = saved_regs[i]; @@ -920,7 +938,7 @@ void MacroAssembler::Set(Register dst, int64_t x) { } else if (is_int32(x)) { movq(dst, Immediate(static_cast<int32_t>(x))); } else { - movq(dst, x, RelocInfo::NONE); + movq(dst, x, RelocInfo::NONE64); } } @@ -985,7 +1003,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { if (emit_debug_code()) { movq(dst, reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), - RelocInfo::NONE); + RelocInfo::NONE64); cmpq(dst, kSmiConstantRegister); if (allow_stub_calls()) { Assert(equal, "Uninitialized kSmiConstantRegister"); @@ -1032,7 +1050,7 @@ void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) { UNREACHABLE(); return; default: - movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE); + movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64); return; } if (negative) { @@ -2228,7 +2246,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; andl(scratch, Immediate(kFlatAsciiStringMask)); - cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag)); + cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag)); j(not_equal, failure, near_jump); } @@ -2769,7 +2787,8 @@ void MacroAssembler::StoreNumberToDoubleElements( Register elements, Register index, XMMRegister xmm_scratch, - Label* fail) { + Label* fail, + int elements_offset) { Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done; JumpIfSmi(maybe_number, &smi_value, Label::kNear); @@ -2788,7 +2807,8 @@ void MacroAssembler::StoreNumberToDoubleElements( bind(¬_nan); movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset)); bind(&have_double_value); - movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize), + movsd(FieldOperand(elements, index, times_8, + FixedDoubleArray::kHeaderSize - elements_offset), xmm_scratch); jmp(&done); @@ -2811,7 +2831,8 @@ void 
MacroAssembler::StoreNumberToDoubleElements( // Preserve original value. SmiToInteger32(kScratchRegister, maybe_number); cvtlsi2sd(xmm_scratch, kScratchRegister); - movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize), + movsd(FieldOperand(elements, index, times_8, + FixedDoubleArray::kHeaderSize - elements_offset), xmm_scratch); bind(&done); } @@ -2891,23 +2912,14 @@ void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg, } -static double kUint32Bias = - static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1; - - void MacroAssembler::LoadUint32(XMMRegister dst, Register src, XMMRegister scratch) { - Label done; - cmpl(src, Immediate(0)); - movq(kScratchRegister, - reinterpret_cast<int64_t>(&kUint32Bias), - RelocInfo::NONE); - movsd(scratch, Operand(kScratchRegister, 0)); - cvtlsi2sd(dst, src); - j(not_sign, &done, Label::kNear); - addsd(dst, scratch); - bind(&done); + if (FLAG_debug_code) { + cmpq(src, Immediate(0xffffffff)); + Assert(below_equal, "input GPR is expected to have upper32 cleared"); + } + cvtqsi2sd(dst, src); } @@ -2932,6 +2944,7 @@ void MacroAssembler::EnumLength(Register dst, Register map) { void MacroAssembler::DispatchMap(Register obj, + Register unused, Handle<Map> map, Handle<Code> success, SmiCheckType smi_check_type) { @@ -2986,7 +2999,7 @@ void MacroAssembler::AssertSmi(const Operand& object) { void MacroAssembler::AssertZeroExtended(Register int32_register) { if (emit_debug_code()) { ASSERT(!int32_register.is(kScratchRegister)); - movq(kScratchRegister, 0x100000000l, RelocInfo::NONE); + movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64); cmpq(kScratchRegister, int32_register); Check(above_equal, "32 bit value in register is not zero-extended"); } @@ -3006,6 +3019,19 @@ void MacroAssembler::AssertString(Register object) { } +void MacroAssembler::AssertName(Register object) { + if (emit_debug_code()) { + testb(object, Immediate(kSmiTagMask)); + Check(not_equal, "Operand is a smi and not a name"); + push(object); + movq(object, FieldOperand(object, HeapObject::kMapOffset)); + CmpInstanceType(object, LAST_NAME_TYPE); + pop(object); + Check(below_equal, "Operand is not a name"); + } +} + + void MacroAssembler::AssertRootValue(Register src, Heap::RootListIndex root_value_index, const char* message) { @@ -3030,6 +3056,16 @@ Condition MacroAssembler::IsObjectStringType(Register heap_object, } +Condition MacroAssembler::IsObjectNameType(Register heap_object, + Register map, + Register instance_type) { + movq(map, FieldOperand(heap_object, HeapObject::kMapOffset)); + movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); + cmpb(instance_type, Immediate(static_cast<uint8_t>(LAST_NAME_TYPE))); + return below_equal; +} + + void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, Label* miss, @@ -3128,7 +3164,7 @@ void MacroAssembler::DebugBreak() { LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate())); CEntryStub ces(1); ASSERT(AllowThisStubCall(&ces)); - Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); + Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); } #endif // ENABLE_DEBUGGER_SUPPORT @@ -3403,11 +3439,11 @@ void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space, #endif // Optionally save all XMM registers. 
if (save_doubles) { - int space = XMMRegister::kNumRegisters * kDoubleSize + + int space = XMMRegister::kMaxNumRegisters * kDoubleSize + arg_stack_space * kPointerSize; subq(rsp, Immediate(space)); int offset = -2 * kPointerSize; - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { XMMRegister reg = XMMRegister::FromAllocationIndex(i); movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); } @@ -3451,7 +3487,7 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) { // r15 : argv if (save_doubles) { int offset = -2 * kPointerSize; - for (int i = 0; i < XMMRegister::kNumAllocatableRegisters; i++) { + for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) { XMMRegister reg = XMMRegister::FromAllocationIndex(i); movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize))); } @@ -3680,8 +3716,8 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss, void MacroAssembler::LoadAllocationTopHelper(Register result, Register scratch, AllocationFlags flags) { - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); // Just return if allocation top is already known. if ((flags & RESULT_CONTAINS_TOP) != 0) { @@ -3689,7 +3725,7 @@ void MacroAssembler::LoadAllocationTopHelper(Register result, ASSERT(!scratch.is_valid()); #ifdef DEBUG // Assert that result actually contains top on entry. - Operand top_operand = ExternalOperand(new_space_allocation_top); + Operand top_operand = ExternalOperand(allocation_top); cmpq(result, top_operand); Check(equal, "Unexpected allocation top"); #endif @@ -3699,40 +3735,42 @@ void MacroAssembler::LoadAllocationTopHelper(Register result, // Move address of new object to result. Use scratch register if available, // and keep address in scratch until call to UpdateAllocationTopHelper. if (scratch.is_valid()) { - LoadAddress(scratch, new_space_allocation_top); + LoadAddress(scratch, allocation_top); movq(result, Operand(scratch, 0)); } else { - Load(result, new_space_allocation_top); + Load(result, allocation_top); } } void MacroAssembler::UpdateAllocationTopHelper(Register result_end, - Register scratch) { + Register scratch, + AllocationFlags flags) { if (emit_debug_code()) { testq(result_end, Immediate(kObjectAlignmentMask)); Check(zero, "Unaligned allocation in new space"); } - ExternalReference new_space_allocation_top = - ExternalReference::new_space_allocation_top_address(isolate()); + ExternalReference allocation_top = + AllocationUtils::GetAllocationTopReference(isolate(), flags); // Update new top. if (scratch.is_valid()) { // Scratch already contains address of allocation top. movq(Operand(scratch, 0), result_end); } else { - Store(new_space_allocation_top, result_end); + Store(allocation_top, result_end); } } -void MacroAssembler::AllocateInNewSpace(int object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - AllocationFlags flags) { +void MacroAssembler::Allocate(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags) { + ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. 
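
For context on the hunks that follow: LoadAllocationTopHelper and UpdateAllocationTopHelper are now keyed on AllocationFlags, so the same emitted sequence can bump either the new-space or the old-pointer-space top pointer via AllocationUtils::GetAllocationTopReference / GetAllocationLimitReference. A minimal C++ sketch of that selection, with hypothetical SpaceAddresses / GetAllocationTop names; only the TAG_OBJECT and RESULT_CONTAINS_TOP bit values appear in this change (in the enum removed from macro-assembler-x64.h below), the remaining bit positions are placeholders:

#include <cstdint>

enum AllocationFlags {
  NO_ALLOCATION_FLAGS         = 0,
  TAG_OBJECT                  = 1 << 0,  // value taken from the removed enum
  RESULT_CONTAINS_TOP         = 1 << 1,  // value taken from the removed enum
  SIZE_IN_WORDS               = 1 << 2,  // placeholder bit position
  DOUBLE_ALIGNMENT            = 1 << 3,  // placeholder bit position
  PRETENURE_OLD_POINTER_SPACE = 1 << 4   // placeholder bit position
};

// Hypothetical stand-in for the top/limit ExternalReference pair of a space.
struct SpaceAddresses {
  uintptr_t* top;
  uintptr_t* limit;
};

// Same idea as AllocationUtils::GetAllocationTopReference: the flags, not the
// call site, decide which space's top pointer the generated code will bump.
inline uintptr_t* GetAllocationTop(const SpaceAddresses& new_space,
                                   const SpaceAddresses& old_pointer_space,
                                   int flags) {
  return (flags & PRETENURE_OLD_POINTER_SPACE) ? old_pointer_space.top
                                               : new_space.top;
}
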
@@ -3752,9 +3790,16 @@ void MacroAssembler::AllocateInNewSpace(int object_size, // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); + // Align the next allocation. Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { + testq(result, Immediate(kDoubleAlignmentMask)); + Check(zero, "Allocation is not double aligned"); + } + // Calculate new top and bail out if new space is exhausted. - ExternalReference new_space_allocation_limit = - ExternalReference::new_space_allocation_limit_address(isolate()); + ExternalReference allocation_limit = + AllocationUtils::GetAllocationLimitReference(isolate(), flags); Register top_reg = result_end.is_valid() ? result_end : result; @@ -3763,22 +3808,24 @@ void MacroAssembler::AllocateInNewSpace(int object_size, } addq(top_reg, Immediate(object_size)); j(carry, gc_required); - Operand limit_operand = ExternalOperand(new_space_allocation_limit); + Operand limit_operand = ExternalOperand(allocation_limit); cmpq(top_reg, limit_operand); j(above, gc_required); // Update allocation top. - UpdateAllocationTopHelper(top_reg, scratch); + UpdateAllocationTopHelper(top_reg, scratch, flags); + bool tag_result = (flags & TAG_OBJECT) != 0; if (top_reg.is(result)) { - if ((flags & TAG_OBJECT) != 0) { + if (tag_result) { subq(result, Immediate(object_size - kHeapObjectTag)); } else { subq(result, Immediate(object_size)); } - } else if ((flags & TAG_OBJECT) != 0) { + } else if (tag_result) { // Tag the result if requested. - addq(result, Immediate(kHeapObjectTag)); + ASSERT(kHeapObjectTag == 1); + incq(result); } } @@ -3791,6 +3838,8 @@ void MacroAssembler::AllocateInNewSpace(int header_size, Register scratch, Label* gc_required, AllocationFlags flags) { + ASSERT((flags & SIZE_IN_WORDS) == 0); + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. @@ -3809,6 +3858,13 @@ void MacroAssembler::AllocateInNewSpace(int header_size, // Load address of new object into result. LoadAllocationTopHelper(result, scratch, flags); + // Align the next allocation. Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { + testq(result, Immediate(kDoubleAlignmentMask)); + Check(zero, "Allocation is not double aligned"); + } + // Calculate new top and bail out if new space is exhausted. ExternalReference new_space_allocation_limit = ExternalReference::new_space_allocation_limit_address(isolate()); @@ -3823,11 +3879,12 @@ void MacroAssembler::AllocateInNewSpace(int header_size, j(above, gc_required); // Update allocation top. - UpdateAllocationTopHelper(result_end, scratch); + UpdateAllocationTopHelper(result_end, scratch, flags); // Tag the result if requested. if ((flags & TAG_OBJECT) != 0) { - addq(result, Immediate(kHeapObjectTag)); + ASSERT(kHeapObjectTag == 1); + incq(result); } } @@ -3838,6 +3895,8 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, Register scratch, Label* gc_required, AllocationFlags flags) { + ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); + ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); if (!FLAG_inline_new) { if (emit_debug_code()) { // Trash the registers to simulate an allocation failure. 
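
The fast path emitted by the new Allocate() above is a plain bump-pointer allocation: read the current top, add the object size, branch to gc_required if the limit would be exceeded, store the new top, and tag the result. Because kHeapObjectTag == 1 (asserted in the hunk), tagging reduces to the single incq(result). A minimal sketch in ordinary C++, with TryAllocate and Space as illustrative names rather than V8 API:

#include <cstddef>
#include <cstdint>

struct Space { uintptr_t top, limit; };   // allocation top/limit of one space

constexpr uintptr_t kHeapObjectTag = 1;   // as asserted by Allocate() above

// Returns a (possibly tagged) address on success, or 0 when the caller must
// take the gc_required path and allocate through the runtime instead.
inline uintptr_t TryAllocate(Space* space, size_t object_size, bool tag_object) {
  uintptr_t result = space->top;
  // DOUBLE_ALIGNMENT is only Check()ed under --debug-code: the limit is always
  // aligned, so an aligned top implies the allocation itself stays aligned.
  uintptr_t new_top = result + object_size;
  if (new_top < result || new_top > space->limit) return 0;  // gc_required
  space->top = new_top;                  // what UpdateAllocationTopHelper does
  return tag_object ? result + kHeapObjectTag : result;      // incq(result)
}

The flag-driven helper replaces the dedicated AllocateInNewSpace(int, ...) entry point, which is why callers such as AllocateHeapNumber further down switch to Allocate(...) with TAG_OBJECT.
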
@@ -3869,7 +3928,14 @@ void MacroAssembler::AllocateInNewSpace(Register object_size, j(above, gc_required); // Update allocation top. - UpdateAllocationTopHelper(result_end, scratch); + UpdateAllocationTopHelper(result_end, scratch, flags); + + // Align the next allocation. Storing the filler map without checking top is + // always safe because the limit of the heap is always aligned. + if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { + testq(result, Immediate(kDoubleAlignmentMask)); + Check(zero, "Allocation is not double aligned"); + } // Tag the result if requested. if ((flags & TAG_OBJECT) != 0) { @@ -3897,12 +3963,7 @@ void MacroAssembler::AllocateHeapNumber(Register result, Register scratch, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(HeapNumber::kSize, - result, - scratch, - no_reg, - gc_required, - TAG_OBJECT); + Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT); // Set the map. LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex); @@ -3957,7 +4018,7 @@ void MacroAssembler::AllocateAsciiString(Register result, Label* gc_required) { // Calculate the number of bytes needed for the characters in the string while // observing object alignment. - const int kHeaderAlignment = SeqAsciiString::kHeaderSize & + const int kHeaderAlignment = SeqOneByteString::kHeaderSize & kObjectAlignmentMask; movl(scratch1, length); ASSERT(kCharSize == 1); @@ -3968,7 +4029,7 @@ void MacroAssembler::AllocateAsciiString(Register result, } // Allocate ASCII string in new space. - AllocateInNewSpace(SeqAsciiString::kHeaderSize, + AllocateInNewSpace(SeqOneByteString::kHeaderSize, times_1, scratch1, result, @@ -3992,12 +4053,8 @@ void MacroAssembler::AllocateTwoByteConsString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. LoadRoot(kScratchRegister, Heap::kConsStringMapRootIndex); @@ -4010,12 +4067,8 @@ void MacroAssembler::AllocateAsciiConsString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(ConsString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex); @@ -4028,12 +4081,8 @@ void MacroAssembler::AllocateTwoByteSlicedString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. LoadRoot(kScratchRegister, Heap::kSlicedStringMapRootIndex); @@ -4046,12 +4095,8 @@ void MacroAssembler::AllocateAsciiSlicedString(Register result, Register scratch2, Label* gc_required) { // Allocate heap number in new space. - AllocateInNewSpace(SlicedString::kSize, - result, - scratch1, - scratch2, - gc_required, - TAG_OBJECT); + Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, + TAG_OBJECT); // Set the map. The other fields are left uninitialized. 
LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex); @@ -4231,6 +4276,15 @@ void MacroAssembler::LoadGlobalFunction(int index, Register function) { } +void MacroAssembler::LoadArrayFunction(Register function) { + movq(function, + Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); + movq(function, FieldOperand(function, GlobalObject::kGlobalContextOffset)); + movq(function, + Operand(function, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX))); +} + + void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, Register map) { // Load the initial map. The global functions all have initial maps. @@ -4503,7 +4557,7 @@ void MacroAssembler::EnsureNotWhite( bind(&not_external); // Sequential string, either ASCII or UC16. - ASSERT(kAsciiStringTag == 0x04); + ASSERT(kOneByteStringTag == 0x04); and_(length, Immediate(kStringEncodingMask)); xor_(length, Immediate(kStringEncodingMask)); addq(length, Immediate(0x04)); @@ -4563,6 +4617,27 @@ void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { j(not_equal, &next); } +void MacroAssembler::TestJSArrayForAllocationSiteInfo( + Register receiver_reg, + Register scratch_reg) { + Label no_info_available; + ExternalReference new_space_start = + ExternalReference::new_space_start(isolate()); + ExternalReference new_space_allocation_top = + ExternalReference::new_space_allocation_top_address(isolate()); + + lea(scratch_reg, Operand(receiver_reg, + JSArray::kSize + AllocationSiteInfo::kSize - kHeapObjectTag)); + movq(kScratchRegister, new_space_start); + cmpq(scratch_reg, kScratchRegister); + j(less, &no_info_available); + cmpq(scratch_reg, ExternalOperand(new_space_allocation_top)); + j(greater, &no_info_available); + CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize), + Heap::kAllocationSiteInfoMapRootIndex); + bind(&no_info_available); +} + } } // namespace v8::internal diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h index cc057ac54c..df5215991a 100644 --- a/deps/v8/src/x64/macro-assembler-x64.h +++ b/deps/v8/src/x64/macro-assembler-x64.h @@ -35,18 +35,6 @@ namespace v8 { namespace internal { -// Flags used for the AllocateInNewSpace functions. -enum AllocationFlags { - // No special flags. - NO_ALLOCATION_FLAGS = 0, - // Return the pointer to the allocated already tagged as a heap object. - TAG_OBJECT = 1 << 0, - // The content of the result register already contains the allocation top in - // new space. - RESULT_CONTAINS_TOP = 1 << 1 -}; - - // Default scratch register used by MacroAssembler (and other code that needs // a spare register). The register isn't callee save, and not used by the // function calling convention. @@ -385,7 +373,7 @@ class MacroAssembler: public Assembler { void InitializeSmiConstantRegister() { movq(kSmiConstantRegister, reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)), - RelocInfo::NONE); + RelocInfo::NONE64); } // Conversions between tagged smi values and non-tagged integer values. @@ -895,7 +883,8 @@ class MacroAssembler: public Assembler { Register elements, Register index, XMMRegister xmm_scratch, - Label* fail); + Label* fail, + int elements_offset = 0); // Compare an object's map with the specified map and its transitioned // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with @@ -920,6 +909,7 @@ class MacroAssembler: public Assembler { // specified target if equal. 
Skip the smi check if not required (object is // known to be a heap object) void DispatchMap(Register obj, + Register unused, Handle<Map> map, Handle<Code> success, SmiCheckType smi_check_type); @@ -933,6 +923,15 @@ class MacroAssembler: public Assembler { Register map, Register instance_type); + // Check if the object in register heap_object is a name. Afterwards the + // register map contains the object map and the register instance_type + // contains the instance_type. The registers map and instance_type can be the + // same in which case it contains the instance type afterwards. Either of the + // registers map and instance_type can be the same as heap_object. + Condition IsObjectNameType(Register heap_object, + Register map, + Register instance_type); + // FCmp compares and pops the two values on top of the FPU stack. // The flag results are similar to integer cmp, but requires unsigned // jcc instructions (je, ja, jae, jb, jbe, je, and jz). @@ -976,6 +975,9 @@ class MacroAssembler: public Assembler { // Abort execution if argument is not a string, enabled via --debug-code. void AssertString(Register object); + // Abort execution if argument is not a name, enabled via --debug-code. + void AssertName(Register object); + // Abort execution if argument is not the root value with the given index, // enabled via --debug-code. void AssertRootValue(Register src, @@ -1023,22 +1025,22 @@ class MacroAssembler: public Assembler { // --------------------------------------------------------------------------- // Allocation support - // Allocate an object in new space. If the new space is exhausted control - // continues at the gc_required label. The allocated object is returned in - // result and end of the new object is returned in result_end. The register - // scratch can be passed as no_reg in which case an additional object - // reference will be added to the reloc info. The returned pointers in result - // and result_end have not yet been tagged as heap objects. If - // result_contains_top_on_entry is true the content of result is known to be - // the allocation top on entry (could be result_end from a previous call to - // AllocateInNewSpace). If result_contains_top_on_entry is true scratch + // Allocate an object in new space or old pointer space. If the given space + // is exhausted control continues at the gc_required label. The allocated + // object is returned in result and end of the new object is returned in + // result_end. The register scratch can be passed as no_reg in which case + // an additional object reference will be added to the reloc info. The + // returned pointers in result and result_end have not yet been tagged as + // heap objects. If result_contains_top_on_entry is true the content of + // result is known to be the allocation top on entry (could be result_end + // from a previous call). If result_contains_top_on_entry is true scratch // should be no_reg as it is never used. - void AllocateInNewSpace(int object_size, - Register result, - Register result_end, - Register scratch, - Label* gc_required, - AllocationFlags flags); + void Allocate(int object_size, + Register result, + Register result_end, + Register scratch, + Label* gc_required, + AllocationFlags flags); void AllocateInNewSpace(int header_size, ScaleFactor element_size, @@ -1165,6 +1167,7 @@ class MacroAssembler: public Assembler { // Load the global function with the given index. 
void LoadGlobalFunction(int index, Register function); + void LoadArrayFunction(Register function); // Load the initial map from the global function. The registers // function and map can be the same. @@ -1326,6 +1329,15 @@ class MacroAssembler: public Assembler { void CheckEnumCache(Register null_value, Label* call_runtime); + // AllocationSiteInfo support. Arrays may have an associated + // AllocationSiteInfo object that can be checked for in order to pretransition + // to another type. + // On entry, receiver_reg should point to the array object. + // scratch_reg gets clobbered. + // If allocation info is present, condition flags are set to equal + void TestJSArrayForAllocationSiteInfo(Register receiver_reg, + Register scratch_reg); + private: // Order general registers are pushed by Pushad. // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15. @@ -1377,9 +1389,12 @@ class MacroAssembler: public Assembler { void LoadAllocationTopHelper(Register result, Register scratch, AllocationFlags flags); + // Update allocation top with value in result_end register. // If scratch is valid, it contains the address of the allocation top. - void UpdateAllocationTopHelper(Register result_end, Register scratch); + void UpdateAllocationTopHelper(Register result_end, + Register scratch, + AllocationFlags flags); // Helper for PopHandleScope. Allowed to perform a GC and returns // NULL if gc_allowed. Does not perform a GC if !gc_allowed, and @@ -1413,9 +1428,9 @@ class MacroAssembler: public Assembler { return kNumSafepointRegisters - kSafepointPushRegisterIndices[reg_code] - 1; } - // Needs access to SafepointRegisterStackIndex for optimized frame + // Needs access to SafepointRegisterStackIndex for compiled frame // traversal. - friend class OptimizedFrame; + friend class StandardFrame; }; @@ -1484,17 +1499,16 @@ extern void LogGeneratedCodeCoverage(const char* file_line); #define CODE_COVERAGE_STRINGIFY(x) #x #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) -#define ACCESS_MASM(masm) { \ - byte* x64_coverage_function = \ - reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \ - masm->pushfd(); \ - masm->pushad(); \ - masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \ - masm->call(x64_coverage_function, RelocInfo::RUNTIME_ENTRY); \ - masm->pop(rax); \ - masm->popad(); \ - masm->popfd(); \ - } \ +#define ACCESS_MASM(masm) { \ + Address x64_coverage_function = FUNCTION_ADDR(LogGeneratedCodeCoverage); \ + masm->pushfq(); \ + masm->Pushad(); \ + masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \ + masm->Call(x64_coverage_function, RelocInfo::EXTERNAL_REFERENCE); \ + masm->pop(rax); \ + masm->Popad(); \ + masm->popfq(); \ + } \ masm-> #else #define ACCESS_MASM(masm) masm-> diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc index 86f7bfe6ca..914241ecdc 100644 --- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc +++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc @@ -234,7 +234,7 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str, // If input is ASCII, don't even bother calling here if the string to // match contains a non-ASCII character. 
if (mode_ == ASCII) { - ASSERT(String::IsAscii(str.start(), str.length())); + ASSERT(String::IsOneByte(str.start(), str.length())); } #endif int byte_length = str.length() * char_size(); @@ -280,7 +280,7 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str, (static_cast<uint64_t>(str[i + 5]) << 40) || (static_cast<uint64_t>(str[i + 6]) << 48) || (static_cast<uint64_t>(str[i + 7]) << 56); - __ movq(rax, combined_chars, RelocInfo::NONE); + __ movq(rax, combined_chars, RelocInfo::NONE64); __ cmpq(rax, Operand(rbx, byte_offset + i)); i += 8; } else if (i + 4 <= n) { @@ -300,7 +300,7 @@ void RegExpMacroAssemblerX64::CheckCharacters(Vector<const uc16> str, ASSERT(mode_ == UC16); if (i + 4 <= n) { uint64_t combined_chars = *reinterpret_cast<const uint64_t*>(&str[i]); - __ movq(rax, combined_chars, RelocInfo::NONE); + __ movq(rax, combined_chars, RelocInfo::NONE64); __ cmpq(rax, Operand(rsi, rdi, times_1, byte_offset + i * sizeof(uc16))); i += 4; @@ -393,8 +393,13 @@ void RegExpMacroAssemblerX64::CheckNotBackReferenceIgnoreCase( __ j(not_equal, on_no_match); // Definitely not equal. __ subb(rax, Immediate('a')); __ cmpb(rax, Immediate('z' - 'a')); - __ j(above, on_no_match); // Weren't letters anyway. - + __ j(below_equal, &loop_increment); // In range 'a'-'z'. + // Latin-1: Check for values in range [224,254] but not 247. + __ subb(rax, Immediate(224 - 'a')); + __ cmpb(rax, Immediate(254 - 224)); + __ j(above, on_no_match); // Weren't Latin-1 letters. + __ cmpb(rax, Immediate(247 - 224)); // Check for 247. + __ j(equal, on_no_match); __ bind(&loop_increment); // Increment pointers into match and capture strings. __ addq(r11, Immediate(1)); @@ -610,7 +615,7 @@ void RegExpMacroAssemblerX64::CheckBitInTable( Label* on_bit_set) { __ Move(rax, table); Register index = current_character(); - if (mode_ != ASCII || kTableMask != String::kMaxAsciiCharCode) { + if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) { __ movq(rbx, current_character()); __ and_(rbx, Immediate(kTableMask)); index = rbx; @@ -631,29 +636,23 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type, case 's': // Match space-characters if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. + // One byte space characters are '\t'..'\r', ' ' and \u00a0. Label success; __ cmpl(current_character(), Immediate(' ')); - __ j(equal, &success); + __ j(equal, &success, Label::kNear); // Check range 0x09..0x0d __ lea(rax, Operand(current_character(), -'\t')); __ cmpl(rax, Immediate('\r' - '\t')); - BranchOrBacktrack(above, on_no_match); + __ j(below_equal, &success, Label::kNear); + // \u00a0 (NBSP). + __ cmpl(rax, Immediate(0x00a0 - '\t')); + BranchOrBacktrack(not_equal, on_no_match); __ bind(&success); return true; } return false; case 'S': - // Match non-space characters. - if (mode_ == ASCII) { - // ASCII space characters are '\t'..'\r' and ' '. - __ cmpl(current_character(), Immediate(' ')); - BranchOrBacktrack(equal, on_no_match); - __ lea(rax, Operand(current_character(), -'\t')); - __ cmpl(rax, Immediate('\r' - '\t')); - BranchOrBacktrack(below_equal, on_no_match); - return true; - } + // The emitted code for generic character classes is good enough. return false; case 'd': // Match ASCII digits ('0'..'9') @@ -1305,7 +1304,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, Handle<String> subject(frame_entry<String*>(re_frame, kInputString)); // Current string. 
- bool is_ascii = subject->IsAsciiRepresentationUnderneath(); + bool is_ascii = subject->IsOneByteRepresentationUnderneath(); ASSERT(re_code->instruction_start() <= *return_address); ASSERT(*return_address <= @@ -1336,7 +1335,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address, } // String might have changed. - if (subject_tmp->IsAsciiRepresentation() != is_ascii) { + if (subject_tmp->IsOneByteRepresentation() != is_ascii) { // If we changed between an ASCII and an UC16 string, the specialized // code cannot be used, and we need to restart regexp matching from // scratch (including, potentially, compiling a new version of the code). diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc index b120efb376..69d7a91b2d 100644 --- a/deps/v8/src/x64/stub-cache-x64.cc +++ b/deps/v8/src/x64/stub-cache-x64.cc @@ -110,14 +110,14 @@ static void ProbeTable(Isolate* isolate, // the property. This function may return false negatives, so miss_label // must always call a backup property check that is complete. // This function is safe to call if the receiver has fast properties. -// Name must be a symbol and receiver must be a heap object. +// Name must be unique and receiver must be a heap object. static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, Label* miss_label, Register receiver, - Handle<String> name, + Handle<Name> name, Register r0, Register r1) { - ASSERT(name->IsSymbol()); + ASSERT(name->IsUniqueName()); Counters* counters = masm->isolate()->counters(); __ IncrementCounter(counters->negative_lookups(), 1); __ IncrementCounter(counters->negative_lookups_miss(), 1); @@ -146,12 +146,12 @@ static void GenerateDictionaryNegativeLookup(MacroAssembler* masm, __ j(not_equal, miss_label); Label done; - StringDictionaryLookupStub::GenerateNegativeLookup(masm, - miss_label, - &done, - properties, - name, - r1); + NameDictionaryLookupStub::GenerateNegativeLookup(masm, + miss_label, + &done, + properties, + name, + r1); __ bind(&done); __ DecrementCounter(counters->negative_lookups_miss(), 1); } @@ -193,7 +193,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, __ JumpIfSmi(receiver, &miss); // Get the map of the receiver and compute the hash. - __ movl(scratch, FieldOperand(name, String::kHashFieldOffset)); + __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); // Use only the low 32 bits of the map pointer. __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(scratch, Immediate(flags)); @@ -205,7 +205,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch); // Primary miss: Compute hash for secondary probe. - __ movl(scratch, FieldOperand(name, String::kHashFieldOffset)); + __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset)); __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset)); __ xor_(scratch, Immediate(flags)); __ and_(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize)); @@ -343,26 +343,19 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, } -// Load a fast property out of a holder object (src). In-object properties -// are loaded directly otherwise the property is loaded from the properties -// fixed array. -void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, - Register dst, - Register src, - Handle<JSObject> holder, - int index) { - // Adjust for the number of properties stored in the holder. 
- index -= holder->map()->inobject_properties(); - if (index < 0) { - // Get the property straight out of the holder. - int offset = holder->map()->instance_size() + (index * kPointerSize); - __ movq(dst, FieldOperand(src, offset)); - } else { +void StubCompiler::DoGenerateFastPropertyLoad(MacroAssembler* masm, + Register dst, + Register src, + bool inobject, + int index) { + int offset = index * kPointerSize; + if (!inobject) { // Calculate the offset into the properties array. - int offset = index * kPointerSize + FixedArray::kHeaderSize; + offset = offset + FixedArray::kHeaderSize; __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset)); - __ movq(dst, FieldOperand(dst, offset)); + src = dst; } + __ movq(dst, FieldOperand(src, offset)); } @@ -467,7 +460,7 @@ static void GenerateFastApiCall(MacroAssembler* masm, // Pass the additional arguments. __ movq(Operand(rsp, 2 * kPointerSize), rdi); Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); - Handle<Object> call_data(api_call_info->data()); + Handle<Object> call_data(api_call_info->data(), masm->isolate()); if (masm->isolate()->heap()->InNewSpace(*call_data)) { __ Move(rcx, api_call_info); __ movq(rbx, FieldOperand(rcx, CallHandlerInfo::kDataOffset)); @@ -527,7 +520,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { void Compile(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, LookupResult* lookup, Register receiver, Register scratch1, @@ -559,7 +552,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch3, Handle<JSObject> interceptor_holder, LookupResult* lookup, - Handle<String> name, + Handle<Name> name, const CallOptimization& optimization, Label* miss_label) { ASSERT(optimization.is_constant_call()); @@ -652,7 +645,7 @@ class CallInterceptorCompiler BASE_EMBEDDED { Register scratch1, Register scratch2, Register scratch3, - Handle<String> name, + Handle<Name> name, Handle<JSObject> interceptor_holder, Label* miss_label) { Register holder = @@ -709,19 +702,13 @@ class CallInterceptorCompiler BASE_EMBEDDED { }; -void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) { - ASSERT(kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC); - Handle<Code> code = (kind == Code::LOAD_IC) - ? 
masm->isolate()->builtins()->LoadIC_Miss() - : masm->isolate()->builtins()->KeyedLoadIC_Miss(); - __ Jump(code, RelocInfo::CODE_TARGET); -} - - -void StubCompiler::GenerateKeyedLoadMissForceGeneric(MacroAssembler* masm) { - Handle<Code> code = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ Jump(code, RelocInfo::CODE_TARGET); +void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, + Label* label, + Handle<Name> name) { + if (!label->is_unused()) { + __ bind(label); + __ Move(this->name(), name); + } } @@ -731,12 +718,14 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, Handle<JSObject> object, int index, Handle<Map> transition, - Handle<String> name, + Handle<Name> name, Register receiver_reg, Register name_reg, + Register value_reg, Register scratch1, Register scratch2, - Label* miss_label) { + Label* miss_label, + Label* miss_restore_name) { LookupResult lookup(masm->isolate()); object->Lookup(*name, &lookup); if (lookup.IsFound() && (lookup.IsReadOnly() || !lookup.IsCacheable())) { @@ -770,17 +759,8 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, holder = JSObject::cast(holder->GetPrototype()); } while (holder->GetPrototype()->IsJSObject()); } - // We need an extra register, push - __ push(name_reg); - Label miss_pop, done_check; CheckPrototypes(object, receiver_reg, Handle<JSObject>(holder), name_reg, - scratch1, scratch2, name, &miss_pop); - __ jmp(&done_check); - __ bind(&miss_pop); - __ pop(name_reg); - __ jmp(miss_label); - __ bind(&done_check); - __ pop(name_reg); + scratch1, scratch2, name, miss_restore_name); } // Stub never generated for non-global objects that require access @@ -794,7 +774,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, __ pop(scratch1); // Return address. __ push(receiver_reg); __ Push(transition); - __ push(rax); + __ push(value_reg); __ push(scratch1); __ TailCallExternalReference( ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), @@ -828,11 +808,11 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, if (index < 0) { // Set the property straight into the object. int offset = object->map()->instance_size() + (index * kPointerSize); - __ movq(FieldOperand(receiver_reg, offset), rax); + __ movq(FieldOperand(receiver_reg, offset), value_reg); // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ movq(name_reg, rax); + __ movq(name_reg, value_reg); __ RecordWriteField( receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs); } else { @@ -840,16 +820,17 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, int offset = index * kPointerSize + FixedArray::kHeaderSize; // Get the properties array (optimistically). __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset)); - __ movq(FieldOperand(scratch1, offset), rax); + __ movq(FieldOperand(scratch1, offset), value_reg); // Update the write barrier for the array address. // Pass the value being stored in the now unused name_reg. - __ movq(name_reg, rax); + __ movq(name_reg, value_reg); __ RecordWriteField( scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs); } // Return the value (register rax). + ASSERT(value_reg.is(rax)); __ ret(0); } @@ -859,7 +840,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm, // property. 
static void GenerateCheckPropertyCell(MacroAssembler* masm, Handle<GlobalObject> global, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSGlobalPropertyCell> cell = @@ -877,7 +858,7 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm, static void GenerateCheckPropertyCells(MacroAssembler* masm, Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Register scratch, Label* miss) { Handle<JSObject> current = object; @@ -893,6 +874,12 @@ static void GenerateCheckPropertyCells(MacroAssembler* masm, } } + +void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) { + __ jmp(code, RelocInfo::CODE_TARGET); +} + + #undef __ #define __ ACCESS_MASM((masm())) @@ -903,9 +890,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, Register holder_reg, Register scratch1, Register scratch2, - Handle<String> name, + Handle<Name> name, int save_at_depth, - Label* miss) { + Label* miss, + PrototypeCheckType check) { + Handle<JSObject> first = object; // Make sure there's no overlap between holder and object registers. ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) @@ -935,11 +924,12 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, if (!current->HasFastProperties() && !current->IsJSGlobalObject() && !current->IsJSGlobalProxy()) { - if (!name->IsSymbol()) { - name = factory()->LookupSymbol(name); + if (!name->IsUniqueName()) { + ASSERT(name->IsString()); + name = factory()->InternalizeString(Handle<String>::cast(name)); } ASSERT(current->property_dictionary()->FindEntry(*name) == - StringDictionary::kNotFound); + NameDictionary::kNotFound); GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, scratch2); @@ -954,8 +944,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, // Save the map in scratch1 for later. __ movq(scratch1, FieldOperand(reg, HeapObject::kMapOffset)); } - __ CheckMap(reg, Handle<Map>(current_map), - miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) { + __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK, + ALLOW_ELEMENT_TRANSITION_MAPS); + } // Check access rights to the global object. This has to happen after // the map check so that we know that the object is actually a global @@ -987,9 +979,11 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, // Log the check depth. LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); - // Check the holder map. - __ CheckMap(reg, Handle<Map>(holder->map()), - miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) { + // Check the holder map. + __ CheckMap(reg, Handle<Map>(holder->map()), + miss, DONT_DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); + } // Perform security check for access to the global object. ASSERT(current->IsJSGlobalProxy() || !current->IsAccessCheckNeeded()); @@ -1007,110 +1001,123 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object, } -void StubCompiler::GenerateLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - int index, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. 
- __ JumpIfSmi(receiver, miss); +void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success, + Label* miss) { + if (!miss->is_unused()) { + __ jmp(success); + __ bind(miss); + TailCallBuiltin(masm(), MissBuiltin(kind())); + } +} - // Check the prototype chain. - Register reg = CheckPrototypes( - object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - // Get the value from the properties. - GenerateFastPropertyLoad(masm(), rax, reg, holder, index); - __ ret(0); -} +Register BaseLoadStubCompiler::CallbackHandlerFrontend( + Handle<JSObject> object, + Register object_reg, + Handle<JSObject> holder, + Handle<Name> name, + Label* success, + Handle<ExecutableAccessorInfo> callback) { + Label miss; + Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss); -void StubCompiler::GenerateDictionaryLoadCallback(Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - ASSERT(!receiver.is(scratch1)); - ASSERT(!receiver.is(scratch2)); - ASSERT(!receiver.is(scratch3)); - - // Load the properties dictionary. - Register dictionary = scratch1; - __ movq(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset)); - - // Probe the dictionary. - Label probe_done; - StringDictionaryLookupStub::GeneratePositiveLookup(masm(), - miss, + if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { + ASSERT(!reg.is(scratch2())); + ASSERT(!reg.is(scratch3())); + ASSERT(!reg.is(scratch4())); + + // Load the properties dictionary. + Register dictionary = scratch4(); + __ movq(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset)); + + // Probe the dictionary. + Label probe_done; + NameDictionaryLookupStub::GeneratePositiveLookup(masm(), + &miss, &probe_done, dictionary, - name_reg, - scratch2, - scratch3); - __ bind(&probe_done); - - // If probing finds an entry in the dictionary, scratch3 contains the - // index into the dictionary. Check that the value is the callback. - Register index = scratch3; - const int kElementsStartOffset = - StringDictionary::kHeaderSize + - StringDictionary::kElementsStartIndex * kPointerSize; - const int kValueOffset = kElementsStartOffset + kPointerSize; - __ movq(scratch2, - Operand(dictionary, index, times_pointer_size, - kValueOffset - kHeapObjectTag)); - __ movq(scratch3, callback, RelocInfo::EMBEDDED_OBJECT); - __ cmpq(scratch2, scratch3); - __ j(not_equal, miss); + this->name(), + scratch2(), + scratch3()); + __ bind(&probe_done); + + // If probing finds an entry in the dictionary, scratch3 contains the + // index into the dictionary. Check that the value is the callback. + Register index = scratch3(); + const int kElementsStartOffset = + NameDictionary::kHeaderSize + + NameDictionary::kElementsStartIndex * kPointerSize; + const int kValueOffset = kElementsStartOffset + kPointerSize; + __ movq(scratch2(), + Operand(dictionary, index, times_pointer_size, + kValueOffset - kHeapObjectTag)); + __ movq(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT); + __ cmpq(scratch2(), scratch3()); + __ j(not_equal, &miss); + } + + HandlerFrontendFooter(success, &miss); + return reg; } -void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Register scratch4, - Handle<AccessorInfo> callback, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. 
- __ JumpIfSmi(receiver, miss); +void BaseLoadStubCompiler::NonexistentHandlerFrontend( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Label* success, + Handle<GlobalObject> global) { + Label miss; - // Check that the maps haven't changed. - Register reg = CheckPrototypes(object, receiver, holder, scratch1, - scratch2, scratch3, name, miss); + Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss); - if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { - GenerateDictionaryLoadCallback( - reg, name_reg, scratch2, scratch3, scratch4, callback, name, miss); + // If the last object in the prototype chain is a global object, + // check that the global property cell is empty. + if (!global.is_null()) { + GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss); } + if (!last->HasFastProperties()) { + __ movq(scratch2(), FieldOperand(reg, HeapObject::kMapOffset)); + __ movq(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset)); + __ Cmp(scratch2(), isolate()->factory()->null_value()); + __ j(not_equal, &miss); + } + + HandlerFrontendFooter(success, &miss); +} + + +void BaseLoadStubCompiler::GenerateLoadField(Register reg, + Handle<JSObject> holder, + PropertyIndex index) { + // Get the value from the properties. + GenerateFastPropertyLoad(masm(), rax, reg, holder, index); + __ ret(0); +} + + +void BaseLoadStubCompiler::GenerateLoadCallback( + Register reg, + Handle<ExecutableAccessorInfo> callback) { // Insert additional parameters into the stack frame above return address. - ASSERT(!scratch2.is(reg)); - __ pop(scratch2); // Get return address to place it below. + ASSERT(!scratch2().is(reg)); + __ pop(scratch2()); // Get return address to place it below. - __ push(receiver); // receiver + __ push(receiver()); // receiver __ push(reg); // holder if (heap()->InNewSpace(callback->data())) { - __ Move(scratch1, callback); - __ push(FieldOperand(scratch1, AccessorInfo::kDataOffset)); // data + __ Move(scratch1(), callback); + __ push(FieldOperand(scratch1(), + ExecutableAccessorInfo::kDataOffset)); // data } else { - __ Push(Handle<Object>(callback->data())); + __ Push(Handle<Object>(callback->data(), isolate())); } __ PushAddress(ExternalReference::isolate_address()); // isolate - __ push(name_reg); // name - // Save a pointer to where we pushed the arguments pointer. - // This will be passed as the const AccessorInfo& to the C++ callback. + __ push(name()); // name + // Save a pointer to where we pushed the arguments pointer. This will be + // passed as the const ExecutableAccessorInfo& to the C++ callback. #if defined(__MINGW64__) Register accessor_info_arg = rdx; @@ -1124,9 +1131,9 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, Register name_arg = rdi; #endif - ASSERT(!name_arg.is(scratch2)); + ASSERT(!name_arg.is(scratch2())); __ movq(name_arg, rsp); - __ push(scratch2); // Restore return address. + __ push(scratch2()); // Restore return address. // 4 elements array for v8::Arguments::values_ and handler for name. const int kStackSpace = 5; @@ -1149,44 +1156,22 @@ void StubCompiler::GenerateLoadCallback(Handle<JSObject> object, } -void StubCompiler::GenerateLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Register receiver, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<JSFunction> value, - Handle<String> name, - Label* miss) { - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - - // Check that the maps haven't changed. 
- CheckPrototypes( - object, receiver, holder, scratch1, scratch2, scratch3, name, miss); - +void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) { // Return the constant value. __ LoadHeapObject(rax, value); __ ret(0); } -void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, - Handle<JSObject> interceptor_holder, - LookupResult* lookup, - Register receiver, - Register name_reg, - Register scratch1, - Register scratch2, - Register scratch3, - Handle<String> name, - Label* miss) { +void BaseLoadStubCompiler::GenerateLoadInterceptor( + Register holder_reg, + Handle<JSObject> object, + Handle<JSObject> interceptor_holder, + LookupResult* lookup, + Handle<Name> name) { ASSERT(interceptor_holder->HasNamedInterceptor()); ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); - // Check that the receiver isn't a smi. - __ JumpIfSmi(receiver, miss); - // So far the most popular follow ups for interceptor loads are FIELD // and CALLBACKS, so inline only them, other cases may be added // later. @@ -1195,8 +1180,9 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, if (lookup->IsField()) { compile_followup_inline = true; } else if (lookup->type() == CALLBACKS && - lookup->GetCallbackObject()->IsAccessorInfo()) { - AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject()); + lookup->GetCallbackObject()->IsExecutableAccessorInfo()) { + ExecutableAccessorInfo* callback = + ExecutableAccessorInfo::cast(lookup->GetCallbackObject()); compile_followup_inline = callback->getter() != NULL && callback->IsCompatibleReceiver(*object); } @@ -1206,17 +1192,14 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, // Compile the interceptor call, followed by inline code to load the // property from further up the prototype chain if the call fails. // Check that the maps haven't changed. - Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, - name, miss); - ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1)); + ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1())); // Preserve the receiver register explicitly whenever it is different from // the holder and it is needed should the interceptor return without any // result. The CALLBACKS case needs the receiver to be passed into C++ code, // the FIELD case might cause a miss during the prototype check. bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); - bool must_preserve_receiver_reg = !receiver.is(holder_reg) && + bool must_preserve_receiver_reg = !receiver().is(holder_reg) && (lookup->type() == CALLBACKS || must_perfrom_prototype_check); // Save necessary data before invoking an interceptor. @@ -1225,18 +1208,18 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, FrameScope frame_scope(masm(), StackFrame::INTERNAL); if (must_preserve_receiver_reg) { - __ push(receiver); + __ push(receiver()); } __ push(holder_reg); - __ push(name_reg); + __ push(this->name()); // Invoke an interceptor. Note: map checks from receiver to // interceptor's holder has been compiled before (see a caller // of this method.) CompileCallLoadPropertyWithInterceptor(masm(), - receiver, + receiver(), holder_reg, - name_reg, + this->name(), interceptor_holder); // Check if interceptor provided a value for property. 
If it's @@ -1248,71 +1231,23 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, __ ret(0); __ bind(&interceptor_failed); - __ pop(name_reg); + __ pop(this->name()); __ pop(holder_reg); if (must_preserve_receiver_reg) { - __ pop(receiver); + __ pop(receiver()); } // Leave the internal frame. } - // Check that the maps from interceptor's holder to lookup's holder - // haven't changed. And load lookup's holder into |holder| register. - if (must_perfrom_prototype_check) { - holder_reg = CheckPrototypes(interceptor_holder, - holder_reg, - Handle<JSObject>(lookup->holder()), - scratch1, - scratch2, - scratch3, - name, - miss); - } - - if (lookup->IsField()) { - // We found FIELD property in prototype chain of interceptor's holder. - // Retrieve a field from field's holder. - GenerateFastPropertyLoad(masm(), rax, holder_reg, - Handle<JSObject>(lookup->holder()), - lookup->GetFieldIndex()); - __ ret(0); - } else { - // We found CALLBACKS property in prototype chain of interceptor's - // holder. - ASSERT(lookup->type() == CALLBACKS); - Handle<AccessorInfo> callback( - AccessorInfo::cast(lookup->GetCallbackObject())); - ASSERT(callback->getter() != NULL); - - // Tail call to runtime. - // Important invariant in CALLBACKS case: the code above must be - // structured to never clobber |receiver| register. - __ pop(scratch2); // return address - __ push(receiver); - __ push(holder_reg); - __ Move(holder_reg, callback); - __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset)); - __ PushAddress(ExternalReference::isolate_address()); - __ push(holder_reg); - __ push(name_reg); - __ push(scratch2); // restore return address - - ExternalReference ref = - ExternalReference(IC_Utility(IC::kLoadCallbackProperty), - isolate()); - __ TailCallExternalReference(ref, 6, 1); - } + GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); } else { // !compile_followup_inline // Call the runtime system to load the interceptor. // Check that the maps haven't changed. 
- Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder, - scratch1, scratch2, scratch3, - name, miss); - __ pop(scratch2); // save old return address - PushInterceptorArguments(masm(), receiver, holder_reg, - name_reg, interceptor_holder); - __ push(scratch2); // restore old return address + __ pop(scratch2()); // save old return address + PushInterceptorArguments(masm(), receiver(), holder_reg, + this->name(), interceptor_holder); + __ push(scratch2()); // restore old return address ExternalReference ref = ExternalReference( IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), isolate()); @@ -1321,7 +1256,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object, } -void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { +void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) { if (kind_ == Code::KEYED_CALL_IC) { __ Cmp(rcx, name); __ j(not_equal, miss); @@ -1331,7 +1266,7 @@ void CallStubCompiler::GenerateNameCheck(Handle<String> name, Label* miss) { void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name, + Handle<Name> name, Label* miss) { ASSERT(holder->IsGlobalObject()); @@ -1388,8 +1323,8 @@ void CallStubCompiler::GenerateMissBranch() { Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object, Handle<JSObject> holder, - int index, - Handle<String> name) { + PropertyIndex index, + Handle<Name> name) { // ----------- S t a t e ------------- // rcx : function name // rsp[0] : return address @@ -1482,7 +1417,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( Label call_builtin; if (argc == 1) { // Otherwise fall through to call builtin. - Label attempt_to_grow_elements, with_write_barrier; + Label attempt_to_grow_elements, with_write_barrier, check_double; // Get the elements array of the object. __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset)); @@ -1490,7 +1425,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // Check that the elements are in fast mode and writable. __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset), factory()->fixed_array_map()); - __ j(not_equal, &call_builtin); + __ j(not_equal, &check_double); // Get the array's length into rax and calculate new length. __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset)); @@ -1521,6 +1456,34 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( __ Integer32ToSmi(rax, rax); // Return new length as smi. __ ret((argc + 1) * kPointerSize); + __ bind(&check_double); + + // Check that the elements are in double mode. + __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset), + factory()->fixed_double_array_map()); + __ j(not_equal, &call_builtin); + + // Get the array's length into rax and calculate new length. + __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset)); + STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue); + __ addl(rax, Immediate(argc)); + + // Get the elements' length into rcx. + __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset)); + + // Check if we could survive without allocation. + __ cmpl(rax, rcx); + __ j(greater, &call_builtin); + + __ movq(rcx, Operand(rsp, argc * kPointerSize)); + __ StoreNumberToDoubleElements( + rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize); + + // Save new length. + __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax); + __ Integer32ToSmi(rax, rax); // Return new length as smi. 
+ __ ret((argc + 1) * kPointerSize); + __ bind(&with_write_barrier); __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset)); @@ -1532,6 +1495,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( // In case of fast smi-only, convert to fast object, otherwise bail out. __ bind(&not_fast_object); __ CheckFastSmiElements(rbx, &call_builtin); + __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset), + factory()->heap_number_map()); + __ j(equal, &call_builtin); // rdx: receiver // rbx: map @@ -1543,7 +1509,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( &try_holey_map); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); // Restore edi. __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset)); __ jmp(&fast_object); @@ -1555,7 +1523,9 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( rdi, &call_builtin); ElementsTransitionGenerator:: - GenerateMapChangeElementsTransition(masm()); + GenerateMapChangeElementsTransition(masm(), + DONT_TRACK_ALLOCATION_SITE, + NULL); __ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset)); __ bind(&fast_object); } else { @@ -1776,8 +1746,9 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall( rax, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - rax, holder, rbx, rdx, rdi, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + rax, holder, rbx, rdx, rdi, name, &miss); Register receiver = rbx; Register index = rdi; @@ -1854,8 +1825,9 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall( rax, &miss); ASSERT(!object.is_identical_to(holder)); - CheckPrototypes(Handle<JSObject>(JSObject::cast(object->GetPrototype())), - rax, holder, rbx, rdx, rdi, name, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + rax, holder, rbx, rdx, rdi, name, &miss); Register receiver = rax; Register index = rdi; @@ -1884,7 +1856,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall( if (index_out_of_range.is_linked()) { __ bind(&index_out_of_range); - __ LoadRoot(rax, Heap::kEmptyStringRootIndex); + __ LoadRoot(rax, Heap::kempty_stringRootIndex); __ ret((argc + 1) * kPointerSize); } __ bind(&miss); @@ -2052,7 +2024,7 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall( const int sign_mask_shift = (HeapNumber::kExponentOffset - HeapNumber::kValueOffset) * kBitsPerByte; __ movq(rdi, static_cast<int64_t>(HeapNumber::kSignMask) << sign_mask_shift, - RelocInfo::NONE); + RelocInfo::NONE64); __ testq(rbx, rdi); __ j(not_zero, &negative_sign); __ ret(2 * kPointerSize); @@ -2139,11 +2111,11 @@ Handle<Code> CallStubCompiler::CompileFastApiCall( } -Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, - Handle<JSObject> holder, - Handle<JSFunction> function, - Handle<String> name, - CheckType check) { +void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Label* success) { // ----------- S t a t e ------------- // rcx : function name // rsp[0] : return address @@ -2153,15 +2125,6 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, // rsp[argc * 8] : argument 1 // rsp[(argc + 1) * 8] : argument 0 = receiver // ----------------------------------- - - if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, - 
Handle<JSGlobalPropertyCell>::null(), - function, name); - // A null handle means bail out to the regular compiler code below. - if (!code.is_null()) return code; - } - Label miss; GenerateNameCheck(name, &miss); @@ -2196,76 +2159,92 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, break; case STRING_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - // Check that the object is a two-byte string or a symbol. - __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax); - __ j(above_equal, &miss); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::STRING_FUNCTION_INDEX, rax, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - rax, holder, rbx, rdx, rdi, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + // Check that the object is a string. + __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, rax); + __ j(above_equal, &miss); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::STRING_FUNCTION_INDEX, rax, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + rax, holder, rbx, rdx, rdi, name, &miss); break; - case NUMBER_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a smi or a heap number. - __ JumpIfSmi(rdx, &fast); - __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax); - __ j(not_equal, &miss); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - rax, holder, rbx, rdx, rdi, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + case SYMBOL_CHECK: + // Check that the object is a symbol. + __ CmpObjectType(rdx, SYMBOL_TYPE, rax); + __ j(not_equal, &miss); break; - case BOOLEAN_CHECK: - if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { - Label fast; - // Check that the object is a boolean. - __ CompareRoot(rdx, Heap::kTrueValueRootIndex); - __ j(equal, &fast); - __ CompareRoot(rdx, Heap::kFalseValueRootIndex); - __ j(not_equal, &miss); - __ bind(&fast); - // Check that the maps starting from the prototype haven't changed. - GenerateDirectLoadGlobalFunctionPrototype( - masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss); - CheckPrototypes( - Handle<JSObject>(JSObject::cast(object->GetPrototype())), - rax, holder, rbx, rdx, rdi, name, &miss); - } else { - // Calling non-strict non-builtins with a value as the receiver - // requires boxing. - __ jmp(&miss); - } + case NUMBER_CHECK: { + Label fast; + // Check that the object is a smi or a heap number. + __ JumpIfSmi(rdx, &fast); + __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rax); + __ j(not_equal, &miss); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. 
+ GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::NUMBER_FUNCTION_INDEX, rax, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + rax, holder, rbx, rdx, rdi, name, &miss); break; + } + case BOOLEAN_CHECK: { + Label fast; + // Check that the object is a boolean. + __ CompareRoot(rdx, Heap::kTrueValueRootIndex); + __ j(equal, &fast); + __ CompareRoot(rdx, Heap::kFalseValueRootIndex); + __ j(not_equal, &miss); + __ bind(&fast); + // Check that the maps starting from the prototype haven't changed. + GenerateDirectLoadGlobalFunctionPrototype( + masm(), Context::BOOLEAN_FUNCTION_INDEX, rax, &miss); + CheckPrototypes( + Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))), + rax, holder, rbx, rdx, rdi, name, &miss); + break; + } } + __ jmp(success); + + // Handle call cache miss. + __ bind(&miss); + GenerateMissBranch(); +} + + +void CallStubCompiler::CompileHandlerBackend(Handle<JSFunction> function) { CallKind call_kind = CallICBase::Contextual::decode(extra_state_) ? CALL_AS_FUNCTION : CALL_AS_METHOD; __ InvokeFunction(function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind); +} - // Handle call cache miss. - __ bind(&miss); - GenerateMissBranch(); + +Handle<Code> CallStubCompiler::CompileCallConstant( + Handle<Object> object, + Handle<JSObject> holder, + Handle<Name> name, + CheckType check, + Handle<JSFunction> function) { + if (HasCustomCallGenerator(function)) { + Handle<Code> code = CompileCustomCall(object, holder, + Handle<JSGlobalPropertyCell>::null(), + function, Handle<String>::cast(name)); + // A null handle means bail out to the regular compiler code below. + if (!code.is_null()) return code; + } + + Label success; + + CompileHandlerFrontend(object, holder, name, check, &success); + __ bind(&success); + CompileHandlerBackend(function); // Return the generated code. return GetCode(function); @@ -2274,7 +2253,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object, Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object, Handle<JSObject> holder, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // rcx : function name // rsp[0] : return address @@ -2337,7 +2316,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( Handle<GlobalObject> holder, Handle<JSGlobalPropertyCell> cell, Handle<JSFunction> function, - Handle<String> name) { + Handle<Name> name) { // ----------- S t a t e ------------- // rcx : function name // rsp[0] : return address @@ -2349,7 +2328,8 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( // ----------------------------------- if (HasCustomCallGenerator(function)) { - Handle<Code> code = CompileCustomCall(object, holder, cell, function, name); + Handle<Code> code = CompileCustomCall( + object, holder, cell, function, Handle<String>::cast(name)); // A null handle means bail out to the regular compiler code below. if (!code.is_null()) return code; } @@ -2395,64 +2375,26 @@ Handle<Code> CallStubCompiler::CompileCallGlobal( } -Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - // Generate store field code. Preserves receiver and name on jump to miss. 
- GenerateStoreField(masm(), - object, - index, - transition, - name, - rdx, rcx, rbx, rdi, - &miss); - - // Handle store cache miss. - __ bind(&miss); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? Code::FIELD - : Code::MAP_TRANSITION, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreCallback( - Handle<String> name, - Handle<JSObject> receiver, + Handle<Name> name, + Handle<JSObject> object, Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + Handle<ExecutableAccessorInfo> callback) { Label miss; // Check that the maps haven't changed. - __ JumpIfSmi(rdx, &miss); - CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss); + __ JumpIfSmi(receiver(), &miss); + CheckPrototypes(object, receiver(), holder, + scratch1(), scratch2(), scratch3(), name, &miss); // Stub never generated for non-global objects that require access checks. ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); - __ pop(rbx); // remove the return address - __ push(rdx); // receiver + __ pop(scratch1()); // remove the return address + __ push(receiver()); __ Push(callback); // callback info - __ push(rcx); // name - __ push(rax); // value - __ push(rbx); // restore return address + __ push(this->name()); + __ push(value()); + __ push(scratch1()); // restore return address // Do tail-call to the runtime system. ExternalReference store_callback_property = @@ -2461,11 +2403,10 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::CALLBACKS, name); + return GetICCode(kind(), Code::CALLBACKS, name); } @@ -2515,64 +2456,30 @@ void StoreStubCompiler::GenerateStoreViaSetter( #define __ ACCESS_MASM(masm()) -Handle<Code> StoreStubCompiler::CompileStoreViaSetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> setter) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(rdx, &miss); - CheckPrototypes(receiver, rdx, holder, rbx, r8, rdi, name, &miss); - - GenerateStoreViaSetter(masm(), setter); - - __ bind(&miss); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - Handle<Code> StoreStubCompiler::CompileStoreInterceptor( - Handle<JSObject> receiver, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + Handle<JSObject> object, + Handle<Name> name) { Label miss; // Check that the map of the object hasn't changed. - __ CheckMap(rdx, Handle<Map>(receiver->map()), &miss, + __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS); // Perform global security token check if needed. 
- if (receiver->IsJSGlobalProxy()) { - __ CheckAccessGlobalProxy(rdx, rbx, &miss); + if (object->IsJSGlobalProxy()) { + __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); } // Stub never generated for non-global objects that require access // checks. - ASSERT(receiver->IsJSGlobalProxy() || !receiver->IsAccessCheckNeeded()); + ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); - __ pop(rbx); // remove the return address - __ push(rdx); // receiver - __ push(rcx); // name - __ push(rax); // value - __ Push(Smi::FromInt(strict_mode_)); - __ push(rbx); // restore return address + __ pop(scratch1()); // remove the return address + __ push(receiver()); + __ push(this->name()); + __ push(value()); + __ Push(Smi::FromInt(strict_mode())); + __ push(scratch1()); // restore return address // Do tail-call to the runtime system. ExternalReference store_ic_property = @@ -2581,34 +2488,28 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( // Handle store cache miss. __ bind(&miss); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); + return GetICCode(kind(), Code::INTERCEPTOR, name); } Handle<Code> StoreStubCompiler::CompileStoreGlobal( Handle<GlobalObject> object, Handle<JSGlobalPropertyCell> cell, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : name - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + Handle<Name> name) { Label miss; // Check that the map of the global has not changed. - __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset), + __ Cmp(FieldOperand(receiver(), HeapObject::kMapOffset), Handle<Map>(object->map())); __ j(not_equal, &miss); // Compute the cell operand to use. - __ Move(rbx, cell); - Operand cell_operand = FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset); + __ Move(scratch1(), cell); + Operand cell_operand = + FieldOperand(scratch1(), JSGlobalPropertyCell::kValueOffset); // Check that the value in the cell is not the hole. If it is, this // cell could have been deleted and reintroducing the global needs @@ -2618,7 +2519,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal( __ j(equal, &miss); // Store the value in the cell. - __ movq(cell_operand, rax); + __ movq(cell_operand, value()); // Cells are always rescanned, so no write barrier here. // Return the value (register rax). @@ -2629,76 +2530,10 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal( // Handle store cache miss. __ bind(&miss); __ IncrementCounter(counters->named_store_global_inline_miss(), 1); - Handle<Code> ic = isolate()->builtins()->StoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(Code::NORMAL, name); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object, - int index, - Handle<Map> transition, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_store_field(), 1); - - // Check that the name has not changed. - __ Cmp(rcx, name); - __ j(not_equal, &miss); - - // Generate store field code. Preserves receiver and name on jump to miss. 
- GenerateStoreField(masm(), - object, - index, - transition, - name, - rdx, rcx, rbx, rdi, - &miss); - - // Handle store cache miss. - __ bind(&miss); - __ DecrementCounter(counters->keyed_store_field(), 1); - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ Jump(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. - return GetCode(transition.is_null() - ? Code::FIELD - : Code::MAP_TRANSITION, name); -} - - -Handle<Code> KeyedStoreStubCompiler::CompileStoreElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - - ElementsKind elements_kind = receiver_map->elements_kind(); - bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE; - Handle<Code> stub = - KeyedStoreElementStub(is_js_array, elements_kind, grow_mode_).GetCode(); - - __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string()); + return GetICCode(kind(), Code::NORMAL, name); } @@ -2706,116 +2541,98 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( MapHandleList* receiver_maps, CodeHandleList* handler_stubs, MapHandleList* transitioned_maps) { - // ----------- S t a t e ------------- - // -- rax : value - // -- rcx : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- Label miss; - __ JumpIfSmi(rdx, &miss, Label::kNear); + __ JumpIfSmi(receiver(), &miss, Label::kNear); - __ movq(rdi, FieldOperand(rdx, HeapObject::kMapOffset)); + __ movq(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset)); int receiver_count = receiver_maps->length(); for (int i = 0; i < receiver_count; ++i) { // Check map and tail call if there's a match - __ Cmp(rdi, receiver_maps->at(i)); + __ Cmp(scratch1(), receiver_maps->at(i)); if (transitioned_maps->at(i).is_null()) { __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET); } else { Label next_map; __ j(not_equal, &next_map, Label::kNear); - __ movq(rbx, transitioned_maps->at(i), RelocInfo::EMBEDDED_OBJECT); + __ movq(transition_map(), + transitioned_maps->at(i), + RelocInfo::EMBEDDED_OBJECT); __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET); __ bind(&next_map); } } __ bind(&miss); - Handle<Code> ic = isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); + + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); + return GetICCode( + kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); } -Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> last) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - // Check that receiver is not a smi. - __ JumpIfSmi(rax, &miss); - - // Check the maps of the full prototype chain. Also check that - // global property cells up to (but not including) the last object - // in the prototype chain are empty. 
- CheckPrototypes(object, rax, last, rbx, rdx, rdi, name, &miss); +Handle<Code> LoadStubCompiler::CompileLoadNonexistent( + Handle<JSObject> object, + Handle<JSObject> last, + Handle<Name> name, + Handle<GlobalObject> global) { + Label success; - // If the last object in the prototype chain is a global object, - // check that the global property cell is empty. - if (last->IsGlobalObject()) { - GenerateCheckPropertyCell( - masm(), Handle<GlobalObject>::cast(last), name, rdx, &miss); - } + NonexistentHandlerFrontend(object, last, name, &success, global); + __ bind(&success); // Return undefined if maps of the full prototype chain are still the // same and no global property with this name contains a value. __ LoadRoot(rax, Heap::kUndefinedValueRootIndex); __ ret(0); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::NONEXISTENT, factory()->empty_string()); + return GetCode(kind(), Code::NONEXISTENT, name); } -Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object, - Handle<JSObject> holder, - int index, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; +Register* LoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 }; + return registers; +} - GenerateLoadField(object, holder, rax, rbx, rdx, rdi, index, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - // Return the generated code. - return GetCode(Code::FIELD, name); +Register* KeyedLoadStubCompiler::registers() { + // receiver, name, scratch1, scratch2, scratch3, scratch4. + static Register registers[] = { rdx, rax, rbx, rcx, rdi, r8 }; + return registers; } -Handle<Code> LoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> object, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - GenerateLoadCallback(object, holder, rax, rcx, rdx, rbx, rdi, r8, callback, - name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); +Register* StoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. + static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 }; + return registers; +} - // Return the generated code. - return GetCode(Code::CALLBACKS, name); + +Register* KeyedStoreStubCompiler::registers() { + // receiver, name, value, scratch1, scratch2, scratch3. 
+ static Register registers[] = { rdx, rcx, rax, rbx, rdi, r8 }; + return registers; +} + + +void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ Cmp(name_reg, name); + __ j(not_equal, miss); +} + + +void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name, + Register name_reg, + Label* miss) { + __ Cmp(name_reg, name); + __ j(not_equal, miss); } @@ -2856,92 +2673,20 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, #define __ ACCESS_MASM(masm()) -Handle<Code> LoadStubCompiler::CompileLoadViaGetter( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> getter) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - // Check that the maps haven't changed. - __ JumpIfSmi(rax, &miss); - CheckPrototypes(receiver, rax, holder, rbx, rdx, rdi, name, &miss); - - GenerateLoadViaGetter(masm(), getter), - - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object, - Handle<JSObject> holder, - Handle<JSFunction> value, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - GenerateLoadConstant(object, holder, rax, rbx, rdx, rdi, value, name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> LoadStubCompiler::CompileLoadInterceptor(Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - - // TODO(368): Compile in the whole chain: all the interceptors in - // prototypes and ultimate answer. - GenerateLoadInterceptor(receiver, holder, &lookup, rax, rcx, rdx, rbx, rdi, - name, &miss); - __ bind(&miss); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); -} - - Handle<Code> LoadStubCompiler::CompileLoadGlobal( Handle<JSObject> object, - Handle<GlobalObject> holder, + Handle<GlobalObject> global, Handle<JSGlobalPropertyCell> cell, - Handle<String> name, + Handle<Name> name, bool is_dont_delete) { - // ----------- S t a t e ------------- - // -- rax : receiver - // -- rcx : name - // -- rsp[0] : return address - // ----------------------------------- - Label miss; + Label success, miss; + // TODO(verwaest): Directly store to rax. Currently we cannot do this, since + // rax is used as receiver(), which we would otherwise clobber before a + // potential miss. - // Check that the maps haven't changed. - __ JumpIfSmi(rax, &miss); - CheckPrototypes(object, rax, holder, rbx, rdx, rdi, name, &miss); + __ CheckMap(receiver(), Handle<Map>(object->map()), &miss, DO_SMI_CHECK); + HandlerFrontendHeader( + object, receiver(), Handle<JSObject>::cast(global), name, &miss); // Get the value from the cell. 
__ Move(rbx, cell); @@ -2956,261 +2701,48 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal( __ Check(not_equal, "DontDelete cells can't contain the hole"); } + HandlerFrontendFooter(&success, &miss); + __ bind(&success); + Counters* counters = isolate()->counters(); __ IncrementCounter(counters->named_load_global_stub(), 1); __ movq(rax, rbx); __ ret(0); - __ bind(&miss); - __ IncrementCounter(counters->named_load_global_stub_miss(), 1); - GenerateLoadMiss(masm(), Code::LOAD_IC); - - // Return the generated code. - return GetCode(Code::NORMAL, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - int index) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_field(), 1); - - // Check that the name has not changed. - __ Cmp(rax, name); - __ j(not_equal, &miss); - - GenerateLoadField(receiver, holder, rdx, rbx, rcx, rdi, index, name, &miss); - - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_field(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::FIELD, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadCallback( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<AccessorInfo> callback) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_callback(), 1); - - // Check that the name has not changed. - __ Cmp(rax, name); - __ j(not_equal, &miss); - - GenerateLoadCallback(receiver, holder, rdx, rax, rbx, rcx, rdi, r8, callback, - name, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_callback(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant( - Handle<String> name, - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<JSFunction> value) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_constant_function(), 1); - - // Check that the name has not changed. - __ Cmp(rax, name); - __ j(not_equal, &miss); - - GenerateLoadConstant(receiver, holder, rdx, rbx, rcx, rdi, - value, name, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_constant_function(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CONSTANT_FUNCTION, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadInterceptor( - Handle<JSObject> receiver, - Handle<JSObject> holder, - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_interceptor(), 1); - - // Check that the name has not changed. 
- __ Cmp(rax, name); - __ j(not_equal, &miss); - - LookupResult lookup(isolate()); - LookupPostInterceptor(holder, name, &lookup); - GenerateLoadInterceptor(receiver, holder, &lookup, rdx, rax, rcx, rbx, rdi, - name, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_interceptor(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::INTERCEPTOR, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_array_length(), 1); - - // Check that the name has not changed. - __ Cmp(rax, name); - __ j(not_equal, &miss); - - GenerateLoadArrayLength(masm(), rdx, rcx, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_array_length(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_string_length(), 1); - - // Check that the name has not changed. - __ Cmp(rax, name); - __ j(not_equal, &miss); - - GenerateLoadStringLength(masm(), rdx, rcx, rbx, &miss, true); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_string_length(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - - // Return the generated code. - return GetCode(Code::CALLBACKS, name); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype( - Handle<String> name) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss; - - Counters* counters = isolate()->counters(); - __ IncrementCounter(counters->keyed_load_function_prototype(), 1); - - // Check that the name has not changed. - __ Cmp(rax, name); - __ j(not_equal, &miss); - - GenerateLoadFunctionPrototype(masm(), rdx, rcx, rbx, &miss); - __ bind(&miss); - __ DecrementCounter(counters->keyed_load_function_prototype(), 1); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); - // Return the generated code. - return GetCode(Code::CALLBACKS, name); + return GetICCode(kind(), Code::NORMAL, name); } -Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( - Handle<Map> receiver_map) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - ElementsKind elements_kind = receiver_map->elements_kind(); - Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode(); - - __ DispatchMap(rdx, receiver_map, stub, DO_SMI_CHECK); - - Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); - __ jmp(ic, RelocInfo::CODE_TARGET); - - // Return the generated code. 
- return GetCode(Code::NORMAL, factory()->empty_string()); -} - - -Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( +Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC( MapHandleList* receiver_maps, - CodeHandleList* handler_ics) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- + CodeHandleList* handlers, + Handle<Name> name, + Code::StubType type, + IcCheckType check) { Label miss; - __ JumpIfSmi(rdx, &miss); - Register map_reg = rbx; - __ movq(map_reg, FieldOperand(rdx, HeapObject::kMapOffset)); + if (check == PROPERTY) { + GenerateNameCheck(name, this->name(), &miss); + } + + __ JumpIfSmi(receiver(), &miss); + Register map_reg = scratch1(); + __ movq(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset)); int receiver_count = receiver_maps->length(); for (int current = 0; current < receiver_count; ++current) { // Check map and tail call if there's a match __ Cmp(map_reg, receiver_maps->at(current)); - __ j(equal, handler_ics->at(current), RelocInfo::CODE_TARGET); + __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET); } __ bind(&miss); - GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); + TailCallBuiltin(masm(), MissBuiltin(kind())); // Return the generated code. - return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); + InlineCacheState state = + receiver_maps->length() > 1 ? POLYMORPHIC : MONOMORPHIC; + return GetICCode(kind(), type, name, state); } @@ -3265,8 +2797,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ cmpq(rcx, Immediate(instance_size)); __ Check(equal, "Instance size of initial map changed."); #endif - __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg, - &generic_stub_call, NO_ALLOCATION_FLAGS); + __ Allocate(instance_size, rdx, rcx, no_reg, &generic_stub_call, + NO_ALLOCATION_FLAGS); // Allocated the JSObject, now initialize the fields and add the heap tag. // rbx: initial map @@ -3305,7 +2837,8 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub( __ movq(Operand(r9, i * kPointerSize), rbx); } else { // Set the property to the constant value. 
- Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i)); + Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i), + isolate()); __ Move(Operand(r9, i * kPointerSize), constant); } } @@ -3380,9 +2913,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ jmp(slow_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); __ bind(&miss_force_generic); // ----------- S t a t e ------------- @@ -3390,9 +2921,7 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric); } @@ -3421,140 +2950,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm, } -void KeyedLoadStubCompiler::GenerateLoadExternalArray( - MacroAssembler* masm, - ElementsKind elements_kind) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label slow, miss_force_generic; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic); - - // Check that the index is in range. - __ movq(rbx, FieldOperand(rdx, JSObject::kElementsOffset)); - __ SmiToInteger32(rcx, rax); - __ cmpq(rax, FieldOperand(rbx, ExternalArray::kLengthOffset)); - // Unsigned comparison catches both negative and too-large values. - __ j(above_equal, &miss_force_generic); - - // rax: index (as a smi) - // rdx: receiver (JSObject) - // rcx: untagged index - // rbx: elements array - __ movq(rbx, FieldOperand(rbx, ExternalArray::kExternalPointerOffset)); - // rbx: base pointer of external storage - switch (elements_kind) { - case EXTERNAL_BYTE_ELEMENTS: - __ movsxbq(rcx, Operand(rbx, rcx, times_1, 0)); - break; - case EXTERNAL_PIXEL_ELEMENTS: - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: - __ movzxbq(rcx, Operand(rbx, rcx, times_1, 0)); - break; - case EXTERNAL_SHORT_ELEMENTS: - __ movsxwq(rcx, Operand(rbx, rcx, times_2, 0)); - break; - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: - __ movzxwq(rcx, Operand(rbx, rcx, times_2, 0)); - break; - case EXTERNAL_INT_ELEMENTS: - __ movsxlq(rcx, Operand(rbx, rcx, times_4, 0)); - break; - case EXTERNAL_UNSIGNED_INT_ELEMENTS: - __ movl(rcx, Operand(rbx, rcx, times_4, 0)); - break; - case EXTERNAL_FLOAT_ELEMENTS: - __ cvtss2sd(xmm0, Operand(rbx, rcx, times_4, 0)); - break; - case EXTERNAL_DOUBLE_ELEMENTS: - __ movsd(xmm0, Operand(rbx, rcx, times_8, 0)); - break; - default: - UNREACHABLE(); - break; - } - - // rax: index - // rdx: receiver - // For integer array types: - // rcx: value - // For floating-point array type: - // xmm0: value as double. - - ASSERT(kSmiValueSize == 32); - if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { - // For the UnsignedInt array type, we need to see whether - // the value can be represented in a Smi. If not, we need to convert - // it to a HeapNumber. 
- Label box_int; - - __ JumpIfUIntNotValidSmiValue(rcx, &box_int, Label::kNear); - - __ Integer32ToSmi(rax, rcx); - __ ret(0); - - __ bind(&box_int); - - // Allocate a HeapNumber for the int and perform int-to-double - // conversion. - // The value is zero-extended since we loaded the value from memory - // with movl. - __ cvtqsi2sd(xmm0, rcx); - - __ AllocateHeapNumber(rcx, rbx, &slow); - // Set the value. - __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); - __ movq(rax, rcx); - __ ret(0); - } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS || - elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { - // For the floating-point array type, we need to always allocate a - // HeapNumber. - __ AllocateHeapNumber(rcx, rbx, &slow); - // Set the value. - __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); - __ movq(rax, rcx); - __ ret(0); - } else { - __ Integer32ToSmi(rax, rcx); - __ ret(0); - } - - // Slow case: Jump to runtime. - __ bind(&slow); - Counters* counters = masm->isolate()->counters(); - __ IncrementCounter(counters->keyed_load_external_array_slow(), 1); - - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - - Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ jmp(ic, RelocInfo::CODE_TARGET); - - // Miss case: Jump to runtime. - __ bind(&miss_force_generic); - - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); -} - - void KeyedStoreStubCompiler::GenerateStoreExternalArray( MacroAssembler* masm, ElementsKind elements_kind) { @@ -3724,9 +3119,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - - Handle<Code> ic = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ jmp(ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); // Miss case: call runtime. __ bind(&miss_force_generic); @@ -3737,102 +3130,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( // -- rdx : receiver // -- rsp[0] : return address // ----------------------------------- - - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss_force_generic; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic); - - // Get the elements array. - __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); - __ AssertFastElements(rcx); - - // Check that the key is within bounds. - __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset)); - __ j(above_equal, &miss_force_generic); - - // Load the result and make sure it's not the hole. 
- SmiIndex index = masm->SmiToIndex(rbx, rax, kPointerSizeLog2); - __ movq(rbx, FieldOperand(rcx, - index.reg, - index.scale, - FixedArray::kHeaderSize)); - __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex); - __ j(equal, &miss_force_generic); - __ movq(rax, rbx); - __ ret(0); - - __ bind(&miss_force_generic); - Code* code = masm->isolate()->builtins()->builtin( - Builtins::kKeyedLoadIC_MissForceGeneric); - Handle<Code> ic(code); - __ jmp(ic, RelocInfo::CODE_TARGET); -} - - -void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( - MacroAssembler* masm) { - // ----------- S t a t e ------------- - // -- rax : key - // -- rdx : receiver - // -- rsp[0] : return address - // ----------------------------------- - Label miss_force_generic, slow_allocate_heapnumber; - - // This stub is meant to be tail-jumped to, the receiver must already - // have been verified by the caller to not be a smi. - - // Check that the key is a smi or a heap number convertible to a smi. - GenerateSmiKeyCheck(masm, rax, rcx, xmm0, xmm1, &miss_force_generic); - - // Get the elements array. - __ movq(rcx, FieldOperand(rdx, JSObject::kElementsOffset)); - __ AssertFastElements(rcx); - - // Check that the key is within bounds. - __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset)); - __ j(above_equal, &miss_force_generic); - - // Check for the hole - __ SmiToInteger32(kScratchRegister, rax); - uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32); - __ cmpl(FieldOperand(rcx, kScratchRegister, times_8, offset), - Immediate(kHoleNanUpper32)); - __ j(equal, &miss_force_generic); - - // Always allocate a heap number for the result. - __ movsd(xmm0, FieldOperand(rcx, kScratchRegister, times_8, - FixedDoubleArray::kHeaderSize)); - __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber); - // Set the value. - __ movq(rax, rcx); - __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); - __ ret(0); - - __ bind(&slow_allocate_heapnumber); - Handle<Code> slow_ic = - masm->isolate()->builtins()->KeyedLoadIC_Slow(); - __ jmp(slow_ic, RelocInfo::CODE_TARGET); - - __ bind(&miss_force_generic); - Handle<Code> miss_ic = - masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); - __ jmp(miss_ic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); } @@ -3840,7 +3138,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( MacroAssembler* masm, bool is_js_array, ElementsKind elements_kind, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key @@ -3865,7 +3163,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Check that the key is within bounds. if (is_js_array) { __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset)); - if (grow_mode == ALLOW_JSARRAY_GROWTH) { + if (IsGrowStoreMode(store_mode)) { __ j(above_equal, &grow); } else { __ j(above_equal, &miss_force_generic); @@ -3901,15 +3199,12 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( // Handle store cache miss. 
__ bind(&miss_force_generic); - Handle<Code> ic_force_generic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); __ bind(&transition_elements_kind); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic_miss, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Grow the array by a single element if possible. __ bind(&grow); @@ -3925,7 +3220,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ j(not_equal, &check_capacity); int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT); + __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT); // rax: value // rcx: key @@ -3973,8 +3268,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( __ jmp(&finish_store); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ jmp(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } @@ -3982,7 +3276,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement( void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( MacroAssembler* masm, bool is_js_array, - KeyedAccessGrowMode grow_mode) { + KeyedAccessStoreMode store_mode) { // ----------- S t a t e ------------- // -- rax : value // -- rcx : key @@ -3990,7 +3284,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // -- rsp[0] : return address // ----------------------------------- Label miss_force_generic, transition_elements_kind, finish_store; - Label grow, slow, check_capacity; + Label grow, slow, check_capacity, restore_key_transition_elements_kind; // This stub is meant to be tail-jumped to, the receiver must already // have been verified by the caller to not be a smi. @@ -4005,7 +3299,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Check that the key is within bounds. if (is_js_array) { __ SmiCompare(rcx, FieldOperand(rdx, JSArray::kLengthOffset)); - if (grow_mode == ALLOW_JSARRAY_GROWTH) { + if (IsGrowStoreMode(store_mode)) { __ j(above_equal, &grow); } else { __ j(above_equal, &miss_force_generic); @@ -4019,22 +3313,20 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ bind(&finish_store); __ SmiToInteger32(rcx, rcx); __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0, - &transition_elements_kind); + &restore_key_transition_elements_kind); __ ret(0); // Handle store cache miss, replacing the ic with the generic stub. __ bind(&miss_force_generic); - Handle<Code> ic_force_generic = - masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); - __ jmp(ic_force_generic, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric); - __ bind(&transition_elements_kind); + __ bind(&restore_key_transition_elements_kind); // Restore smi-tagging of rcx. __ Integer32ToSmi(rcx, rcx); - Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss(); - __ jmp(ic_miss, RelocInfo::CODE_TARGET); + __ bind(&transition_elements_kind); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss); - if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { + if (is_js_array && IsGrowStoreMode(store_mode)) { // Grow the array by a single element if possible. 
__ bind(&grow); @@ -4058,7 +3350,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ j(not_equal, &check_capacity); int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements); - __ AllocateInNewSpace(size, rdi, rbx, r8, &slow, TAG_OBJECT); + __ Allocate(size, rdi, rbx, r8, &slow, TAG_OBJECT); // rax: value // rcx: key @@ -4071,6 +3363,16 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset), Smi::FromInt(JSArray::kPreallocatedArrayElements)); + // Increment the length of the array. + __ SmiToInteger32(rcx, rcx); + __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0, + &restore_key_transition_elements_kind); + + __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64); + for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) { + __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8); + } + // Install the new backing store in the JSArray. __ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi); __ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx, @@ -4079,7 +3381,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( // Increment the length of the array. __ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1)); __ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset)); - __ jmp(&finish_store); + __ ret(0); __ bind(&check_capacity); // rax: value @@ -4096,8 +3398,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( __ jmp(&finish_store); __ bind(&slow); - Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); - __ jmp(ic_slow, RelocInfo::CODE_TARGET); + TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow); } } |
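
The CompileCallConstant hunks above split the old monolithic stub into CompileHandlerFrontend (the receiver type and prototype-chain checks, which jump to a success label on a hit and fall through to the miss branch otherwise) and CompileHandlerBackend (the actual InvokeFunction), with CompileCallConstant reduced to running one after the other. A minimal standalone sketch of that control-flow shape, using hypothetical stand-in types rather than V8's real MacroAssembler/Label API:

    #include <iostream>
    #include <string>

    struct Label { bool bound = false; };  // stand-in for v8::internal::Label

    class CallStubSketch {
     public:
      // Frontend: emit the receiver checks for |check|; jump to |success| on a
      // hit and fall through to the miss branch otherwise (as in the diff).
      void CompileHandlerFrontend(const std::string& check, Label* success) {
        std::cout << "checks for " << check << "\n";
        success->bound = true;  // __ jmp(success); ... __ bind(&miss); GenerateMissBranch();
      }
      // Backend: the shared invocation path, independent of the check kind.
      void CompileHandlerBackend(const std::string& function) {
        std::cout << "InvokeFunction(" << function << ")\n";
      }
      void CompileCallConstant(const std::string& check, const std::string& function) {
        Label success;
        CompileHandlerFrontend(check, &success);  // type/map checks only
        CompileHandlerBackend(function);          // call only
      }
    };

    int main() {
      CallStubSketch stub;
      stub.CompileCallConstant("STRING_CHECK", "constant function");
      return 0;
    }

With the frontend isolated this way, SYMBOL_CHECK is added in the diff as just one more case without touching the invocation path.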
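Throughout the load and store stubs, hard-coded uses of rax/rcx/rdx are replaced by receiver(), name(), value() and scratch accessors backed by a per-compiler registers() table; the orders { rax, rcx, rdx, rbx, rdi, r8 } for loads and { rdx, rcx, rax, rbx, rdi, r8 } for stores come straight from the hunks above. A small sketch of that table-plus-accessor pattern, with an enum standing in for the real Register type:

    #include <cstdio>

    enum Register { rax, rcx, rdx, rbx, rdi, r8 };  // stand-in for v8::internal::Register

    struct StoreStubRegistersSketch {
      // receiver, name, value, scratch1, scratch2, scratch3 -- same order as the diff.
      static const Register* registers() {
        static const Register regs[] = { rdx, rcx, rax, rbx, rdi, r8 };
        return regs;
      }
      static Register receiver() { return registers()[0]; }
      static Register name()     { return registers()[1]; }
      static Register value()    { return registers()[2]; }
      static Register scratch1() { return registers()[3]; }
    };

    int main() {
      // Stub code can ask for roles instead of naming physical registers.
      std::printf("receiver=%d value=%d\n",
                  StoreStubRegistersSketch::receiver(),
                  StoreStubRegistersSketch::value());
      return 0;
    }

With the role-to-register mapping kept in one place, the surrounding stub logic no longer needs to know which machine register plays which role.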
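CompilePolymorphicIC and CompileStorePolymorphic above both emit the same dispatch shape: load the receiver's map, compare it against each entry in receiver_maps, tail-call the matching handler, and fall through to the miss builtin; the resulting IC state is POLYMORPHIC when more than one map is handled. The same linear scan expressed as ordinary C++, with stand-in types rather than the real Map/Code classes:

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    struct Map { int id; };           // stand-in for a receiver map
    using Handler = std::string;      // stand-in for a handler Code object

    // Mirrors the emitted loop: Cmp(map_reg, map_i); j(equal, handler_i); ... miss.
    Handler Dispatch(const Map& receiver_map,
                     const std::vector<std::pair<Map, Handler>>& cases) {
      for (const auto& entry : cases) {
        if (entry.first.id == receiver_map.id) return entry.second;
      }
      return "MissBuiltin";  // TailCallBuiltin(masm(), MissBuiltin(kind()))
    }

    int main() {
      std::vector<std::pair<Map, Handler>> cases = {
          {{1}, "fast-elements handler"},
          {{2}, "double-elements handler"},
      };
      std::cout << Dispatch(Map{2}, cases) << "\n";  // -> double-elements handler
      return 0;
    }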
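A large share of the mechanical churn above is the replacement of the repeated pair "Handle<Code> ic = isolate()->builtins()->Foo(); __ jmp(ic, RelocInfo::CODE_TARGET);" with a single TailCallBuiltin(masm, Builtins::kFoo) call at every miss and slow-case exit. A toy version of that consolidation, again with stand-in types rather than the real Builtins table:

    #include <iostream>

    enum Builtin { kKeyedStoreIC_Miss, kKeyedStoreIC_Slow, kKeyedLoadIC_Slow };

    const char* BuiltinName(Builtin b) {
      switch (b) {
        case kKeyedStoreIC_Miss: return "KeyedStoreIC_Miss";
        case kKeyedStoreIC_Slow: return "KeyedStoreIC_Slow";
        case kKeyedLoadIC_Slow:  return "KeyedLoadIC_Slow";
      }
      return "unknown";
    }

    // One helper replaces the look-up-the-code-object-then-jump pair at each exit.
    void TailCallBuiltinSketch(std::ostream& masm, Builtin b) {
      masm << "jmp " << BuiltinName(b) << "  ; code target\n";
    }

    int main() {
      TailCallBuiltinSketch(std::cout, kKeyedStoreIC_Slow);
      return 0;
    }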