summaryrefslogtreecommitdiff
path: root/deps/v8/src
diff options
context:
space:
mode:
authorBen Noordhuis <info@bnoordhuis.nl>2016-04-06 12:07:45 +0200
committerBen Noordhuis <info@bnoordhuis.nl>2016-04-07 13:14:25 +0200
commit3a8e8230eeeb1340ba4b8b99c7a58127f594f3fd (patch)
tree0a9188d937e90a1f1bd225d44462e18304061003 /deps/v8/src
parentd4eafd0c114b6765983c5524759656f335675835 (diff)
downloadnode-new-3a8e8230eeeb1340ba4b8b99c7a58127f594f3fd.tar.gz
deps: upgrade to V8 4.9.385.35
PR-URL: https://github.com/nodejs/node/pull/6077 Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Diffstat (limited to 'deps/v8/src')
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc13
-rw-r--r--deps/v8/src/arm64/deoptimizer-arm64.cc10
-rw-r--r--deps/v8/src/builtins.cc108
-rw-r--r--deps/v8/src/compiler/pipeline.cc10
-rw-r--r--deps/v8/src/compiler/simplified-lowering.cc8
-rw-r--r--deps/v8/src/compiler/simplified-lowering.h5
-rw-r--r--deps/v8/src/elements.cc37
-rw-r--r--deps/v8/src/elements.h2
-rw-r--r--deps/v8/src/heap/incremental-marking.cc13
-rw-r--r--deps/v8/src/heap/memory-reducer.cc9
-rw-r--r--deps/v8/src/runtime/runtime-array.cc6
11 files changed, 158 insertions, 63 deletions
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 38635ea3cf..e4fc2138fc 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -288,14 +288,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
__ CheckFor32DRegs(ip);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- int src_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- if (i == kDoubleRegZero.code()) continue;
- if (i == kScratchDoubleReg.code()) continue;
-
- const DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
- src_offset += kDoubleSize;
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ DwVfpRegister reg = DwVfpRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ vldr(reg, r1, src_offset);
}
// Push state, pc, and continuation from the last output frame.
diff --git a/deps/v8/src/arm64/deoptimizer-arm64.cc b/deps/v8/src/arm64/deoptimizer-arm64.cc
index 118c5dfa8d..081405037a 100644
--- a/deps/v8/src/arm64/deoptimizer-arm64.cc
+++ b/deps/v8/src/arm64/deoptimizer-arm64.cc
@@ -191,11 +191,13 @@ void Deoptimizer::TableEntryGenerator::Generate() {
}
// Copy FP registers to the input frame.
+ CPURegList copy_fp_to_input = saved_fp_registers;
for (int i = 0; i < saved_fp_registers.Count(); i++) {
- int dst_offset = FrameDescription::double_registers_offset() +
- (i * kDoubleSize);
int src_offset = kFPRegistersOffset + (i * kDoubleSize);
__ Peek(x2, src_offset);
+ CPURegister reg = copy_fp_to_input.PopLowestIndex();
+ int dst_offset = FrameDescription::double_registers_offset() +
+ (reg.code() * kDoubleSize);
__ Str(x2, MemOperand(x1, dst_offset));
}
@@ -264,11 +266,11 @@ void Deoptimizer::TableEntryGenerator::Generate() {
DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
!saved_fp_registers.IncludesAliasOf(fp_zero) &&
!saved_fp_registers.IncludesAliasOf(fp_scratch));
- int src_offset = FrameDescription::double_registers_offset();
while (!saved_fp_registers.IsEmpty()) {
const CPURegister reg = saved_fp_registers.PopLowestIndex();
+ int src_offset = FrameDescription::double_registers_offset() +
+ (reg.code() * kDoubleSize);
__ Ldr(reg, MemOperand(x1, src_offset));
- src_offset += kDoubleSize;
}
// Push state from the last output frame.
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 34b370fe66..2df9503302 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -208,6 +208,7 @@ inline bool PrototypeHasNoElements(PrototypeIterator* iter) {
JSObject* current = iter->GetCurrent<JSObject>();
if (current->IsAccessCheckNeeded()) return false;
if (current->HasIndexedInterceptor()) return false;
+ if (current->IsJSValue()) return false;
if (current->elements()->length() != 0) return false;
}
return true;
@@ -232,6 +233,41 @@ inline bool IsJSArrayFastElementMovingAllowed(Isolate* isolate,
}
+inline bool HasSimpleElements(JSObject* current) {
+ if (current->IsAccessCheckNeeded()) return false;
+ if (current->HasIndexedInterceptor()) return false;
+ if (current->IsJSValue()) return false;
+ if (current->GetElementsAccessor()->HasAccessors(current)) return false;
+ return true;
+}
+
+
+inline bool HasOnlySimpleReceiverElements(Isolate* isolate,
+ JSReceiver* receiver) {
+ // Check that we have no accessors on the receiver's elements.
+ JSObject* object = JSObject::cast(receiver);
+ if (!HasSimpleElements(object)) return false;
+ // Check that there are no elements on the prototype.
+ DisallowHeapAllocation no_gc;
+ PrototypeIterator iter(isolate, receiver);
+ return PrototypeHasNoElements(&iter);
+}
+
+
+inline bool HasOnlySimpleElements(Isolate* isolate, JSReceiver* receiver) {
+ // Check that there are no elements on the prototype.
+ DisallowHeapAllocation no_gc;
+ PrototypeIterator iter(isolate, receiver,
+ PrototypeIterator::START_AT_RECEIVER);
+ for (; !iter.IsAtEnd(); iter.Advance()) {
+ if (iter.GetCurrent()->IsJSProxy()) return false;
+ JSObject* current = iter.GetCurrent<JSObject>();
+ if (!HasSimpleElements(current)) return false;
+ }
+ return true;
+}
+
+
// Returns empty handle if not applicable.
MUST_USE_RESULT
inline MaybeHandle<FixedArrayBase> EnsureJSArrayWithWritableFastElements(
@@ -1013,9 +1049,10 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
if (!val->ToUint32(&length)) {
length = 0;
}
+ return IterateElementsSlow(isolate, receiver, length, visitor);
}
- if (!(receiver->IsJSArray() || receiver->IsJSTypedArray())) {
+ if (!HasOnlySimpleElements(isolate, *receiver)) {
// For classes which are not known to be safe to access via elements alone,
// use the slow case.
return IterateElementsSlow(isolate, receiver, length, visitor);
@@ -1031,7 +1068,7 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
// to check the prototype for missing elements.
Handle<FixedArray> elements(FixedArray::cast(array->elements()));
int fast_length = static_cast<int>(length);
- DCHECK(fast_length <= elements->length());
+ DCHECK_LE(fast_length, elements->length());
for (int j = 0; j < fast_length; j++) {
HandleScope loop_scope(isolate);
Handle<Object> element_value(elements->get(j), isolate);
@@ -1090,14 +1127,6 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
break;
}
case DICTIONARY_ELEMENTS: {
- // CollectElementIndices() can't be called when there's a JSProxy
- // on the prototype chain.
- for (PrototypeIterator iter(isolate, array); !iter.IsAtEnd();
- iter.Advance()) {
- if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
- return IterateElementsSlow(isolate, array, length, visitor);
- }
- }
Handle<SeededNumberDictionary> dict(array->element_dictionary());
List<uint32_t> indices(dict->Capacity() / 2);
// Collect all indices in the object and the prototypes less
@@ -1187,7 +1216,6 @@ bool IterateElements(Isolate* isolate, Handle<JSReceiver> receiver,
bool HasConcatSpreadableModifier(Isolate* isolate, Handle<JSArray> obj) {
- DCHECK(isolate->IsFastArrayConstructorPrototypeChainIntact());
if (!FLAG_harmony_concat_spreadable) return false;
Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
Maybe<bool> maybe = JSReceiver::HasProperty(obj, key);
@@ -1232,17 +1260,14 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
length_estimate = static_cast<uint32_t>(array->length()->Number());
if (length_estimate != 0) {
ElementsKind array_kind =
- GetPackedElementsKind(array->map()->elements_kind());
+ GetPackedElementsKind(array->GetElementsKind());
kind = GetMoreGeneralElementsKind(kind, array_kind);
}
element_estimate = EstimateElementCount(array);
} else {
if (obj->IsHeapObject()) {
- if (obj->IsNumber()) {
- kind = GetMoreGeneralElementsKind(kind, FAST_DOUBLE_ELEMENTS);
- } else {
- kind = GetMoreGeneralElementsKind(kind, FAST_ELEMENTS);
- }
+ kind = GetMoreGeneralElementsKind(
+ kind, obj->IsNumber() ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS);
}
length_estimate = 1;
element_estimate = 1;
@@ -1284,7 +1309,7 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
} else {
JSArray* array = JSArray::cast(*obj);
uint32_t length = static_cast<uint32_t>(array->length()->Number());
- switch (array->map()->elements_kind()) {
+ switch (array->GetElementsKind()) {
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
// Empty array is FixedArray but not FixedDoubleArray.
@@ -1335,14 +1360,7 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
}
}
if (!failure) {
- Handle<JSArray> array = isolate->factory()->NewJSArray(0);
- Smi* length = Smi::FromInt(j);
- Handle<Map> map;
- map = JSObject::GetElementsTransitionMap(array, kind);
- array->set_map(*map);
- array->set_length(length);
- array->set_elements(*storage);
- return *array;
+ return *isolate->factory()->NewJSArrayWithElements(storage, kind, j);
}
// In case of failure, fall through.
}
@@ -1387,23 +1405,23 @@ Object* Slow_ArrayConcat(Arguments* args, Isolate* isolate) {
MaybeHandle<JSArray> Fast_ArrayConcat(Isolate* isolate, Arguments* args) {
- if (!isolate->IsFastArrayConstructorPrototypeChainIntact()) {
- return MaybeHandle<JSArray>();
- }
int n_arguments = args->length();
int result_len = 0;
{
DisallowHeapAllocation no_gc;
- Object* array_proto = isolate->array_function()->prototype();
// Iterate through all the arguments performing checks
// and calculating total length.
for (int i = 0; i < n_arguments; i++) {
Object* arg = (*args)[i];
if (!arg->IsJSArray()) return MaybeHandle<JSArray>();
+ if (!HasOnlySimpleReceiverElements(isolate, JSObject::cast(arg))) {
+ return MaybeHandle<JSArray>();
+ }
+ // TODO(cbruni): support fast concatenation of DICTIONARY_ELEMENTS.
+ if (!JSObject::cast(arg)->HasFastElements()) {
+ return MaybeHandle<JSArray>();
+ }
Handle<JSArray> array(JSArray::cast(arg), isolate);
- if (!array->HasFastElements()) return MaybeHandle<JSArray>();
- PrototypeIterator iter(isolate, arg);
- if (iter.GetCurrent() != array_proto) return MaybeHandle<JSArray>();
if (HasConcatSpreadableModifier(isolate, array)) {
return MaybeHandle<JSArray>();
}
@@ -2207,7 +2225,11 @@ BUILTIN(DateConstructor) {
char buffer[128];
Vector<char> str(buffer, arraysize(buffer));
ToDateString(time_val, str, isolate->date_cache());
- return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ return *result;
}
@@ -2787,7 +2809,11 @@ BUILTIN(DatePrototypeToDateString) {
char buffer[128];
Vector<char> str(buffer, arraysize(buffer));
ToDateString(date->value()->Number(), str, isolate->date_cache(), kDateOnly);
- return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ return *result;
}
@@ -2827,7 +2853,11 @@ BUILTIN(DatePrototypeToString) {
char buffer[128];
Vector<char> str(buffer, arraysize(buffer));
ToDateString(date->value()->Number(), str, isolate->date_cache());
- return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ return *result;
}
@@ -2838,7 +2868,11 @@ BUILTIN(DatePrototypeToTimeString) {
char buffer[128];
Vector<char> str(buffer, arraysize(buffer));
ToDateString(date->value()->Number(), str, isolate->date_cache(), kTimeOnly);
- return *isolate->factory()->NewStringFromAsciiChecked(str.start());
+ Handle<String> result;
+ ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+ isolate, result,
+ isolate->factory()->NewStringFromUtf8(CStrVector(buffer)));
+ return *result;
}
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index 4d6aacd78a..2204424706 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -677,6 +677,13 @@ struct SimplifiedLoweringPhase {
SimplifiedLowering lowering(data->jsgraph(), temp_zone,
data->source_positions());
lowering.LowerAllNodes();
+
+ // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+ if (lowering.abort_compilation_) {
+ data->set_compilation_failed();
+ return;
+ }
+
JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
data->common());
@@ -1204,6 +1211,9 @@ Handle<Code> Pipeline::GenerateCode() {
// Kill the Typer and thereby uninstall the decorator (if any).
typer.Reset(nullptr);
+ // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+ if (data.compilation_failed()) return Handle<Code>::null();
+
return ScheduleAndGenerateCode(
Linkage::ComputeIncoming(data.instruction_zone(), info()));
}
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 653fea80ea..8af8bdfaa1 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1189,10 +1189,18 @@ class RepresentationSelector {
NodeOutputInfo(access.machine_type().representation(),
NodeProperties::GetType(node));
} else {
+ if (access.machine_type().representation() !=
+ MachineRepresentation::kFloat64) {
+ // TODO(bmeurer): See comment on abort_compilation_.
+ if (lower()) lowering->abort_compilation_ = true;
+ }
output_info = NodeOutputInfo::Float64();
}
}
} else {
+ // TODO(bmeurer): See comment on abort_compilation_.
+ if (lower()) lowering->abort_compilation_ = true;
+
// If undefined is not truncated away, we need to have the tagged
// representation.
output_info = NodeOutputInfo::AnyTagged();
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index f9410f8b41..056837ab87 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -43,6 +43,11 @@ class SimplifiedLowering final {
void DoStringLessThan(Node* node);
void DoStringLessThanOrEqual(Node* node);
+ // TODO(bmeurer): This is a gigantic hack to support the gigantic LoadBuffer
+ // typing hack to support the gigantic "asm.js should be fast without proper
+ // verifier"-hack, ... Kill this! Soon! Really soon! I'm serious!
+ bool abort_compilation_ = false;
+
private:
JSGraph* const jsgraph_;
Zone* const zone_;
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index d4d80dbdec..277749763a 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -545,6 +545,16 @@ class ElementsAccessorBase : public ElementsAccessor {
*holder, *backing_store, index, filter) != kMaxUInt32;
}
+ bool HasAccessors(JSObject* holder) final {
+ return ElementsAccessorSubclass::HasAccessorsImpl(holder,
+ holder->elements());
+ }
+
+ static bool HasAccessorsImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ return false;
+ }
+
Handle<Object> Get(Handle<FixedArrayBase> backing_store,
uint32_t entry) final {
return ElementsAccessorSubclass::GetImpl(backing_store, entry);
@@ -1048,6 +1058,21 @@ class DictionaryElementsAccessor
obj->set_elements(*new_elements);
}
+ static bool HasAccessorsImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ SeededNumberDictionary* dict = SeededNumberDictionary::cast(backing_store);
+ if (!dict->requires_slow_elements()) return false;
+ int capacity = dict->Capacity();
+ for (int i = 0; i < capacity; i++) {
+ Object* key = dict->KeyAt(i);
+ if (!dict->IsKey(key)) continue;
+ DCHECK(!dict->IsDeleted(i));
+ PropertyDetails details = dict->DetailsAt(i);
+ if (details.type() == ACCESSOR_CONSTANT) return true;
+ }
+ return false;
+ }
+
static Object* GetRaw(FixedArrayBase* store, uint32_t entry) {
SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
return backing_store->ValueAt(entry);
@@ -1813,6 +1838,11 @@ class TypedElementsAccessor
BackingStore::cast(backing_store)->SetValue(entry, value);
}
+ static bool HasAccessorsImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ return false;
+ }
+
static inline void SetImpl(FixedArrayBase* backing_store, uint32_t entry,
Object* value, WriteBarrierMode mode) {
BackingStore::cast(backing_store)->SetValue(entry, value);
@@ -1970,6 +2000,13 @@ class SloppyArgumentsElementsAccessor
return ArgumentsAccessor::HasEntryImpl(arguments, entry - length);
}
+ static bool HasAccessorsImpl(JSObject* holder,
+ FixedArrayBase* backing_store) {
+ FixedArray* parameter_map = FixedArray::cast(backing_store);
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ return ArgumentsAccessor::HasAccessorsImpl(holder, arguments);
+ }
+
static uint32_t GetIndexForEntryImpl(FixedArrayBase* parameters,
uint32_t entry) {
FixedArray* parameter_map = FixedArray::cast(parameters);
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index 71e70a1c00..86ada229e3 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -54,6 +54,8 @@ class ElementsAccessor {
return HasElement(holder, index, handle(holder->elements()), filter);
}
+ virtual bool HasAccessors(JSObject* holder) = 0;
+
// Returns true if the backing store is compact in the given range
virtual bool IsPacked(Handle<JSObject> holder,
Handle<FixedArrayBase> backing_store, uint32_t start,
diff --git a/deps/v8/src/heap/incremental-marking.cc b/deps/v8/src/heap/incremental-marking.cc
index 52d0ca4e51..579df28b08 100644
--- a/deps/v8/src/heap/incremental-marking.cc
+++ b/deps/v8/src/heap/incremental-marking.cc
@@ -847,16 +847,21 @@ void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
intptr_t bytes_processed = 0;
- Map* filler_map = heap_->one_pointer_filler_map();
+ Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
+ Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
MarkingDeque* marking_deque =
heap_->mark_compact_collector()->marking_deque();
while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
HeapObject* obj = marking_deque->Pop();
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
+ // Explicitly skip one and two word fillers. Incremental markbit patterns
+ // are correct only for objects that occupy at least two words.
+ // Moreover, slots filtering for left-trimmed arrays works only when
+ // the distance between the old array start and the new array start
+ // is greater than two if both starts are marked.
Map* map = obj->map();
- if (map == filler_map) continue;
+ if (map == one_pointer_filler_map || map == two_pointer_filler_map)
+ continue;
int size = obj->SizeFromMap(map);
unscanned_bytes_of_large_object_ = 0;
diff --git a/deps/v8/src/heap/memory-reducer.cc b/deps/v8/src/heap/memory-reducer.cc
index 33e624978f..ee1009134b 100644
--- a/deps/v8/src/heap/memory-reducer.cc
+++ b/deps/v8/src/heap/memory-reducer.cc
@@ -73,14 +73,7 @@ void MemoryReducer::NotifyTimer(const Event& event) {
PrintIsolate(heap()->isolate(), "Memory reducer: started GC #%d\n",
state_.started_gcs);
}
- if (heap()->ShouldOptimizeForMemoryUsage()) {
- // TODO(ulan): Remove this once crbug.com/552305 is fixed.
- // Do full GC if memory usage has higher priority than latency.
- heap()->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
- "memory reducer");
- } else {
- heap()->StartIdleIncrementalMarking();
- }
+ heap()->StartIdleIncrementalMarking();
} else if (state_.action == kWait) {
if (!heap()->incremental_marking()->IsStopped() &&
heap()->ShouldOptimizeForMemoryUsage()) {
diff --git a/deps/v8/src/runtime/runtime-array.cc b/deps/v8/src/runtime/runtime-array.cc
index 28e92cbd2b..f2a217d7f7 100644
--- a/deps/v8/src/runtime/runtime-array.cc
+++ b/deps/v8/src/runtime/runtime-array.cc
@@ -120,9 +120,11 @@ RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
HandleScope scope(isolate);
DCHECK(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
- return *JSObject::PrepareElementsForSort(object, limit);
+ if (object->IsJSProxy()) return Smi::FromInt(-1);
+ return *JSObject::PrepareElementsForSort(Handle<JSObject>::cast(object),
+ limit);
}