author     isaacs <i@izs.me>  2012-02-06 15:21:49 -0800
committer  isaacs <i@izs.me>  2012-02-06 15:21:49 -0800
commit     8be699494ec67c3ba895bd8e1c9e3e73b02311d3 (patch)
tree       22ee6f2ba22a26594ae0062c827c67710fc166db /deps/v8/src/arm
parent     23514fc94648185c092355bf3e5bbce76844bd42 (diff)
download   node-new-8be699494ec67c3ba895bd8e1c9e3e73b02311d3.tar.gz
Upgrade V8 to 3.9.2
Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc                53
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc             207
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc                 24
-rw-r--r--  deps/v8/src/arm/debug-arm.cc                   40
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc            21
-rw-r--r--  deps/v8/src/arm/ic-arm.cc                      62
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc                 32
-rw-r--r--  deps/v8/src/arm/lithium-arm.h                   3
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc         54
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc         57
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h          24
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc   6
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc              62
13 files changed, 418 insertions, 227 deletions
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index b461b45a57..186d06e3f0 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -114,9 +114,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- // Load the initial map from the array function.
- __ ldr(scratch1, FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadInitialArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -210,9 +208,7 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ ldr(elements_array_storage,
- FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadInitialArrayMap(array_function, scratch2, elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ tst(array_size, array_size);
@@ -667,7 +663,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
// -- r1 : constructor function
@@ -675,42 +673,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label slow, non_function_call;
- // Check that the function is not a smi.
- __ JumpIfSmi(r1, &non_function_call);
- // Check that the function is a JSFunction.
- __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
- __ b(ne, &slow);
-
- // Jump to the function-specific construct stub.
- __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
- __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
-
- // r0: number of arguments
- // r1: called object
- // r2: object type
- Label do_call;
- __ bind(&slow);
- __ cmp(r2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ b(ne, &non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // Set expected number of arguments to zero (not changing r0).
- __ mov(r2, Operand(0, RelocInfo::NONE));
- __ SetCallKind(r5, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
@@ -1117,7 +1079,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
@@ -1297,7 +1260,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// 1. Make sure we have at least one argument.
// r0: actual number of arguments
{ Label done;
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ b(ne, &done);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
__ push(r2);
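
The hunks above drop Builtins::Generate_JSConstructCall; the same dispatch reappears below as CallConstructStub::Generate in code-stubs-arm.cc. A rough stand-alone model of that control flow, written as plain C++ with invented names rather than V8 code:

    // Sketch of the construct-call dispatch that moves from the builtin into
    // CallConstructStub::Generate.  Types and helpers here are invented for
    // illustration; only the branch structure mirrors the generated code.
    #include <cstdio>

    enum ObjectType { kSmi, kJSFunction, kJSFunctionProxy, kOtherHeapObject };

    const char* DispatchConstruct(ObjectType callee) {
      if (callee == kSmi) return "CALL_NON_FUNCTION_AS_CONSTRUCTOR builtin";
      if (callee == kJSFunction)
        return "construct stub taken from the callee's SharedFunctionInfo";
      if (callee == kJSFunctionProxy)
        return "CALL_FUNCTION_PROXY_AS_CONSTRUCTOR builtin";
      return "CALL_NON_FUNCTION_AS_CONSTRUCTOR builtin";
    }

    int main() {
      std::printf("%s\n", DispatchConstruct(kJSFunction));
      return 0;
    }
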
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index c33df5cf77..3763867225 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -122,7 +122,6 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ str(r1, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
__ str(r4, FieldMemOperand(r0, JSFunction::kNextFunctionLinkOffset));
-
// Initialize the code pointer in the function to be the one
// found in the shared function info object.
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
@@ -157,20 +156,18 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, MemOperand(sp, 0));
// Set up the object header.
- __ LoadRoot(r2, Heap::kFunctionContextMapRootIndex);
- __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadRoot(r1, Heap::kFunctionContextMapRootIndex);
__ mov(r2, Operand(Smi::FromInt(length)));
__ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
+ __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- // Set up the fixed slots.
+ // Set up the fixed slots, copy the global object from the previous context.
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ mov(r1, Operand(Smi::FromInt(0)));
__ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ str(cp, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
- // Copy the global object from the previous context.
- __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r2, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
@@ -229,14 +226,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ ldr(r3, ContextOperand(r3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Set up the fixed slots.
+ // Set up the fixed slots, copy the global object from the previous context.
+ __ ldr(r2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ str(r3, ContextOperand(r0, Context::CLOSURE_INDEX));
__ str(cp, ContextOperand(r0, Context::PREVIOUS_INDEX));
__ str(r1, ContextOperand(r0, Context::EXTENSION_INDEX));
-
- // Copy the global object from the previous context.
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ str(r1, ContextOperand(r0, Context::GLOBAL_INDEX));
+ __ str(r2, ContextOperand(r0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(r1, Heap::kTheHoleValueRootIndex);
@@ -326,8 +321,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
Label double_elements, check_fast_elements;
__ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
__ b(ne, &check_fast_elements);
GenerateFastCloneShallowArrayCommon(masm, 0,
COPY_ON_WRITE_ELEMENTS, &slow_case);
@@ -336,8 +330,7 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ Ret();
__ bind(&check_fast_elements);
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &double_elements);
GenerateFastCloneShallowArrayCommon(masm, length_,
CLONE_ELEMENTS, &slow_case);
@@ -590,7 +583,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Label is_smi, done;
- __ JumpIfSmi(object, &is_smi);
+ // Smi-check
+ __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
+ // Heap number check
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
@@ -612,7 +607,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
if (CpuFeatures::IsSupported(VFP3)) {
CpuFeatures::Scope scope(VFP3);
// Convert smi to double using VFP instructions.
- __ SmiUntag(scratch1, object);
__ vmov(dst.high(), scratch1);
__ vcvt_f64_s32(dst, dst.high());
if (destination == kCoreRegisters) {
@@ -647,11 +641,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
- Label is_smi;
Label done;
Label not_in_int32_range;
- __ JumpIfSmi(object, &is_smi);
+ __ UntagAndJumpIfSmi(dst, object, &done);
__ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
__ cmp(scratch1, heap_number_map);
__ b(ne, not_number);
@@ -671,10 +664,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
scratch1,
scratch2,
scratch3);
- __ jmp(&done);
-
- __ bind(&is_smi);
- __ SmiUntag(dst, object);
__ bind(&done);
}
@@ -847,10 +836,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Label done;
- // Untag the object into the destination register.
- __ SmiUntag(dst, object);
- // Just return if the object is a smi.
- __ JumpIfSmi(object, &done);
+ __ UntagAndJumpIfSmi(dst, object, &done);
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
@@ -2338,7 +2324,7 @@ void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
__ cmp(ip, Operand(scratch2));
__ b(ne, &not_smi_result);
// Go slow on zero result to handle -0.
- __ tst(scratch1, Operand(scratch1));
+ __ cmp(scratch1, Operand(0));
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ Ret(ne);
// We need -0 if we were multiplying a negative number with 0 to get 0.
@@ -3310,8 +3296,7 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Check if cache matches: Double value is stored in uint32_t[2] array.
__ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
__ cmp(r2, r4);
- __ b(ne, &calculate);
- __ cmp(r3, r5);
+ __ cmp(r3, r5, eq);
__ b(ne, &calculate);
// Cache hit. Load result, cleanup and return.
Counters* counters = masm->isolate()->counters();
@@ -3468,7 +3453,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch = r9;
const Register scratch2 = r7;
- Label call_runtime, done, exponent_not_smi, int_exponent;
+ Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
@@ -3479,7 +3464,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(base, &base_is_smi);
+ __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
@@ -3488,16 +3473,12 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&unpack_exponent);
__ bind(&base_is_smi);
- __ SmiUntag(base);
- __ vmov(single_scratch, base);
+ __ vmov(single_scratch, scratch);
__ vcvt_f64_s32(double_base, single_scratch);
__ bind(&unpack_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ bind(&exponent_not_smi);
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
@@ -3505,11 +3486,8 @@ void MathPowStub::Generate(MacroAssembler* masm) {
FieldMemOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
// Base is already in double_base.
- __ JumpIfNotSmi(exponent, &exponent_not_smi);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ bind(&exponent_not_smi);
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
@@ -3582,13 +3560,19 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ bind(&int_exponent_convert);
__ vcvt_u32_f64(single_scratch, double_exponent);
- __ vmov(exponent, single_scratch);
+ __ vmov(scratch, single_scratch);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
- __ mov(scratch, exponent); // Back up exponent.
+ // Get two copies of exponent in the registers scratch and exponent.
+ if (exponent_type_ == INTEGER) {
+ __ mov(scratch, exponent);
+ } else {
+ // Exponent has previously been stored into scratch as an untagged integer.
+ __ mov(exponent, scratch);
+ }
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, 1.0);
@@ -4098,11 +4082,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
// real lookup and update the call site cache.
if (!HasCallSiteInlineCheck()) {
Label miss;
- __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
- __ cmp(function, ip);
+ __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
__ b(ne, &miss);
- __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
- __ cmp(map, ip);
+ __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
__ b(ne, &miss);
__ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
__ Ret(HasArgsInRegisters() ? 0 : 2);
@@ -4656,7 +4638,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference::address_of_regexp_stack_memory_size(isolate);
__ mov(r0, Operand(address_of_regexp_stack_memory_size));
__ ldr(r0, MemOperand(r0, 0));
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ b(eq, &runtime);
// Check that the first argument is a JSRegExp object.
@@ -4727,8 +4709,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(last_match_info_elements,
FieldMemOperand(r0, JSArray::kElementsOffset));
__ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
- __ cmp(r0, ip);
+ __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
__ b(ne, &runtime);
// Check that the last match info has space for the capture registers and the
// additional information.
@@ -5082,11 +5063,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set input, index and length fields from arguments.
__ ldr(r1, MemOperand(sp, kPointerSize * 0));
+ __ ldr(r2, MemOperand(sp, kPointerSize * 1));
+ __ ldr(r6, MemOperand(sp, kPointerSize * 2));
__ str(r1, FieldMemOperand(r0, JSRegExpResult::kInputOffset));
- __ ldr(r1, MemOperand(sp, kPointerSize * 1));
- __ str(r1, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
- __ ldr(r1, MemOperand(sp, kPointerSize * 2));
- __ str(r1, FieldMemOperand(r0, JSArray::kLengthOffset));
+ __ str(r2, FieldMemOperand(r0, JSRegExpResult::kIndexOffset));
+ __ str(r6, FieldMemOperand(r0, JSArray::kLengthOffset));
// Fill out the elements FixedArray.
// r0: JSArray, tagged.
@@ -5108,9 +5089,9 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// r3: Start of elements in FixedArray.
// r5: Number of elements to fill.
Label loop;
- __ tst(r5, Operand(r5));
+ __ cmp(r5, Operand(0));
__ bind(&loop);
- __ b(le, &done); // Jump if r1 is negative or zero.
+ __ b(le, &done); // Jump if r5 is negative or zero.
__ sub(r5, r5, Operand(1), SetCC);
__ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
__ jmp(&loop);
@@ -5124,24 +5105,48 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Handle<Code> code) {
- code->set_has_function_cache(false);
-}
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // r1 : the function to call
+ // r2 : cache cell for call target
+ Label done;
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- UNREACHABLE();
-}
+ // Load the cache state into r3.
+ __ ldr(r3, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ cmp(r3, r1);
+ __ b(eq, &done);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ b(eq, &done);
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex, ne);
+ __ str(ip, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), ne);
+ // An uninitialized cache is patched with the function.
+ __ str(r1, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset), eq);
+ // No need for a write barrier here - cells are rescanned.
-Object* CallFunctionStub::GetCachedValue(Address address) {
- UNREACHABLE();
- return NULL;
+ __ bind(&done);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
// r1 : the function to call
+ // r2 : cache cell for call target
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -5219,6 +5224,48 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // r0 : number of arguments
+ // r1 : the function to call
+ // r2 : cache cell for call target
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(r1, &non_function_call);
+ // Check that the function is a JSFunction.
+ __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
+ __ b(ne, &slow);
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+
+ // Jump to the function-specific construct stub.
+ __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+ __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
+ __ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+ // r0: number of arguments
+ // r1: called object
+ // r3: object type
+ Label do_call;
+ __ bind(&slow);
+ __ cmp(r3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ b(ne, &non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing r0).
+ __ mov(r2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(r5, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
void CompareStub::PrintName(StringStream* stream) {
@@ -5370,8 +5417,7 @@ void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
__ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(result_, Operand(ip));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
__ b(eq, &slow_case_);
__ bind(&exit_);
}
@@ -5799,10 +5845,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
// If either to or from had the smi tag bit set, then carry is set now.
__ b(cs, &runtime); // Either "from" or "to" is not a smi.
- __ b(mi, &runtime); // From is negative.
-
+ // We want to bail out to runtime here if "from" is negative. In that case, the
+ // next instruction is not executed and we fall through to bailing out to
+ // runtime. pl is the opposite of mi.
// Both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC);
+ __ sub(r2, r2, Operand(r3), SetCC, pl);
__ b(mi, &runtime); // Fail if from > to.
// Make sure first argument is a string.
@@ -5875,9 +5922,9 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ ldr(r5, FieldMemOperand(r0, SlicedString::kOffsetOffset));
- __ add(r3, r3, Operand(r5, ASR, 1));
+ __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset));
__ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset));
+ __ add(r3, r3, Operand(r4, ASR, 1)); // Add offset to index.
// Update instance type.
__ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
@@ -6020,7 +6067,7 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Label compare_chars;
__ bind(&check_zero_length);
STATIC_ASSERT(kSmiTag == 0);
- __ tst(length, Operand(length));
+ __ cmp(length, Operand(0));
__ b(ne, &compare_chars);
__ mov(r0, Operand(Smi::FromInt(EQUAL)));
__ Ret();
@@ -6053,7 +6100,7 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
__ mov(scratch1, scratch2, LeaveCC, gt);
Register min_length = scratch1;
STATIC_ASSERT(kSmiTag == 0);
- __ tst(min_length, Operand(min_length));
+ __ cmp(min_length, Operand(0));
__ b(eq, &compare_lengths);
// Compare loop.
@@ -6811,7 +6858,7 @@ void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ mov(r1, Operand(Handle<String>(name)));
StringDictionaryLookupStub stub(NEGATIVE_LOOKUP);
__ CallStub(&stub);
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ ldm(ia_w, sp, spill_mask);
__ b(eq, done);
@@ -6888,7 +6935,7 @@ void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
}
StringDictionaryLookupStub stub(POSITIVE_LOOKUP);
__ CallStub(&stub);
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
__ mov(scratch2, Operand(r2));
__ ldm(ia_w, sp, spill_mask);
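
GenerateRecordCallTarget above maintains a three-state cache in the call target cell: uninitialized (the hole), monomorphic (a concrete JSFunction), and megamorphic (undefined). A minimal stand-alone model of that state machine, in plain C++ with stand-in sentinels rather than real heap values:

    // State machine behind GenerateRecordCallTarget: the cell starts out as the
    // uninitialized sentinel, is patched with the first call target seen, and
    // collapses to the megamorphic sentinel once a different target shows up.
    // Plain C++ model; the sentinels here are arbitrary stand-ins.
    #include <cassert>

    struct JSFunction {};
    static JSFunction uninitialized_sentinel;  // plays the role of the-hole
    static JSFunction megamorphic_sentinel;    // plays the role of undefined

    void RecordCallTarget(JSFunction** cell, JSFunction* target) {
      if (*cell == target || *cell == &megamorphic_sentinel) return;  // hit, or already megamorphic
      *cell = (*cell == &uninitialized_sentinel)
                  ? target                  // first sighting: go monomorphic
                  : &megamorphic_sentinel;  // a different target: go megamorphic
    }

    int main() {
      JSFunction f, g;
      JSFunction* cell = &uninitialized_sentinel;
      RecordCallTarget(&cell, &f); assert(cell == &f);                     // monomorphic
      RecordCallTarget(&cell, &f); assert(cell == &f);                     // stays monomorphic
      RecordCallTarget(&cell, &g); assert(cell == &megamorphic_sentinel);  // megamorphic
      return 0;
    }
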
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 3371e8a6b1..ce35b97c18 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -104,10 +104,10 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ add(lr, lr, Operand(r5, LSL, 2));
__ AllocateInNewSpace(lr, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedDoubleArray, not tagged as heap object
+ // Set destination FixedDoubleArray's length and map.
__ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
- // Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Update receiver's map.
__ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
@@ -155,10 +155,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ bind(&loop);
__ ldr(r9, MemOperand(r3, 4, PostIndex));
// r9: current element
- __ JumpIfNotSmi(r9, &convert_hole);
+ __ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
- __ SmiUntag(r9);
if (vfp3_supported) {
CpuFeatures::Scope scope(VFP3);
__ vmov(s0, r9);
@@ -181,6 +180,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
+ // Restore a "smi-untagged" heap object.
+ __ SmiTag(r9);
+ __ orr(r9, r9, Operand(1));
__ CompareRoot(r9, Heap::kTheHoleValueRootIndex);
__ Assert(eq, "object found in smi-only array");
}
@@ -208,9 +210,8 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
Label entry, loop, convert_hole, gc_required;
__ push(lr);
- __ Push(r3, r2, r1, r0);
-
__ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+ __ Push(r3, r2, r1, r0);
__ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
// r4: source FixedDoubleArray
// r5: number of elements (smi-tagged)
@@ -220,10 +221,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ add(r0, r0, Operand(r5, LSL, 1));
__ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS);
// r6: destination FixedArray, not tagged as heap object
+ // Set destination FixedArray's length and map.
__ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
- __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
- // Set destination FixedDoubleArray's length.
__ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+ __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
// Prepare for conversion loop.
__ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
@@ -325,8 +326,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ add(index, index, Operand(result, ASR, kSmiTagSize));
__ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ add(index, index, Operand(result, ASR, kSmiTagSize));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
@@ -336,8 +337,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// the string.
__ bind(&cons_string);
__ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
- __ cmp(result, ip);
+ __ CompareRoot(result, Heap::kEmptyStringRootIndex);
__ b(ne, call_runtime);
// Get the first of the two strings and load its instance type.
__ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
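
The debug-only SmiTag/orr pair added in GenerateSmiOnlyToDouble exists because UntagAndJumpIfNotSmi has already shifted the value right by one even when it turned out to be a heap object, so the hole comparison needs the tagged pointer back. A small arithmetic check of that round trip, as plain C++ under the kSmiTag == 0 / kHeapObjectTag == 1 convention:

    // UntagAndJumpIfNotSmi leaves "value >> 1" in the register even for heap
    // objects; (value << 1) | 1 -- the SmiTag plus orr from the diff -- restores
    // the tagged pointer so CompareRoot can test against the hole.
    // Plain C++ model, not V8 code.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t the_hole = 0x12345679u;         // some heap pointer: low tag bit set
      uint32_t in_r9 = the_hole >> 1;          // what the earlier ASR #1 left behind
      uint32_t restored = (in_r9 << 1) | 1u;   // SmiTag + orr #1
      assert(restored == the_hole);
      return 0;
    }
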
diff --git a/deps/v8/src/arm/debug-arm.cc b/deps/v8/src/arm/debug-arm.cc
index 837410302a..96139a2597 100644
--- a/deps/v8/src/arm/debug-arm.cc
+++ b/deps/v8/src/arm/debug-arm.cc
@@ -251,14 +251,6 @@ void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // Calling convention for construct call (from builtins-arm.cc)
- // -- r0 : number of arguments (not smi)
- // -- r1 : constructor function
- Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
-}
-
-
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that r0 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -268,6 +260,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm.cc).
// ----------- S t a t e -------------
// -- r1 : function
// -----------------------------------
@@ -275,6 +268,37 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-arm.cc).
+ // ----------- S t a t e -------------
+ // -- r1 : function
+ // -- r2 : cache cell for call target
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), 0);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm.cc)
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments (not smi)
+ // -- r1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r1.bit(), r0.bit());
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-arm.cc)
+ // ----------- S t a t e -------------
+ // -- r0 : number of arguments (not smi)
+ // -- r1 : constructor function
+ // -- r2 : cache cell for call target
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, r1.bit() | r2.bit(), r0.bit());
+}
+
+
void Debug::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the constant pool in the debug break slot code.
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 6654263989..a4fabe2ac1 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -1820,7 +1820,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
__ mov(ip, Operand(scratch1, ASR, 31));
__ cmp(ip, Operand(scratch2));
__ b(ne, &stub_call);
- __ tst(scratch1, Operand(scratch1));
+ __ cmp(scratch1, Operand(0));
__ mov(right, Operand(scratch1), LeaveCC, ne);
__ b(ne, &done);
__ add(scratch2, right, Operand(left), SetCC);
@@ -2379,9 +2379,22 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ mov(r0, Operand(arg_count));
__ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+ // Record call targets in unoptimized code, but not in the snapshot.
+ CallFunctionFlags flags;
+ if (!Serializer::enabled()) {
+ flags = RECORD_CALL_TARGET;
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->id(), cell);
+ __ mov(r2, Operand(cell));
+ } else {
+ flags = NO_CALL_FUNCTION_FLAGS;
+ }
+
+ CallConstructStub stub(flags);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
context()->Plug(r0);
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index dfd4d2e396..14daadaea9 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -1312,14 +1312,16 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Label slow, array, extra, check_if_double_array;
Label fast_object_with_map_check, fast_object_without_map_check;
Label fast_double_with_map_check, fast_double_without_map_check;
+ Label transition_smi_elements, finish_object_store, non_double_value;
+ Label transition_double_elements;
// Register usage.
Register value = r0;
Register key = r1;
Register receiver = r2;
- Register elements = r3; // Elements array of the receiver.
+ Register receiver_map = r3;
Register elements_map = r6;
- Register receiver_map = r7;
+ Register elements = r7; // Elements array of the receiver.
// r4 and r5 are used as general scratch registers.
// Check that the key is a smi.
@@ -1417,9 +1419,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ Ret();
__ bind(&non_smi_value);
- // Escape to slow case when writing non-smi into smi-only array.
- __ CheckFastObjectElements(receiver_map, scratch_value, &slow);
+ // Escape to elements kind transition case.
+ __ CheckFastObjectElements(receiver_map, scratch_value,
+ &transition_smi_elements);
// Fast elements array, store the value to the elements backing store.
+ __ bind(&finish_object_store);
__ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
__ add(address, address, Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ str(value, MemOperand(address));
@@ -1445,12 +1449,56 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
key,
receiver,
elements,
+ r3,
r4,
r5,
r6,
- r7,
- &slow);
+ &transition_double_elements);
__ Ret();
+
+ __ bind(&transition_smi_elements);
+ // Transition the array appropriately depending on the value type.
+ __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+ __ b(ne, &non_double_value);
+
+ // Value is a double. Transition FAST_SMI_ONLY_ELEMENTS ->
+ // FAST_DOUBLE_ELEMENTS and complete the store.
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_DOUBLE_ELEMENTS,
+ receiver_map,
+ r4,
+ &slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateSmiOnlyToDouble(masm, &slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&fast_double_without_map_check);
+
+ __ bind(&non_double_value);
+ // Value is not a double, FAST_SMI_ONLY_ELEMENTS -> FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ r4,
+ &slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateSmiOnlyToObject(masm);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
+
+ __ bind(&transition_double_elements);
+ // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+ // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+ // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
+ __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
+ FAST_ELEMENTS,
+ receiver_map,
+ r4,
+ &slow);
+ ASSERT(receiver_map.is(r3)); // Transition code expects map in r3
+ ElementsTransitionGenerator::GenerateDoubleToObject(masm, &slow);
+ __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ jmp(&finish_object_store);
}
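
The new transition labels in KeyedStoreIC::GenerateGeneric encode a small decision table: a non-smi stored into FAST_SMI_ONLY_ELEMENTS moves the receiver to FAST_DOUBLE_ELEMENTS when the value is a heap number and to FAST_ELEMENTS otherwise, while a non-heap-number object stored into FAST_DOUBLE_ELEMENTS moves it to FAST_ELEMENTS. The same table restated as plain, illustrative C++:

    // Decision table implemented by the transition_smi_elements /
    // transition_double_elements paths above.  Enums invented here; the kind
    // names mirror the constants used in the diff.
    #include <cstdio>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
    enum ValueKind { SMI_VALUE, HEAP_NUMBER_VALUE, OTHER_HEAP_OBJECT_VALUE };

    ElementsKind KindAfterStore(ElementsKind kind, ValueKind value) {
      if (kind == FAST_SMI_ONLY_ELEMENTS && value != SMI_VALUE)
        return value == HEAP_NUMBER_VALUE ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      if (kind == FAST_DOUBLE_ELEMENTS && value == OTHER_HEAP_OBJECT_VALUE)
        return FAST_ELEMENTS;  // transition_double_elements
      return kind;             // store proceeds with the current kind
    }

    int main() {
      std::printf("%d\n", KindAfterStore(FAST_SMI_ONLY_ELEMENTS, HEAP_NUMBER_VALUE));
      return 0;
    }
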
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 846680f4f6..1111c67faf 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -581,11 +581,6 @@ void LChunkBuilder::Abort(const char* format, ...) {
}
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -676,7 +671,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
HInstruction* instr = HInstruction::cast(value);
VisitInstruction(instr);
}
- allocator_->RecordUse(value, operand);
+ operand->set_virtual_register(value->id());
return operand;
}
@@ -684,19 +679,13 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
+ result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
return instr;
}
template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
@@ -802,21 +791,22 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
+ operand->set_virtual_register(allocator_->GetVirtualRegister());
+ if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
return operand;
}
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
@@ -1631,11 +1621,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegisterAtStart(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
if (!needs_check) {
- res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ res = DefineAsRegister(new LSmiUntag(value, needs_check));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
@@ -1671,12 +1661,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
- LOperand* value = UseRegister(val);
+ LOperand* value = UseRegisterAtStart(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
+ return DefineAsRegister(new LSmiTag(value));
} else {
LNumberTagI* result = new LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else {
ASSERT(to.IsDouble());
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index d3aff76e18..45043593bd 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -2161,7 +2161,6 @@ class LChunkBuilder BASE_EMBEDDED {
void Abort(const char* format, ...);
// Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2212,8 +2211,6 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result);
template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 76c8443e7c..6f898fca5c 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -3376,9 +3376,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(r1));
ASSERT(ToRegister(instr->result()).is(r0));
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ mov(r0, Operand(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3796,12 +3796,11 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LNumberTagI* instr_;
};
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
+ Register src = ToRegister(instr->InputAt(0));
+ Register dst = ToRegister(instr->result());
DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
- __ SmiTag(reg, SetCC);
+ __ SmiTag(dst, src, SetCC);
__ b(vs, deferred->entry());
__ bind(deferred->exit());
}
@@ -3809,7 +3808,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
- Register reg = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(instr->InputAt(0));
+ Register dst = ToRegister(instr->result());
DoubleRegister dbl_scratch = double_scratch0();
SwVfpRegister flt_scratch = dbl_scratch.low();
@@ -3820,14 +3820,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
Label done;
- __ SmiUntag(reg);
- __ eor(reg, reg, Operand(0x80000000));
- __ vmov(flt_scratch, reg);
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ eor(src, src, Operand(0x80000000));
+ }
+ __ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
if (FLAG_inline_new) {
__ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(r5, r3, r4, r6, &slow);
- if (!reg.is(r5)) __ mov(reg, r5);
+ __ Move(dst, r5);
__ b(&done);
}
@@ -3838,16 +3840,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// register is stored, as this register is in the pointer map, but contains an
// integer value.
__ mov(ip, Operand(0));
- __ StoreToSafepointRegisterSlot(ip, reg);
+ __ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- if (!reg.is(r0)) __ mov(reg, r0);
+ __ Move(dst, r0);
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sub(ip, reg, Operand(kHeapObjectTag));
+ __ sub(ip, dst, Operand(kHeapObjectTag));
__ vstr(dbl_scratch, ip, HeapNumber::kValueOffset);
- __ StoreToSafepointRegisterSlot(reg, reg);
+ __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -3895,23 +3897,21 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
if (instr->needs_check()) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
- __ SmiUntag(ToRegister(input), SetCC);
+ __ SmiUntag(result, input, SetCC);
DeoptimizeIf(cs, instr->environment());
} else {
- __ SmiUntag(ToRegister(input));
+ __ SmiUntag(result, input);
}
}
@@ -3928,7 +3928,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
// Smi check.
- __ JumpIfSmi(input_reg, &load_smi);
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3967,10 +3967,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Smi to double register conversion
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ vmov(flt_scratch, input_reg);
+ // scratch: untagged value of input_reg
+ __ vmov(flt_scratch, scratch);
__ vcvt_f64_s32(result_reg, flt_scratch);
- __ SmiTag(input_reg); // Retag smi.
__ bind(&done);
}
@@ -4256,7 +4255,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
- __ JumpIfSmi(input_reg, &is_smi);
+ __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
// Check for heap number
__ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4279,7 +4278,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// smi
__ bind(&is_smi);
- __ SmiUntag(result_reg, input_reg);
__ ClampUint8(result_reg, result_reg);
__ bind(&done);
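
DoDeferredNumberTagI only runs after SmiTag has overflowed, and the eor with 0x80000000 above is what recovers the original integer: tagging is a left shift by one, so on overflow the untagged (ASR #1) value comes back with bit 31 flipped. A worked check in plain C++, assuming the usual arithmetic right shift for signed values:

    // SmiTag is x << 1; when that overflows, ASR #1 plus an eor of 0x80000000
    // reconstructs x before the value is converted to a double.
    // Plain C++ model, not V8 code.
    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t x = 0x40000000;  // does not fit in a 31-bit smi
      int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(x) << 1);  // SmiTag, overflows
      int32_t untagged = tagged >> 1;       // SmiUntag (arithmetic shift)
      assert(untagged != x);                // sign bit is wrong after the overflow
      assert((untagged ^ INT32_MIN) == x);  // the eor with 0x80000000 repairs it
      return 0;
    }
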
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 9894ff202e..c92b943817 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -2879,6 +2879,47 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match) {
+ // Load the global or builtins object from the current context.
+ ldr(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check that the function's map is the same as the expected cached map.
+ int expected_index =
+ Context::GetContextMapIndexFromElementsKind(expected_kind);
+ ldr(ip, MemOperand(scratch, Context::SlotOffset(expected_index)));
+ cmp(map_in_out, ip);
+ b(ne, no_map_match);
+
+ // Use the transitioned cached map.
+ int trans_index =
+ Context::GetContextMapIndexFromElementsKind(transitioned_kind);
+ ldr(map_in_out, MemOperand(scratch, Context::SlotOffset(trans_index)));
+}
+
+
+void MacroAssembler::LoadInitialArrayMap(
+ Register function_in, Register scratch, Register map_out) {
+ ASSERT(!function_in.is(map_out));
+ Label done;
+ ldr(map_out, FieldMemOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ if (!FLAG_smi_only_arrays) {
+ LoadTransitionedArrayMapConditional(FAST_SMI_ONLY_ELEMENTS,
+ FAST_ELEMENTS,
+ map_out,
+ scratch,
+ &done);
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
ldr(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -2939,6 +2980,22 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
}
+void MacroAssembler::UntagAndJumpIfSmi(
+ Register dst, Register src, Label* smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ b(cc, smi_case); // Shifter carry is not set for a smi.
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(
+ Register dst, Register src, Label* non_smi_case) {
+ STATIC_ASSERT(kSmiTag == 0);
+ mov(dst, Operand(src, ASR, kSmiTagSize), SetCC);
+ b(cs, non_smi_case); // Shifter carry is set for a non-smi.
+}
+
+
void MacroAssembler::JumpIfEitherSmi(Register reg1,
Register reg2,
Label* on_either_smi) {
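
UntagAndJumpIfSmi and UntagAndJumpIfNotSmi fold the smi check into the untagging mov: ASR #1 with SetCC shifts the tag bit into the shifter carry, so carry clear means smi (tag 0) and carry set means heap object (tag 1). A plain C++ model of that single-instruction trick:

    // Flag trick behind UntagAndJumpIfSmi / UntagAndJumpIfNotSmi: the ASR #1
    // that untags also classifies the value via the shifted-out bit.
    // Illustrative C++ only; kSmiTag is 0 and kHeapObjectTag is 1, as in V8.
    #include <cassert>
    #include <cstdint>

    struct UntagResult { int32_t untagged; bool carry; };

    UntagResult AsrWithCarry(int32_t value) {
      bool carry = (value & 1) != 0;     // bit shifted out by ASR #1
      return { value >> 1, carry };      // arithmetic shift on the usual targets
    }

    int main() {
      UntagResult smi = AsrWithCarry(42 << 1);       // tagged smi 42
      assert(!smi.carry && smi.untagged == 42);      // carry clear -> take smi_case
      UntagResult obj = AsrWithCarry(0x12345679);    // heap pointer, low bit set
      assert(obj.carry);                             // carry set -> non_smi_case
      return 0;
    }
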
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 60c2e6f6bf..368ca5c3fa 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -491,6 +491,22 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Conditionally load the cached Array transitioned map of type
+ // transitioned_kind from the global context if the map in register
+ // map_in_out is the cached Array map in the global context of
+ // expected_kind.
+ void LoadTransitionedArrayMapConditional(
+ ElementsKind expected_kind,
+ ElementsKind transitioned_kind,
+ Register map_in_out,
+ Register scratch,
+ Label* no_map_match);
+
+ // Load the initial map for new Arrays from a JSFunction.
+ void LoadInitialArrayMap(Register function_in,
+ Register scratch,
+ Register map_out);
+
void LoadGlobalFunction(int index, Register function);
// Load the initial map from the global function. The registers
@@ -1144,6 +1160,14 @@ class MacroAssembler: public Assembler {
mov(dst, Operand(src, ASR, kSmiTagSize), s);
}
+ // Untag the source value into destination and jump if source is a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+ // Untag the source value into destination and jump if source is not a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
// Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index b212f9f6e6..880c372538 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -571,7 +571,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
BranchOrBacktrack(eq, on_no_match);
return true;
}
@@ -585,7 +585,7 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
ExternalReference map = ExternalReference::re_word_character_map();
__ mov(r0, Operand(map));
__ ldrb(r0, MemOperand(r0, current_character()));
- __ tst(r0, Operand(r0));
+ __ cmp(r0, Operand(0));
BranchOrBacktrack(ne, on_no_match);
if (mode_ != ASCII) {
__ bind(&done);
@@ -681,7 +681,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Determine whether the start index is zero, that is at the start of the
// string, and store that value in a local variable.
- __ tst(r1, Operand(r1));
+ __ cmp(r1, Operand(0));
__ mov(r1, Operand(1), LeaveCC, eq);
__ mov(r1, Operand(0, RelocInfo::NONE), LeaveCC, ne);
__ str(r1, MemOperand(frame_pointer(), kAtStart));
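
This file, like several earlier in the patch, swaps "tst rX, Operand(rX)" for "cmp rX, Operand(0)". For the eq/ne branches used here the two are interchangeable, but cmp against zero also leaves C and V in defined states, which matters where the following branch is a signed condition such as the le in the RegExpConstructResult fill loop above; that is a plausible reading of the change rather than a stated rationale. A small flag model in plain C++:

    // Minimal model of the NZCV bits touched by "tst r, r" vs "cmp r, #0".
    // tst leaves V (and C, with a plain register operand) unchanged, while cmp
    // computes them from the subtraction, so signed conditions like le stay
    // meaningful.  Illustrative C++ only.
    #include <cstdint>
    #include <cstdio>

    struct Flags { bool n, z, c, v; };

    Flags TstSelf(int32_t r, Flags prev) {        // flags after: tst r, r
      return { r < 0, r == 0, prev.c, prev.v };
    }

    Flags CmpZero(int32_t r) {                    // flags after: cmp r, #0
      return { r < 0, r == 0, true /*no borrow*/, false /*no overflow*/ };
    }

    bool Le(Flags f) { return f.z || (f.n != f.v); }

    int main() {
      Flags stale = { false, false, false, true };  // V still set by some earlier op
      int32_t count = 3;                            // positive loop count
      std::printf("tst: le=%d  cmp: le=%d\n",
                  Le(TstSelf(count, stale)), Le(CmpZero(count)));
      return 0;                                     // prints "tst: le=1  cmp: le=0"
    }
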
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 33fbee52d6..15c5f4edef 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -45,6 +45,7 @@ static void ProbeTable(Isolate* isolate,
StubCache::Table table,
Register name,
Register offset,
+ int offset_shift_bits,
Register scratch,
Register scratch2) {
ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
@@ -63,23 +64,34 @@ static void ProbeTable(Isolate* isolate,
// Check that the key in the entry matches the name.
__ mov(offsets_base_addr, Operand(key_offset));
- __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1));
+ __ ldr(ip, MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
__ cmp(name, ip);
__ b(ne, &miss);
// Get the code entry from the cache.
__ add(offsets_base_addr, offsets_base_addr,
Operand(value_off_addr - key_off_addr));
- __ ldr(scratch2, MemOperand(offsets_base_addr, offset, LSL, 1));
+ __ ldr(scratch2,
+ MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
// Check that the flags match what we're looking for.
__ ldr(scratch2, FieldMemOperand(scratch2, Code::kFlagsOffset));
- __ bic(scratch2, scratch2, Operand(Code::kFlagsNotUsedInLookup));
- __ cmp(scratch2, Operand(flags));
+ // It's a nice optimization if this constant is encodable in the bic insn.
+
+ uint32_t mask = Code::kFlagsNotUsedInLookup;
+ ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
+ __ bic(scratch2, scratch2, Operand(mask));
+ // Using cmn and the negative instead of cmp means we can use movw.
+ if (flags < 0) {
+ __ cmn(scratch2, Operand(-flags));
+ } else {
+ __ cmp(scratch2, Operand(flags));
+ }
__ b(ne, &miss);
// Re-load code entry from cache.
- __ ldr(offset, MemOperand(offsets_base_addr, offset, LSL, 1));
+ __ ldr(offset,
+ MemOperand(offsets_base_addr, offset, LSL, 1 + offset_shift_bits));
// Jump to the first instruction in the code stub.
__ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -189,23 +201,41 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
__ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset));
__ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ add(scratch, scratch, Operand(ip));
- __ eor(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kPrimaryTableSize - 1) << kHeapObjectTagSize));
+ uint32_t mask = (kPrimaryTableSize - 1) << kHeapObjectTagSize;
+ // Mask down the eor argument to the minimum to keep the immediate
+ // ARM-encodable.
+ __ eor(scratch, scratch, Operand(flags & mask));
+ // Prefer and_ to ubfx here because ubfx takes 2 cycles.
+ __ and_(scratch, scratch, Operand(mask));
+ __ mov(scratch, Operand(scratch, LSR, 1));
// Probe the primary table.
- ProbeTable(isolate, masm, flags, kPrimary, name, scratch, extra, extra2);
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kPrimary,
+ name,
+ scratch,
+ 1,
+ extra,
+ extra2);
// Primary miss: Compute hash for secondary probe.
- __ sub(scratch, scratch, Operand(name));
- __ add(scratch, scratch, Operand(flags));
- __ and_(scratch,
- scratch,
- Operand((kSecondaryTableSize - 1) << kHeapObjectTagSize));
+ __ sub(scratch, scratch, Operand(name, LSR, 1));
+ uint32_t mask2 = (kSecondaryTableSize - 1) << (kHeapObjectTagSize - 1);
+ __ add(scratch, scratch, Operand((flags >> 1) & mask2));
+ __ and_(scratch, scratch, Operand(mask2));
// Probe the secondary table.
- ProbeTable(isolate, masm, flags, kSecondary, name, scratch, extra, extra2);
+ ProbeTable(isolate,
+ masm,
+ flags,
+ kSecondary,
+ name,
+ scratch,
+ 1,
+ extra,
+ extra2);
// Cache miss: Fall-through and let caller handle the miss by
// entering the runtime system.
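
Several comments in the ProbeTable and GenerateProbe hunks turn on whether a constant is ARM-encodable: an addressing-mode-1 immediate must be an 8-bit value rotated right by an even amount, which is what the ASSERT on ImmediateFitsAddrMode1Instruction checks and why the eor argument is masked down first. A sketch of that encodability rule in plain C++, as a model rather than the real assembler predicate:

    // Model of ARM's addressing-mode-1 immediate rule: encodable iff the value
    // is an 8-bit constant rotated right by an even amount.  Illustrative C++;
    // the real check is the ImmediateFitsAddrMode1Instruction predicate used in
    // the ASSERT above.
    #include <cassert>
    #include <cstdint>

    bool FitsArmImmediate(uint32_t value) {
      for (unsigned rot = 0; rot < 32; rot += 2) {
        // Rotating the value left by rot undoes a rotate-right-by-rot encoding.
        uint32_t rotated = rot == 0 ? value : (value << rot) | (value >> (32 - rot));
        if (rotated <= 0xFFu) return true;
      }
      return false;
    }

    int main() {
      assert(FitsArmImmediate(0xFF000000u));   // 0xFF rotated right by 8: encodable
      assert(!FitsArmImmediate(0x12345678u));  // too many significant bits
      return 0;
    }
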