Diffstat (limited to 'deps/v8/src/mips')
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc         |  56
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc       | 174
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc          |  18
-rw-r--r--  deps/v8/src/mips/cpu-mips.cc              |  12
-rw-r--r--  deps/v8/src/mips/debug-mips.cc            |  40
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc      | 123
-rw-r--r--  deps/v8/src/mips/frames-mips.h            |   5
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc     |  19
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc  |  69
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc          |  46
-rw-r--r--  deps/v8/src/mips/lithium-mips.h           |   3
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc  |  83
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h   |  49
13 files changed, 469 insertions(+), 228 deletions(-)
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index 8461342d00..d122e9a9f9 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -116,9 +116,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
Label* gc_required) {
const int initial_capacity = JSArray::kPreallocatedArrayElements;
STATIC_ASSERT(initial_capacity >= 0);
- // Load the initial map from the array function.
- __ lw(scratch1, FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadGlobalInitialConstructedArrayMap(array_function, scratch2, scratch1);
// Allocate the JSArray object together with space for a fixed array with the
// requested elements.
@@ -214,9 +212,8 @@ static void AllocateJSArray(MacroAssembler* masm,
bool fill_with_hole,
Label* gc_required) {
// Load the initial map from the array function.
- __ lw(elements_array_storage,
- FieldMemOperand(array_function,
- JSFunction::kPrototypeOrInitialMapOffset));
+ __ LoadGlobalInitialConstructedArrayMap(array_function, scratch2,
+ elements_array_storage);
if (FLAG_debug_code) { // Assert that array size is not zero.
__ Assert(
@@ -681,7 +678,9 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
}
-void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+ bool is_api_function,
+ bool count_constructions) {
// ----------- S t a t e -------------
// -- a0 : number of arguments
// -- a1 : constructor function
@@ -689,45 +688,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
- Label slow, non_function_call;
- // Check that the function is not a smi.
- __ JumpIfSmi(a1, &non_function_call);
- // Check that the function is a JSFunction.
- __ GetObjectType(a1, a2, a2);
- __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
-
- // Jump to the function-specific construct stub.
- __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
- __ Addu(t9, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
- __ Jump(t9);
-
- // a0: number of arguments
- // a1: called object
- // a2: object type
- Label do_call;
- __ bind(&slow);
- __ Branch(&non_function_call, ne, a2, Operand(JS_FUNCTION_PROXY_TYPE));
- __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
- __ jmp(&do_call);
-
- __ bind(&non_function_call);
- __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
- __ bind(&do_call);
- // CALL_NON_FUNCTION expects the non-function constructor as receiver
- // (instead of the original receiver from the call site). The receiver is
- // stack element argc.
- // Set expected number of arguments to zero (not changing a0).
- __ mov(a2, zero_reg);
- __ SetCallKind(t1, CALL_AS_METHOD);
- __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
- RelocInfo::CODE_TARGET);
-}
-
-
-static void Generate_JSConstructStubHelper(MacroAssembler* masm,
- bool is_api_function,
- bool count_constructions) {
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
@@ -1150,7 +1110,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as a0.
__ mov(a0, a3);
if (is_construct) {
- __ Call(masm->isolate()->builtins()->JSConstructCall());
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
+ __ CallStub(&stub);
} else {
ParameterCount actual(a0);
__ InvokeFunction(a1, actual, CALL_FUNCTION,
@@ -1800,6 +1761,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
__ Call(a3);
+ masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ Ret();
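
[Editor's note] The SetArgumentsAdaptorDeoptPCOffset() call added above records
the pc_offset immediately after "__ Call(a3)" inside the adaptor trampoline.
The new DoComputeArgumentsAdaptorFrame in deoptimizer-mips.cc further down is
the consumer; a minimal sketch of that pairing, using identifiers that appear
later in this diff:

    // Sketch (mirrors deoptimizer-mips.cc below): a rebuilt adaptor frame
    // resumes at the recorded offset inside the trampoline code object.
    Code* adaptor_trampoline =
        builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
    uint32_t pc = reinterpret_cast<uint32_t>(
        adaptor_trampoline->instruction_start() +
        isolate->heap()->arguments_adaptor_deopt_pc_offset()->value());
    output_frame->SetPc(pc);
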
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 289e6b8107..1a0e7c353c 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -158,20 +158,18 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
__ lw(a3, MemOperand(sp, 0));
// Set up the object header.
- __ LoadRoot(a2, Heap::kFunctionContextMapRootIndex);
- __ sw(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(a1, Heap::kFunctionContextMapRootIndex);
__ li(a2, Operand(Smi::FromInt(length)));
__ sw(a2, FieldMemOperand(v0, FixedArray::kLengthOffset));
+ __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
- // Set up the fixed slots.
+ // Set up the fixed slots, copy the global object from the previous context.
+ __ lw(a2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
__ li(a1, Operand(Smi::FromInt(0)));
__ sw(a3, MemOperand(v0, Context::SlotOffset(Context::CLOSURE_INDEX)));
__ sw(cp, MemOperand(v0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
__ sw(a1, MemOperand(v0, Context::SlotOffset(Context::EXTENSION_INDEX)));
-
- // Copy the global object from the previous context.
- __ lw(a1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
- __ sw(a1, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ sw(a2, MemOperand(v0, Context::SlotOffset(Context::GLOBAL_INDEX)));
// Initialize the rest of the slots to undefined.
__ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
@@ -229,14 +227,12 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
__ lw(a3, ContextOperand(a3, Context::CLOSURE_INDEX));
__ bind(&after_sentinel);
- // Set up the fixed slots.
+ // Set up the fixed slots, copy the global object from the previous context.
+ __ lw(a2, ContextOperand(cp, Context::GLOBAL_INDEX));
__ sw(a3, ContextOperand(v0, Context::CLOSURE_INDEX));
__ sw(cp, ContextOperand(v0, Context::PREVIOUS_INDEX));
__ sw(a1, ContextOperand(v0, Context::EXTENSION_INDEX));
-
- // Copy the global object from the previous context.
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_INDEX));
- __ sw(a1, ContextOperand(v0, Context::GLOBAL_INDEX));
+ __ sw(a2, ContextOperand(v0, Context::GLOBAL_INDEX));
// Initialize the rest of the slots to the hole value.
__ LoadRoot(a1, Heap::kTheHoleValueRootIndex);
@@ -592,7 +588,9 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Label is_smi, done;
- __ JumpIfSmi(object, &is_smi);
+ // Smi-check
+ __ UntagAndJumpIfSmi(scratch1, object, &is_smi);
+ // Heap number check
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
@@ -619,7 +617,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Convert smi to double using FPU instructions.
- __ SmiUntag(scratch1, object);
__ mtc1(scratch1, dst);
__ cvt_d_w(dst, dst);
if (destination == kCoreRegisters) {
@@ -654,11 +651,10 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
}
- Label is_smi;
Label done;
Label not_in_int32_range;
- __ JumpIfSmi(object, &is_smi);
+ __ UntagAndJumpIfSmi(dst, object, &done);
__ lw(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset));
__ Branch(not_number, ne, scratch1, Operand(heap_number_map));
__ ConvertToInt32(object,
@@ -678,10 +674,6 @@ void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm,
scratch2,
scratch3);
- __ jmp(&done);
-
- __ bind(&is_smi);
- __ SmiUntag(dst, object);
__ bind(&done);
}
@@ -863,10 +855,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Label done;
- // Untag the object into the destination register.
- __ SmiUntag(dst, object);
- // Just return if the object is a smi.
- __ JumpIfSmi(object, &done);
+ __ UntagAndJumpIfSmi(dst, object, &done);
if (FLAG_debug_code) {
__ AbortIfNotRootValue(heap_number_map,
@@ -3605,7 +3594,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
const Register scratch = t5;
const Register scratch2 = t3;
- Label call_runtime, done, exponent_not_smi, int_exponent;
+ Label call_runtime, done, int_exponent;
if (exponent_type_ == ON_STACK) {
Label base_is_smi, unpack_exponent;
// The exponent and base are supplied as arguments on the stack.
@@ -3616,7 +3605,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ JumpIfSmi(base, &base_is_smi);
+ __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
__ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
@@ -3624,27 +3613,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&unpack_exponent);
__ bind(&base_is_smi);
- __ SmiUntag(base);
- __ mtc1(base, single_scratch);
+ __ mtc1(scratch, single_scratch);
__ cvt_d_w(double_base, single_scratch);
__ bind(&unpack_exponent);
- __ JumpIfNotSmi(exponent, &exponent_not_smi);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ bind(&exponent_not_smi);
__ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
} else if (exponent_type_ == TAGGED) {
// Base is already in double_base.
- __ JumpIfNotSmi(exponent, &exponent_not_smi);
- __ SmiUntag(exponent);
- __ jmp(&int_exponent);
+ __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ bind(&exponent_not_smi);
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
@@ -3724,13 +3706,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&int_exponent_convert);
- __ mfc1(exponent, single_scratch);
+ __ mfc1(scratch, single_scratch);
}
// Calculate power with integer exponent.
__ bind(&int_exponent);
- __ mov(scratch, exponent); // Back up exponent.
+ // Get two copies of exponent in the registers scratch and exponent.
+ if (exponent_type_ == INTEGER) {
+ __ mov(scratch, exponent);
+ } else {
+ // Exponent has previously been stored into scratch as untagged integer.
+ __ mov(exponent, scratch);
+ }
+
__ mov_d(double_scratch, double_base); // Back up base.
__ Move(double_result, 1.0);
@@ -5298,11 +5287,11 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
// Set input, index and length fields from arguments.
__ lw(a1, MemOperand(sp, kPointerSize * 0));
+ __ lw(a2, MemOperand(sp, kPointerSize * 1));
+ __ lw(t2, MemOperand(sp, kPointerSize * 2));
__ sw(a1, FieldMemOperand(v0, JSRegExpResult::kInputOffset));
- __ lw(a1, MemOperand(sp, kPointerSize * 1));
- __ sw(a1, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
- __ lw(a1, MemOperand(sp, kPointerSize * 2));
- __ sw(a1, FieldMemOperand(v0, JSArray::kLengthOffset));
+ __ sw(a2, FieldMemOperand(v0, JSRegExpResult::kIndexOffset));
+ __ sw(t2, FieldMemOperand(v0, JSArray::kLengthOffset));
// Fill out the elements FixedArray.
// v0: JSArray, tagged.
@@ -5341,24 +5330,49 @@ void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
}
-void CallFunctionStub::FinishCode(Handle<Code> code) {
- code->set_has_function_cache(false);
-}
-
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+ // Cache the called function in a global property cell. Cache states
+ // are uninitialized, monomorphic (indicated by a JSFunction), and
+ // megamorphic.
+ // a1 : the function to call
+ // a2 : cache cell for call target
+ Label initialize, done;
-void CallFunctionStub::Clear(Heap* heap, Address address) {
- UNREACHABLE();
-}
+ ASSERT_EQ(*TypeFeedbackCells::MegamorphicSentinel(masm->isolate()),
+ masm->isolate()->heap()->undefined_value());
+ ASSERT_EQ(*TypeFeedbackCells::UninitializedSentinel(masm->isolate()),
+ masm->isolate()->heap()->the_hole_value());
+
+ // Load the cache state into a3.
+ __ lw(a3, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+
+ // A monomorphic cache hit or an already megamorphic state: invoke the
+ // function without changing the state.
+ __ Branch(&done, eq, a3, Operand(a1));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&done, eq, a3, Operand(at));
+
+ // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+ // megamorphic.
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(&initialize, eq, a3, Operand(at));
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ sw(at, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ __ Branch(&done);
+ // An uninitialized cache is patched with the function.
+ __ bind(&initialize);
+ __ sw(a1, FieldMemOperand(a2, JSGlobalPropertyCell::kValueOffset));
+ // No need for a write barrier here - cells are rescanned.
-Object* CallFunctionStub::GetCachedValue(Address address) {
- UNREACHABLE();
- return NULL;
+ __ bind(&done);
}
void CallFunctionStub::Generate(MacroAssembler* masm) {
// a1 : the function to call
+ // a2 : cache cell for call target
Label slow, non_function;
// The receiver might implicitly be the global object. This is
@@ -5435,6 +5449,48 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+void CallConstructStub::Generate(MacroAssembler* masm) {
+ // a0 : number of arguments
+ // a1 : the function to call
+ // a2 : cache cell for call target
+ Label slow, non_function_call;
+
+ // Check that the function is not a smi.
+ __ JumpIfSmi(a1, &non_function_call);
+ // Check that the function is a JSFunction.
+ __ GetObjectType(a1, a3, a3);
+ __ Branch(&slow, ne, a3, Operand(JS_FUNCTION_TYPE));
+
+ if (RecordCallTarget()) {
+ GenerateRecordCallTarget(masm);
+ }
+
+ // Jump to the function-specific construct stub.
+ __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kConstructStubOffset));
+ __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Jump(at);
+
+ // a0: number of arguments
+ // a1: called object
+ // a3: object type
+ Label do_call;
+ __ bind(&slow);
+ __ Branch(&non_function_call, ne, a3, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ GetBuiltinEntry(a3, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+ __ jmp(&do_call);
+
+ __ bind(&non_function_call);
+ __ GetBuiltinEntry(a3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+ __ bind(&do_call);
+ // Set expected number of arguments to zero (not changing a0).
+ __ li(a2, Operand(0, RelocInfo::NONE));
+ __ SetCallKind(t1, CALL_AS_METHOD);
+ __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+ RelocInfo::CODE_TARGET);
+}
+
+
// Unfortunately you have to run without snapshots to see most of these
// names in the profile since most compare stubs end up in the snapshot.
void CompareStub::PrintName(StringStream* stream) {
@@ -6002,10 +6058,8 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Utilize delay slots. SmiUntag doesn't emit a jump, everything else is
// safe in this case.
- __ JumpIfSmi(a2, &runtime, at, USE_DELAY_SLOT);
- __ SmiUntag(a2);
- __ JumpIfSmi(a3, &runtime, at, USE_DELAY_SLOT);
- __ SmiUntag(a3);
+ __ UntagAndJumpIfSmi(a2, a2, &runtime);
+ __ UntagAndJumpIfSmi(a3, a3, &runtime);
// Both a2 and a3 are untagged integers.
@@ -6089,10 +6143,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sliced_string);
// Sliced string. Fetch parent and correct start index by offset.
- __ lw(t1, FieldMemOperand(v0, SlicedString::kOffsetOffset));
- __ sra(t1, t1, 1);
- __ Addu(a3, a3, t1);
+ __ lw(t0, FieldMemOperand(v0, SlicedString::kOffsetOffset));
__ lw(t1, FieldMemOperand(v0, SlicedString::kParentOffset));
+ __ sra(t0, t0, 1); // Add offset to index.
+ __ Addu(a3, a3, t0);
// Update instance type.
__ lw(a1, FieldMemOperand(t1, HeapObject::kMapOffset));
__ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
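
[Editor's note] GenerateRecordCallTarget above is a three-state feedback
cache: the-hole means uninitialized, a JSFunction means monomorphic, and
undefined means megamorphic (the sentinels pinned down by the ASSERT_EQs).
A self-contained C++ sketch of the transitions, not V8 source; the Sentinel
values are stand-ins for the real heap objects:

    #include <cstdint>

    enum Sentinel : uintptr_t { kTheHole = 1, kUndefined = 2 };

    // Returns the new cell value, given the old one and the call target.
    uintptr_t RecordCallTarget(uintptr_t cell, uintptr_t target) {
      if (cell == target)     return cell;    // monomorphic hit: no change
      if (cell == kUndefined) return cell;    // already megamorphic
      if (cell == kTheHole)   return target;  // uninitialized: go monomorphic
      return kUndefined;                      // monomorphic miss: go megamorphic
    }
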
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index 0b68384973..c48432c702 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -105,10 +105,10 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
__ AllocateInNewSpace(scratch, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
// t2: destination FixedDoubleArray, not tagged as heap object
+ // Set destination FixedDoubleArray's length and map.
__ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
- __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
- // Set destination FixedDoubleArray's length.
__ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+ __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
// Update receiver's map.
__ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
@@ -159,10 +159,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
__ lw(t5, MemOperand(a3));
__ Addu(a3, a3, kIntSize);
// t5: current element
- __ JumpIfNotSmi(t5, &convert_hole);
+ __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
// Normal smi, convert to double and store.
- __ SmiUntag(t5);
if (fpu_supported) {
CpuFeatures::Scope scope(FPU);
__ mtc1(t5, f0);
@@ -187,6 +186,9 @@ void ElementsTransitionGenerator::GenerateSmiOnlyToDouble(
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
+ // Restore a "smi-untagged" heap object.
+ __ SmiTag(t5);
+ __ Or(t5, t5, Operand(1));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ Assert(eq, "object found in smi-only array", at, Operand(t5));
}
@@ -225,10 +227,10 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
__ AllocateInNewSpace(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
// t2: destination FixedArray, not tagged as heap object
+ // Set destination FixedArray's length and map.
__ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
- __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
- // Set destination FixedDoubleArray's length.
__ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
+ __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
// Prepare for conversion loop.
__ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
@@ -333,9 +335,9 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Handle slices.
Label indirect_string_loaded;
__ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ sra(at, result, kSmiTagSize);
__ Addu(index, index, at);
- __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ jmp(&indirect_string_loaded);
// Handle cons strings.
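
[Editor's note] The debug-code fix above (SmiTag followed by Or(t5, t5, 1))
is needed because UntagAndJumpIfNotSmi performs its untag in the branch delay
slot, so by the time the hole check runs, a heap pointer in t5 has already
been shifted right by one. Re-tagging works because a HeapObject pointer's
low (tag) bit is always 1. A worked example with an assumed pointer value:

    uint32_t heap_ptr = 0x12345679u;       // HeapObject: low bit set
    uint32_t in_t5    = heap_ptr >> 1;     // 0x091A2B3C after the untag
    uint32_t restored = (in_t5 << 1) | 1;  // SmiTag + Or(1) == 0x12345679
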
diff --git a/deps/v8/src/mips/cpu-mips.cc b/deps/v8/src/mips/cpu-mips.cc
index a1e062c803..93ebeda800 100644
--- a/deps/v8/src/mips/cpu-mips.cc
+++ b/deps/v8/src/mips/cpu-mips.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -64,15 +64,19 @@ void CPU::FlushICache(void* start, size_t size) {
}
#if !defined (USE_SIMULATOR)
+#if defined(ANDROID)
+ // Bionic cacheflush can typically run in userland, avoiding a kernel call.
+ char *end = reinterpret_cast<char *>(start) + size;
+ cacheflush(
+ reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
+#else // ANDROID
int res;
-
// See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
res = syscall(__NR_cacheflush, start, size, ICACHE);
-
if (res) {
V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
}
-
+#endif // ANDROID
#else // USE_SIMULATOR.
// Not generating mips instructions for C-code. This means that we are
// building a mips emulator based target. We should notify the simulator
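
[Editor's note] Both branches added above serve the same purpose: push freshly
patched code past the instruction cache before it is executed. A hedged sketch
of the non-Android Linux/MIPS path (headers and error handling simplified;
ICACHE comes from asm/cachectl.h):

    #include <asm/cachectl.h>   // ICACHE
    #include <stddef.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Flush [start, start + size) from the instruction cache; returns 0 on
    // success, as the syscall does.
    static long FlushICacheRange(void* start, size_t size) {
      return syscall(__NR_cacheflush, start, size, ICACHE);
    }
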
diff --git a/deps/v8/src/mips/debug-mips.cc b/deps/v8/src/mips/debug-mips.cc
index 34e333d289..26b343c873 100644
--- a/deps/v8/src/mips/debug-mips.cc
+++ b/deps/v8/src/mips/debug-mips.cc
@@ -243,14 +243,6 @@ void Debug::GenerateCallICDebugBreak(MacroAssembler* masm) {
}
-void Debug::GenerateConstructCallDebugBreak(MacroAssembler* masm) {
- // Calling convention for construct call (from builtins-mips.cc).
- // -- a0 : number of arguments (not smi)
- // -- a1 : constructor function
- Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
-}
-
-
void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
// In places other than IC call sites it is expected that v0 is TOS which
// is an object - this is not generally the case so this should be used with
@@ -260,6 +252,7 @@ void Debug::GenerateReturnDebugBreak(MacroAssembler* masm) {
void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-mips.cc).
// ----------- S t a t e -------------
// -- a1 : function
// -----------------------------------
@@ -267,6 +260,37 @@ void Debug::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
}
+void Debug::GenerateCallFunctionStubRecordDebugBreak(MacroAssembler* masm) {
+ // Register state for CallFunctionStub (from code-stubs-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a1 : function
+ // -- a2 : cache cell for call target
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), 0);
+}
+
+
+void Debug::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments (not smi)
+ // -- a1 : constructor function
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
+}
+
+
+void Debug::GenerateCallConstructStubRecordDebugBreak(MacroAssembler* masm) {
+ // Calling convention for CallConstructStub (from code-stubs-mips.cc).
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments (not smi)
+ // -- a1 : constructor function
+ // -- a2 : cache cell for call target
+ // -----------------------------------
+ Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit(), a0.bit());
+}
+
+
void Debug::GenerateSlot(MacroAssembler* masm) {
// Generate enough nop's to make space for a call instruction. Avoid emitting
// the trampoline pool in the debug break slot code.
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 44af3d7b29..26a406333f 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -218,12 +218,13 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
ASSERT(Translation::BEGIN == opcode);
USE(opcode);
int count = iterator.Next();
+ iterator.Skip(1); // Drop JS frame count.
ASSERT(count == 1);
USE(count);
opcode = static_cast<Translation::Opcode>(iterator.Next());
USE(opcode);
- ASSERT(Translation::FRAME == opcode);
+ ASSERT(Translation::JS_FRAME == opcode);
unsigned node_id = iterator.Next();
USE(node_id);
ASSERT(node_id == ast_id);
@@ -259,9 +260,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
output_ = new FrameDescription*[1];
output_[0] = new(output_frame_size) FrameDescription(
output_frame_size, function_);
-#ifdef DEBUG
- output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif
+ output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);
// Clear the incoming parameters in the optimized frame to avoid
// confusing the garbage collector.
@@ -349,15 +348,115 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
}
+void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
+ int frame_index) {
+ JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
+ unsigned height = iterator->Next();
+ unsigned height_in_bytes = height * kPointerSize;
+ if (FLAG_trace_deopt) {
+ PrintF(" translating arguments adaptor => height=%d\n", height_in_bytes);
+ }
+
+ unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
+ unsigned input_frame_size = input_->GetFrameSize();
+ unsigned output_frame_size = height_in_bytes + fixed_frame_size;
+
+ // Allocate and store the output frame description.
+ FrameDescription* output_frame =
+ new(output_frame_size) FrameDescription(output_frame_size, function);
+ output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
+
+ // Arguments adaptor cannot be topmost or bottommost.
+ ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
+ ASSERT(output_[frame_index] == NULL);
+ output_[frame_index] = output_frame;
+
+ // The top address of the frame is computed from the previous
+ // frame's top and this frame's size.
+ uint32_t top_address;
+ top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
+ output_frame->SetTop(top_address);
+
+ // Compute the incoming parameter translation.
+ int parameter_count = height;
+ unsigned output_offset = output_frame_size;
+ unsigned input_offset = input_frame_size;
+ for (int i = 0; i < parameter_count; ++i) {
+ output_offset -= kPointerSize;
+ DoTranslateCommand(iterator, frame_index, output_offset);
+ }
+ input_offset -= (parameter_count * kPointerSize);
+
+ // Read caller's PC from the previous frame.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t callers_pc = output_[frame_index - 1]->GetPc();
+ output_frame->SetFrameSlot(output_offset, callers_pc);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
+ top_address + output_offset, output_offset, callers_pc);
+ }
+
+ // Read caller's FP from the previous frame, and set this frame's FP.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t value = output_[frame_index - 1]->GetFp();
+ output_frame->SetFrameSlot(output_offset, value);
+ intptr_t fp_value = top_address + output_offset;
+ output_frame->SetFp(fp_value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
+ fp_value, output_offset, value);
+ }
+
+ // A marker value is used in place of the context.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ intptr_t context = reinterpret_cast<intptr_t>(
+ Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+ output_frame->SetFrameSlot(output_offset, context);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
+ top_address + output_offset, output_offset, context);
+ }
+
+ // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<intptr_t>(function);
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; function\n",
+ top_address + output_offset, output_offset, value);
+ }
+
+ // Number of incoming arguments.
+ output_offset -= kPointerSize;
+ input_offset -= kPointerSize;
+ value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
+ output_frame->SetFrameSlot(output_offset, value);
+ if (FLAG_trace_deopt) {
+ PrintF(" 0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
+ top_address + output_offset, output_offset, value, height - 1);
+ }
+
+ ASSERT(0 == output_offset);
+
+ Builtins* builtins = isolate_->builtins();
+ Code* adaptor_trampoline =
+ builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
+ uint32_t pc = reinterpret_cast<uint32_t>(
+ adaptor_trampoline->instruction_start() +
+ isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
+ output_frame->SetPc(pc);
+}
+
+
// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
-void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
- int frame_index) {
+void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
+ int frame_index) {
// Read the ast node id, function, and frame height for this output frame.
- Translation::Opcode opcode =
- static_cast<Translation::Opcode>(iterator->Next());
- USE(opcode);
- ASSERT(Translation::FRAME == opcode);
int node_id = iterator->Next();
JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
unsigned height = iterator->Next();
@@ -377,9 +476,7 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// Allocate and store the output frame description.
FrameDescription* output_frame =
new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
- output_frame->SetKind(Code::FUNCTION);
-#endif
+ output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
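
[Editor's note] A hedged sketch of the 32-bit frame that the new
DoComputeArgumentsAdaptorFrame above writes, highest address first; the field
names here are descriptive only, but the slot order and values come from the
code above:

    struct ArgumentsAdaptorFrame32 {
      // uint32_t parameters[height];  // translated arguments (variable part)
      uint32_t callers_pc;        // return address read from the caller frame
      uint32_t callers_fp;        // saved fp; this frame's fp points here
      uint32_t context_sentinel;  // Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)
      uint32_t function;          // the JSFunction being adapted
      uint32_t argc;              // Smi::FromInt(height - 1)
    };  // fixed part: 5 words, i.e. ArgumentsAdaptorFrameConstants::kFrameSize
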
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 9e626f3775..2ed358a913 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -195,6 +195,9 @@ class ExitFrameConstants : public AllStatic {
class StandardFrameConstants : public AllStatic {
public:
+ // Fixed part of the frame consists of return address, caller fp,
+ // context and function.
+ static const int kFixedFrameSize = 4 * kPointerSize;
static const int kExpressionsOffset = -3 * kPointerSize;
static const int kMarkerOffset = -2 * kPointerSize;
static const int kContextOffset = -1 * kPointerSize;
@@ -230,6 +233,8 @@ class JavaScriptFrameConstants : public AllStatic {
class ArgumentsAdaptorFrameConstants : public AllStatic {
public:
static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+ static const int kFrameSize =
+ StandardFrameConstants::kFixedFrameSize + kPointerSize;
};
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index da3be4c23e..201742efec 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -2403,9 +2403,22 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
__ li(a0, Operand(arg_count));
__ lw(a1, MemOperand(sp, arg_count * kPointerSize));
- Handle<Code> construct_builtin =
- isolate()->builtins()->JSConstructCall();
- __ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
+ // Record call targets in unoptimized code, but not in the snapshot.
+ CallFunctionFlags flags;
+ if (!Serializer::enabled()) {
+ flags = RECORD_CALL_TARGET;
+ Handle<Object> uninitialized =
+ TypeFeedbackCells::UninitializedSentinel(isolate());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(uninitialized);
+ RecordTypeFeedbackCell(expr->id(), cell);
+ __ li(a2, Operand(cell));
+ } else {
+ flags = NO_CALL_FUNCTION_FLAGS;
+ }
+
+ CallConstructStub stub(flags);
+ __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
context()->Plug(v0);
}
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index 7a230c1fac..91cddd820f 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -447,7 +447,11 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
WriteTranslation(environment->outer(), translation);
int closure_id = DefineDeoptimizationLiteral(environment->closure());
- translation->BeginFrame(environment->ast_id(), closure_id, height);
+ if (environment->is_arguments_adaptor()) {
+ translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+ } else {
+ translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+ }
for (int i = 0; i < translation_size; ++i) {
LOperand* value = environment->values()->at(i);
// spilled_registers_ and spilled_double_registers_ are either
@@ -573,10 +577,14 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
// |>------------ translation_size ------------<|
int frame_count = 0;
+ int jsframe_count = 0;
for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
++frame_count;
+ if (!e->is_arguments_adaptor()) {
+ ++jsframe_count;
+ }
}
- Translation translation(&translations_, frame_count);
+ Translation translation(&translations_, frame_count, jsframe_count);
WriteTranslation(environment, &translation);
int deoptimization_index = deoptimizations_.length();
int pc_offset = masm()->pc_offset();
@@ -3269,9 +3277,9 @@ void LCodeGen::DoCallNew(LCallNew* instr) {
ASSERT(ToRegister(instr->InputAt(0)).is(a1));
ASSERT(ToRegister(instr->result()).is(v0));
- Handle<Code> builtin = isolate()->builtins()->JSConstructCall();
+ CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
__ li(a0, Operand(instr->arity()));
- CallCode(builtin, RelocInfo::CONSTRUCT_CALL, instr);
+ CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
}
@@ -3706,13 +3714,12 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
LNumberTagI* instr_;
};
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
- Register reg = ToRegister(input);
+ Register src = ToRegister(instr->InputAt(0));
+ Register dst = ToRegister(instr->result());
Register overflow = scratch0();
DeferredNumberTagI* deferred = new DeferredNumberTagI(this, instr);
- __ SmiTagCheckOverflow(reg, overflow);
+ __ SmiTagCheckOverflow(dst, src, overflow);
__ BranchOnOverflow(deferred->entry(), overflow);
__ bind(deferred->exit());
}
@@ -3720,7 +3727,8 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
Label slow;
- Register reg = ToRegister(instr->InputAt(0));
+ Register src = ToRegister(instr->InputAt(0));
+ Register dst = ToRegister(instr->result());
FPURegister dbl_scratch = double_scratch0();
// Preserve the value of all registers.
@@ -3730,14 +3738,16 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// disagree. Try to allocate a heap number in new space and store
// the value in there. If that fails, call the runtime system.
Label done;
- __ SmiUntag(reg);
- __ Xor(reg, reg, Operand(0x80000000));
- __ mtc1(reg, dbl_scratch);
+ if (dst.is(src)) {
+ __ SmiUntag(src, dst);
+ __ Xor(src, src, Operand(0x80000000));
+ }
+ __ mtc1(src, dbl_scratch);
__ cvt_d_w(dbl_scratch, dbl_scratch);
if (FLAG_inline_new) {
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(t1, a3, t0, t2, &slow);
- if (!reg.is(t1)) __ mov(reg, t1);
+ __ Move(dst, t1);
__ Branch(&done);
}
@@ -3747,15 +3757,15 @@ void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
- __ StoreToSafepointRegisterSlot(zero_reg, reg);
+ __ StoreToSafepointRegisterSlot(zero_reg, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- if (!reg.is(v0)) __ mov(reg, v0);
+ __ Move(dst, v0);
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sdc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
- __ StoreToSafepointRegisterSlot(reg, reg);
+ __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+ __ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -3802,25 +3812,23 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
void LCodeGen::DoSmiTag(LSmiTag* instr) {
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
- __ SmiTag(ToRegister(input));
+ __ SmiTag(ToRegister(instr->result()), ToRegister(instr->InputAt(0)));
}
void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
Register scratch = scratch0();
- LOperand* input = instr->InputAt(0);
- ASSERT(input->IsRegister() && input->Equals(instr->result()));
+ Register input = ToRegister(instr->InputAt(0));
+ Register result = ToRegister(instr->result());
if (instr->needs_check()) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, value of scratch won't be zero.
- __ And(scratch, ToRegister(input), Operand(kHeapObjectTag));
- __ SmiUntag(ToRegister(input));
+ __ And(scratch, input, Operand(kHeapObjectTag));
+ __ SmiUntag(result, input);
DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
} else {
- __ SmiUntag(ToRegister(input));
+ __ SmiUntag(result, input);
}
}
@@ -3835,7 +3843,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
// Smi check.
- __ JumpIfSmi(input_reg, &load_smi);
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -3868,10 +3876,9 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
// Smi to double register conversion
__ bind(&load_smi);
- __ SmiUntag(input_reg); // Untag smi before converting to float.
- __ mtc1(input_reg, result_reg);
+ // scratch: untagged value of input_reg
+ __ mtc1(scratch, result_reg);
__ cvt_d_w(result_reg, result_reg);
- __ SmiTag(input_reg); // Retag smi.
__ bind(&done);
}
@@ -4152,7 +4159,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
Label is_smi, done, heap_number;
// Both smi and heap number cases are handled.
- __ JumpIfSmi(input_reg, &is_smi);
+ __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
// Check for heap number
__ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
@@ -4172,9 +4179,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
__ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
__ jmp(&done);
- // smi
__ bind(&is_smi);
- __ SmiUntag(scratch, input_reg);
__ ClampUint8(result_reg, scratch);
__ bind(&done);
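
[Editor's note] The frame_count/jsframe_count split above feeds the new
two-argument Translation constructor: adaptor environments get their own
translation frames but are not JS frames. A self-contained sketch of the
counting, assuming one inlined call reached through an adaptor:

    struct Env { bool is_arguments_adaptor; const Env* outer; };

    // Mirrors the loop in RegisterEnvironmentForDeoptimization above.
    void CountFrames(const Env* e, int* frames, int* js_frames) {
      for (; e != nullptr; e = e->outer) {
        ++*frames;
        if (!e->is_arguments_adaptor) ++*js_frames;
      }
    }
    // Chain: inlined JS -> adaptor -> outermost JS
    //   => frames == 3, js_frames == 2, and WriteTranslation emits
    //      BeginJSFrame, BeginArgumentsAdaptorFrame, BeginJSFrame.
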
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 2c098fe77c..0bc222339f 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -581,11 +581,6 @@ void LChunkBuilder::Abort(const char* format, ...) {
}
-LRegister* LChunkBuilder::ToOperand(Register reg) {
- return LRegister::Create(Register::ToAllocationIndex(reg));
-}
-
-
LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
return new LUnallocated(LUnallocated::FIXED_REGISTER,
Register::ToAllocationIndex(reg));
@@ -676,7 +671,7 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
HInstruction* instr = HInstruction::cast(value);
VisitInstruction(instr);
}
- allocator_->RecordUse(value, operand);
+ operand->set_virtual_register(value->id());
return operand;
}
@@ -684,19 +679,13 @@ LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
template<int I, int T>
LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result) {
- allocator_->RecordDefinition(current_instruction_, result);
+ result->set_virtual_register(current_instruction_->id());
instr->set_result(result);
return instr;
}
template<int I, int T>
-LInstruction* LChunkBuilder::Define(LTemplateInstruction<1, I, T>* instr) {
- return Define(instr, new LUnallocated(LUnallocated::NONE));
-}
-
-
-template<int I, int T>
LInstruction* LChunkBuilder::DefineAsRegister(
LTemplateInstruction<1, I, T>* instr) {
return Define(instr, new LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
@@ -802,21 +791,22 @@ LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
LUnallocated* LChunkBuilder::TempRegister() {
LUnallocated* operand = new LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
- allocator_->RecordTemporary(operand);
+ operand->set_virtual_register(allocator_->GetVirtualRegister());
+ if (!allocator_->AllocationOk()) Abort("Not enough virtual registers.");
return operand;
}
LOperand* LChunkBuilder::FixedTemp(Register reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
LUnallocated* operand = ToUnallocated(reg);
- allocator_->RecordTemporary(operand);
+ ASSERT(operand->HasFixedPolicy());
return operand;
}
@@ -1005,14 +995,16 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
LEnvironment* outer =
CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator);
int ast_id = hydrogen_env->ast_id();
- ASSERT(ast_id != AstNode::kNoNumber);
+ ASSERT(ast_id != AstNode::kNoNumber || hydrogen_env->is_arguments_adaptor());
int value_count = hydrogen_env->length();
LEnvironment* result = new LEnvironment(hydrogen_env->closure(),
+ hydrogen_env->is_arguments_adaptor(),
ast_id,
hydrogen_env->parameter_count(),
argument_count_,
value_count,
outer);
+ int argument_index = *argument_index_accumulator;
for (int i = 0; i < value_count; ++i) {
if (hydrogen_env->is_special_index(i)) continue;
@@ -1021,13 +1013,17 @@ LEnvironment* LChunkBuilder::CreateEnvironment(
if (value->IsArgumentsObject()) {
op = NULL;
} else if (value->IsPushArgument()) {
- op = new LArgument((*argument_index_accumulator)++);
+ op = new LArgument(argument_index++);
} else {
op = UseAny(value);
}
result->AddValue(op, value->representation());
}
+ if (!hydrogen_env->is_arguments_adaptor()) {
+ *argument_index_accumulator = argument_index;
+ }
+
return result;
}
@@ -1627,11 +1623,11 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
return AssignEnvironment(DefineAsRegister(res));
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
+ LOperand* value = UseRegisterAtStart(instr->value());
bool needs_check = !instr->value()->type().IsSmi();
LInstruction* res = NULL;
if (!needs_check) {
- res = DefineSameAsFirst(new LSmiUntag(value, needs_check));
+ res = DefineAsRegister(new LSmiUntag(value, needs_check));
} else {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
@@ -1667,12 +1663,12 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
} else if (from.IsInteger32()) {
if (to.IsTagged()) {
HValue* val = instr->value();
- LOperand* value = UseRegister(val);
+ LOperand* value = UseRegisterAtStart(val);
if (val->HasRange() && val->range()->IsInSmiRange()) {
- return DefineSameAsFirst(new LSmiTag(value));
+ return DefineAsRegister(new LSmiTag(value));
} else {
LNumberTagI* result = new LNumberTagI(value);
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
} else {
ASSERT(to.IsDouble());
@@ -2247,6 +2243,7 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
HEnvironment* outer = current_block_->last_environment();
HConstant* undefined = graph()->GetConstantUndefined();
HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+ instr->arguments_count(),
instr->function(),
undefined,
instr->call_kind());
@@ -2257,7 +2254,8 @@ LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
- HEnvironment* outer = current_block_->last_environment()->outer();
+ HEnvironment* outer = current_block_->last_environment()->
+ DiscardInlined(false);
current_block_->UpdateEnvironment(outer);
return NULL;
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 069a0251fd..0a21649fa8 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -2161,7 +2161,6 @@ class LChunkBuilder BASE_EMBEDDED {
void Abort(const char* format, ...);
// Methods for getting operands for Use / Define / Temp.
- LRegister* ToOperand(Register reg);
LUnallocated* ToUnallocated(Register reg);
LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2212,8 +2211,6 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* Define(LTemplateInstruction<1, I, T>* instr,
LUnallocated* result);
template<int I, int T>
- LInstruction* Define(LTemplateInstruction<1, I, T>* instr);
- template<int I, int T>
LInstruction* DefineAsRegister(LTemplateInstruction<1, I, T>* instr);
template<int I, int T>
LInstruction* DefineAsSpilled(LTemplateInstruction<1, I, T>* instr,
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 941c7fe4ea..678b8b1036 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -4279,6 +4279,31 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
+void MacroAssembler::LoadGlobalInitialConstructedArrayMap(
+ Register function_in, Register scratch, Register map_out) {
+ ASSERT(!function_in.is(map_out));
+ Label done;
+ lw(map_out, FieldMemOperand(function_in,
+ JSFunction::kPrototypeOrInitialMapOffset));
+ if (!FLAG_smi_only_arrays) {
+ // Load the global or builtins object from the current context.
+ lw(scratch, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ lw(scratch, FieldMemOperand(scratch, GlobalObject::kGlobalContextOffset));
+
+ // Check that the function's map is same as the cached map.
+ lw(at, MemOperand(
+ scratch, Context::SlotOffset(Context::SMI_JS_ARRAY_MAP_INDEX)));
+ Branch(&done, ne, map_out, Operand(at));
+
+ // Use the cached transitioned map.
+ lw(map_out,
+ MemOperand(scratch,
+ Context::SlotOffset(Context::OBJECT_JS_ARRAY_MAP_INDEX)));
+ }
+ bind(&done);
+}
+
+
void MacroAssembler::LoadGlobalFunction(int index, Register function) {
// Load the global or builtins object from the current context.
lw(function, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
@@ -4492,6 +4517,64 @@ void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
}
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+ ASSERT(!reg.is(overflow));
+ mov(overflow, reg); // Save original value.
+ SmiTag(reg);
+ xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst,
+ Register src,
+ Register overflow) {
+ if (dst.is(src)) {
+ // Fall back to slower case.
+ SmiTagCheckOverflow(dst, overflow);
+ } else {
+ ASSERT(!dst.is(src));
+ ASSERT(!dst.is(overflow));
+ ASSERT(!src.is(overflow));
+ SmiTag(dst, src);
+ xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
+ }
+}
+
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst,
+ Register src,
+ Label* smi_case) {
+ JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
+ SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
+ Register src,
+ Label* non_smi_case) {
+ JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
+ SmiUntag(dst, src);
+}
+
+void MacroAssembler::JumpIfSmi(Register value,
+ Label* smi_label,
+ Register scratch,
+ BranchDelaySlot bd) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Register scratch,
+ BranchDelaySlot bd) {
+ ASSERT_EQ(0, kSmiTag);
+ andi(scratch, value, kSmiTagMask);
+ Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+
void MacroAssembler::JumpIfNotBothSmi(Register reg1,
Register reg2,
Label* on_not_both_smi) {
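
[Editor's note] Two details in the new helpers above are easy to miss. First,
UntagAndJumpIfSmi/UntagAndJumpIfNotSmi place SmiUntag in the branch delay
slot, so the untag executes on both paths; when the value turns out to be a
heap pointer, dst holds it shifted right by one and callers must ignore or
re-tag it (see the codegen-mips.cc debug fix above). Second, the overflow
test: with a one-bit smi tag, tagging doubles the value, and the doubling
overflows exactly when the sign flips. A self-contained sketch:

    #include <cstdint>

    // Tag/untag with kSmiTagSize == 1, kSmiTag == 0 (the cast avoids
    // signed-overflow UB; the hardware addu wraps the same way).
    int32_t SmiTag(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);  // v + v
    }
    int32_t SmiUntag(int32_t t) { return t >> 1; }  // arithmetic shift

    // Overflow iff v does not fit in 31 bits, i.e. sign(v) != sign(2 * v).
    bool SmiTagOverflows(int32_t v) { return (v ^ SmiTag(v)) < 0; }
    // SmiTagOverflows(0x3FFFFFFF) == false; SmiTagOverflows(0x40000000) == true.

The SubStringStub hunk earlier in this diff shows the payoff: each
JumpIfSmi-plus-SmiUntag pair collapses into a single UntagAndJumpIfSmi call.
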
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index b976f6ee0c..24dfceb05e 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -772,6 +772,11 @@ class MacroAssembler: public Assembler {
void LoadContext(Register dst, int context_chain_length);
+ // Load the initial map for new Arrays of a given type.
+ void LoadGlobalInitialConstructedArrayMap(Register function_in,
+ Register scratch,
+ Register map_out);
+
void LoadGlobalFunction(int index, Register function);
// Load the initial map from the global function. The registers
@@ -1217,24 +1222,13 @@ class MacroAssembler: public Assembler {
// -------------------------------------------------------------------------
// Smi utilities.
- // Try to convert int32 to smi. If the value is to large, preserve
- // the original value and jump to not_a_smi. Destroys scratch and
- // sets flags.
- // This is only used by crankshaft atm so it is unimplemented on MIPS.
- void TrySmiTag(Register reg, Label* not_a_smi, Register scratch) {
- UNIMPLEMENTED_MIPS();
- }
-
void SmiTag(Register reg) {
Addu(reg, reg, reg);
}
// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
- void SmiTagCheckOverflow(Register reg, Register overflow) {
- mov(overflow, reg); // Save original value.
- addu(reg, reg, reg);
- xor_(overflow, overflow, reg); // Overflow if (value ^ 2 * value) < 0.
- }
+ void SmiTagCheckOverflow(Register reg, Register overflow);
+ void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
void SmiTag(Register dst, Register src) {
Addu(dst, src, src);
@@ -1248,22 +1242,25 @@ class MacroAssembler: public Assembler {
sra(dst, src, kSmiTagSize);
}
+ // Untag the source value into destination and jump if source is a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+ // Untag the source value into destination and jump if source is not a smi.
+ // Source and destination can be the same register.
+ void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
// Jump the register contains a smi.
- inline void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = at,
- BranchDelaySlot bd = PROTECT) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
- }
+ void JumpIfSmi(Register value,
+ Label* smi_label,
+ Register scratch = at,
+ BranchDelaySlot bd = PROTECT);
// Jump if the register contains a non-smi.
- inline void JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch = at) {
- ASSERT_EQ(0, kSmiTag);
- andi(scratch, value, kSmiTagMask);
- Branch(not_smi_label, ne, scratch, Operand(zero_reg));
- }
+ void JumpIfNotSmi(Register value,
+ Label* not_smi_label,
+ Register scratch = at,
+ BranchDelaySlot bd = PROTECT);
// Jump if either of the registers contain a non-smi.
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);