Diffstat (limited to 'chromium/v8/src/mips/code-stubs-mips.cc')
-rw-r--r-- chromium/v8/src/mips/code-stubs-mips.cc | 1423
1 file changed, 160 insertions(+), 1263 deletions(-)
diff --git a/chromium/v8/src/mips/code-stubs-mips.cc b/chromium/v8/src/mips/code-stubs-mips.cc
index 0589bf01624..4c3708ce7a5 100644
--- a/chromium/v8/src/mips/code-stubs-mips.cc
+++ b/chromium/v8/src/mips/code-stubs-mips.cc
@@ -60,6 +60,17 @@ void ToNumberStub::InitializeInterfaceDescriptor(
}
+void NumberToStringStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a0 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kNumberToString)->entry;
+}
+
+
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -67,7 +78,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry;
}
@@ -78,7 +89,7 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
descriptor->register_param_count_ = 4;
descriptor->register_params_ = registers;
descriptor->deoptimization_handler_ =
- Runtime::FunctionForId(Runtime::kCreateObjectLiteralShallow)->entry;
+ Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry;
}
@@ -103,6 +114,17 @@ void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
}
+void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = {a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
+}
+
+
void LoadFieldStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -123,6 +145,19 @@ void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
}
+void KeyedArrayCallStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a2 };
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers;
+ descriptor->continuation_type_ = TAIL_CALL_CONTINUATION;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->deoptimization_handler_ =
+ FUNCTION_ADDR(KeyedCallIC_MissFromStubFailure);
+}
+
+
void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -167,14 +202,21 @@ static void InitializeArrayConstructorDescriptor(
// a0 -- number of arguments
// a1 -- function
// a2 -- type info cell with elements kind
- static Register registers[] = { a1, a2 };
- descriptor->register_param_count_ = 2;
- if (constant_stack_parameter_count != 0) {
+ static Register registers_variable_args[] = { a1, a2, a0 };
+ static Register registers_no_args[] = { a1, a2 };
+
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_no_args;
+ } else {
// stack param count needs (constructor pointer, and single argument)
- descriptor->stack_parameter_count_ = &a0;
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = a0;
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
@@ -188,15 +230,21 @@ static void InitializeInternalArrayConstructorDescriptor(
// register state
// a0 -- number of arguments
// a1 -- constructor function
- static Register registers[] = { a1 };
- descriptor->register_param_count_ = 1;
+ static Register registers_variable_args[] = { a1, a0 };
+ static Register registers_no_args[] = { a1 };
- if (constant_stack_parameter_count != 0) {
- // Stack param count needs (constructor pointer, and single argument).
- descriptor->stack_parameter_count_ = &a0;
+ if (constant_stack_parameter_count == 0) {
+ descriptor->register_param_count_ = 1;
+ descriptor->register_params_ = registers_no_args;
+ } else {
+ // stack param count needs (constructor pointer, and single argument)
+ descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
+ descriptor->stack_parameter_count_ = a0;
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers_variable_args;
}
+
descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
- descriptor->register_params_ = registers;
descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
@@ -280,6 +328,17 @@ void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
}
+void NewStringAddStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kStringAdd)->entry;
+}
+
+
#define __ ACCESS_MASM(masm)
@@ -536,23 +595,27 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
Register scratch3 =
GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
- DoubleRegister double_scratch = kLithiumScratchDouble.low();
- DoubleRegister double_input = f12;
+ DoubleRegister double_scratch = kLithiumScratchDouble;
__ Push(scratch, scratch2, scratch3);
- __ ldc1(double_input, MemOperand(input_reg, double_offset));
-
if (!skip_fastpath()) {
+ // Load double input.
+ __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+
// Clear cumulative exception flags and save the FCSR.
__ cfc1(scratch2, FCSR);
__ ctc1(zero_reg, FCSR);
+
// Try a conversion to a signed integer.
- __ trunc_w_d(double_scratch, double_input);
+ __ Trunc_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
__ mfc1(result_reg, double_scratch);
+
// Retrieve and restore the FCSR.
__ cfc1(scratch, FCSR);
__ ctc1(scratch2, FCSR);
+
// Check for overflow and NaNs.
__ And(
scratch, scratch,
@@ -565,7 +628,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
// Load the double value and perform a manual truncation.
Register input_high = scratch2;
Register input_low = scratch3;
- __ Move(input_low, input_high, double_input);
+
+ __ lw(input_low, MemOperand(input_reg, double_offset));
+ __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
Label normal_exponent, restore_sign;
// Extract the biased exponent in result.
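The rewritten fast path keeps the value in double_scratch, clears the FCSR, truncates, and only the overflow/NaN case reaches the manual path in the hunk above, which reloads the raw IEEE-754 words from memory and truncates by hand, starting from the biased exponent. A self-contained C++ sketch of that manual truncation, assuming IEEE-754 doubles and two's-complement casts (the function name is illustrative, not V8's):

#include <cstdint>
#include <cstring>

// Hedged sketch of the slow-path double -> int32 truncation: keep the low
// 32 bits of the truncated integer value, with the sign applied modulo 2^32,
// matching ECMA-262 ToInt32 for values the FPU refuses to convert.
int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;  // unbias
  if (exponent < 0) return 0;   // |input| < 1 truncates to 0
  if (exponent > 83) return 0;  // integer bits all above bit 31; also NaN/Inf
  // Reassemble the 53-bit significand with its implicit leading 1.
  uint64_t significand = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
  int shift = exponent - 52;    // where the binary point lands
  uint32_t magnitude = shift >= 0
      ? static_cast<uint32_t>(significand << shift)
      : static_cast<uint32_t>(significand >> -shift);
  uint32_t result = (bits >> 63) ? 0u - magnitude : magnitude;  // sign bit
  return static_cast<int32_t>(result);
}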
@@ -649,33 +714,12 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
}
-bool WriteInt32ToHeapNumberStub::IsPregenerated(Isolate* isolate) {
- // These variants are compiled ahead of time. See next method.
- if (the_int_.is(a1) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a2) &&
- sign_.is(a3)) {
- return true;
- }
- if (the_int_.is(a2) &&
- the_heap_number_.is(v0) &&
- scratch_.is(a3) &&
- sign_.is(a0)) {
- return true;
- }
- // Other register combinations are generated as and when they are needed,
- // so it is unsafe to call them from stubs (we can't generate a stub while
- // we are generating a stub).
- return false;
-}
-
-
void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
WriteInt32ToHeapNumberStub stub1(a1, v0, a2, a3);
WriteInt32ToHeapNumberStub stub2(a2, v0, a3, a0);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
+ stub2.GetCode(isolate);
}
@@ -994,105 +1038,6 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
}
-void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
- Register object,
- Register result,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* not_found) {
- // Use of registers. Register result is used as a temporary.
- Register number_string_cache = result;
- Register mask = scratch3;
-
- // Load the number string cache.
- __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
-
- // Make the hash mask from the length of the number string cache. It
- // contains two elements (number and string) for each cache entry.
- __ lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is a smi).
- __ sra(mask, mask, kSmiTagSize + 1);
- __ Addu(mask, mask, -1); // Make mask.
-
- // Calculate the entry in the number string cache. The hash value in the
- // number string cache for smis is just the smi value, and the hash for
- // doubles is the xor of the upper and lower words. See
- // Heap::GetNumberStringCache.
- Isolate* isolate = masm->isolate();
- Label is_smi;
- Label load_result_from_cache;
- __ JumpIfSmi(object, &is_smi);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
-
- __ bind(&is_smi);
- Register scratch = scratch1;
- __ sra(scratch, object, 1); // Shift away the tag.
- __ And(scratch, mask, Operand(scratch));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch, scratch, kPointerSizeLog2 + 1);
- __ Addu(scratch, number_string_cache, scratch);
-
- // Check if the entry is the smi we are looking for.
- __ lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
- __ Branch(not_found, ne, object, Operand(probe));
-
- // Get the result from the cache.
- __ bind(&load_result_from_cache);
- __ lw(result,
- FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
-
- __ IncrementCounter(isolate->counters()->number_to_string_native(),
- 1,
- scratch1,
- scratch2);
-}
-
-
-void NumberToStringStub::Generate(MacroAssembler* masm) {
- Label runtime;
-
- __ lw(a1, MemOperand(sp, 0));
-
- // Generate code to lookup number in the number string cache.
- GenerateLookupNumberStringCache(masm, a1, v0, a2, a3, t0, &runtime);
- __ DropAndRet(1);
-
- __ bind(&runtime);
- // Handle number to string in the runtime system if not found in the cache.
- __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
-}
-
-
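Neither routine is gone for good: the cache probe now lives in the macro assembler as LookupNumberStringCache (see the StringAddStub hunk further down), and NumberToStringStub itself is generated from Hydrogen, with only its interface descriptor added at the top of this diff. A self-contained C++ sketch of the cache scheme the deleted comments describe, using hypothetical stand-in types for the real FixedArray pairs:

#include <cstdint>
#include <cstring>
#include <string>
#include <vector>

// One cache slot: a number key and its cached string form. 'Entry' is a
// hypothetical stand-in for the (number, string) pairs the FixedArray holds.
struct Entry { bool used = false; double key = 0.0; std::string value; };

// Hash as described above: a smi hashes to its own value, a heap number to
// the xor of its upper and lower 32-bit words (see Heap::GetNumberStringCache).
static uint32_t Hash(double number, bool is_smi) {
  if (is_smi) return static_cast<uint32_t>(static_cast<int32_t>(number));
  uint64_t bits;
  std::memcpy(&bits, &number, sizeof(bits));
  return static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
}

// Probe exactly one slot; a miss falls through to the runtime, like the
// not_found label in the deleted code.
const std::string* Lookup(const std::vector<Entry>& cache,  // size: power of 2
                          double number, bool is_smi) {
  uint32_t mask = static_cast<uint32_t>(cache.size()) - 1;
  const Entry& entry = cache[Hash(number, is_smi) & mask];
  return (entry.used && entry.key == number) ? &entry.value : nullptr;
}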
static void ICCompareStub_CheckInputType(MacroAssembler* masm,
Register input,
Register scratch,
@@ -1316,958 +1261,18 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
}
-// Generates code to call a C function to do a double operation.
-// This code never falls through, but returns with a heap number containing
-// the result in v0.
-// Register heap_number_result must be a heap number in which the
-// result of the operation will be stored.
-// Requires the following layout on entry:
-// a0: Left value (least significant part of mantissa).
-// a1: Left value (sign, exponent, top of mantissa).
-// a2: Right value (least significant part of mantissa).
-// a3: Right value (sign, exponent, top of mantissa).
-static void CallCCodeForDoubleOperation(MacroAssembler* masm,
- Token::Value op,
- Register heap_number_result,
- Register scratch) {
- // Assert that heap_number_result is saved.
- // We currently always use s0 to pass it.
- ASSERT(heap_number_result.is(s0));
-
- // Push the current return address before the C call.
- __ push(ra);
- __ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
- }
- // Store answer in the overwritable heap number.
- // Double returned in register f0.
- __ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- // Place heap_number_result in v0 and return to the pushed return address.
- __ pop(ra);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-}
-
-
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = true; // FPU is a base requirement for V8.
-}
-
-
-void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
- Label get_result;
-
- __ Push(a1, a0);
-
- __ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ push(a2);
-
- __ TailCallExternalReference(
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
- masm->isolate()),
- 3,
- 1);
-}
-
-
-void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
- MacroAssembler* masm) {
- UNIMPLEMENTED();
-}
-
-
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
- Register left = a1;
- Register right = a0;
-
- Register scratch1 = t0;
- Register scratch2 = t1;
-
- ASSERT(right.is(a0));
- STATIC_ASSERT(kSmiTag == 0);
-
- Label not_smi_result;
- switch (op) {
- case Token::ADD:
- __ AdduAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::SUB:
- __ SubuAndCheckForOverflow(v0, left, right, scratch1);
- __ RetOnNoOverflow(scratch1);
- // No need to revert anything - right and left are intact.
- break;
- case Token::MUL: {
- // Remove tag from one of the operands. This way the multiplication result
- // will be a smi if it fits the smi range.
- __ SmiUntag(scratch1, right);
- // Do multiplication.
- // lo = lower 32 bits of scratch1 * left.
- // hi = higher 32 bits of scratch1 * left.
- __ Mult(left, scratch1);
- // Check for overflowing the smi range - no overflow if higher 33 bits of
- // the result are identical.
- __ mflo(scratch1);
- __ mfhi(scratch2);
- __ sra(scratch1, scratch1, 31);
- __ Branch(&not_smi_result, ne, scratch1, Operand(scratch2));
- // Go slow on zero result to handle -0.
- __ mflo(v0);
- __ Ret(ne, v0, Operand(zero_reg));
- // We need -0 if we were multiplying a negative number with 0 to get 0.
- // We know one of them was zero.
- __ Addu(scratch2, right, left);
- Label skip;
- // ARM uses the 'pl' condition, which is 'ge'.
- // Negating it results in 'lt'.
- __ Branch(&skip, lt, scratch2, Operand(zero_reg));
- ASSERT(Smi::FromInt(0) == 0);
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, zero_reg); // Return smi 0 if the non-zero one was positive.
- __ bind(&skip);
- // We fall through here if we multiplied a negative number with 0, because
- // that would mean we should produce -0.
- }
- break;
- case Token::DIV: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by zero before getting the result.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the divisor (right) is
- // positive, otherwise it is a -0 case.
- // Quotient is in 'lo', remainder is in 'hi'.
- // Check for no remainder first.
- __ mfhi(scratch1);
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- __ mflo(scratch1);
- __ Branch(&done, ne, scratch1, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch1);
- }
- break;
- case Token::MOD: {
- Label done;
- __ SmiUntag(scratch2, right);
- __ SmiUntag(scratch1, left);
- __ Div(scratch1, scratch2);
- // A minor optimization: div may be calculated asynchronously, so we check
- // for division by 0 before calling mfhi.
- // Check for zero on the right hand side.
- __ Branch(&not_smi_result, eq, scratch2, Operand(zero_reg));
- // If the result is 0, we need to make sure the dividend (left) is
- // positive (or 0), otherwise it is a -0 case.
- // Remainder is in 'hi'.
- __ mfhi(scratch2);
- __ Branch(&done, ne, scratch2, Operand(zero_reg));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ bind(&done);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch1, scratch2, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch1, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, scratch2);
- }
- break;
- case Token::BIT_OR:
- __ Ret(USE_DELAY_SLOT);
- __ or_(v0, left, right);
- break;
- case Token::BIT_AND:
- __ Ret(USE_DELAY_SLOT);
- __ and_(v0, left, right);
- break;
- case Token::BIT_XOR:
- __ Ret(USE_DELAY_SLOT);
- __ xor_(v0, left, right);
- break;
- case Token::SAR:
- // Remove tags from right operand.
- __ GetLeastBitsFromSmi(scratch1, right, 5);
- __ srav(scratch1, left, scratch1);
- // Smi tag result.
- __ And(v0, scratch1, ~kSmiTagMask);
- __ Ret();
- break;
- case Token::SHR:
- // Remove tags from operands. We can't do this on a 31 bit number
- // because then the 0s get shifted into bit 30 instead of bit 31.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srlv(v0, scratch1, scratch2);
- // Unsigned shift is not allowed to produce a negative number, so
- // check the sign bit and the sign bit after Smi tagging.
- __ And(scratch1, v0, Operand(0xc0000000));
- __ Branch(&not_smi_result, ne, scratch1, Operand(zero_reg));
- // Smi tag result.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0);
- break;
- case Token::SHL:
- // Remove tags from operands.
- __ SmiUntag(scratch1, left);
- __ GetLeastBitsFromSmi(scratch2, right, 5);
- __ sllv(scratch1, scratch1, scratch2);
- // Check that the signed result fits in a Smi.
- __ Addu(scratch2, scratch1, Operand(0x40000000));
- __ Branch(&not_smi_result, lt, scratch2, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT);
- __ SmiTag(v0, scratch1); // SmiTag emits one instruction in delay slot.
- break;
- default:
- UNREACHABLE();
- }
- __ bind(&not_smi_result);
-}
-
-
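Every fast path above follows the same shape: compute on (un)tagged integers, then branch to not_smi_result whenever the value cannot be represented as a smi or would have to be -0. The multiplication case is the subtlest, because it untags only one operand, so the 32-bit product is already the tagged result. A hedged C++ sketch of that check, with std::nullopt standing in for the not_smi_result bailout:

#include <cstdint>
#include <optional>

// Sketch of the Token::MUL fast path: 'left' stays smi-tagged (value << 1),
// 'right' is untagged, so the 64-bit product equals the tagged result. It is
// a valid smi iff it fits in 32 signed bits, i.e. iff the upper 33 bits of
// the product agree (the sra/mfhi comparison in the deleted code).
std::optional<int32_t> SmiMul(int32_t tagged_left, int32_t untagged_right) {
  int64_t product = static_cast<int64_t>(tagged_left) * untagged_right;
  int32_t lo = static_cast<int32_t>(product);        // mflo
  int32_t hi = static_cast<int32_t>(product >> 32);  // mfhi
  if ((lo >> 31) != hi) return std::nullopt;         // overflows the smi range
  // A zero product needs the -0 check: one operand is zero, and if the other
  // one is negative the exact result is -0, which no smi can represent.
  if (lo == 0 && tagged_left + untagged_right < 0) return std::nullopt;
  return lo;  // already smi-tagged
}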
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
-
- ASSERT(smi_operands || (not_numbers != NULL));
- if (smi_operands) {
- __ AssertSmi(left);
- __ AssertSmi(right);
- }
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
-
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- switch (op) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // Allocate new heap number for result.
- Register result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
-
- // Load left and right operands into f12 and f14.
- if (smi_operands) {
- __ SmiUntag(scratch1, a0);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ SmiUntag(scratch1, a1);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- } else {
- // Load right operand to f14.
- if (right_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(right, f14, heap_number_map, scratch1, fail);
- }
- // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
- __ LoadNumber(left, f12, heap_number_map, scratch1, fail);
- }
- }
-
- // Calculate the result.
- if (op != Token::MOD) {
- // Using FPU registers:
- // f12: Left value.
- // f14: Right value.
- switch (op) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f10, FieldMemOperand(result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, result);
- } else {
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op, result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
- }
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- if (smi_operands) {
- __ SmiUntag(a3, left);
- __ SmiUntag(a2, right);
- } else {
- // Convert operands to 32-bit integers. Right in a2 and left in a3.
- __ TruncateNumberToI(left, a3, heap_number_map, scratch1, not_numbers);
- __ TruncateNumberToI(right, a2, heap_number_map, scratch1, not_numbers);
- }
- Label result_not_a_smi;
- switch (op) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // The code below for writing into heap numbers isn't capable of
- // writing the register as an unsigned int so we go to slow case if we
- // hit this case.
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- break;
- case Token::SHL:
- // Use only the 5 least significant bits of the shift count.
- __ GetLeastBitsFromInt32(a2, a2, 5);
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
- // Check that the *signed* result fits in a smi.
- __ Addu(a3, a2, Operand(0x40000000));
- __ Branch(&result_not_a_smi, lt, a3, Operand(zero_reg));
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- // Allocate new heap number for result.
- __ bind(&result_not_a_smi);
- Register result = t1;
- if (smi_operands) {
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- } else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
- }
-
- // a2: Answer as signed int32.
- // t1: Heap number to write answer into.
-
- // Nothing can go wrong now, so move the heap number to v0, which is the
- // result.
- __ mov(v0, t1);
- // Convert the int32 in a2 to the heap number in a0. As
- // mentioned above SHR needs to always produce a positive result.
- __ mtc1(a2, f0);
- if (op == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- break;
- }
- default:
- UNREACHABLE();
- }
-}
-
-
-// Generate the smi code. If the operation on smis is successful this return is
-// generated. If the result is not a smi and heap number allocation is not
-// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
- MacroAssembler* masm,
- Label* use_runtime,
- Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
- Label not_smis;
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
-
- // Perform combined smi check on both operands.
- __ Or(scratch1, left, Operand(right));
- STATIC_ASSERT(kSmiTag == 0);
- __ JumpIfNotSmi(scratch1, &not_smis);
-
- // If the smi-smi operation results in a smi, the return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
-
- // If heap number results are possible generate the result in an allocated
- // heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
- }
- __ bind(&not_smis);
-}
-
-
-void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
- Label right_arg_changed, call_runtime;
-
- if (op_ == Token::MOD && encoded_right_arg_.has_value) {
- // It is guaranteed that the value will fit into a Smi, because if it
- // didn't, we wouldn't be here, see BinaryOp_Patch.
- __ Branch(&right_arg_changed,
- ne,
- a0,
- Operand(Smi::FromInt(fixed_right_arg_value())));
- }
-
- if (result_type_ == BinaryOpIC::UNINITIALIZED ||
- result_type_ == BinaryOpIC::SMI) {
- // Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
- } else {
- // Allow heap number result and don't make a transition if a heap number
- // cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
- }
-
- // Code falls through if the result is not returned as either a smi or heap
- // number.
- __ bind(&right_arg_changed);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
- Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- // If both arguments are strings, call the string add stub.
- // Otherwise, do a transition.
-
- // Registers containing left and right operands respectively.
- Register left = a1;
- Register right = a0;
-
- // Test if left operand is a string.
- __ JumpIfSmi(left, &call_runtime);
- __ GetObjectType(left, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- // Test if right operand is a string.
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_stub(
- (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_stub);
-
- __ bind(&call_runtime);
- GenerateTypeTransition(masm);
-}
-
-
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
-
- Register left = a1;
- Register right = a0;
- Register scratch1 = t3;
- Register scratch2 = t5;
- FPURegister double_scratch = f0;
- FPURegister single_scratch = f6;
-
- Register heap_number_result = no_reg;
- Register heap_number_map = t2;
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
- Label call_runtime;
- // Labels for type transition, used for wrong input or output types.
- // Both labels are currently bound to the same position. We use two
- // different labels to differentiate the cause leading to the type transition.
- Label transition;
-
- // Smi-smi fast case.
- Label skip;
- __ Or(scratch1, left, right);
- __ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
- // Fall through if the result is not a smi.
- __ bind(&skip);
-
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV:
- case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
- // Load both operands and check that they are 32-bit integer.
- // Jump to type transition if they are not. The registers a0 and a1 (right
- // and left) are preserved for the runtime call.
-
- __ LoadNumberAsInt32Double(
- right, f14, heap_number_map, scratch1, scratch2, f2, &transition);
- __ LoadNumberAsInt32Double(
- left, f12, heap_number_map, scratch1, scratch2, f2, &transition);
-
- if (op_ != Token::MOD) {
- Label return_heap_number;
- switch (op_) {
- case Token::ADD:
- __ add_d(f10, f12, f14);
- break;
- case Token::SUB:
- __ sub_d(f10, f12, f14);
- break;
- case Token::MUL:
- __ mul_d(f10, f12, f14);
- break;
- case Token::DIV:
- __ div_d(f10, f12, f14);
- break;
- default:
- UNREACHABLE();
- }
-
- if (result_type_ <= BinaryOpIC::INT32) {
- Register except_flag = scratch2;
- const FPURoundingMode kRoundingMode = op_ == Token::DIV ?
- kRoundToMinusInf : kRoundToZero;
- const CheckForInexactConversion kConversion = op_ == Token::DIV ?
- kCheckForInexactConversion : kDontCheckForInexactConversion;
- __ EmitFPUTruncate(kRoundingMode,
- scratch1,
- f10,
- at,
- f16,
- except_flag,
- kConversion);
- // If except_flag != 0, result does not fit in a 32-bit integer.
- __ Branch(&transition, ne, except_flag, Operand(zero_reg));
- // Try to tag the result as a Smi, return heap number on overflow.
- __ SmiTagCheckOverflow(scratch1, scratch1, scratch2);
- __ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
- // Check for minus zero, transition in that case (because we need
- // to return a heap number).
- Label not_zero;
- ASSERT(kSmiTag == 0);
- __ Branch(&not_zero, ne, scratch1, Operand(zero_reg));
- __ mfc1(scratch2, f11);
- __ And(scratch2, scratch2, HeapNumber::kSignMask);
- __ Branch(&transition, ne, scratch2, Operand(zero_reg));
- __ bind(&not_zero);
-
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, scratch1);
- }
-
- __ bind(&return_heap_number);
- // Return a heap number, or fall through to type transition or runtime
- // call if we can't.
- // We are using FPU registers so s0 is available.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
- __ sdc1(f10,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- // A DIV operation expecting an integer result falls through
- // to type transition.
-
- } else {
- if (encoded_right_arg_.has_value) {
- __ Move(f16, fixed_right_arg_value());
- __ BranchF(&transition, NULL, ne, f14, f16);
- }
-
- Label pop_and_call_runtime;
-
- // Allocate a heap number to store the result.
- heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
-
- // Call the C function to handle the double operation.
- CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
- if (FLAG_debug_code) {
- __ stop("Unreachable code.");
- }
-
- __ bind(&pop_and_call_runtime);
- __ Drop(2);
- __ Branch(&call_runtime);
- }
-
- break;
- }
-
- case Token::BIT_OR:
- case Token::BIT_XOR:
- case Token::BIT_AND:
- case Token::SAR:
- case Token::SHR:
- case Token::SHL: {
- Label return_heap_number;
- // Convert operands to 32-bit integers. Right in a2 and left in a3. The
- // registers a0 and a1 (right and left) are preserved for the runtime
- // call.
- __ LoadNumberAsInt32(
- left, a3, heap_number_map, scratch1, scratch2, f0, f2, &transition);
- __ LoadNumberAsInt32(
- right, a2, heap_number_map, scratch1, scratch2, f0, f2, &transition);
-
- // The ECMA-262 standard specifies that, for shift operations, only the
- // 5 least significant bits of the shift value should be used.
- switch (op_) {
- case Token::BIT_OR:
- __ Or(a2, a3, Operand(a2));
- break;
- case Token::BIT_XOR:
- __ Xor(a2, a3, Operand(a2));
- break;
- case Token::BIT_AND:
- __ And(a2, a3, Operand(a2));
- break;
- case Token::SAR:
- __ And(a2, a2, Operand(0x1f));
- __ srav(a2, a3, a2);
- break;
- case Token::SHR:
- __ And(a2, a2, Operand(0x1f));
- __ srlv(a2, a3, a2);
- // SHR is special because it is required to produce a positive answer.
- // We only get a negative result if the shift value (a2) is 0.
- // This result cannot be represented as a signed 32-bit integer, try
- // to return a heap number if we can.
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- break;
- case Token::SHL:
- __ And(a2, a2, Operand(0x1f));
- __ sllv(a2, a3, a2);
- break;
- default:
- UNREACHABLE();
- }
-
- // Check if the result fits in a smi.
- __ Addu(scratch1, a2, Operand(0x40000000));
- // If not try to return a heap number. (We know the result is an int32.)
- __ Branch(&return_heap_number, lt, scratch1, Operand(zero_reg));
- // Tag the result and return.
- __ Ret(USE_DELAY_SLOT); // SmiTag emits one instruction in delay slot.
- __ SmiTag(v0, a2);
-
- __ bind(&return_heap_number);
- heap_number_result = t1;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ sdc1(double_scratch,
- FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
- __ Ret(USE_DELAY_SLOT);
- __ mov(v0, heap_number_result);
-
- break;
- }
-
- default:
- UNREACHABLE();
- }
-
- // We never expect DIV to yield an integer result, so we always generate
- // type transition code for DIV operations expecting an integer result: the
- // code will fall through to this type transition.
- if (transition.is_linked() ||
- ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
- __ bind(&transition);
- GenerateTypeTransition(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
- Label call_runtime;
-
- if (op_ == Token::ADD) {
- // Handle string addition here, because it is the only operation
- // that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
- }
-
- // Convert oddball arguments to numbers.
- Label check, done;
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&check, ne, a1, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a1, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a1, Heap::kNanValueRootIndex);
- }
- __ jmp(&done);
- __ bind(&check);
- __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
- __ Branch(&done, ne, a0, Operand(t0));
- if (Token::IsBitOp(op_)) {
- __ li(a0, Operand(Smi::FromInt(0)));
- } else {
- __ LoadRoot(a0, Heap::kNanValueRootIndex);
- }
- __ bind(&done);
-
- GenerateNumberStub(masm);
-}
-
-
-void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
-
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
-
- __ bind(&call_string_add_or_runtime);
- if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
- }
-
- __ bind(&call_runtime);
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
- }
- __ Ret();
-}
-
-
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
- ASSERT(op_ == Token::ADD);
- Label left_not_string, call_runtime;
-
- Register left = a1;
- Register right = a0;
-
- // Check if left argument is a string.
- __ JumpIfSmi(left, &left_not_string);
- __ GetObjectType(left, a2, a2);
- __ Branch(&left_not_string, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_left_stub(
- (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_left_stub);
-
- // Left operand is not a string, test right.
- __ bind(&left_not_string);
- __ JumpIfSmi(right, &call_runtime);
- __ GetObjectType(right, a2, a2);
- __ Branch(&call_runtime, ge, a2, Operand(FIRST_NONSTRING_TYPE));
-
- StringAddStub string_add_right_stub(
- (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
- GenerateRegisterArgsPush(masm);
- __ TailCallStub(&string_add_right_stub);
-
- // At least one argument is not a string.
- __ bind(&call_runtime);
-}
-
-
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
- // Code below will scratch result if allocation fails. To keep both arguments
- // intact for the runtime call result cannot be one of these.
- ASSERT(!result.is(a0) && !result.is(a1));
-
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
- Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
- // If the overwritable operand is already an object, we skip the
- // allocation of a heap number.
- __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
- // Allocate a heap number for the result.
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- __ Branch(&allocated);
- __ bind(&skip_allocation);
- // Use object holding the overwritable operand for result.
- __ mov(result, overwritable_operand);
- __ bind(&allocated);
- } else {
- ASSERT(mode == NO_OVERWRITE);
- __ AllocateHeapNumber(
- result, scratch1, scratch2, heap_number_map, gc_required);
- }
-}
-
-
-void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
- __ Push(a1, a0);
+void BinaryOpICStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a1, a0 };
+ descriptor->register_param_count_ = 2;
+ descriptor->register_params_ = registers;
+ descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
+ descriptor->SetMissHandler(
+ ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
}
-
void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
// Untagged case: double input in f4, double result goes
// into f4.
@@ -2723,20 +1728,14 @@ bool CEntryStub::NeedsImmovableCode() {
}
-bool CEntryStub::IsPregenerated(Isolate* isolate) {
- return (!save_doubles_ || isolate->fp_stubs_generated()) &&
- result_size_ == 1;
-}
-
-
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
- RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+ BinaryOpICStub::GenerateAheadOfTime(isolate);
}
@@ -2755,16 +1754,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
store_buffer_overflow_code = *stub.GetCode(isolate);
}
- save_doubles_code->set_is_pregenerated(true);
- store_buffer_overflow_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
CEntryStub stub(1, kDontSaveFPRegs);
- Handle<Code> code = stub.GetCode(isolate);
- code->set_is_pregenerated(true);
+ stub.GetCode(isolate);
}
@@ -2795,8 +1791,9 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
if (do_gc) {
// Move result passed in v0 into a0 to call PerformGC.
__ mov(a0, v0);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(ExternalReference::perform_gc_function(isolate), 1, 0);
+ __ PrepareCallCFunction(2, 0, a1);
+ __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+ __ CallCFunction(ExternalReference::perform_gc_function(isolate), 2, 0);
}
ExternalReference scope_depth =
@@ -2875,7 +1872,7 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// v0:v1: result
// sp: stack pointer
// fp: frame pointer
- __ LeaveExitFrame(save_doubles_, s0, true);
+ __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
// Check if we should retry or throw exception.
Label retry;
@@ -3408,8 +2405,7 @@ void StringLengthStub::Generate(MacroAssembler* masm) {
receiver = a0;
}
- StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss,
- support_wrapper_);
+ StubCompiler::GenerateLoadStringLength(masm, receiver, a3, t0, &miss);
__ bind(&miss);
StubCompiler::TailCallBuiltin(
@@ -3942,7 +2938,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the RegExp has been compiled (data contains a fixed array).
__ lw(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
if (FLAG_debug_code) {
- __ And(t0, regexp_data, Operand(kSmiTagMask));
+ __ SmiTst(regexp_data, t0);
__ Check(nz,
kUnexpectedTypeForRegExpDataFixedArrayExpected,
t0,
@@ -4156,7 +3152,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
DirectCEntryStub stub;
stub.GenerateCall(masm, t9);
- __ LeaveExitFrame(false, no_reg);
+ __ LeaveExitFrame(false, no_reg, true);
// v0: result
// subject: subject string (callee saved)
@@ -4424,6 +3420,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Cache the called function in a global property cell. Cache states
// are uninitialized, monomorphic (indicated by a JSFunction), and
// megamorphic.
+ // a0 : number of arguments to the construct function
// a1 : the function to call
// a2 : cache cell for call target
Label initialize, done, miss, megamorphic, not_array_function;
@@ -4444,9 +3441,6 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// If we didn't have a matching function, and we didn't find the megamorph
// sentinel, then we have in the cell either some other function or an
// AllocationSite. Do a map check on the object in a3.
- Handle<Map> allocation_site_map(
- masm->isolate()->heap()->allocation_site_map(),
- masm->isolate());
__ lw(t1, FieldMemOperand(a3, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&miss, ne, t1, Operand(at));
@@ -4485,6 +3479,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
1 << 5 | // a1
1 << 6; // a2
+ // Arguments register must be smi-tagged to call out.
__ SmiTag(a0);
__ MultiPush(kSavedRegs);
@@ -5803,33 +4798,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Just jump to runtime to add the two strings.
__ bind(&call_runtime);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ CallRuntime(Runtime::kStringAdd, 2);
- }
- __ Ret();
- } else {
- __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
- }
+ __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
if (call_builtin.is_linked()) {
__ bind(&call_builtin);
- if ((flags_ & STRING_ADD_ERECT_FRAME) != 0) {
- GenerateRegisterArgsPop(masm);
- // Build a frame.
- {
- FrameScope scope(masm, StackFrame::INTERNAL);
- GenerateRegisterArgsPush(masm);
- __ InvokeBuiltin(builtin_id, CALL_FUNCTION);
- }
- __ Ret();
- } else {
- __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
- }
+ __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
}
}
@@ -5863,13 +4836,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
// Check the number to string cache.
__ bind(&not_string);
// Puts the cached result into scratch1.
- NumberToStringStub::GenerateLookupNumberStringCache(masm,
- arg,
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- slow);
+ __ LookupNumberStringCache(arg, scratch1, scratch2, scratch3, scratch4, slow);
__ mov(arg, scratch1);
__ sw(arg, MemOperand(sp, stack_offset));
__ bind(&done);
@@ -6206,8 +5173,7 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
- __ push(ra);
- __ Push(a1, a0);
+ __ Push(ra, a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
__ addiu(sp, sp, -kPointerSize);
__ CallExternalReference(miss, 3, USE_DELAY_SLOT);
@@ -6222,9 +5188,16 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
void DirectCEntryStub::Generate(MacroAssembler* masm) {
- // No need to pop or drop anything, LeaveExitFrame will restore the old
- // stack, thus dropping the allocated space for the return value.
- // The saved ra is after the reserved stack space for the 4 args.
+ // Make room for arguments to fit the C calling convention. Most of the callers
+ // of DirectCEntryStub::GenerateCall are using EnterExitFrame/LeaveExitFrame
+ // so they handle stack restoring and we don't have to do that here.
+ // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
+ // kCArgsSlotsSize stack space after the call.
+ __ Subu(sp, sp, Operand(kCArgsSlotsSize));
+ // Place the return address on the stack, making the call
+ // GC safe. The RegExp backend also relies on this.
+ __ sw(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Call(t9); // Call the C++ function.
__ lw(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
@@ -6241,33 +5214,11 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
Register target) {
- __ Move(t9, target);
- __ AssertStackIsAligned();
- // Allocate space for arg slots.
- __ Subu(sp, sp, kCArgsSlotsSize);
-
- // Block the trampoline pool through the whole function to make sure the
- // number of generated instructions is constant.
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
-
- // We need to get the current 'pc' value, which is not available on MIPS.
- Label find_ra;
- masm->bal(&find_ra); // ra = pc + 8.
- masm->nop(); // Branch delay slot nop.
- masm->bind(&find_ra);
-
- const int kNumInstructionsToJump = 6;
- masm->addiu(ra, ra, kNumInstructionsToJump * kPointerSize);
- // Push return address (accessible to GC through exit frame pc).
- // This spot for ra was reserved in EnterExitFrame.
- masm->sw(ra, MemOperand(sp, kCArgsSlotsSize));
intptr_t loc =
reinterpret_cast<intptr_t>(GetCode(masm->isolate()).location());
- masm->li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
- // Call the function.
- masm->Jump(t9);
- // Make sure the stored 'ra' points to this position.
- ASSERT_EQ(kNumInstructionsToJump, masm->InstructionsGeneratedSince(&find_ra));
+ __ Move(t9, target);
+ __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+ __ Call(ra);
}
@@ -6524,89 +5475,13 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
}
-struct AheadOfTimeWriteBarrierStubList {
- Register object, value, address;
- RememberedSetAction action;
-};
-
-
-#define REG(Name) { kRegister_ ## Name ## _Code }
-
-static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
- // Used in RegExpExecStub.
- { REG(s2), REG(s0), REG(t3), EMIT_REMEMBERED_SET },
- // Used in CompileArrayPushCall.
- // Also used in StoreIC::GenerateNormal via GenerateDictionaryStore.
- // Also used in KeyedStoreIC::GenerateGeneric.
- { REG(a3), REG(t0), REG(t1), EMIT_REMEMBERED_SET },
- // Used in StoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a1), REG(a2), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a2), REG(a1), EMIT_REMEMBERED_SET },
- // Used in KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
- { REG(a2), REG(a1), REG(a3), EMIT_REMEMBERED_SET },
- { REG(a3), REG(a1), REG(a2), EMIT_REMEMBERED_SET },
- // KeyedStoreStubCompiler::GenerateStoreFastElement.
- { REG(a3), REG(a2), REG(t0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t0), EMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateMapChangeElementTransition
- // and ElementsTransitionGenerator::GenerateSmiToDouble
- // and ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(a2), REG(a3), REG(t5), EMIT_REMEMBERED_SET },
- { REG(a2), REG(a3), REG(t5), OMIT_REMEMBERED_SET },
- // ElementsTransitionGenerator::GenerateDoubleToObject
- { REG(t2), REG(a2), REG(a0), EMIT_REMEMBERED_SET },
- { REG(a2), REG(t2), REG(t5), EMIT_REMEMBERED_SET },
- // StoreArrayLiteralElementStub::Generate
- { REG(t1), REG(a0), REG(t2), EMIT_REMEMBERED_SET },
- // FastNewClosureStub::Generate
- { REG(a2), REG(t0), REG(a1), EMIT_REMEMBERED_SET },
- // StringAddStub::Generate
- { REG(t3), REG(a1), REG(t0), EMIT_REMEMBERED_SET },
- { REG(t3), REG(a0), REG(t0), EMIT_REMEMBERED_SET },
- // Null termination.
- { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
-};
-
-#undef REG
-
-
-bool RecordWriteStub::IsPregenerated(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- if (object_.is(entry->object) &&
- value_.is(entry->value) &&
- address_.is(entry->address) &&
- remembered_set_action_ == entry->action &&
- save_fp_regs_mode_ == kDontSaveFPRegs) {
- return true;
- }
- }
- return false;
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
// Hydrogen code stubs need stub2 at snapshot time.
StoreBufferOverflowStub stub2(kSaveFPRegs);
- stub2.GetCode(isolate)->set_is_pregenerated(true);
-}
-
-
-void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
- for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
- !entry->object.is(no_reg);
- entry++) {
- RecordWriteStub stub(entry->object,
- entry->value,
- entry->address,
- entry->action,
- kDontSaveFPRegs);
- stub.GetCode(isolate)->set_is_pregenerated(true);
- }
+ stub2.GetCode(isolate);
}
@@ -6896,9 +5771,26 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
}
+void StubFailureTailCallTrampolineStub::Generate(MacroAssembler* masm) {
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
+ __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
+ __ mov(a1, v0);
+ int parameter_count_offset =
+ StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+ __ lw(a0, MemOperand(fp, parameter_count_offset));
+ // The parameter count above includes the receiver for the arguments passed to
+ // the deoptimization handler. Subtract the receiver for the parameter count
+ // for the call.
+ __ Subu(a0, a0, 1);
+ masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+ ParameterCount argument_count(a0);
+ __ InvokeFunction(
+ a1, argument_count, JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
+}
+
+
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (masm->isolate()->function_entry_hook() != NULL) {
- AllowStubCallsScope allow_stub_calls(masm, true);
ProfileEntryHookStub stub;
__ push(ra);
__ CallStub(&stub);
@@ -7047,11 +5939,14 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
}
- // Save the resulting elements kind in type info
- __ SmiTag(a3);
- __ lw(t1, FieldMemOperand(a2, Cell::kValueOffset));
- __ sw(a3, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
- __ SmiUntag(a3);
+ // Save the resulting elements kind in type info. We can't just store a3
+ // in the AllocationSite::transition_info field because elements kind is
+ // restricted to a portion of the field...upper bits need to be left alone.
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ lw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+ __ Addu(t0, t0, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+ __ sw(t0, FieldMemOperand(t1, AllocationSite::kTransitionInfoOffset));
+
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
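The deleted SmiTag/sw/SmiUntag sequence overwrote the whole word, which is wrong because AllocationSite::transition_info packs more than the elements kind. Since ElementsKindBits sit at shift 0 (the STATIC_ASSERT) and the transition recorded here is always from a packed kind to its holey counterpart, adding a fixed smi delta updates the kind bits while leaving the upper bits alone. A C++ sketch of the idea, with illustrative constants (the real values come from the ElementsKind enum and AllocationSite::ElementsKindBits):

#include <cstdint>

constexpr uint32_t kElementsKindShift = 0;    // asserted above
constexpr uint32_t kElementsKindMask = 0x3f;  // assumed field width
constexpr uint32_t kPackedToHoleyDelta = 1;   // packed kind + 1 == holey kind

// Read-modify-write as in the new code: preserve the upper bits of
// transition_info and bump only the packed elements kind to its holey twin.
uint32_t TransitionToHoley(uint32_t transition_info) {
  return transition_info + (kPackedToHoleyDelta << kElementsKindShift);
}

// When the kind is read back (see the ArrayConstructorStub hunk below), the
// field is masked so the extra state in the upper bits is ignored.
uint32_t ElementsKindOf(uint32_t transition_info) {
  return (transition_info >> kElementsKindShift) & kElementsKindMask;
}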
@@ -7083,12 +5978,12 @@ static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
for (int i = 0; i <= to_index; ++i) {
ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
T stub(kind);
- stub.GetCode(isolate)->set_is_pregenerated(true);
+ stub.GetCode(isolate);
if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE ||
(!FLAG_track_allocation_sites &&
(kind == initial_kind || kind == initial_holey_kind))) {
T stub1(kind, CONTEXT_CHECK_REQUIRED, DISABLE_ALLOCATION_SITES);
- stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub1.GetCode(isolate);
}
}
}
@@ -7110,11 +6005,11 @@ void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
for (int i = 0; i < 2; i++) {
// For internal arrays we only need a few things.
InternalArrayNoArgumentConstructorStub stubh1(kinds[i]);
- stubh1.GetCode(isolate)->set_is_pregenerated(true);
+ stubh1.GetCode(isolate);
InternalArraySingleArgumentConstructorStub stubh2(kinds[i]);
- stubh2.GetCode(isolate)->set_is_pregenerated(true);
+ stubh2.GetCode(isolate);
InternalArrayNArgumentsConstructorStub stubh3(kinds[i]);
- stubh3.GetCode(isolate)->set_is_pregenerated(true);
+ stubh3.GetCode(isolate);
}
}
@@ -7161,7 +6056,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);
@@ -7193,6 +6088,8 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ lw(a3, FieldMemOperand(a3, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
+ STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+ __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
__ bind(&no_info);
@@ -7248,7 +6145,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// Initial map for the builtin Array function should be a map.
__ lw(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
- __ And(at, a3, Operand(kSmiTagMask));
+ __ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
at, Operand(zero_reg));
__ GetObjectType(a3, a3, t0);