Diffstat (limited to 'deps/v8/src/arm')
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc        8
-rw-r--r--  deps/v8/src/arm/constants-arm.h         10
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc            2
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc      6
-rw-r--r--  deps/v8/src/arm/ic-arm.cc               26
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc  58
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc  72
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h   12
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc       26
9 files changed, 135 insertions, 85 deletions
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 15ef9bcf9d..c33df5cf77 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -5821,7 +5821,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ b(eq, &return_r0);
Label result_longer_than_two;
- // Check for special case of two character ascii string, in which case
+ // Check for special case of two character ASCII string, in which case
// we do a lookup in the symbol table first.
__ cmp(r2, Operand(2));
__ b(gt, &result_longer_than_two);
@@ -5951,7 +5951,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_sequential);
- // Allocate and copy the resulting ascii string.
+ // Allocate and copy the resulting ASCII string.
__ AllocateAsciiString(r0, r2, r4, r6, r7, &runtime);
// Locate first character of substring to copy.
@@ -6268,7 +6268,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&longer_than_two);
// Check if resulting string will be flat.
- __ cmp(r6, Operand(String::kMinNonFlatLength));
+ __ cmp(r6, Operand(ConsString::kMinLength));
__ b(lt, &string_add_flat_result);
// Handle exceptionally long strings in the runtime system.
STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
@@ -6322,7 +6322,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ jmp(&allocated);
// We cannot encounter sliced strings or cons strings here since:
- STATIC_ASSERT(SlicedString::kMinLength >= String::kMinNonFlatLength);
+ STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
// Handle creating a flat result from either external or sequential strings.
// Locate the first characters' locations.
// r0: first string
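
The two hunks above swap String::kMinNonFlatLength for ConsString::kMinLength as the threshold below which StringAdd must build a flat (sequential) result instead of a ConsString, and update the matching STATIC_ASSERT. A minimal standalone C++ sketch of that decision; the constant's value is an assumption matching ConsString::kMinLength of this era (13), not a quoted declaration:

#include <cstddef>
#include <string>

// Assumed value of ConsString::kMinLength: cons strings shorter than this
// are not worth the extra indirection, so the result is flattened instead.
constexpr std::size_t kConsStringMinLength = 13;

// True when concatenation should allocate a flat string (copying both
// halves) rather than a ConsString (a pair of pointers to the halves).
bool ShouldProduceFlatResult(const std::string& a, const std::string& b) {
  return a.size() + b.size() < kConsStringMinLength;
}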
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 49b8db79f8..e767001e1d 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -573,13 +573,13 @@ class Instruction {
// The naming of these accessors corresponds to figure A3-1.
//
// Two kinds of accessors are declared:
- // - <Name>Field() will return the raw field, ie the field's bits at their
+ // - <Name>Field() will return the raw field, i.e. the field's bits at their
// original place in the instruction encoding.
- // eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
- // ConditionField(instr) will return 0xC0000000.
+ // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+ // 0xC0810002, ConditionField(instr) will return 0xC0000000.
// - <Name>Value() will return the field value, shifted back to bit 0.
- // eg. if instr is the 'addgt r0, r1, r2' instruction, encoded as 0xC0810002
- // ConditionField(instr) will return 0xC.
+ // e.g. if instr is the 'addgt r0, r1, r2' instruction, encoded as
+ // 0xC0810002, ConditionValue(instr) will return 0xC.
// Generally applicable fields
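
The Field()/Value() distinction is easy to pin down with the comment's own example. A self-contained sketch using the condition field (bits [31:28]); the helpers are illustrative, not V8's declarations:

#include <cassert>
#include <cstdint>

constexpr int kCondShift = 28;  // condition code lives in bits [31:28]

// <Name>Field(): the raw bits, left at their place in the encoding.
uint32_t ConditionField(uint32_t instr) { return instr & (0xFu << kCondShift); }

// <Name>Value(): the same bits, shifted back down to bit 0.
uint32_t ConditionValue(uint32_t instr) { return instr >> kCondShift; }

int main() {
  const uint32_t addgt = 0xC0810002;  // 'addgt r0, r1, r2' from the comment
  assert(ConditionField(addgt) == 0xC0000000);
  assert(ConditionValue(addgt) == 0xC);
}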
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 603b3cfd9c..16b568cbaf 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -473,7 +473,7 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
return 1;
}
case 'i': { // 'i: immediate value from adjacent bits.
- // Expects tokens in the form imm%02d@%02d, ie. imm05@07, imm10@16
+ // Expects tokens in the form imm%02d@%02d, e.g. imm05@07, imm10@16
int width = (format[3] - '0') * 10 + (format[4] - '0');
int lsb = (format[6] - '0') * 10 + (format[7] - '0');
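
For reference, a token such as "imm05@07" names a 5-bit immediate whose least significant bit sits at bit 7 of the instruction word. A standalone sketch of the extraction, mirroring the two lines above plus the masking the decoder performs once it has width and lsb:

#include <cassert>
#include <cstdint>

// Parses a token of the form imm%02d@%02d and extracts that bit range.
uint32_t ExtractImm(const char* format, uint32_t instr) {
  int width = (format[3] - '0') * 10 + (format[4] - '0');
  int lsb   = (format[6] - '0') * 10 + (format[7] - '0');
  return (instr >> lsb) & ((1u << width) - 1u);
}

int main() {
  // Bits [11:7] of 0x00000380 are 0b00111, i.e. 7.
  assert(ExtractImm("imm05@07", 0x00000380u) == 7u);
}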
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 38999a8e36..6654263989 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -115,7 +115,7 @@ class JumpPatchSite BASE_EMBEDDED {
// function.
//
// The live registers are:
-// o r1: the JS function object being called (ie, ourselves)
+// o r1: the JS function object being called (i.e., ourselves)
// o cp: our context
// o fp: our caller's frame pointer
// o sp: stack pointer
@@ -3618,7 +3618,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
- // Replace separator with its ascii character value.
+ // Replace separator with its ASCII character value.
__ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
@@ -3629,7 +3629,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// result_pos: the position to which we are currently copying characters.
// element: Current array element.
// elements_end: Array end.
- // separator: Single separator ascii char (in lower byte).
+ // separator: Single separator ASCII char (in lower byte).
// Copy the separator character to the result.
__ strb(separator, MemOperand(result_pos, 1, PostIndex));
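
The shape the comments describe (copy the first element, then loop copying separator-then-element) keeps the first element free of a leading separator. The same control flow in plain C++, purely illustrative:

#include <cstddef>
#include <string>
#include <vector>

std::string JoinWithOneCharSeparator(const std::vector<std::string>& parts,
                                     char separator) {
  std::string result;
  std::size_t i = 0;
  if (i < parts.size()) result += parts[i++];  // first element, no separator
  for (; i < parts.size(); ++i) {              // separator, then element
    result += separator;
    result += parts[i];
  }
  return result;
}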
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 219d354c7e..583453ab2c 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1031,14 +1031,25 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
__ ldr(r4, FieldMemOperand(r0, String::kHashFieldOffset));
__ eor(r3, r3, Operand(r4, ASR, String::kHashShift));
- __ And(r3, r3, Operand(KeyedLookupCache::kCapacityMask));
+ int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+ __ And(r3, r3, Operand(mask));
// Load the key (consisting of map and symbol) from the cache and
// check for match.
+ Label try_second_entry, hit_on_first_entry, load_in_object_property;
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(isolate);
__ mov(r4, Operand(cache_keys));
__ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
+ // Move r4 to second entry.
+ __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
+ __ cmp(r2, r5);
+ __ b(ne, &try_second_entry);
+ __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load symbol
+ __ cmp(r0, r5);
+ __ b(eq, &hit_on_first_entry);
+
+ __ bind(&try_second_entry);
__ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); // Move r4 to symbol.
__ cmp(r2, r5);
__ b(ne, &slow);
@@ -1053,6 +1064,18 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// r3 : lookup cache index
ExternalReference cache_field_offsets =
ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+ // Hit on second entry.
+ __ mov(r4, Operand(cache_field_offsets));
+ __ add(r3, r3, Operand(1));
+ __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+ __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
+ __ sub(r5, r5, r6, SetCC);
+ __ b(ge, &property_array_property);
+ __ jmp(&load_in_object_property);
+
+ // Hit on first entry.
+ __ bind(&hit_on_first_entry);
__ mov(r4, Operand(cache_field_offsets));
__ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
__ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
@@ -1060,6 +1083,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ b(ge, &property_array_property);
// Load in-object property.
+ __ bind(&load_in_object_property);
__ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ add(r6, r6, r5); // Index from start of object.
__ sub(r1, r1, Operand(kHeapObjectTag)); // Remove the heap tag.
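
Taken together, the hunk makes the keyed lookup cache probe two-way: the hash is masked with kCapacityMask & kHashMask so the index is bucket-aligned (low bit cleared), the first entry's map and symbol are compared, and on a miss the second entry is tried before branching to the slow path; a second-entry hit uses field offset index r3 + 1. A standalone C++ sketch of that lookup; the capacity and types are assumptions modeled on KeyedLookupCache, not its real declarations:

#include <cstdint>

struct Map;     // opaque stand-ins for V8 heap objects
struct String;

constexpr int kLength = 64;                          // assumed cache capacity
constexpr int kCapacityMask = kLength - 1;
constexpr int kEntriesPerBucket = 2;
constexpr int kHashMask = ~(kEntriesPerBucket - 1);  // clears the low bit

struct Entry { Map* map; String* symbol; };
Entry keys[kLength];
int field_offsets[kLength];

// Probe both entries of the bucket; return the cached field offset, or -1
// for a miss (where the assembly instead branches to the slow path).
int Lookup(Map* map, String* symbol, uint32_t hash) {
  int index = static_cast<int>(hash) & kCapacityMask & kHashMask;
  for (int i = 0; i < kEntriesPerBucket; i++) {
    if (keys[index + i].map == map && keys[index + i].symbol == symbol) {
      return field_offsets[index + i];
    }
  }
  return -1;
}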
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index b5ed517087..080785725f 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -262,7 +262,7 @@ bool LCodeGen::GenerateDeferredCode() {
bool LCodeGen::GenerateDeoptJumpTable() {
// Check that the jump table is accessible from everywhere in the function
- // code, ie that offsets to the table can be encoded in the 24bit signed
+ // code, i.e. that offsets to the table can be encoded in the 24-bit signed
// immediate of a branch instruction.
// To simplify we consider the code size from the first instruction to the
// end of the jump table. We also don't consider the pc load delta.
@@ -2387,7 +2387,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
Handle<String> name) {
LookupResult lookup(isolate());
type->LookupInDescriptors(NULL, *name, &lookup);
- ASSERT(lookup.IsProperty() &&
+ ASSERT(lookup.IsFound() &&
(lookup.type() == FIELD || lookup.type() == CONSTANT_FUNCTION));
if (lookup.type() == FIELD) {
int index = lookup.GetLocalFieldIndexFromMap(*type);
@@ -2828,7 +2828,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
this, pointers, Safepoint::kLazyDeopt);
// The number of arguments is stored in receiver which is r0, as expected
// by InvokeFunction.
- v8::internal::ParameterCount actual(receiver);
+ ParameterCount actual(receiver);
__ InvokeFunction(function, actual, CALL_FUNCTION,
safepoint_generator, CALL_AS_METHOD);
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2883,31 +2883,41 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr,
CallKind call_kind) {
- // Change context if needed.
- bool change_context =
- (info()->closure()->context() != function->context()) ||
- scope()->contains_with() ||
- (scope()->num_heap_slots() > 0);
- if (change_context) {
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
- }
-
- // Set r0 to arguments count if adaption is not needed. Assumes that r0
- // is available to write to at this point.
- if (!function->NeedsArgumentsAdaption()) {
- __ mov(r0, Operand(arity));
- }
+ bool can_invoke_directly = !function->NeedsArgumentsAdaption() ||
+ function->shared()->formal_parameter_count() == arity;
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
- // Invoke function.
- __ SetCallKind(r5, call_kind);
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- __ Call(ip);
+ if (can_invoke_directly) {
+ __ LoadHeapObject(r1, function);
+ // Change context if needed.
+ bool change_context =
+ (info()->closure()->context() != function->context()) ||
+ scope()->contains_with() ||
+ (scope()->num_heap_slots() > 0);
+ if (change_context) {
+ __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+ }
- // Set up deoptimization.
- RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ // Set r0 to arguments count if adaption is not needed. Assumes that r0
+ // is available to write to at this point.
+ if (!function->NeedsArgumentsAdaption()) {
+ __ mov(r0, Operand(arity));
+ }
+
+ // Invoke function.
+ __ SetCallKind(r5, call_kind);
+ __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+ __ Call(ip);
+
+ // Set up deoptimization.
+ RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+ } else {
+ SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+ ParameterCount count(arity);
+ __ InvokeFunction(function, count, CALL_FUNCTION, generator, call_kind);
+ }
// Restore context.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -2916,7 +2926,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- __ LoadHeapObject(r1, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -3351,7 +3360,6 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- __ LoadHeapObject(r1, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
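
The rewritten CallKnownFunction takes the direct path only when no arguments adaptation can be needed, and otherwise defers to the generic InvokeFunction, which can route through the arguments adaptor. That split is also why the two call sites above drop their explicit LoadHeapObject into r1: the direct path now loads r1 itself, and InvokeFunction materializes the function on its own. A minimal sketch of the predicate; the struct and its fields are illustrative, not V8's API:

struct JSFunctionInfo {
  int formal_parameter_count;     // declared parameter count
  bool needs_arguments_adaption;  // false e.g. for don't-adapt builtins
};

// A known function can be entered at its code entry point, skipping the
// arguments adaptor, exactly when adaptation is never needed or the static
// call-site arity already matches the formal parameter count.
bool CanInvokeDirectly(const JSFunctionInfo& f, int call_site_arity) {
  return !f.needs_arguments_adaption ||
         f.formal_parameter_count == call_site_arity;
}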
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index fa97611cf8..9894ff202e 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -957,10 +957,12 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
Handle<Code> code_constant,
Register code_reg,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind) {
bool definitely_matches = false;
+ *definitely_mismatches = false;
Label regular_invoke;
// Check whether the expected and actual arguments count match. If not,
@@ -991,6 +993,7 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
// arguments.
definitely_matches = true;
} else {
+ *definitely_mismatches = true;
mov(r2, Operand(expected.immediate()));
}
}
@@ -1018,7 +1021,9 @@ void MacroAssembler::InvokePrologue(const ParameterCount& expected,
SetCallKind(r5, call_kind);
Call(adaptor);
call_wrapper.AfterCall();
- b(done);
+ if (!*definitely_mismatches) {
+ b(done);
+ }
} else {
SetCallKind(r5, call_kind);
Jump(adaptor, RelocInfo::CODE_TARGET);
@@ -1038,23 +1043,26 @@ void MacroAssembler::InvokeCode(Register code,
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
-
- InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, Handle<Code>::null(), code,
+ &done, &definitely_mismatches, flag,
call_wrapper, call_kind);
- if (flag == CALL_FUNCTION) {
- call_wrapper.BeforeCall(CallSize(code));
- SetCallKind(r5, call_kind);
- Call(code);
- call_wrapper.AfterCall();
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- SetCallKind(r5, call_kind);
- Jump(code);
- }
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ call_wrapper.BeforeCall(CallSize(code));
+ SetCallKind(r5, call_kind);
+ Call(code);
+ call_wrapper.AfterCall();
+ } else {
+ ASSERT(flag == JUMP_FUNCTION);
+ SetCallKind(r5, call_kind);
+ Jump(code);
+ }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+ }
}
@@ -1068,20 +1076,23 @@ void MacroAssembler::InvokeCode(Handle<Code> code,
ASSERT(flag == JUMP_FUNCTION || has_frame());
Label done;
-
- InvokePrologue(expected, actual, code, no_reg, &done, flag,
+ bool definitely_mismatches = false;
+ InvokePrologue(expected, actual, code, no_reg,
+ &done, &definitely_mismatches, flag,
NullCallWrapper(), call_kind);
- if (flag == CALL_FUNCTION) {
- SetCallKind(r5, call_kind);
- Call(code, rmode);
- } else {
- SetCallKind(r5, call_kind);
- Jump(code, rmode);
- }
+ if (!definitely_mismatches) {
+ if (flag == CALL_FUNCTION) {
+ SetCallKind(r5, call_kind);
+ Call(code, rmode);
+ } else {
+ SetCallKind(r5, call_kind);
+ Jump(code, rmode);
+ }
- // Continue here if InvokePrologue does handle the invocation due to
- // mismatched parameter counts.
- bind(&done);
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+ }
}
@@ -1116,6 +1127,7 @@ void MacroAssembler::InvokeFunction(Register fun,
void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
+ const CallWrapper& call_wrapper,
CallKind call_kind) {
// You can't call a function without a valid frame.
ASSERT(flag == JUMP_FUNCTION || has_frame());
@@ -1129,7 +1141,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
// allow recompilation to take effect without changing any of the
// call sites.
ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
- InvokeCode(r3, expected, actual, flag, NullCallWrapper(), call_kind);
+ InvokeCode(r3, expected, actual, flag, call_wrapper, call_kind);
}
@@ -2387,7 +2399,7 @@ void MacroAssembler::ConvertToInt32(Register source,
b(gt, not_int32);
// We know the exponent is smaller than 30 (biased). If it is less than
- // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
// it rounds to zero.
const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
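
The thread running through these macro-assembler changes: InvokePrologue now reports, through the new definitely_mismatches out-parameter, the case where both argument counts are compile-time immediates and differ. It then invokes the arguments adaptor itself (call or tail-jump depending on the flag), so both InvokeCode overloads must skip the regular call sequence and the done label, which would otherwise be dead code. A hedged sketch of the contract, with code emission reduced to comments:

// Sketch only: the real InvokePrologue takes ParameterCount operands and
// emits ARM code; here counts are plain ints and emission is elided.
void InvokePrologueSketch(int expected, int actual,
                          bool* definitely_mismatches) {
  *definitely_mismatches = false;
  if (expected != actual) {
    *definitely_mismatches = true;
    // ...invoke the arguments adaptor here; nothing after the prologue
    // should execute, so the caller must not emit the regular invoke...
    return;
  }
  // Fall through: the caller emits SetCallKind + Call/Jump + bind(done).
}

void InvokeCodeSketch(int expected, int actual) {
  bool definitely_mismatches = false;
  InvokePrologueSketch(expected, actual, &definitely_mismatches);
  if (!definitely_mismatches) {
    // ...emit the direct invoke and bind the done label...
  }
}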
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 4b55a3b064..60c2e6f6bf 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -243,7 +243,7 @@ class MacroAssembler: public Assembler {
Register scratch3,
Label* object_is_white_and_not_data);
- // Detects conservatively whether an object is data-only, ie it does need to
+ // Detects conservatively whether an object is data-only, i.e. it does not need to
// be scanned by the garbage collector.
void JumpIfDataObject(Register value,
Register scratch,
@@ -539,6 +539,7 @@ class MacroAssembler: public Assembler {
void InvokeFunction(Handle<JSFunction> function,
const ParameterCount& actual,
InvokeFlag flag,
+ const CallWrapper& call_wrapper,
CallKind call_kind);
void IsObjectJSObjectType(Register heap_object,
@@ -606,7 +607,7 @@ class MacroAssembler: public Assembler {
}
// Check if the given instruction is a 'type' marker.
- // ie. check if is is a mov r<type>, r<type> (referenced as nop(type))
+ // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type))
// These instructions are generated to mark special locations in the code,
// like some special IC code.
static inline bool IsMarkedCode(Instr instr, int type) {
@@ -810,7 +811,7 @@ class MacroAssembler: public Assembler {
// Check if the map of an object is equal to a specified map and branch to
// label if not. Skip the smi check if not required (object is known to be a
// heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
- // against maps that are ElementsKind transition maps of the specificed map.
+ // against maps that are ElementsKind transition maps of the specified map.
void CheckMap(Register obj,
Register scratch,
Handle<Map> map,
@@ -908,7 +909,7 @@ class MacroAssembler: public Assembler {
// Truncates a double using a specific rounding mode.
// Clears the z flag (ne condition) if an overflow occurs.
// If exact_conversion is true, the z flag is also cleared if the conversion
- // was inexact, ie. if the double value could not be converted exactly
+ // was inexact, i.e. if the double value could not be converted exactly
// to a 32-bit integer.
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
@@ -1025,7 +1026,7 @@ class MacroAssembler: public Assembler {
// Calls an API function. Allocates HandleScope, extracts returned value
// from handle and propagates exceptions. Restores context. stack_space
- // - space to be unwound on exit (includes the call js arguments space and
+ // - space to be unwound on exit (includes the call JS arguments space and
// the additional space allocated for the fast call).
void CallApiFunctionAndReturn(ExternalReference function, int stack_space);
@@ -1248,6 +1249,7 @@ class MacroAssembler: public Assembler {
Handle<Code> code_constant,
Register code_reg,
Label* done,
+ bool* definitely_mismatches,
InvokeFlag flag,
const CallWrapper& call_wrapper,
CallKind call_kind);
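
The header-side counterpart of the .cc changes: InvokeFunction(Handle<JSFunction>, ...) gains a CallWrapper parameter so callers can observe the emitted call; the stub-cache call sites below simply pass NullCallWrapper(). The hook pattern, sketched under assumed names following the shape of V8's CallWrapper:

class CallWrapper {
 public:
  virtual ~CallWrapper() {}
  // Called just before emitting a call of the given size in bytes.
  virtual void BeforeCall(int call_size) const = 0;
  // Called just after emitting the call.
  virtual void AfterCall() const = 0;
};

class NullCallWrapper : public CallWrapper {
 public:
  virtual void BeforeCall(int call_size) const {}
  virtual void AfterCall() const {}
};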
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index c3a82ff934..33fbee52d6 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -562,11 +562,11 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
int argc) {
// ----------- S t a t e -------------
// -- sp[0] : holder (set by CheckPrototypes)
- // -- sp[4] : callee js function
+ // -- sp[4] : callee JS function
// -- sp[8] : call data
- // -- sp[12] : last js argument
+ // -- sp[12] : last JS argument
// -- ...
- // -- sp[(argc + 3) * 4] : first js argument
+ // -- sp[(argc + 3) * 4] : first JS argument
// -- sp[(argc + 4) * 4] : receiver
// -----------------------------------
// Get the function and setup the context.
@@ -583,7 +583,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
} else {
__ Move(r6, call_data);
}
- // Store js function and call data.
+ // Store JS function and call data.
__ stm(ib, sp, r5.bit() | r6.bit());
// r2 points to call data as expected by Arguments
@@ -738,7 +738,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
__ InvokeFunction(optimization.constant_function(), arguments_,
- JUMP_FUNCTION, call_kind);
+ JUMP_FUNCTION, NullCallWrapper(), call_kind);
}
// Deferred code for fast API call case---clean preallocated space.
@@ -1179,7 +1179,7 @@ void StubCompiler::GenerateLoadInterceptor(Handle<JSObject> object,
// and CALLBACKS, so inline only them; other cases may be added
// later.
bool compile_followup_inline = false;
- if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->IsFound() && lookup->IsCacheable()) {
if (lookup->type() == FIELD) {
compile_followup_inline = true;
} else if (lookup->type() == CALLBACKS &&
@@ -1904,7 +1904,8 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -1983,7 +1984,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ vmrs(r3);
// Set custom FPSCR:
// - Set rounding mode to "Round towards Minus Infinity"
- // (ie bits [23:22] = 0b10).
+ // (i.e. bits [23:22] = 0b10).
// - Clear VFP cumulative exception flags (bits [3:0]).
// - Make sure the Flush-to-zero mode control bit is unset (bit 24).
__ bic(r9, r3,
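
The hunk is truncated here; in the full source the bic above is paired with an orr that sets the rounding mode. A host-side C++ sketch of the same bit manipulation; the mask values mirror the ARM FPSCR layout (RMode in bits [23:22], FZ at bit 24, cumulative exception flags in bits [3:0]), while the constant names are assumptions in the spirit of constants-arm.h:

#include <cstdint>

constexpr uint32_t kVFPExceptionMask    = 0xFu;        // bits [3:0]
constexpr uint32_t kVFPRoundingModeMask = 0x3u << 22;  // bits [23:22]
constexpr uint32_t kVFPFlushToZeroMask  = 1u << 24;    // FZ bit
constexpr uint32_t kRoundToMinusInf     = 0x2u << 22;  // RMode = 0b10

// Equivalent of: bic r9, r3, #(masks); orr r9, r9, #kRoundToMinusInf
uint32_t PrepareFPSCRForFloor(uint32_t fpscr) {
  fpscr &= ~(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask);
  return fpscr | kRoundToMinusInf;
}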
@@ -2049,7 +2050,8 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
__ bind(&slow);
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -2147,7 +2149,8 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
// Tail call the full function. We do not have to patch the receiver
// because the function makes no use of it.
__ bind(&slow);
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, CALL_AS_METHOD);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
__ bind(&miss);
// r2: function name.
@@ -2325,7 +2328,8 @@ Handle<Code> CallStubCompiler::CompileCallConstant(Handle<Object> object,
CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
? CALL_AS_FUNCTION
: CALL_AS_METHOD;
- __ InvokeFunction(function, arguments(), JUMP_FUNCTION, call_kind);
+ __ InvokeFunction(
+ function, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
// Handle call cache miss.
__ bind(&miss);