// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
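
// Pushes the store IC's register arguments (value, slot, feedback vector,
// receiver, and name) in the order the store runtime entries below expect.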
static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
          StoreWithVectorDescriptor::VectorRegister(),
          StoreWithVectorDescriptor::ReceiverRegister(),
          StoreWithVectorDescriptor::NameRegister());
}
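
// Called when a keyed store is not handled by the inline cache. The runtime
// miss handler performs the store and updates the IC's feedback state.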
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
  StoreIC_PushArgs(masm);
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
  StoreIC_PushArgs(masm);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}
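
// Maps a comparison token to the ARM64 condition code checked after the
// compare. EQ and EQ_STRICT both return eq, since strictness does not affect
// the final flags test.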
Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}
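
// Returns true if the call at |address| is followed by inline patch data,
// i.e. an inlined smi check was emitted for this compare IC.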
bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address = Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}

// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address = Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
           static_cast<void*>(address), static_cast<void*>(info_address),
           static_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(isolate, to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);
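
  // With kSmiTag == 0 and kSmiTagMask == 1, bit 0 alone distinguishes smis
  // (bit clear) from heap objects (bit set), so testing a single bit suffices.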
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }
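
  // The disabled form tests xzr, whose bit 0 always reads as zero: there
  // "tbz xzr, #0" branches unconditionally and "tbnz xzr, #0" never branches,
  // so the unpatched check behaves as if the value were never a smi.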
  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64