author     Simon Hausmann <simon.hausmann@digia.com>   2012-10-15 16:08:57 +0200
committer  Simon Hausmann <simon.hausmann@digia.com>   2012-10-15 16:08:57 +0200
commit     5466563f4b5b6b86523e3f89bb7f77e5b5270c78 (patch)
tree       8caccf7cd03a15207cde3ba282c88bf132482a91 /Source/JavaScriptCore/jit
parent     33b26980cb24288b5a9f2590ccf32a949281bb79 (diff)
download   qtwebkit-5466563f4b5b6b86523e3f89bb7f77e5b5270c78.tar.gz
Imported WebKit commit 0dc6cd75e1d4836eaffbb520be96fac4847cc9d2 (http://svn.webkit.org/repository/webkit/trunk@131300)
WebKit update which introduces the QtWebKitWidgets module that contains the WK1 widgets-based API. (In fact, it renames QtWebKit to QtWebKitWidgets while we're working on completing the entire split as part of https://bugs.webkit.org/show_bug.cgi?id=99314.)
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp      1
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.h                   16
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp                                 39
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h                                   82
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp                             22
-rw-r--r--  Source/JavaScriptCore/jit/JITCall32_64.cpp                        34
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.h                                6
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineMethods.h                      83
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp                         103
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp                    132
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp                  557
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp             192
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp                           203
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h                              20
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h                         2
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h                   10
16 files changed, 1174 insertions(+), 328 deletions(-)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 2123f5a67..7ee3e0497 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -31,7 +31,6 @@
#include "CodeProfiling.h"
#include <errno.h>
-#include <sys/mman.h>
#include <unistd.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
index 0e17ca035..3f61179a3 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.h
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -30,10 +30,7 @@
#include "MacroAssemblerCodeRef.h"
#include <wtf/Platform.h>
-// Unfortunately this only works on GCC-like compilers. And it's currently only used
-// by LLInt and DFG, which also are restricted to GCC-like compilers. We should
-// probably fix that at some point.
-#if COMPILER(GCC) && ENABLE(JIT)
+#if ENABLE(JIT)
#if CALLING_CONVENTION_IS_STDCALL
#define HOST_CALL_RETURN_VALUE_OPTION CDECL
@@ -45,6 +42,8 @@ namespace JSC {
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL;
+#if COMPILER(GCC)
+
// This is a public declaration only to convince CLANG not to elide it.
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL;
@@ -53,15 +52,14 @@ inline void initializeHostCallReturnValue()
getHostCallReturnValueWithExecState(0);
}
-}
-
#else // COMPILER(GCC)
-namespace JSC {
inline void initializeHostCallReturnValue() { }
-}
#endif // COMPILER(GCC)
-#endif // HostCallReturnValue_h
+} // namespace JSC
+#endif // ENABLE(JIT)
+
+#endif // HostCallReturnValue_h
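
Net effect of the three hunks above: getHostCallReturnValue() is now declared whenever the JIT is enabled, and only the GCC-specific initialization trick stays behind COMPILER(GCC), with a single namespace block spanning both branches. A sketch of the resulting guard nesting, reconstructed from the hunks (unrelated declarations elided):

    #if ENABLE(JIT)

    namespace JSC {

    extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL;

    #if COMPILER(GCC)

    // This is a public declaration only to convince CLANG not to elide it.
    extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL;

    inline void initializeHostCallReturnValue()
    {
        getHostCallReturnValueWithExecState(0);
    }

    #else // COMPILER(GCC)

    inline void initializeHostCallReturnValue() { }

    #endif // COMPILER(GCC)

    } // namespace JSC

    #endif // ENABLE(JIT)

    #endif // HostCallReturnValue_h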
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index bf5ac88dd..143800d18 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -78,7 +78,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
#if USE(JSVALUE32_64)
, m_jumpTargetIndex(0)
, m_mappedBytecodeOffset((unsigned)-1)
- , m_mappedVirtualRegisterIndex(RegisterFile::ReturnPC)
+ , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
, m_mappedTag((RegisterID)-1)
, m_mappedPayload((RegisterID)-1)
#else
@@ -400,6 +400,7 @@ void JIT::privateCompileSlowCases()
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
m_propertyAccessInstructionIndex = 0;
+ m_byValInstructionIndex = 0;
m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
@@ -606,8 +607,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
nop();
preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
- emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+ emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
Label beginLabel(this);
@@ -616,7 +617,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
sampleInstruction(m_codeBlock->instructions().begin());
#endif
- Jump registerFileCheck;
+ Jump stackCheck;
if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(DFG_JIT)
#if DFG_ENABLE(SUCCESS_STATS)
@@ -646,7 +647,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
#endif
addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
- registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
+ stackCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1);
}
Label functionBody = label();
@@ -662,9 +663,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
Label arityCheck;
if (m_codeBlock->codeType() == FunctionCode) {
- registerFileCheck.link(this);
+ stackCheck.link(this);
m_bytecodeOffset = 0;
- JITStubCall(this, cti_register_file_check).call();
+ JITStubCall(this, cti_stack_check).call();
#ifndef NDEBUG
m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
@@ -672,10 +673,10 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
arityCheck = label();
preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
- emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+ emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
- load32(payloadFor(RegisterFile::ArgumentCount), regT1);
+ load32(payloadFor(JSStack::ArgumentCount), regT1);
branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
m_bytecodeOffset = 0;
@@ -715,8 +716,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
- unsigned offset = it->second.branchOffset;
- it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ unsigned offset = it->value.branchOffset;
+ it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
}
}
}
@@ -738,6 +739,20 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
+ m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
+ for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
+ CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
+ CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
+ CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
+ CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
+
+ m_codeBlock->byValInfo(i) = ByValInfo(
+ m_byValCompilationInfo[i].bytecodeIndex,
+ badTypeJump,
+ m_byValCompilationInfo[i].arrayMode,
+ differenceBetweenCodePtr(badTypeJump, doneTarget),
+ differenceBetweenCodePtr(returnAddress, slowPathTarget));
+ }
m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
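
The two differenceBetweenCodePtr() offsets recorded into each ByValInfo are what later let a specialized by-val stub rejoin the main code and find its slow path. A hedged sketch of how a stub generator could consume them; the ByValInfo field names (badTypeJumpToDone, returnAddressToSlowPath) and the stubRoutine member are assumptions, since ByValInfo's definition is not part of this diff:

    // Sketch only: wire a freshly generated stub back into the patched code.
    // 'done' is the stub's exit jump; byValInfo->badTypeJump was recorded above.
    CodeLocationLabel doneTarget =
        byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone); // assumed field
    patchBuffer.link(done, doneTarget);

    // Divert the inline type check so it falls into the new stub from now on.
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(byValInfo->badTypeJump,
        CodeLocationLabel(byValInfo->stubRoutine->code().code())); // assumed field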
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 150aae9ea..3e16972e2 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -58,9 +58,9 @@ namespace JSC {
class JIT;
class JSPropertyNameIterator;
class Interpreter;
- class Register;
- class RegisterFile;
class JSScope;
+ class JSStack;
+ class Register;
class StructureChain;
struct CallLinkInfo;
@@ -264,6 +264,25 @@ namespace JSC {
void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer);
};
+ struct ByValCompilationInfo {
+ ByValCompilationInfo() { }
+
+ ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget)
+ : bytecodeIndex(bytecodeIndex)
+ , badTypeJump(badTypeJump)
+ , arrayMode(arrayMode)
+ , doneTarget(doneTarget)
+ {
+ }
+
+ unsigned bytecodeIndex;
+ MacroAssembler::PatchableJump badTypeJump;
+ JITArrayMode arrayMode;
+ MacroAssembler::Label doneTarget;
+ MacroAssembler::Label slowPathTarget;
+ MacroAssembler::Call returnAddress;
+ };
+
struct StructureStubCompilationInfo {
MacroAssembler::DataLabelPtr hotPathBegin;
MacroAssembler::Call hotPathOther;
@@ -348,6 +367,20 @@ namespace JSC {
jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
}
+
+ static void compileGetByVal(JSGlobalData* globalData, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
+ }
+
+ static void compilePutByVal(JSGlobalData* globalData, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ {
+ JIT jit(globalData, codeBlock);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
+ }
static PassRefPtr<ExecutableMemoryHandle> compileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
{
@@ -379,6 +412,10 @@ namespace JSC {
static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
+#if ENABLE(DFG_JIT)
+ // Force profiling to be enabled during stub generation.
+ jit.m_canBeOptimized = true;
+#endif // ENABLE(DFG_JIT)
return jit.privateCompilePatchGetArrayLength(returnAddress);
}
@@ -397,6 +434,9 @@ namespace JSC {
void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, PropertyOffset cachedOffset, StructureChain*, ReturnAddressPtr, bool direct);
+
+ void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
PassRefPtr<ExecutableMemoryHandle> privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*);
Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
@@ -434,7 +474,7 @@ namespace JSC {
void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
- template<typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
+ template<typename ClassType, MarkedBlock::DestructorType, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
void emitAllocateBasicStorage(size_t, ptrdiff_t offsetFromBase, RegisterID result);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr, RegisterID scratch);
@@ -452,7 +492,27 @@ namespace JSC {
void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
-
+
+ JITArrayMode chooseArrayMode(ArrayProfile*);
+
+ // Property is in regT1, base is in regT0. regT2 contains indexing type.
+ // Property is int-checked and zero extended. Base is cell checked.
+ // Structure is already profiled. Returns the slow cases. Fall-through
+ // case contains result in regT0, and it is not yet profiled.
+ JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness);
+ JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
+
+ // Property is in regT1, base is in regT0. regT2 contains indexing type.
+ // The value to store is not yet loaded. Property is int-checked and
+ // zero-extended. Base is cell checked. Structure is already profiled.
+ // Returns the slow cases.
+ JumpList emitContiguousPutByVal(Instruction*, PatchableJump& badType);
+ JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
+ JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness, TypedArrayRounding);
+ JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
+
enum FinalObjectMode { MayBeFinal, KnownNotFinal };
#if USE(JSVALUE32_64)
@@ -778,12 +838,12 @@ namespace JSC {
void emitInitRegister(unsigned dst);
- void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
- void emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
- void emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
- void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
- void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
- void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+ void emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+ void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+ void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
JSValue getConstantOperand(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
@@ -870,6 +930,7 @@ namespace JSC {
Vector<CallRecord> m_calls;
Vector<Label> m_labels;
Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<ByValCompilationInfo> m_byValCompilationInfo;
Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
Vector<JumpTable> m_jmpTable;
@@ -879,6 +940,7 @@ namespace JSC {
Vector<SwitchRecord> m_switches;
unsigned m_propertyAccessInstructionIndex;
+ unsigned m_byValInstructionIndex;
unsigned m_globalResolveInfoIndex;
unsigned m_callLinkInfoIndex;
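
ByValCompilationInfo is filled in two passes: the hot-path emitter records the first four members, and the slow-case pass completes slowPathTarget and returnAddress before JIT::privateCompile() folds everything into the CodeBlock's ByValInfo table (see the JIT.cpp hunk above). A minimal sketch of that pairing; variable names and stub-call arguments are illustrative, the real bodies live in JITPropertyAccess.cpp:

    // Hot path (emit_op_get_by_val, sketch): remember the patchable type
    // check and the label where the fast path rejoins.
    PatchableJump badType;
    JumpList slowCases = emitContiguousGetByVal(currentInstruction, badType);
    Label done = label();
    m_byValCompilationInfo.append(
        ByValCompilationInfo(m_bytecodeOffset, badType, JITContiguous, done));

    // Slow-case pass (emitSlow_op_get_by_val, sketch): fill in the rest.
    Label slowPath = label();
    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    Call call = stubCall.call(dst);
    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;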
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index b5f4f8278..1de877daa 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -74,20 +74,20 @@ void JIT::compileLoadVarargs(Instruction* instruction)
emitGetVirtualRegister(arguments, regT0);
slowCase.append(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
// regT0: argumentCountIncludingThis
move(regT0, regT1);
- add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT1);
+ add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
lshift32(TrustedImm32(3), regT1);
addPtr(callFrameRegister, regT1);
// regT1: newCallFrame
- slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1));
+ slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1));
// Initialize ArgumentCount.
- store32(regT0, Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
// Initialize 'this'.
emitGetVirtualRegister(thisValue, regT2);
@@ -125,7 +125,7 @@ void JIT::compileCallEval()
JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
stubCall.call();
addSlowCase(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
sampleCodeBlock(m_codeBlock);
}
@@ -134,7 +134,7 @@ void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
sampleCodeBlock(m_codeBlock);
@@ -173,14 +173,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
}
addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
- store32(TrustedImm32(argCount), Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
} // regT1 holds newCallFrame with ArgumentCount initialized.
- store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
- storePtr(callFrameRegister, Address(regT1, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
- storePtr(regT0, Address(regT1, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
+ storePtr(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
+ storePtr(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
move(regT1, callFrameRegister);
if (opcodeID == op_call_eval) {
@@ -201,7 +201,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
sampleCodeBlock(m_codeBlock);
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index 09727d532..ad827cdf9 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -59,8 +59,8 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
unsigned dst = currentInstruction[1].u.operand;
emitLoad(dst, regT1, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT2);
ret();
@@ -76,8 +76,8 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
Jump notObject = emitJumpIfNotObject(regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT2);
ret();
@@ -86,8 +86,8 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
notObject.link(this);
emitLoad(thisReg, regT1, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT2);
ret();
@@ -149,20 +149,20 @@ void JIT::compileLoadVarargs(Instruction* instruction)
emitLoadTag(arguments, regT1);
slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
- load32(payloadFor(RegisterFile::ArgumentCount), regT2);
+ load32(payloadFor(JSStack::ArgumentCount), regT2);
slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
// regT2: argumentCountIncludingThis
move(regT2, regT3);
- add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT3);
+ add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT3);
lshift32(TrustedImm32(3), regT3);
addPtr(callFrameRegister, regT3);
// regT3: newCallFrame
- slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT3));
+ slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT3));
// Initialize ArgumentCount.
- store32(regT2, payloadFor(RegisterFile::ArgumentCount, regT3));
+ store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));
// Initialize 'this'.
emitLoad(thisValue, regT1, regT0);
@@ -202,7 +202,7 @@ void JIT::compileCallEval()
JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
stubCall.call();
addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
sampleCodeBlock(m_codeBlock);
}
@@ -211,7 +211,7 @@ void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- emitLoad(RegisterFile::Callee, regT1, regT0);
+ emitLoad(JSStack::Callee, regT1, regT0);
emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
sampleCodeBlock(m_codeBlock);
@@ -251,14 +251,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);
- store32(TrustedImm32(argCount), payloadFor(RegisterFile::ArgumentCount, regT3));
+ store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3));
} // regT3 holds newCallFrame with ArgumentCount initialized.
- storePtr(TrustedImmPtr(instruction), tagFor(RegisterFile::ArgumentCount, callFrameRegister));
+ storePtr(TrustedImmPtr(instruction), tagFor(JSStack::ArgumentCount, callFrameRegister));
emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.
- storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
- emitStore(RegisterFile::Callee, regT1, regT0, regT3);
+ storePtr(callFrameRegister, Address(regT3, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
+ emitStore(JSStack::Callee, regT1, regT0, regT3);
move(regT3, callFrameRegister);
if (opcodeID == op_call_eval) {
@@ -281,7 +281,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
sampleCodeBlock(m_codeBlock);
diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
index e39b4f98d..93fa88a23 100644
--- a/Source/JavaScriptCore/jit/JITCode.h
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -38,7 +38,7 @@ namespace JSC {
#if ENABLE(JIT)
class JSGlobalData;
- class RegisterFile;
+ class JSStack;
#endif
class JITCode {
@@ -129,9 +129,9 @@ namespace JSC {
#if ENABLE(JIT)
// Execute the code!
- inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData)
+ inline JSValue execute(JSStack* stack, CallFrame* callFrame, JSGlobalData* globalData)
{
- JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), registerFile, callFrame, 0, 0, globalData));
+ JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), stack, callFrame, 0, 0, globalData));
return globalData->exception ? jsNull() : result;
}
#endif
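
Callers change mechanically with the rename. A sketch of the post-patch call shape, using the interpreter's stack() accessor as the other hunks in this diff do (the real call site in Interpreter::execute is outside this directory):

    // Sketch: entering JIT code after the RegisterFile -> JSStack rename.
    JSValue result = codeBlock->getJITCode().execute(
        &globalData->interpreter->stack(), callFrame, globalData);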
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index ed63ad348..a7aecd3e8 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -50,12 +50,12 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
storePtr(from, payloadFor(entry, callFrameRegister));
}
-ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
@@ -63,18 +63,18 @@ ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFi
storePtr(from, payloadFor(entry, callFrameRegister));
}
-ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
{
store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
store32(from, intPayloadFor(entry, callFrameRegister));
}
-ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
{
storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
@@ -101,7 +101,7 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
cont8Bit.link(this);
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
@@ -265,9 +265,9 @@ ALWAYS_INLINE void JIT::updateTopCallFrame()
ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
if (m_bytecodeOffset) {
#if USE(JSVALUE32_64)
- storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
+ storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#else
- store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
+ store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
#endif
}
storePtr(callFrameRegister, &m_globalData->topCallFrame);
@@ -405,13 +405,16 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
-template <typename ClassType, bool destructor, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
+template <typename ClassType, MarkedBlock::DestructorType destructorType, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
{
+ size_t size = ClassType::allocationSize(INLINE_STORAGE_CAPACITY);
MarkedAllocator* allocator = 0;
- if (destructor)
- allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
+ if (destructorType == MarkedBlock::Normal)
+ allocator = &m_globalData->heap.allocatorForObjectWithNormalDestructor(size);
+ else if (destructorType == MarkedBlock::ImmortalStructure)
+ allocator = &m_globalData->heap.allocatorForObjectWithImmortalStructureDestructor(size);
else
- allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
+ allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(size);
loadPtr(&allocator->m_freeList.head, result);
addSlowCase(branchTestPtr(Zero, result));
@@ -428,7 +431,7 @@ template <typename ClassType, bool destructor, typename StructureType> inline vo
template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
{
- emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
+ emitAllocateBasicJSObject<JSFinalObject, MarkedBlock::None, T>(structure, result, scratch);
}
inline void JIT::emitAllocateBasicStorage(size_t size, ptrdiff_t offsetFromBase, RegisterID result)
@@ -445,23 +448,24 @@ inline void JIT::emitAllocateBasicStorage(size_t size, ptrdiff_t offsetFromBase,
inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr, RegisterID scratch)
{
- unsigned initialLength = std::max(length, 4U);
- size_t initialStorage = Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(initialLength));
+ unsigned initialLength = std::max(length, BASE_VECTOR_LEN);
+ size_t initialStorage = Butterfly::totalSize(0, 0, true, initialLength * sizeof(EncodedJSValue));
+
+ loadPtr(m_codeBlock->globalObject()->addressOfArrayStructure(), scratch);
+ load8(Address(scratch, Structure::indexingTypeOffset()), storagePtr);
+ and32(TrustedImm32(IndexingShapeMask), storagePtr);
+ addSlowCase(branch32(NotEqual, storagePtr, TrustedImm32(ContiguousShape)));
// We allocate the backing store first to ensure that garbage collection
// doesn't happen during JSArray initialization.
emitAllocateBasicStorage(initialStorage, sizeof(IndexingHeader), storageResult);
// Allocate the cell for the array.
- loadPtr(m_codeBlock->globalObject()->addressOfArrayStructure(), scratch);
- emitAllocateBasicJSObject<JSArray, false>(scratch, cellResult, storagePtr);
+ emitAllocateBasicJSObject<JSArray, MarkedBlock::None>(scratch, cellResult, storagePtr);
- // Store all the necessary info in the ArrayStorage.
- store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
- store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));
- store32(Imm32(initialLength), Address(storageResult, ArrayStorage::vectorLengthOffset()));
- store32(TrustedImm32(0), Address(storageResult, ArrayStorage::indexBiasOffset()));
- storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::sparseMapOffset()));
+ // Store all the necessary info in the indexing header.
+ store32(Imm32(length), Address(storageResult, Butterfly::offsetOfPublicLength()));
+ store32(Imm32(initialLength), Address(storageResult, Butterfly::offsetOfVectorLength()));
// Store the newly allocated ArrayStorage.
storePtr(storageResult, Address(cellResult, JSObject::butterflyOffset()));
@@ -470,12 +474,12 @@ inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, R
for (unsigned i = 0; i < length; i++) {
#if USE(JSVALUE64)
loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
- storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+ storePtr(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i));
#else
load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
- store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+ store32(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i));
load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
- store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
+ store32(storagePtr, Address(storageResult, sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
#endif
}
}
@@ -559,10 +563,29 @@ inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndI
inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
{
- if (!canBeOptimized())
- return;
-
+#if ENABLE(VALUE_PROFILER)
store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole());
+#else
+ UNUSED_PARAM(arrayProfile);
+#endif
+}
+
+static inline bool arrayProfileSaw(ArrayProfile* profile, IndexingType capability)
+{
+#if ENABLE(VALUE_PROFILER)
+ return !!(profile->observedArrayModes() & (asArrayModes(NonArray | capability) | asArrayModes(ArrayClass | capability)));
+#else
+ UNUSED_PARAM(profile);
+ UNUSED_PARAM(capability);
+ return false;
+#endif
+}
+
+inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
+{
+ if (arrayProfileSaw(profile, ArrayStorageShape))
+ return JITArrayStorage;
+ return JITContiguous;
}
#if USE(JSVALUE32_64)
@@ -755,7 +778,7 @@ inline void JIT::unmap(RegisterID registerID)
inline void JIT::unmap()
{
m_mappedBytecodeOffset = (unsigned)-1;
- m_mappedVirtualRegisterIndex = RegisterFile::ReturnPC;
+ m_mappedVirtualRegisterIndex = JSStack::ReturnPC;
m_mappedTag = (RegisterID)-1;
m_mappedPayload = (RegisterID)-1;
}
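
chooseArrayMode() above is the policy knob for the new by-val fast paths: specialize for ArrayStorage only if the profile has actually observed that shape, otherwise assume the flat contiguous layout. A sketch of the dispatch an emitter can build on it (hypothetical call site; the real switches live in JITPropertyAccess*.cpp):

    // Illustrative dispatch on the profiled array shape.
    PatchableJump badType;
    JumpList slowCases;
    switch (chooseArrayMode(profile)) {
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        ASSERT_NOT_REACHED();
        break;
    }
    // badType and slowCases then feed the JIT's slow-case bookkeeping and
    // are remembered in m_byValCompilationInfo (see the JIT.h hunks above).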
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 3b7f38dc7..33db1d44f 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -76,11 +76,11 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
// Also initialize ReturnPC for use by lazy linking and exceptions.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
@@ -96,11 +96,11 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
// Also initialize ReturnPC for use by lazy linking and exceptions.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
@@ -116,7 +116,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
@@ -139,7 +139,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
@@ -156,37 +156,37 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
callSlowCase.link(this);
// Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
// Also initialize ReturnPC and CodeBlock, like a JS function would.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callCallNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT3);
ret();
constructSlowCase.link(this);
// Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
// Also initialize ReturnPC and CodeBlock, like a JS function would.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callConstructNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT3);
ret();
@@ -231,18 +231,18 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
Label nativeCallThunk = align();
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
#if CPU(X86_64)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
peek(regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT1, JSStack::ReturnPC);
// Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
@@ -250,7 +250,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(X86Registers::r9, executableOffsetToFunction));
@@ -260,18 +260,18 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(r0 == regT0, r1 == regT1, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, ARMRegisters::r0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
call(Address(regT2, executableOffsetToFunction));
@@ -281,12 +281,12 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#elif CPU(MIPS)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(a0, a1, a2, a3);
// Host function signature: f(ExecState*);
@@ -299,7 +299,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
move(callFrameRegister, MIPSRegisters::a0);
// Call
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(regT2, executableOffsetToFunction));
@@ -382,7 +382,7 @@ void JIT::emit_op_end(Instruction* currentInstruction)
{
ASSERT(returnValueRegister != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast<int>(sizeof(Register))));
ret();
}
@@ -583,10 +583,10 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
// Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
// Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
// Return.
restoreReturnAddressBeforeReturn(regT1);
@@ -606,10 +606,10 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
Jump notObject = emitJumpIfNotObject(regT2);
// Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
// Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
// Return.
restoreReturnAddressBeforeReturn(regT1);
@@ -621,10 +621,10 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
// Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
// Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
// Return.
restoreReturnAddressBeforeReturn(regT1);
@@ -798,11 +798,11 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell.get();
+ Special::Pointer ptr = currentInstruction[2].u.specialPointer;
unsigned target = currentInstruction[3].u.operand;
emitGetVirtualRegister(src, regT0);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue(ptr)))), target);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target);
}
void JIT::emit_op_eq(Instruction* currentInstruction)
@@ -1285,7 +1285,7 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
loadPtr(Address(regT0, JSFunction::offsetOfCachedInheritorID()), regT2);
addSlowCase(branchTestPtr(Zero, regT2));
@@ -1509,7 +1509,7 @@ void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
sub32(TrustedImm32(1), regT0);
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(dst, regT0);
@@ -1539,7 +1539,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
addSlowCase(emitJumpIfNotImmediateInteger(regT1));
add32(TrustedImm32(1), regT1);
// regT1 now contains the integer index of the argument we want, including this
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT2);
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
addSlowCase(branch32(AboveOrEqual, regT1, regT2));
neg32(regT1);
@@ -1565,7 +1565,7 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val);
+ JITStubCall stubCall(this, cti_op_get_by_val_generic);
stubCall.addArgument(arguments, regT2);
stubCall.addArgument(property, regT2);
stubCall.callWithValueProfiling(dst);
@@ -1577,7 +1577,7 @@ void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
{
int skip = currentInstruction[5].u.operand;
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
ASSERT(skip || !checkTopLevel);
@@ -1661,7 +1661,8 @@ void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
int length = currentInstruction[3].u.operand;
- if (CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length)))) {
+ if (m_codeBlock->globalObject()->isHavingABadTime()
+ || CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length)))) {
JITStubCall stubCall(this, cti_op_new_array);
stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
@@ -1680,8 +1681,10 @@ void JIT::emitSlow_op_new_array(Instruction* currentInstruction, Vector<SlowCase
// If the allocation would be oversize, we will already make the proper stub call above in
// emit_op_new_array.
int length = currentInstruction[3].u.operand;
- if (CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length))))
+ if (m_codeBlock->globalObject()->isHavingABadTime()
+ || CopiedSpace::isOversize(Butterfly::totalSize(0, 0, true, ArrayStorage::sizeFor(length))))
return;
+ linkSlowCase(iter); // We're having a bad time.
linkSlowCase(iter); // Not enough space in CopiedSpace for storage.
linkSlowCase(iter); // Not enough space in MarkedSpace for cell.
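
The added linkSlowCase(iter) pairs with the ContiguousShape check that emitAllocateJSArray now emits (see the JITInlineMethods.h hunk above): slow cases must be linked in exactly the order the fast path added them. A sketch of that discipline, with the first line quoted from the hunk above and the other two cases added inside the allocation helpers:

    // Fast path adds three slow cases, in order:
    addSlowCase(branch32(NotEqual, storagePtr, TrustedImm32(ContiguousShape))); // 1: bad time / wrong shape
    // ... emitAllocateBasicStorage adds case 2 (CopiedSpace exhausted) ...
    // ... emitAllocateBasicJSObject adds case 3 (MarkedSpace free list empty) ...

    // The slow path therefore links them in the same order:
    linkSlowCase(iter); // 1: we're having a bad time
    linkSlowCase(iter); // 2: not enough space in CopiedSpace for storage
    linkSlowCase(iter); // 3: not enough space in MarkedSpace for cell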
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 21744fba8..db5365535 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -70,11 +70,11 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
// Also initialize ReturnPC for use by lazy linking and exceptions.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
@@ -90,11 +90,11 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
// Also initialize ReturnPC for use by lazy linking and exceptions.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
@@ -110,7 +110,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
@@ -134,7 +134,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Finish canonical initialization before JS function call.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
@@ -152,37 +152,37 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
callSlowCase.link(this);
// Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
// Also initialize ReturnPC and CodeBlock, like a JS function would.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callCallNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT3);
ret();
constructSlowCase.link(this);
// Finish canonical initialization before JS function call.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2, regT2);
- emitPutCellToCallFrameHeader(regT2, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2, regT2);
+ emitPutCellToCallFrameHeader(regT2, JSStack::ScopeChain);
// Also initialize ReturnPC and CodeBlock, like a JS function would.
preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callConstructNotJSFunction = call();
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT3);
ret();
@@ -227,18 +227,18 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
Label nativeCallThunk = align();
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
#if CPU(X86)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
peek(regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT1, JSStack::ReturnPC);
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
@@ -247,7 +247,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
// call the function
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(regT1, executableOffsetToFunction));
@@ -257,19 +257,19 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(r0 == regT0, r1 == regT1, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, ARMRegisters::r0);
// call the function
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
call(Address(regT2, executableOffsetToFunction));
@@ -278,18 +278,18 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#elif CPU(SH4)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(r0 == regT4, r1 == regT5, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, regT4);
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT5);
move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
@@ -298,12 +298,12 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#elif CPU(MIPS)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(a0, a1, a2, a3);
// Host function signature: f(ExecState*);
@@ -316,7 +316,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
move(callFrameRegister, MIPSRegisters::a0);
// Call
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(regT2, executableOffsetToFunction));
@@ -362,18 +362,18 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
{
Call nativeCall;
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_globalData->topCallFrame);
#if CPU(X86)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
peek(regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT1, JSStack::ReturnPC);
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
@@ -391,18 +391,18 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(r0 == regT0, r1 == regT1, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, ARMRegisters::r0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
@@ -414,12 +414,12 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
#elif CPU(MIPS)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(a0, a1, a2, a3);
// Host function signature: f(ExecState*);
@@ -432,7 +432,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
move(callFrameRegister, MIPSRegisters::a0);
// Call
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
@@ -446,18 +446,18 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
#elif CPU(SH4)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
// Calling convention: f(r0 == regT4, r1 == regT5, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, regT4);
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT5);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT5);
move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
@@ -518,7 +518,7 @@ void JIT::emit_op_end(Instruction* currentInstruction)
{
ASSERT(returnValueRegister != callFrameRegister);
emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast<int>(sizeof(Register))));
ret();
}
@@ -975,12 +975,12 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell.get();
+ Special::Pointer ptr = currentInstruction[2].u.specialPointer;
unsigned target = currentInstruction[3].u.operand;
emitLoad(src, regT1, regT0);
addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)), target);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(ptr)), target);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target);
}
void JIT::emit_op_eq(Instruction* currentInstruction)
@@ -1544,7 +1544,7 @@ void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
loadPtr(Address(regT0, JSFunction::offsetOfCachedInheritorID()), regT2);
addSlowCase(branchTestPtr(Zero, regT2));
@@ -1626,7 +1626,7 @@ void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
- load32(payloadFor(RegisterFile::ArgumentCount), regT0);
+ load32(payloadFor(JSStack::ArgumentCount), regT0);
sub32(TrustedImm32(1), regT0);
emitStoreInt32(dst, regT0);
}
@@ -1654,7 +1654,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
add32(TrustedImm32(1), regT2);
// regT2 now contains the integer index of the argument we want, including this
- load32(payloadFor(RegisterFile::ArgumentCount), regT3);
+ load32(payloadFor(JSStack::ArgumentCount), regT3);
addSlowCase(branch32(AboveOrEqual, regT2, regT3));
neg32(regT2);
@@ -1680,7 +1680,7 @@ void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vecto
emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val);
+ JITStubCall stubCall(this, cti_op_get_by_val_generic);
stubCall.addArgument(arguments);
stubCall.addArgument(property);
stubCall.callWithValueProfiling(dst);
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index b4d52e225..9deded62a 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -97,6 +97,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
unsigned dst = currentInstruction[1].u.operand;
unsigned base = currentInstruction[2].u.operand;
unsigned property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
emitGetVirtualRegisters(base, regT0, property, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
@@ -111,17 +112,69 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0, base);
loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSite(regT2, regT3, currentInstruction[4].u.arrayProfile);
- addSlowCase(branchTest32(Zero, regT2, TrustedImm32(HasArrayStorage)));
+ emitArrayProfilingSite(regT2, regT3, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT2);
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITContiguous:
+ slowCases = emitContiguousGetByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+ break;
+ default:
+ CRASH();
+ break;
+ }
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+
+ Label done = label();
+
+#if !ASSERT_DISABLED
+ Jump resultOK = branchTestPtr(NonZero, regT0);
+ breakpoint();
+ resultOK.link(this);
+#endif
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst);
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+}
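// A sketch of the patching scheme implied above: the fast path is specialized
// for the one indexing shape the ArrayProfile predicts. badType is left as a
// patchable jump so that a run-time miss can be relinked to a stub generated
// later for the shape actually observed, and ByValCompilationInfo records the
// bytecode offset, the jump, and the done label that stub must rejoin.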
+
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
+ slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr), regT0);
+ slowCases.append(branchTestPtr(Zero, regT0));
+
+ return slowCases;
+}
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
- addSlowCase(branchTestPtr(Zero, regT0));
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
- emitValueProfilingSite();
- emitPutVirtualRegister(dst);
+ add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
+ badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
+
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, ArrayStorage::vectorOffset()), regT0);
+ slowCases.append(branchTestPtr(Zero, regT0));
+
+ return slowCases;
}
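// The shape test above is an unsigned range check: after
// add32(-ArrayStorageShape), every value in [ArrayStorageShape,
// SlowPutArrayStorageShape] lands in [0, SlowPutArrayStorageShape -
// ArrayStorageShape], so a single unsigned Above branch accepts both
// array-storage shapes while every other indexing shape wraps around to a
// large unsigned value and is rejected.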
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -146,10 +199,16 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
+ Label slowPath = label();
+
JITStubCall stubCall(this, cti_op_get_by_val);
stubCall.addArgument(base, regT2);
stubCall.addArgument(property, regT2);
- stubCall.call(dst);
+ Call call = stubCall.call(dst);
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
emitValueProfilingSite();
}
@@ -159,16 +218,16 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID
ASSERT(sizeof(JSValue) == 8);
if (finalObjectMode == MayBeFinal) {
- Jump isInline = branch32(LessThan, offset, TrustedImm32(inlineStorageCapacity));
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
neg32(offset);
Jump done = jump();
isInline.link(this);
- addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (inlineStorageCapacity - 2) * sizeof(EncodedJSValue)), base, scratch);
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
done.link(this);
} else {
#if !ASSERT_DISABLED
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(inlineStorageCapacity));
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
breakpoint();
isOutOfLine.link(this);
#endif
@@ -176,7 +235,7 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID
neg32(offset);
}
signExtend32ToPtr(offset, offset);
- loadPtr(BaseIndex(scratch, offset, ScalePtr, (inlineStorageCapacity - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), result);
+ loadPtr(BaseIndex(scratch, offset, ScalePtr, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
@@ -199,7 +258,10 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
load32(addressFor(i), regT3);
sub32(TrustedImm32(1), regT3);
addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- add32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_offsetBase)), regT3);
+ Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+ add32(TrustedImm32(firstOutOfLineOffset), regT3);
+ sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
+ inlineProperty.link(this);
compileGetDirectOffset(regT0, regT0, regT3, regT1);
emitPutVirtualRegister(dst, regT0);
@@ -226,7 +288,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
unsigned base = currentInstruction[1].u.operand;
unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
emitGetVirtualRegisters(base, regT0, property, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
@@ -234,10 +296,76 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
zeroExtend32ToPtr(regT1, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSite(regT2, regT3, currentInstruction[4].u.arrayProfile);
- addSlowCase(branchTest32(Zero, regT2, TrustedImm32(HasArrayStorage)));
+ emitArrayProfilingSite(regT2, regT3, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT2);
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITContiguous:
+ slowCases = emitContiguousPutByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+ break;
+ default:
+ CRASH();
+ break;
+ }
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+
+ Label done = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+
+ emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+}
+
+JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType)
+{
+ unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+ badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
+
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
+ Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));
+
+ Label storeResult = label();
+ emitGetVirtualRegister(value, regT3);
+ storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr));
+
+ Jump done = jump();
+ outOfBounds.link(this);
+
+ JumpList slowCases;
+ slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));
+
+ emitArrayProfileStoreToHoleSpecialCase(profile);
+
+ add32(TrustedImm32(1), regT1, regT3);
+ store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
+ jump().linkTo(storeResult, this);
+
+ done.link(this);
+
+ return slowCases;
+}
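// Two-tier bounds check: an index below publicLength stores directly; an index
// between publicLength and vectorLength appends in place, recording a
// store-to-hole in the ArrayProfile and bumping publicLength; only an index at
// or beyond vectorLength falls through to the slow-path stub call.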
+
+JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
+{
+ unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
@@ -247,8 +375,8 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Jump end = jump();
empty.link(this);
- emitArrayProfileStoreToHoleSpecialCase(currentInstruction[4].u.arrayProfile);
- add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ emitArrayProfileStoreToHoleSpecialCase(profile);
+ add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);
add32(TrustedImm32(1), regT1);
@@ -257,8 +385,8 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
jump().linkTo(storeResult, this);
end.link(this);
-
- emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+
+ return slowCases;
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -270,13 +398,19 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
+ linkSlowCase(iter); // out of bounds
+
+ Label slowPath = label();
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
stubPutByValCall.addArgument(regT0);
stubPutByValCall.addArgument(property, regT2);
stubPutByValCall.addArgument(value, regT2);
- stubPutByValCall.call();
+ Call call = stubPutByValCall.call();
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
}
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
@@ -656,7 +790,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
emitArrayProfilingSiteForBytecodeIndex(regT2, regT1, stubInfo->bytecodeIndex);
Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
- Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(HasArrayStorage));
+ Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));
// Checks out okay! - get the length from the storage
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
@@ -1060,7 +1194,7 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
int skip = currentInstruction[3].u.operand;
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
ASSERT(skip || !checkTopLevel);
if (checkTopLevel && skip--) {
@@ -1085,7 +1219,7 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1);
bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
ASSERT(skip || !checkTopLevel);
if (checkTopLevel && skip--) {
@@ -1274,6 +1408,377 @@ bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
}
}
+void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ switch (arrayMode) {
+ case JITContiguous:
+ slowCases = emitContiguousGetByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+ break;
+ case JITInt8Array:
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->int8ArrayDescriptor(), 1, SignedTypedArray);
+ break;
+ case JITInt16Array:
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->int16ArrayDescriptor(), 2, SignedTypedArray);
+ break;
+ case JITInt32Array:
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->int32ArrayDescriptor(), 4, SignedTypedArray);
+ break;
+ case JITUint8Array:
+ case JITUint8ClampedArray:
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint8ArrayDescriptor(), 1, UnsignedTypedArray);
+ break;
+ case JITUint16Array:
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint16ArrayDescriptor(), 2, UnsignedTypedArray);
+ break;
+ case JITUint32Array:
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_globalData->uint32ArrayDescriptor(), 4, UnsignedTypedArray);
+ break;
+ case JITFloat32Array:
+ slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_globalData->float32ArrayDescriptor(), 4);
+ break;
+ case JITFloat64Array:
+ slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_globalData->float64ArrayDescriptor(), 8);
+ break;
+ default:
+ CRASH();
+ }
+
+ Jump done = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+
+ patchBuffer.link(badType, CodeLocationLabel(returnAddress.value()).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(slowCases, CodeLocationLabel(returnAddress.value()).labelAtOffset(byValInfo->returnAddressToSlowPath));
+
+ patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ patchBuffer,
+ ("Baseline get_by_val stub for CodeBlock %p, return point %p", m_codeBlock, returnAddress.value()));
+
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_val_generic));
+}
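// Once the stub is finalized, the inline badType jump is relinked to enter it,
// and the slow-path call site is repointed at cti_op_get_by_val_generic, so a
// site that keeps missing stops trying to re-patch itself.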
+
+void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ switch (arrayMode) {
+ case JITContiguous:
+ slowCases = emitContiguousPutByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+ break;
+ case JITInt8Array:
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->int8ArrayDescriptor(), 1, SignedTypedArray, TruncateRounding);
+ break;
+ case JITInt16Array:
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->int16ArrayDescriptor(), 2, SignedTypedArray, TruncateRounding);
+ break;
+ case JITInt32Array:
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->int32ArrayDescriptor(), 4, SignedTypedArray, TruncateRounding);
+ break;
+ case JITUint8Array:
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint8ArrayDescriptor(), 1, UnsignedTypedArray, TruncateRounding);
+ break;
+ case JITUint8ClampedArray:
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray, ClampRounding);
+ break;
+ case JITUint16Array:
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint16ArrayDescriptor(), 2, UnsignedTypedArray, TruncateRounding);
+ break;
+ case JITUint32Array:
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_globalData->uint32ArrayDescriptor(), 4, UnsignedTypedArray, TruncateRounding);
+ break;
+ case JITFloat32Array:
+ slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_globalData->float32ArrayDescriptor(), 4);
+ break;
+ case JITFloat64Array:
+ slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_globalData->float64ArrayDescriptor(), 8);
+ break;
+ default:
+ CRASH();
+ break;
+ }
+
+ Jump done = jump();
+
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+
+ patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+
+ patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ patchBuffer,
+ ("Baseline put_by_val stub for CodeBlock %p, return point %p", m_codeBlock, returnAddress.value()));
+
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_val_generic));
+}
+
+JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness)
+{
+ // The best way to test the array type is to use the classInfo. We need to do so without
+ // clobbering the register that holds the indexing type, base, and property.
+
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ RegisterID resultPayload = regT0;
+ RegisterID scratch = regT3;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ RegisterID resultPayload = regT0;
+ RegisterID resultTag = regT1;
+ RegisterID scratch = regT3;
+#endif
+
+ JumpList slowCases;
+
+ loadPtr(Address(base, JSCell::structureOffset()), scratch);
+ badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+ loadPtr(Address(base, descriptor.m_storageOffset), base);
+
+ switch (elementSize) {
+ case 1:
+ if (signedness == SignedTypedArray)
+ load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
+ else
+ load8(BaseIndex(base, property, TimesOne), resultPayload);
+ break;
+ case 2:
+ if (signedness == SignedTypedArray)
+ load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
+ else
+ load16(BaseIndex(base, property, TimesTwo), resultPayload);
+ break;
+ case 4:
+ load32(BaseIndex(base, property, TimesFour), resultPayload);
+ break;
+ default:
+ CRASH();
+ }
+
+ Jump done;
+ if (elementSize == 4 && signedness == UnsignedTypedArray) {
+ Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));
+
+ convertInt32ToDouble(resultPayload, fpRegT0);
+ addDouble(AbsoluteAddress(&twoToThe32), fpRegT0);
+#if USE(JSVALUE64)
+ moveDoubleToPtr(fpRegT0, resultPayload);
+ subPtr(tagTypeNumberRegister, resultPayload);
+#else
+ moveDoubleToInts(fpRegT0, resultPayload, resultTag);
+#endif
+
+ done = jump();
+ canBeInt.link(this);
+ }
+
+#if USE(JSVALUE64)
+ orPtr(tagTypeNumberRegister, resultPayload);
+#else
+ move(TrustedImm32(JSValue::Int32Tag), resultTag);
+#endif
+ if (done.isSet())
+ done.link(this);
+ return slowCases;
+}
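// A Uint32 load may not fit in an int32. If the loaded bits read as a negative
// signed value, the true unsigned value is that value plus 2^32 (e.g.
// 0xFFFFFFFF reads as -1, and -1 + 4294967296 = 4294967295), which is why the
// out-of-range case converts to double and adds twoToThe32 before boxing.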
+
+JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
+{
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ RegisterID resultPayload = regT0;
+ RegisterID scratch = regT3;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ RegisterID resultPayload = regT0;
+ RegisterID resultTag = regT1;
+ RegisterID scratch = regT3;
+#endif
+
+ JumpList slowCases;
+
+ loadPtr(Address(base, JSCell::structureOffset()), scratch);
+ badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+ loadPtr(Address(base, descriptor.m_storageOffset), base);
+
+ switch (elementSize) {
+ case 4:
+ loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
+ convertFloatToDouble(fpRegT0, fpRegT0);
+ break;
+ case 8: {
+ loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
+ Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
+ static const double NaN = std::numeric_limits<double>::quiet_NaN();
+ loadDouble(&NaN, fpRegT0);
+ notNaN.link(this);
+ break;
+ }
+ default:
+ CRASH();
+ }
+
+#if USE(JSVALUE64)
+ moveDoubleToPtr(fpRegT0, resultPayload);
+ subPtr(tagTypeNumberRegister, resultPayload);
+#else
+ moveDoubleToInts(fpRegT0, resultPayload, resultTag);
+#endif
+ return slowCases;
+}
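// branchDouble(DoubleEqual, fpRegT0, fpRegT0) is the usual NaN test: a double
// compares equal to itself unless it is NaN. Arbitrary NaN bit patterns read
// from the array are replaced with the canonical quiet NaN because JSVALUE64
// NaN-boxes tags and pointers inside NaN payloads; a raw payload leaking
// through could be misinterpreted as a cell or an int.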
+
+JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
+{
+ unsigned value = currentInstruction[3].u.operand;
+
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ RegisterID earlyScratch = regT3;
+ RegisterID lateScratch = regT2;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ RegisterID earlyScratch = regT3;
+ RegisterID lateScratch = regT1;
+#endif
+
+ JumpList slowCases;
+
+ loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
+ badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(value, earlyScratch);
+ slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
+#else
+ emitLoad(value, lateScratch, earlyScratch);
+ slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
+#endif
+
+ // We would be loading this into base as in get_by_val, except that the slow
+ // path expects the base to be unclobbered.
+ loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);
+
+ if (rounding == ClampRounding) {
+ ASSERT(elementSize == 1);
+ ASSERT_UNUSED(signedness, signedness == UnsignedTypedArray);
+ Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
+ Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
+ xor32(earlyScratch, earlyScratch);
+ Jump clamped = jump();
+ tooBig.link(this);
+ move(TrustedImm32(0xff), earlyScratch);
+ clamped.link(this);
+ inBounds.link(this);
+ }
+
+ switch (elementSize) {
+ case 1:
+ store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
+ break;
+ case 2:
+ store16(earlyScratch, BaseIndex(lateScratch, property, TimesTwo));
+ break;
+ case 4:
+ store32(earlyScratch, BaseIndex(lateScratch, property, TimesFour));
+ break;
+ default:
+ CRASH();
+ }
+
+ return slowCases;
+}
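// The ClampRounding block implements Uint8ClampedArray stores for int32
// inputs: the unsigned BelowOrEqual check passes [0, 0xff] straight through
// (negatives compare high when treated as unsigned, so they fail it); of the
// failures, values above 0xff are clamped to 0xff and the remaining negatives
// are zeroed via the xor32.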
+
+JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
+{
+ unsigned value = currentInstruction[3].u.operand;
+
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ RegisterID earlyScratch = regT3;
+ RegisterID lateScratch = regT2;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ RegisterID earlyScratch = regT3;
+ RegisterID lateScratch = regT1;
+#endif
+
+ JumpList slowCases;
+
+ loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
+ badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(value, earlyScratch);
+ Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
+ convertInt32ToDouble(earlyScratch, fpRegT0);
+ Jump ready = jump();
+ doubleCase.link(this);
+ slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
+ addPtr(tagTypeNumberRegister, earlyScratch);
+ movePtrToDouble(earlyScratch, fpRegT0);
+ ready.link(this);
+#else
+ emitLoad(value, lateScratch, earlyScratch);
+ Jump doubleCase = branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag));
+ convertInt32ToDouble(earlyScratch, fpRegT0);
+ Jump ready = jump();
+ doubleCase.link(this);
+ slowCases.append(branch32(Above, lateScratch, TrustedImm32(JSValue::LowestTag)));
+ moveIntsToDouble(earlyScratch, lateScratch, fpRegT0, fpRegT1);
+ ready.link(this);
+#endif
+
+ // We would be loading this into base as in get_by_val, except that the slow
+ // path expects the base to be unclobbered.
+ loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);
+
+ switch (elementSize) {
+ case 4:
+ convertDoubleToFloat(fpRegT0, fpRegT0);
+ storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
+ break;
+ case 8:
+ storeDouble(fpRegT0, BaseIndex(lateScratch, property, TimesEight));
+ break;
+ default:
+ CRASH();
+ }
+
+ return slowCases;
+}
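// On JSVALUE64 a boxed double is the raw IEEE bits minus the tagTypeNumber
// constant (mod 2^64), so unboxing adds tagTypeNumberRegister back before
// movePtrToDouble; on JSVALUE32_64 the tag/payload halves are reassembled
// with moveIntsToDouble instead.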
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index ed561a28b..e7c4a479b 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -204,27 +204,82 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
unsigned dst = currentInstruction[1].u.operand;
unsigned base = currentInstruction[2].u.operand;
unsigned property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
emitLoad2(base, regT1, regT0, property, regT3, regT2);
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSite(regT1, regT3, currentInstruction[4].u.arrayProfile);
- addSlowCase(branchTest32(Zero, regT1, TrustedImm32(HasArrayStorage)));
-
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
+ emitArrayProfilingSite(regT1, regT3, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT1);
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITContiguous:
+ slowCases = emitContiguousGetByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+ break;
+ default:
+ CRASH();
+ }
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
- addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+ Label done = label();
+
+#if !ASSERT_DISABLED
+ Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ breakpoint();
+ resultOK.link(this);
+#endif
+
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+}
+
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ContiguousShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
+
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+
+ return slowCases;
}
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+ add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
+ badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
+
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+
+ return slowCases;
+}
+
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -248,10 +303,16 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
+ Label slowPath = label();
+
JITStubCall stubCall(this, cti_op_get_by_val);
stubCall.addArgument(base);
stubCall.addArgument(property);
- stubCall.call(dst);
+ Call call = stubCall.call(dst);
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
emitValueProfilingSite();
}
@@ -260,20 +321,86 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
unsigned base = currentInstruction[1].u.operand;
unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
emitLoad2(base, regT1, regT0, property, regT3, regT2);
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSite(regT1, regT3, currentInstruction[4].u.arrayProfile);
- addSlowCase(branchTest32(Zero, regT1, TrustedImm32(HasArrayStorage)));
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
+ emitArrayProfilingSite(regT1, regT3, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT1);
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITContiguous:
+ slowCases = emitContiguousPutByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+ break;
+ default:
+ CRASH();
+ break;
+ }
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+
+ Label done = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+}
+JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType)
+{
+ unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ContiguousShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ Jump outOfBounds = branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength()));
+
+ Label storeResult = label();
+ emitLoad(value, regT1, regT0);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ Jump done = jump();
+
+ outOfBounds.link(this);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfVectorLength())));
+
+ emitArrayProfileStoreToHoleSpecialCase(profile);
+
+ add32(TrustedImm32(1), regT2, regT1);
+ store32(regT1, Address(regT3, Butterfly::offsetOfPublicLength()));
+ jump().linkTo(storeResult, this);
+
+ done.link(this);
+
emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+ return slowCases;
+}
+
+JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
+{
+ unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ArrayStorageShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
+
Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
Label storeResult(this);
@@ -283,7 +410,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Jump end = jump();
empty.link(this);
- emitArrayProfileStoreToHoleSpecialCase(currentInstruction[4].u.arrayProfile);
+ emitArrayProfileStoreToHoleSpecialCase(profile);
add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
branch32(Below, regT2, Address(regT3, ArrayStorage::lengthOffset())).linkTo(storeResult, this);
@@ -292,6 +419,10 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
jump().linkTo(storeResult, this);
end.link(this);
+
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+
+ return slowCases;
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -303,13 +434,19 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
+ linkSlowCase(iter); // out of bounds
+
+ Label slowPath = label();
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
stubPutByValCall.addArgument(base);
stubPutByValCall.addArgument(property);
stubPutByValCall.addArgument(value);
- stubPutByValCall.call();
+ Call call = stubPutByValCall.call();
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
@@ -616,7 +753,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, stubInfo->bytecodeIndex);
Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
- Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(HasArrayStorage));
+ Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));
// Checks out okay! - get the length from the storage
loadPtr(Address(regT0, JSArray::butterflyOffset()), regT2);
@@ -1025,24 +1162,24 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, Register
ASSERT(sizeof(JSValue) == 8);
if (finalObjectMode == MayBeFinal) {
- Jump isInline = branch32(LessThan, offset, TrustedImm32(inlineStorageCapacity));
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
loadPtr(Address(base, JSObject::butterflyOffset()), base);
neg32(offset);
Jump done = jump();
isInline.link(this);
- addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (inlineStorageCapacity - 2) * sizeof(EncodedJSValue)), base);
+ addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base);
done.link(this);
} else {
#if !ASSERT_DISABLED
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(inlineStorageCapacity));
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
breakpoint();
isOutOfLine.link(this);
#endif
loadPtr(Address(base, JSObject::butterflyOffset()), base);
neg32(offset);
}
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (inlineStorageCapacity - 2) * sizeof(EncodedJSValue)), resultPayload);
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (inlineStorageCapacity - 2) * sizeof(EncodedJSValue)), resultTag);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
}
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
@@ -1067,7 +1204,10 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
load32(addressFor(i), regT3);
sub32(TrustedImm32(1), regT3);
addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- add32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_offsetBase)), regT3);
+ Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+ add32(TrustedImm32(firstOutOfLineOffset), regT3);
+ sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
+ inlineProperty.link(this);
compileGetDirectOffset(regT2, regT1, regT0, regT3);
emitStore(dst, regT1, regT0);
@@ -1098,7 +1238,7 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
int index = currentInstruction[2].u.operand;
int skip = currentInstruction[3].u.operand;
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
ASSERT(skip || !checkTopLevel);
if (checkTopLevel && skip--) {
@@ -1127,7 +1267,7 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
emitLoad(value, regT1, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
ASSERT(skip || !checkTopLevel);
if (checkTopLevel && skip--) {
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index da507838a..1a2c654bc 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -63,6 +63,7 @@
#include "RegExpObject.h"
#include "RegExpPrototype.h"
#include "Register.h"
+#include "RepatchBuffer.h"
#include "SamplingTool.h"
#include "Strong.h"
#include <wtf/StdLibExtras.h>
@@ -224,7 +225,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_
extern "C" {
- __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
{
__asm {
push ebp;
@@ -285,7 +286,7 @@ extern "C" {
#define STACK_LENGTH 104
#elif CPU(SH4)
#define SYMBOL_STRING(name) #name
-/* code (r4), RegisterFile* (r5), CallFrame* (r6), void* unused1 (r7), void* unused2(sp), JSGlobalData (sp)*/
+/* code (r4), JSStack* (r5), CallFrame* (r6), void* unused1 (r7), void* unused2(sp), JSGlobalData (sp)*/
asm volatile (
".text\n"
@@ -458,7 +459,7 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"move $16,$6 # set callFrameRegister" "\n"
"li $17,512 # set timeoutCheckRegister" "\n"
"move $25,$4 # move executableAddress to t9" "\n"
- "sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store registerFile to current stack" "\n"
+ "sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store JSStack to current stack" "\n"
"lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load globalData from previous stack" "\n"
"jalr $25" "\n"
"sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store globalData to current stack" "\n"
@@ -659,7 +660,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(RVCT) && CPU(ARM_THUMB2)
-__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
+__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
{
PRESERVE8
sub sp, sp, # FIRST_STACK_ARGUMENT
@@ -727,7 +728,7 @@ __asm void ctiOpThrowNotCaught()
#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
-__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
+__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*)
{
ARM
stmdb sp!, {r1-r3}
@@ -796,7 +797,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR10) == PRESERVED_R10_OFFSET);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR11) == PRESERVED_R11_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, stack) == REGISTER_FILE_OFFSET);
// The fifth argument is the first item already on the stack.
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, unused1) == FIRST_STACK_ARGUMENT);
@@ -815,7 +816,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS2) == PRESERVED_S2_OFFSET);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, stack) == REGISTER_FILE_OFFSET);
ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, globalData) == GLOBAL_DATA_OFFSET);
#endif
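(These ASSERTs pin the JITStackFrame layout to the offsets hard-coded into the trampoline assembly. The same invariant can be stated at compile time; a sketch with an illustrative struct and offset, not the real frame:

    #include <cstddef>

    // A compile-time restatement of the layout contract the
    // hand-written trampolines rely on.
    struct FrameSketch {
        void* code;
        void* stack;      // the member formerly named registerFile
        void* callFrame;
    };
    static_assert(offsetof(FrameSketch, stack) == sizeof(void*),
                  "trampoline assembly depends on this layout");
)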
@@ -1049,7 +1050,7 @@ static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, Retur
} while (0)
// Helper function for JIT stubs that may throw an exception in the middle of
-// processing a function call. This function rolls back the register file to
+// processing a function call. This function rolls back the stack to
// our caller, so exception processing can proceed from a valid state.
template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot)
{
@@ -1359,12 +1360,12 @@ DEFINE_STUB_FUNCTION(int, timeout_check)
return timeoutChecker.ticksUntilNextCheck();
}
-DEFINE_STUB_FUNCTION(void*, register_file_check)
+DEFINE_STUB_FUNCTION(void*, stack_check)
{
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- if (UNLIKELY(!stackFrame.registerFile->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters])))
+ if (UNLIKELY(!stackFrame.stack->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters])))
return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
return callFrame;
@@ -2191,7 +2192,7 @@ DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
- CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.registerFile, CodeForCall);
+ CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.stack, CodeForCall);
if (!newCallFrame)
return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
@@ -2204,7 +2205,7 @@ DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
- CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.registerFile, CodeForConstruct);
+ CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.stack, CodeForConstruct);
if (!newCallFrame)
return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
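(CommonSlowPaths::arityCheckFor, not shown here, conceptually pads missing arguments with undefined and fails only when the stack cannot grow. A rough stand-in that copies into a vector where the real code slides the CallFrame; 0.0 stands in for jsUndefined():

    #include <cstddef>
    #include <vector>

    // Conceptual arity fixup: parameters the caller omitted must read
    // as undefined.
    void arityAdjust(std::vector<double>& argumentSlots, std::size_t expected)
    {
        if (argumentSlots.size() < expected)
            argumentSlots.resize(expected, 0.0);
    }
)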
@@ -2414,6 +2415,30 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
return returnValue;
}
+static JSValue getByVal(
+ CallFrame* callFrame, JSValue baseValue, JSValue subscript, ReturnAddressPtr returnAddress)
+{
+ if (LIKELY(baseValue.isCell() && subscript.isString())) {
+ if (JSValue result = baseValue.asCell()->fastGetOwnProperty(callFrame, asString(subscript)->value(callFrame)))
+ return result;
+ }
+
+ if (subscript.isUInt32()) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) {
+ ctiPatchCallByReturnAddress(callFrame->codeBlock(), returnAddress, FunctionPtr(cti_op_get_by_val_string));
+ return asString(baseValue)->getIndex(callFrame, i);
+ }
+ return baseValue.get(callFrame, i);
+ }
+
+ if (isName(subscript))
+ return baseValue.get(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
+
+ Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
+ return baseValue.get(callFrame, property);
+}
+
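(The getByVal helper above gives both stubs one shared fallback ladder: string-keyed own-property fast path, then integer indices, then private names, then a full Identifier lookup. Its dispatch order, reduced to a toy sketch in which KeyKind and the return codes stand in for the real JSValue tests:

    enum class KeyKind { String, Index, PrivateName, Other };

    // Cheap, common cases first; fully generic lookup last.
    int getByValRung(KeyKind kind)
    {
        switch (kind) {
        case KeyKind::String:      return 0; // fastGetOwnProperty
        case KeyKind::Index:       return 1; // canGetIndex / get(i)
        case KeyKind::PrivateName: return 2; // privateName() lookup
        case KeyKind::Other:       return 3; // toString + Identifier
        }
        return 3;
    }
)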
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2423,35 +2448,56 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
JSValue baseValue = stackFrame.args[0].jsValue();
JSValue subscript = stackFrame.args[1].jsValue();
- if (LIKELY(baseValue.isCell() && subscript.isString())) {
- if (JSValue result = baseValue.asCell()->fastGetOwnProperty(callFrame, asString(subscript)->value(callFrame))) {
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
- }
- }
+ if (baseValue.isObject() && subscript.isInt32()) {
+ // See if it's worth optimizing this at all.
+ JSObject* object = asObject(baseValue);
+ bool didOptimize = false;
- if (subscript.isUInt32()) {
- uint32_t i = subscript.asUInt32();
- if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) {
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
- JSValue result = asString(baseValue)->getIndex(callFrame, i);
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
+ unsigned bytecodeOffset = callFrame->bytecodeOffsetForNonDFGCode();
+ ASSERT(bytecodeOffset);
+ ByValInfo& byValInfo = callFrame->codeBlock()->getByValInfo(bytecodeOffset - 1);
+ ASSERT(!byValInfo.stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure())) {
+ // Attempt to optimize.
+ JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
+ if (arrayMode != byValInfo.arrayMode) {
+ JIT::compileGetByVal(&callFrame->globalData(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
+ didOptimize = true;
+ }
+ }
+
+ if (!didOptimize) {
+ // If we take the slow path more than 10 times without patching, make sure we
+ // never make that mistake again. Or, if we failed to patch and the object
+ // intercepts indexed gets, don't even wait for 10 times. For cases where we
+ // see non-index-intercepting objects, this gives us 10 iterations' worth of
+ // opportunity to observe that the get_by_val may be polymorphic.
+ if (++byValInfo.slowPathCount >= 10
+ || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
+ // Don't ever try to optimize.
+ RepatchBuffer repatchBuffer(callFrame->codeBlock());
+ repatchBuffer.relinkCallerToFunction(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_generic));
+ }
}
- JSValue result = baseValue.get(callFrame, i);
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
}
+
+ JSValue result = getByVal(callFrame, baseValue, subscript, STUB_RETURN_ADDRESS);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
- if (isName(subscript)) {
- JSValue result = baseValue.get(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
- }
+ CallFrame* callFrame = stackFrame.callFrame;
- Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- JSValue result = baseValue.get(callFrame, property);
- CHECK_FOR_EXCEPTION_AT_END();
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result = getByVal(callFrame, baseValue, subscript, STUB_RETURN_ADDRESS);
+ CHECK_FOR_EXCEPTION();
return JSValue::encode(result);
}
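(Taken together, the bookkeeping above is a small self-patching protocol: try to compile a stub specialised for the observed array shape; after ten unproductive slow-path visits, or as soon as the object is known to intercept indexed access, relink the call site to the generic stub for good. A hedged sketch of just that decision, where the threshold of 10 matches the code and everything else is illustrative:

    struct ByValInfoSketch {
        unsigned slowPathCount = 0;
        int arrayMode = -1; // -1: no specialised stub compiled yet
    };

    enum class Action { Compile, RelinkToGeneric, Stay };

    Action onSlowPath(ByValInfoSketch& info, int observedMode,
                      bool optimizable, bool interceptsIndexedAccess)
    {
        if (optimizable && observedMode != info.arrayMode) {
            info.arrayMode = observedMode; // compile a specialised stub
            return Action::Compile;
        }
        if (++info.slowPathCount >= 10 || interceptsIndexedAccess)
            return Action::RelinkToGeneric; // permanent, via RepatchBuffer
        return Action::Stay; // give patching another chance next time
    }
)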
@@ -2502,23 +2548,14 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(void, op_put_by_val)
+static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value)
{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalData* globalData = stackFrame.globalData;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
- JSValue value = stackFrame.args[2].jsValue();
-
if (LIKELY(subscript.isUInt32())) {
uint32_t i = subscript.asUInt32();
if (baseValue.isObject()) {
JSObject* object = asObject(baseValue);
if (object->canSetIndexQuickly(i))
- object->setIndexQuickly(*globalData, i, value);
+ object->setIndexQuickly(callFrame->globalData(), i, value);
else
object->methodTable()->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
} else
@@ -2528,11 +2565,73 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
baseValue.put(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
} else {
Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
+ if (!callFrame->globalData().exception) { // Don't put to an object if toString threw an exception.
PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
baseValue.put(callFrame, property, value, slot);
}
}
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ if (baseValue.isObject() && subscript.isInt32()) {
+ // See if it's worth optimizing at all.
+ JSObject* object = asObject(baseValue);
+ bool didOptimize = false;
+
+ unsigned bytecodeOffset = callFrame->bytecodeOffsetForNonDFGCode();
+ ASSERT(bytecodeOffset);
+ ByValInfo& byValInfo = callFrame->codeBlock()->getByValInfo(bytecodeOffset - 1);
+ ASSERT(!byValInfo.stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure())) {
+ // Attempt to optimize.
+ JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
+ if (arrayMode != byValInfo.arrayMode) {
+ JIT::compilePutByVal(&callFrame->globalData(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
+ didOptimize = true;
+ }
+ }
+
+ if (!didOptimize) {
+ // If we take the slow path more than 10 times without patching, make sure we
+ // never make that mistake again. Or, if we failed to patch and the object
+ // intercepts indexed access, don't even wait for 10 times. For cases where we
+ // see non-index-intercepting objects, this gives us 10 iterations' worth of
+ // opportunity to observe that the put_by_val may be polymorphic.
+ if (++byValInfo.slowPathCount >= 10
+ || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
+ // Don't ever try to optimize.
+ RepatchBuffer repatchBuffer(callFrame->codeBlock());
+ repatchBuffer.relinkCallerToFunction(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_generic));
+ }
+ }
+ }
+
+ putByVal(callFrame, baseValue, subscript, value);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_put_by_val_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
+
+ putByVal(callFrame, baseValue, subscript, value);
CHECK_FOR_EXCEPTION_AT_END();
}
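(op_put_by_val applies the identical throttle; only the compile entry point, JIT::compilePutByVal, and the relink target, cti_op_put_by_val_generic, differ. If the decision logic were factored out, the two stubs would differ only in a configuration record along these lines — a hypothetical sketch, not code from the tree:

    // The get and put stubs would share the policy and differ only in
    // these two slots.
    struct ByValPolicySketch {
        void (*compileStub)();   // JIT::compileGetByVal / compilePutByVal
        const void* genericStub; // cti_op_get_by_val_generic / _put_ variant
    };
)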
@@ -2582,12 +2681,12 @@ DEFINE_STUB_FUNCTION(void*, op_load_varargs)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- RegisterFile* registerFile = stackFrame.registerFile;
+ JSStack* stack = stackFrame.stack;
JSValue thisValue = stackFrame.args[0].jsValue();
JSValue arguments = stackFrame.args[1].jsValue();
int firstFreeRegister = stackFrame.args[2].int32();
- CallFrame* newCallFrame = loadVarargs(callFrame, registerFile, thisValue, arguments, firstFreeRegister);
+ CallFrame* newCallFrame = loadVarargs(callFrame, stack, thisValue, arguments, firstFreeRegister);
if (!newCallFrame)
VM_THROW_EXCEPTION();
return newCallFrame;
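(loadVarargs, defined elsewhere, builds a frame whose arguments come from an array-like value and returns null when the JSStack cannot grow. A rough stand-in for its copying behaviour, with std::vector and double standing in for the stack and JSValue:

    #include <cstddef>
    #include <vector>

    // Lay out thisValue then the arguments from firstFreeRegister
    // onward; report failure instead of growing past capacity,
    // mirroring the null-frame error path above.
    bool loadVarargsSketch(std::vector<double>& stack,
                           std::size_t firstFreeRegister, double thisValue,
                           const std::vector<double>& arguments)
    {
        std::size_t needed = firstFreeRegister + 1 + arguments.size();
        if (needed > stack.capacity())
            return false; // caller raises, as VM_THROW_EXCEPTION() does
        stack.resize(needed);
        stack[firstFreeRegister] = thisValue;
        for (std::size_t i = 0; i < arguments.size(); ++i)
            stack[firstFreeRegister + 1 + i] = arguments[i];
        return true;
    }
)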
@@ -3423,8 +3522,8 @@ MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerato
{
CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
if (entry.isNewEntry)
- entry.iterator->second = generator(globalData);
- return entry.iterator->second;
+ entry.iterator->value = generator(globalData);
+ return entry.iterator->value;
}
NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, NativeFunction constructor)
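(Beyond the .second to .value rename tracking WTF::HashMap's AddResult change, ctiStub is the add-then-fill memoisation idiom: a single hash lookup serves both hit and miss. The same pattern with standard containers — a sketch, not the WTF types:

    #include <unordered_map>

    using Thunk = int;                 // stands in for MacroAssemblerCodeRef
    using Generator = Thunk (*)();

    // Insert a default value, then fill it in only when the entry is
    // new, exactly as AddResult allows.
    Thunk cachedThunk(std::unordered_map<Generator, Thunk>& cache, Generator g)
    {
        auto result = cache.try_emplace(g, Thunk());
        if (result.second)             // newly inserted
            result.first->second = g();
        return result.first->second;
    }
)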
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index a4619c816..ecf415d1f 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -52,13 +52,13 @@ namespace JSC {
class JSGlobalObject;
class JSObject;
class JSPropertyNameIterator;
+ class JSStack;
class JSValue;
class JSValueEncodedAsPointer;
class NativeExecutable;
class Profiler;
class PropertySlot;
class PutPropertySlot;
- class RegisterFile;
class RegExp;
class Structure;
@@ -101,7 +101,7 @@ namespace JSC {
void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
void* code;
- RegisterFile* registerFile;
+ JSStack* stack;
CallFrame* callFrame;
void* unused1;
void* unused2;
@@ -137,7 +137,7 @@ namespace JSC {
void* savedEIP;
void* code;
- RegisterFile* registerFile;
+ JSStack* stack;
CallFrame* callFrame;
void* unused1;
void* unused2;
@@ -167,7 +167,7 @@ namespace JSC {
void* preservedR11;
// These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved)
- RegisterFile* registerFile;
+ JSStack* stack;
CallFrame* callFrame;
// These arguments are passed on the stack.
@@ -196,7 +196,7 @@ namespace JSC {
void* preservedR11;
void* preservedLink;
- RegisterFile* registerFile;
+ JSStack* stack;
CallFrame* callFrame;
void* unused1;
@@ -228,7 +228,7 @@ namespace JSC {
ReturnAddressPtr thunkReturnAddress;
// These arguments are passed in a1..a3 (a0 contained the entry code pointer, which is not preserved)
- RegisterFile* registerFile;
+ JSStack* stack;
CallFrame* callFrame;
void* unused1;
@@ -251,7 +251,7 @@ namespace JSC {
void* savedR14;
void* savedTimeoutReg;
- RegisterFile* registerFile;
+ JSStack* stack;
CallFrame* callFrame;
JSValue* exception;
void* unused1;
@@ -284,7 +284,7 @@ namespace JSC {
extern "C" void ctiVMThrowTrampoline();
extern "C" void ctiOpThrowNotCaught();
- extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*);
+ extern "C" EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, JSGlobalData*);
#if ENABLE(DFG_JIT)
extern "C" void ctiTrampolineEnd();
@@ -370,6 +370,7 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+ EncodedJSValue JIT_STUB cti_op_get_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION) WTF_INTERNAL;
EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION) WTF_INTERNAL;
@@ -446,6 +447,7 @@ extern "C" {
void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+ void JIT_STUB cti_op_put_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_put_global_var_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
@@ -462,7 +464,7 @@ extern "C" {
void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION) WTF_INTERNAL;
- void* JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
+ void* JIT_STUB cti_stack_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION) REFERENCED_FROM_ASM WTF_INTERNAL;
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 52f1dd0b0..8d9a0c800 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -29,10 +29,10 @@
#include "BytecodeConventions.h"
#include "JITCode.h"
#include "JITStubs.h"
+#include "JSStack.h"
#include "JSString.h"
#include "JSValue.h"
#include "MacroAssembler.h"
-#include "RegisterFile.h"
#include <wtf/AlwaysInline.h>
#include <wtf/Vector.h>
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 0fe606476..560f7c833 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -40,7 +40,7 @@ namespace JSC {
SpecializedThunkJIT(int expectedArgCount)
{
// Check that we have the expected number of arguments
- m_failures.append(branch32(NotEqual, payloadFor(RegisterFile::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
+ m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
}
void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
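(The expectedArgCount + 1 comparison works because the JSStack::ArgumentCount slot counts the this value along with the declared arguments. The same predicate in sketch form:

    // The stored count includes `this`, hence the + 1 in the branch above.
    bool argumentCountMatches(unsigned storedCount, int expectedArgCount)
    {
        return storedCount == static_cast<unsigned>(expectedArgCount) + 1;
    }
)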
@@ -83,7 +83,7 @@ namespace JSC {
{
if (src != regT0)
move(src, regT0);
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
@@ -108,7 +108,7 @@ namespace JSC {
lowNonZero.link(this);
highNonZero.link(this);
#endif
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
@@ -117,7 +117,7 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsInt32();
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
@@ -126,7 +126,7 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsJSCell();
- loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
+ loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}