author     Simon Hausmann <simon.hausmann@digia.com>   2012-10-17 16:21:14 +0200
committer  Simon Hausmann <simon.hausmann@digia.com>   2012-10-17 16:21:14 +0200
commit     8995b83bcbfbb68245f779b64e5517627c6cc6ea (patch)
tree       17985605dab9263cc2444bd4d45f189e142cca7c /Source/JavaScriptCore/dfg
parent     b9c9652036d5e9f1e29c574f40bc73a35c81ace6 (diff)
download   qtwebkit-8995b83bcbfbb68245f779b64e5517627c6cc6ea.tar.gz
Imported WebKit commit cf4f8fc6f19b0629f51860cb2d4b25e139d07e00 (http://svn.webkit.org/repository/webkit/trunk@131592)
New snapshot that includes the build fixes for Mac OS X 10.6 and earlier, as well
as the previously cherry-picked changes.
Diffstat (limited to 'Source/JavaScriptCore/dfg')
34 files changed, 2036 insertions, 900 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index db0861c7d..da5682f55 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -859,12 +859,16 @@ bool AbstractState::execute(unsigned indexInBlock)
             forNode(node.child2()).filter(SpecInt32);
             forNode(nodeIndex).makeTop();
             break;
+        case IN_BOUNDS_CONTIGUOUS_MODES:
         case IN_BOUNDS_ARRAY_STORAGE_MODES:
             forNode(node.child2()).filter(SpecInt32);
             forNode(nodeIndex).makeTop();
             break;
+        case OUT_OF_BOUNDS_CONTIGUOUS_MODES:
         case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES:
-        case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+        case SLOW_PUT_ARRAY_STORAGE_MODES:
+        case ALL_EFFECTFUL_MODES:
+            forNode(node.child1()).filter(SpecCell);
             forNode(node.child2()).filter(SpecInt32);
             clobberWorld(node.codeOrigin, indexInBlock);
             forNode(nodeIndex).makeTop();
@@ -908,6 +912,9 @@ bool AbstractState::execute(unsigned indexInBlock)
             forNode(node.child2()).filter(SpecInt32);
             forNode(nodeIndex).set(SpecDouble);
             break;
+        default:
+            ASSERT_NOT_REACHED();
+            break;
         }
         break;
     }
@@ -915,6 +922,7 @@ bool AbstractState::execute(unsigned indexInBlock)
     case PutByVal:
     case PutByValAlias: {
         node.setCanExit(true);
+        Edge child1 = m_graph.varArgChild(node, 0);
         Edge child2 = m_graph.varArgChild(node, 1);
         Edge child3 = m_graph.varArgChild(node, 2);
         switch (modeForPut(node.arrayMode())) {
@@ -924,11 +932,17 @@ bool AbstractState::execute(unsigned indexInBlock)
         case Array::Generic:
             clobberWorld(node.codeOrigin, indexInBlock);
             break;
+        case IN_BOUNDS_CONTIGUOUS_MODES:
+        case CONTIGUOUS_TO_TAIL_MODES:
         case IN_BOUNDS_ARRAY_STORAGE_MODES:
             forNode(child2).filter(SpecInt32);
             break;
+        case OUT_OF_BOUNDS_CONTIGUOUS_MODES:
+        case ARRAY_STORAGE_TO_HOLE_MODES:
         case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES:
-        case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+        case SLOW_PUT_ARRAY_STORAGE_MODES:
+        case ALL_EFFECTFUL_MODES:
+            forNode(child1).filter(SpecCell);
             forNode(child2).filter(SpecInt32);
             clobberWorld(node.codeOrigin, indexInBlock);
             break;
@@ -1110,16 +1124,15 @@ bool AbstractState::execute(unsigned indexInBlock)
         break;
 
     case NewArrayBuffer:
-        // Unless we're having a bad time, this node can change its mind about what structure
-        // it uses.
-        node.setCanExit(false);
-        forNode(nodeIndex).set(SpecArray);
+        node.setCanExit(true);
+        forNode(nodeIndex).set(m_graph.globalObjectFor(node.codeOrigin)->arrayStructure());
+        m_haveStructures = true;
         break;
 
     case NewArrayWithSize:
         node.setCanExit(true);
         forNode(node.child1()).filter(SpecInt32);
-        forNode(nodeIndex).set(m_graph.globalObjectFor(node.codeOrigin)->arrayStructure());
+        forNode(nodeIndex).set(SpecArray);
         m_haveStructures = true;
         break;
@@ -1367,6 +1380,7 @@ bool AbstractState::execute(unsigned indexInBlock)
         case Array::String:
             forNode(node.child1()).filter(SpecString);
             break;
+        case ALL_CONTIGUOUS_MODES:
         case ALL_ARRAY_STORAGE_MODES:
             // This doesn't filter anything meaningful right now. We may want to add
             // CFA tracking of array mode speculations, but we don't have that, yet.
@@ -1410,9 +1424,11 @@ bool AbstractState::execute(unsigned indexInBlock)
     }
 
     case Arrayify: {
         switch (node.arrayMode()) {
-        case EFFECTFUL_NON_ARRAY_ARRAY_STORAGE_MODES:
+        case ALL_EFFECTFUL_MODES:
             node.setCanExit(true);
             forNode(node.child1()).filter(SpecCell);
+            if (node.child2())
+                forNode(node.child2()).filter(SpecInt32);
             forNode(nodeIndex).clear();
             clobberStructures(indexInBlock);
             break;
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.h b/Source/JavaScriptCore/dfg/DFGAbstractState.h
index d2bc1a551..ec1a06231 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.h
@@ -185,7 +185,7 @@ public:
     void reset();
 
     // Abstractly executes the given node. The new abstract state is stored into an
-    // abstract register file stored in *this. Loads of local variables (that span
+    // abstract stack stored in *this. Loads of local variables (that span
     // basic blocks) interrogate the basic block's notion of the state at the head.
     // Stores to local variables are handled in endBasicBlock(). This returns true
     // if execution should continue past this node. Notably, it will return true
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
index 513357424..bb61a59e6 100644
--- a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp
@@ -210,7 +210,7 @@ public:
                 // Make sure that if it's a variable that we think is aliased to
                 // the arguments, that we know that it might actually not be.
                 ArgumentsAliasingData& data =
-                    m_argumentsAliasing.find(variableAccessData)->second;
+                    m_argumentsAliasing.find(variableAccessData)->value;
                 data.mergeNonArgumentsAssignment();
                 data.mergeCallContext(node.codeOrigin.inlineCallFrame);
                 break;
@@ -228,7 +228,7 @@ public:
                     break;
                 }
                 ArgumentsAliasingData& data =
-                    m_argumentsAliasing.find(variableAccessData)->second;
+                    m_argumentsAliasing.find(variableAccessData)->value;
                 data.mergeArgumentsAssignment();
                 // This ensures that the variable's uses are in the same context as
                 // the arguments it is aliasing.
@@ -243,7 +243,7 @@ public:
                 if (variableAccessData->isCaptured())
                     break;
                 ArgumentsAliasingData& data =
-                    m_argumentsAliasing.find(variableAccessData)->second;
+                    m_argumentsAliasing.find(variableAccessData)->value;
                 data.mergeCallContext(node.codeOrigin.inlineCallFrame);
                 break;
             }
@@ -253,7 +253,7 @@ public:
                 if (variableAccessData->isCaptured())
                     break;
                 ArgumentsAliasingData& data =
-                    m_argumentsAliasing.find(variableAccessData)->second;
+                    m_argumentsAliasing.find(variableAccessData)->value;
                 data.mergeCallContext(node.codeOrigin.inlineCallFrame);
 
                 // If a variable is used in a flush then by definition it escapes.
@@ -266,7 +266,7 @@ public:
                 if (variableAccessData->isCaptured())
                     break;
                 ArgumentsAliasingData& data =
-                    m_argumentsAliasing.find(variableAccessData)->second;
+                    m_argumentsAliasing.find(variableAccessData)->value;
                 data.mergeNonArgumentsAssignment();
                 data.mergeCallContext(node.codeOrigin.inlineCallFrame);
                 break;
@@ -350,7 +350,7 @@ public:
                 }
 
                 ArgumentsAliasingData& data =
-                    m_argumentsAliasing.find(variableAccessData)->second;
+                    m_argumentsAliasing.find(variableAccessData)->value;
                 if (data.isValid())
                     continue;
@@ -369,7 +369,7 @@ public:
                     dataLog("Captured");
                 else {
                     ArgumentsAliasingData& data =
-                        m_argumentsAliasing.find(variableAccessData)->second;
+                        m_argumentsAliasing.find(variableAccessData)->value;
                     bool first = true;
                     if (data.callContextIsValid()) {
                         if (!first)
@@ -441,7 +441,7 @@ public:
                 // things. Note also that the SetLocal should become dead as soon as
                 // we replace all uses of this variable with GetMyArgumentsLength and
                 // GetMyArgumentByVal.
-                ASSERT(m_argumentsAliasing.find(variableAccessData)->second.isValid());
+                ASSERT(m_argumentsAliasing.find(variableAccessData)->value.isValid());
                 changed |= variableAccessData->mergeIsArgumentsAlias(true);
                 break;
             }
@@ -685,7 +685,7 @@ private:
             if (variableAccessData->isCaptured())
                 break;
-            ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->second;
+            ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->value;
             data.escapes = true;
             break;
         }
@@ -734,7 +734,7 @@ private:
         if (variableAccessData->isCaptured())
             return;
-        ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->second;
+        ArgumentsAliasingData& data = m_argumentsAliasing.find(variableAccessData)->value;
         data.mergeCallContext(node.codeOrigin.inlineCallFrame);
     }
@@ -756,7 +756,7 @@ private:
             if (variableAccessData->isCaptured())
                 break;
             ArgumentsAliasingData& data =
-                m_argumentsAliasing.find(variableAccessData)->second;
+                m_argumentsAliasing.find(variableAccessData)->value;
             if (!data.isValid())
                 break;
@@ -792,7 +792,7 @@ private:
                 && !m_createsArguments.contains(child.codeOrigin.inlineCallFrame);
             bool isAliasedArgumentsRegister =
                 !variableAccessData->isCaptured()
-                && m_argumentsAliasing.find(variableAccessData)->second.isValid()
+                && m_argumentsAliasing.find(variableAccessData)->value.isValid()
                 && !m_createsArguments.contains(child.codeOrigin.inlineCallFrame);
             if (!isDeadArgumentsRegister && !isAliasedArgumentsRegister)
                 break;
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
index 12c9640c8..3985d769c 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.cpp
@@ -39,8 +39,14 @@ Array::Mode fromObserved(ArrayProfile* profile, Array::Action action, bool makeS
         return Array::Unprofiled;
     case asArrayModes(NonArray):
         if (action == Array::Write && !profile->mayInterceptIndexedAccesses())
-            return Array::BlankToArrayStorage; // FIXME: we don't know whether to go to slow put mode, or not. This is a decent guess.
+            return Array::ToContiguous; // FIXME: we don't know whether to go to contiguous or array storage. We're making a static guess here. In future we should use exit profiling for this.
         return Array::Undecided;
+    case asArrayModes(NonArrayWithContiguous):
+        return makeSafe ? Array::ContiguousOutOfBounds : (profile->mayStoreToHole() ? Array::ContiguousToTail : Array::Contiguous);
+    case asArrayModes(ArrayWithContiguous):
+        return makeSafe ? Array::ArrayWithContiguousOutOfBounds : (profile->mayStoreToHole() ? Array::ArrayWithContiguousToTail : Array::ArrayWithContiguous);
+    case asArrayModes(NonArrayWithContiguous) | asArrayModes(ArrayWithContiguous):
+        return makeSafe ? Array::PossiblyArrayWithContiguousOutOfBounds : (profile->mayStoreToHole() ? Array::PossiblyArrayWithContiguousToTail : Array::PossiblyArrayWithContiguous);
     case asArrayModes(NonArrayWithArrayStorage):
         return makeSafe ? Array::ArrayStorageOutOfBounds : (profile->mayStoreToHole() ? Array::ArrayStorageToHole : Array::ArrayStorage);
     case asArrayModes(NonArrayWithSlowPutArrayStorage):
@@ -56,14 +62,25 @@ Array::Mode fromObserved(ArrayProfile* profile, Array::Action action, bool makeS
     case asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage):
     case asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage) | asArrayModes(ArrayWithSlowPutArrayStorage):
         return Array::PossiblyArrayWithSlowPutArrayStorage;
+    case asArrayModes(NonArrayWithContiguous) | asArrayModes(NonArrayWithArrayStorage):
+        return Array::ToArrayStorage;
+    case asArrayModes(ArrayWithContiguous) | asArrayModes(ArrayWithArrayStorage):
+        return Array::ArrayToArrayStorage;
+    case asArrayModes(NonArrayWithContiguous) | asArrayModes(NonArrayWithArrayStorage) | asArrayModes(ArrayWithContiguous) | asArrayModes(ArrayWithArrayStorage):
+        return Array::PossiblyArrayToArrayStorage;
+    case asArrayModes(NonArray) | asArrayModes(NonArrayWithContiguous):
+        if (action == Array::Write && !profile->mayInterceptIndexedAccesses())
+            return Array::ToContiguous;
+        return Array::Undecided;
+    case asArrayModes(NonArray) | asArrayModes(NonArrayWithContiguous) | asArrayModes(NonArrayWithArrayStorage):
     case asArrayModes(NonArray) | asArrayModes(NonArrayWithArrayStorage):
         if (action == Array::Write && !profile->mayInterceptIndexedAccesses())
-            return Array::BlankToArrayStorage;
+            return Array::ToArrayStorage;
         return Array::Undecided;
     case asArrayModes(NonArray) | asArrayModes(NonArrayWithSlowPutArrayStorage):
     case asArrayModes(NonArray) | asArrayModes(NonArrayWithArrayStorage) | asArrayModes(NonArrayWithSlowPutArrayStorage):
         if (action == Array::Write && !profile->mayInterceptIndexedAccesses())
-            return Array::BlankToSlowPutArrayStorage;
+            return Array::ToSlowPutArrayStorage;
         return Array::Undecided;
     default:
         // We know that this is possibly a kind of array for which, though there is no
@@ -144,6 +161,22 @@ bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode)
     case Array::String:
         return isStringSpeculation(value.m_type);
 
+    case Array::Contiguous:
+    case Array::ContiguousToTail:
+    case Array::ContiguousOutOfBounds:
+    case Array::PossiblyArrayWithContiguous:
+    case Array::PossiblyArrayWithContiguousToTail:
+    case Array::PossiblyArrayWithContiguousOutOfBounds:
+        return value.m_currentKnownStructure.hasSingleton()
+            && hasContiguous(value.m_currentKnownStructure.singleton()->indexingType());
+
+    case Array::ArrayWithContiguous:
+    case Array::ArrayWithContiguousToTail:
+    case Array::ArrayWithContiguousOutOfBounds:
+        return value.m_currentKnownStructure.hasSingleton()
+            && hasContiguous(value.m_currentKnownStructure.singleton()->indexingType())
+            && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
+
     case Array::ArrayStorage:
     case Array::ArrayStorageToHole:
     case Array::ArrayStorageOutOfBounds:
@@ -151,26 +184,26 @@ bool modeAlreadyChecked(AbstractValue& value, Array::Mode arrayMode)
     case Array::PossiblyArrayWithArrayStorageToHole:
     case Array::PossiblyArrayWithArrayStorageOutOfBounds:
         return value.m_currentKnownStructure.hasSingleton()
-            && (value.m_currentKnownStructure.singleton()->indexingType() & HasArrayStorage);
+            && hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType());
 
     case Array::SlowPutArrayStorage:
     case Array::PossiblyArrayWithSlowPutArrayStorage:
         return value.m_currentKnownStructure.hasSingleton()
-            && (value.m_currentKnownStructure.singleton()->indexingType() & (HasArrayStorage | HasSlowPutArrayStorage));
+            && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType());
 
     case Array::ArrayWithArrayStorage:
     case Array::ArrayWithArrayStorageToHole:
     case Array::ArrayWithArrayStorageOutOfBounds:
         return value.m_currentKnownStructure.hasSingleton()
-            && (value.m_currentKnownStructure.singleton()->indexingType() & HasArrayStorage)
+            && hasFastArrayStorage(value.m_currentKnownStructure.singleton()->indexingType())
             && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
 
     case Array::ArrayWithSlowPutArrayStorage:
         return value.m_currentKnownStructure.hasSingleton()
-            && (value.m_currentKnownStructure.singleton()->indexingType() & (HasArrayStorage | HasSlowPutArrayStorage))
+            && hasArrayStorage(value.m_currentKnownStructure.singleton()->indexingType())
             && (value.m_currentKnownStructure.singleton()->indexingType() & IsArray);
 
-    case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+    case ALL_EFFECTFUL_MODES:
         return false;
 
     case Array::Arguments:
@@ -225,6 +258,24 @@ const char* modeToString(Array::Mode mode)
         return "ForceExit";
     case Array::String:
         return "String";
+    case Array::Contiguous:
+        return "Contiguous";
+    case Array::ContiguousToTail:
+        return "ContiguousToTail";
+    case Array::ContiguousOutOfBounds:
+        return "ContiguousOutOfBounds";
+    case Array::ArrayWithContiguous:
+        return "ArrayWithContiguous";
+    case Array::ArrayWithContiguousToTail:
+        return "ArrayWithContiguousToTail";
+    case Array::ArrayWithContiguousOutOfBounds:
+        return "ArrayWithContiguousOutOfBounds";
+    case Array::PossiblyArrayWithContiguous:
+        return "PossiblyArrayWithContiguous";
+    case Array::PossiblyArrayWithContiguousToTail:
+        return "PossiblyArrayWithContiguousToTail";
+    case Array::PossiblyArrayWithContiguousOutOfBounds:
+        return "PossiblyArrayWithContiguousOutOfBounds";
     case Array::ArrayStorage:
         return "ArrayStorage";
     case Array::ArrayStorageToHole:
@@ -249,10 +300,16 @@ const char* modeToString(Array::Mode mode)
         return "PossiblyArrayWithSlowPutArrayStorage";
     case Array::PossiblyArrayWithArrayStorageOutOfBounds:
         return "PossiblyArrayWithArrayStorageOutOfBounds";
-    case Array::BlankToArrayStorage:
-        return "BlankToArrayStorage";
-    case Array::BlankToSlowPutArrayStorage:
-        return "BlankToSlowPutArrayStorage";
+    case Array::ToContiguous:
+        return "ToContiguous";
+    case Array::ToArrayStorage:
+        return "ToArrayStorage";
+    case Array::ToSlowPutArrayStorage:
+        return "ToSlowPutArrayStorage";
+    case Array::ArrayToArrayStorage:
+        return "ArrayToArrayStorage";
+    case Array::PossiblyArrayToArrayStorage:
+        return "PossiblyArrayToArrayStorage";
     case Array::Arguments:
         return "Arguments";
     case Array::Int8Array:
diff --git a/Source/JavaScriptCore/dfg/DFGArrayMode.h b/Source/JavaScriptCore/dfg/DFGArrayMode.h
index d4be9c0eb..a666bb83f 100644
--- a/Source/JavaScriptCore/dfg/DFGArrayMode.h
+++ b/Source/JavaScriptCore/dfg/DFGArrayMode.h
@@ -54,6 +54,15 @@ enum Mode {
     String,
 
     // Modes of conventional indexed storage where the check is non side-effecting.
+    Contiguous,
+    ContiguousToTail,
+    ContiguousOutOfBounds,
+    ArrayWithContiguous,
+    ArrayWithContiguousToTail,
+    ArrayWithContiguousOutOfBounds,
+    PossiblyArrayWithContiguous,
+    PossiblyArrayWithContiguousToTail,
+    PossiblyArrayWithContiguousOutOfBounds,
     ArrayStorage,
     ArrayStorageToHole,
     SlowPutArrayStorage,
@@ -68,8 +77,11 @@ enum Mode {
     PossiblyArrayWithArrayStorageOutOfBounds,
 
     // Modes of conventional indexed storage where the check is side-effecting.
-    BlankToArrayStorage,
-    BlankToSlowPutArrayStorage,
+    ToContiguous,
+    ToArrayStorage,
+    ArrayToArrayStorage,
+    PossiblyArrayToArrayStorage,
+    ToSlowPutArrayStorage,
 
     Arguments,
     Int8Array,
@@ -89,6 +101,32 @@ enum Mode {
 // have the word "ArrayStorage" in them.
 
 // First: helpers for non-side-effecting checks.
+#define NON_ARRAY_CONTIGUOUS_MODES \
+    Array::Contiguous: \
+    case Array::ContiguousToTail: \
+    case Array::ContiguousOutOfBounds: \
+    case Array::PossiblyArrayWithContiguous: \
+    case Array::PossiblyArrayWithContiguousToTail: \
+    case Array::PossiblyArrayWithContiguousOutOfBounds
+#define ARRAY_WITH_CONTIGUOUS_MODES \
+    Array::ArrayWithContiguous: \
+    case Array::ArrayWithContiguousToTail: \
+    case Array::ArrayWithContiguousOutOfBounds
+#define ALL_CONTIGUOUS_MODES \
+    NON_ARRAY_CONTIGUOUS_MODES: \
+    case ARRAY_WITH_CONTIGUOUS_MODES
+#define IN_BOUNDS_CONTIGUOUS_MODES \
+    Array::Contiguous: \
+    case Array::ArrayWithContiguous: \
+    case Array::PossiblyArrayWithContiguous
+#define CONTIGUOUS_TO_TAIL_MODES \
+    Array::ContiguousToTail: \
+    case Array::ArrayWithContiguousToTail: \
+    case Array::PossiblyArrayWithContiguousToTail
+#define OUT_OF_BOUNDS_CONTIGUOUS_MODES \
+    Array::ContiguousOutOfBounds: \
+    case Array::ArrayWithContiguousOutOfBounds: \
+    case Array::PossiblyArrayWithContiguousOutOfBounds
 #define NON_ARRAY_ARRAY_STORAGE_MODES \
     Array::ArrayStorage: \
     case Array::ArrayStorageToHole: \
@@ -106,33 +144,43 @@ enum Mode {
 #define ALL_ARRAY_STORAGE_MODES \
     NON_ARRAY_ARRAY_STORAGE_MODES: \
     case ARRAY_WITH_ARRAY_STORAGE_MODES
+#define IN_BOUNDS_ARRAY_STORAGE_MODES \
+    Array::ArrayStorage: \
+    case Array::ArrayWithArrayStorage: \
+    case Array::PossiblyArrayWithArrayStorage
 #define ARRAY_STORAGE_TO_HOLE_MODES \
     Array::ArrayStorageToHole: \
     case Array::ArrayWithArrayStorageToHole: \
     case Array::PossiblyArrayWithArrayStorageToHole
-#define IN_BOUNDS_ARRAY_STORAGE_MODES \
-    ARRAY_STORAGE_TO_HOLE_MODES: \
-    case Array::ArrayStorage: \
-    case Array::ArrayWithArrayStorage: \
-    case Array::PossiblyArrayWithArrayStorage
 #define SLOW_PUT_ARRAY_STORAGE_MODES \
     Array::SlowPutArrayStorage: \
     case Array::ArrayWithSlowPutArrayStorage: \
     case Array::PossiblyArrayWithSlowPutArrayStorage
 #define OUT_OF_BOUNDS_ARRAY_STORAGE_MODES \
-    SLOW_PUT_ARRAY_STORAGE_MODES: \
-    case Array::ArrayStorageOutOfBounds: \
+    Array::ArrayStorageOutOfBounds: \
     case Array::ArrayWithArrayStorageOutOfBounds: \
     case Array::PossiblyArrayWithArrayStorageOutOfBounds
 
 // Next: helpers for side-effecting checks.
-#define EFFECTFUL_NON_ARRAY_ARRAY_STORAGE_MODES \
-    Array::BlankToArrayStorage: \
-    case Array::BlankToSlowPutArrayStorage
-#define ALL_EFFECTFUL_ARRAY_STORAGE_MODES \
-    EFFECTFUL_NON_ARRAY_ARRAY_STORAGE_MODES
-#define SLOW_PUT_EFFECTFUL_ARRAY_STORAGE_MODES \
-    Array::BlankToSlowPutArrayStorage
+#define NON_ARRAY_EFFECTFUL_MODES \
+    Array::ToContiguous: \
+    case Array::ToArrayStorage: \
+    case Array::ToSlowPutArrayStorage: \
+    case Array::PossiblyArrayToArrayStorage
+#define ARRAY_EFFECTFUL_MODES \
+    Array::ArrayToArrayStorage
+#define ALL_EFFECTFUL_CONTIGUOUS_MODES \
+    Array::ToContiguous
+#define ALL_EFFECTFUL_ARRAY_STORAGE_MODES \
+    Array::ToArrayStorage: \
+    case Array::ToSlowPutArrayStorage: \
+    case Array::ArrayToArrayStorage: \
+    case Array::PossiblyArrayToArrayStorage
+#define SLOW_PUT_EFFECTFUL_ARRAY_STORAGE_MODES \
+    Array::ToSlowPutArrayStorage
+#define ALL_EFFECTFUL_MODES \
+    ALL_EFFECTFUL_CONTIGUOUS_MODES: \
+    case ALL_EFFECTFUL_ARRAY_STORAGE_MODES
 
 Array::Mode fromObserved(ArrayProfile*, Array::Action, bool makeSafe);
 
@@ -145,8 +193,9 @@ const char* modeToString(Array::Mode);
 inline bool modeUsesButterfly(Array::Mode arrayMode)
 {
     switch (arrayMode) {
+    case ALL_CONTIGUOUS_MODES:
     case ALL_ARRAY_STORAGE_MODES:
-    case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+    case ALL_EFFECTFUL_MODES:
         return true;
     default:
         return false;
@@ -156,7 +205,9 @@ inline bool modeUsesButterfly(Array::Mode arrayMode)
 inline bool modeIsJSArray(Array::Mode arrayMode)
 {
     switch (arrayMode) {
+    case ARRAY_WITH_CONTIGUOUS_MODES:
     case ARRAY_WITH_ARRAY_STORAGE_MODES:
+    case ARRAY_EFFECTFUL_MODES:
         return true;
     default:
         return false;
@@ -166,6 +217,9 @@ inline bool modeIsJSArray(Array::Mode arrayMode)
 inline bool isInBoundsAccess(Array::Mode arrayMode)
 {
     switch (arrayMode) {
+    case IN_BOUNDS_CONTIGUOUS_MODES:
+    case CONTIGUOUS_TO_TAIL_MODES:
+    case ARRAY_STORAGE_TO_HOLE_MODES:
     case IN_BOUNDS_ARRAY_STORAGE_MODES:
         return true;
     default:
@@ -184,11 +238,24 @@ inline bool isSlowPutAccess(Array::Mode arrayMode)
     }
 }
 
+inline bool mayStoreToTail(Array::Mode arrayMode)
+{
+    switch (arrayMode) {
+    case CONTIGUOUS_TO_TAIL_MODES:
+    case OUT_OF_BOUNDS_CONTIGUOUS_MODES:
+    case ALL_EFFECTFUL_CONTIGUOUS_MODES:
+        return true;
+    default:
+        return false;
+    }
+}
+
 inline bool mayStoreToHole(Array::Mode arrayMode)
 {
     switch (arrayMode) {
     case ARRAY_STORAGE_TO_HOLE_MODES:
     case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES:
+    case SLOW_PUT_ARRAY_STORAGE_MODES:
     case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
         return true;
     default:
@@ -249,7 +316,9 @@ inline bool modeSupportsLength(Array::Mode mode)
     case Array::Unprofiled:
     case Array::ForceExit:
     case Array::Generic:
+    case NON_ARRAY_CONTIGUOUS_MODES:
     case NON_ARRAY_ARRAY_STORAGE_MODES:
+    case NON_ARRAY_EFFECTFUL_MODES:
         return false;
     default:
         return true;
@@ -259,7 +328,7 @@ inline bool modeSupportsLength(Array::Mode mode)
 inline bool benefitsFromStructureCheck(Array::Mode mode)
 {
     switch (mode) {
-    case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+    case ALL_EFFECTFUL_MODES:
     case Array::Undecided:
     case Array::Unprofiled:
     case Array::ForceExit:
@@ -273,7 +342,7 @@ inline bool benefitsFromStructureCheck(Array::Mode mode)
 inline bool isEffectful(Array::Mode mode)
 {
     switch (mode) {
-    case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+    case ALL_EFFECTFUL_MODES:
         return true;
     default:
         return false;
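A note on the pattern above, since it is easy to misread: each *_MODES macro opens with a bare label and chains the remaining labels with "case", so a single "case SOME_MODES:" in a switch expands to a whole group of case labels. A minimal standalone C++ sketch of the trick, with made-up mode names (illustration only, not WebKit code):

    #include <cstdio>

    enum Mode { A, B, C, D };

    // Begins with a bare label; the "case" keywords for the rest are baked in,
    // so the use site supplies only the leading "case" and the trailing ":".
    #define FIRST_TWO_MODES \
        A: \
        case B

    static const char* classify(Mode m)
    {
        switch (m) {
        case FIRST_TWO_MODES: // expands to: case A: case B:
            return "first two";
        default:
            return "other";
        }
    }

    int main()
    {
        std::printf("%s\n", classify(B)); // prints "first two"
    }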
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
index 7799ee505..a19b723d8 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
@@ -30,8 +30,6 @@
 
 namespace JSC { namespace DFG {
 
-const double AssemblyHelpers::twoToThe32 = (double)0x100000000ull;
-
 ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
 {
     if (!codeOrigin.inlineCallFrame)
@@ -49,9 +47,9 @@ Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock*
     HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset> >::AddResult result = m_decodedCodeMaps.add(codeBlock, Vector<BytecodeAndMachineOffset>());
 
     if (result.isNewEntry)
-        codeBlock->jitCodeMap()->decode(result.iterator->second);
+        codeBlock->jitCodeMap()->decode(result.iterator->value);
 
-    return result.iterator->second;
+    return result.iterator->value;
 }
 
 #if ENABLE(SAMPLING_FLAGS)
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
index a2003c5bf..5d338fa57 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -93,16 +93,16 @@ public:
     }
 #endif
 
-    void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, GPRReg to)
+    void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
     {
         loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
     }
-    void emitPutToCallFrameHeader(GPRReg from, RegisterFile::CallFrameHeaderEntry entry)
+    void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
    {
         storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
     }
 
-    void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
+    void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
     {
         storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
     }
@@ -243,33 +243,14 @@ public:
     }
 #endif
 
-#if USE(JSVALUE32_64) && CPU(X86)
+#if USE(JSVALUE32_64)
     void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
     {
-        movePackedToInt32(fpr, payloadGPR);
-        rshiftPacked(TrustedImm32(32), fpr);
-        movePackedToInt32(fpr, tagGPR);
+        moveDoubleToInts(fpr, payloadGPR, tagGPR);
     }
     void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
     {
-        jitAssertIsJSDouble(tagGPR);
-        moveInt32ToPacked(payloadGPR, fpr);
-        moveInt32ToPacked(tagGPR, scratchFPR);
-        lshiftPacked(TrustedImm32(32), scratchFPR);
-        orPacked(scratchFPR, fpr);
-    }
-#endif
-
-#if USE(JSVALUE32_64) && CPU(ARM)
-    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
-    {
-        m_assembler.vmov(payloadGPR, tagGPR, fpr);
-    }
-    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
-    {
-        jitAssertIsJSDouble(tagGPR);
-        UNUSED_PARAM(scratchFPR);
-        m_assembler.vmov(fpr, payloadGPR, tagGPR);
+        moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
     }
 #endif
 
@@ -364,8 +345,6 @@ public:
 
     Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
 
-    static const double twoToThe32;
-
 protected:
     JSGlobalData* m_globalData;
     CodeBlock* m_codeBlock;
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeCache.h b/Source/JavaScriptCore/dfg/DFGByteCodeCache.h
index ab88e99e5..6b9056e54 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeCache.h
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeCache.h
@@ -132,10 +132,10 @@ public:
         Map::iterator begin = m_map.begin();
         Map::iterator end = m_map.end();
         for (Map::iterator iter = begin; iter != end; ++iter) {
-            if (!iter->second.codeBlock)
+            if (!iter->value.codeBlock)
                 continue;
-            if (iter->second.owned) {
-                delete iter->second.codeBlock;
+            if (iter->value.owned) {
+                delete iter->value.codeBlock;
                 continue;
             }
         }
@@ -145,7 +145,7 @@ public:
     {
         Map::iterator iter = m_map.find(key);
         if (iter != m_map.end())
-            return iter->second.codeBlock;
+            return iter->value.codeBlock;
 
         ByteCodeCacheValue value;
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 6d5f68200..dc668d93e 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -43,6 +43,76 @@
 
 namespace JSC { namespace DFG {
 
+class ConstantBufferKey {
+public:
+    ConstantBufferKey()
+        : m_codeBlock(0)
+        , m_index(0)
+    {
+    }
+
+    ConstantBufferKey(WTF::HashTableDeletedValueType)
+        : m_codeBlock(0)
+        , m_index(1)
+    {
+    }
+
+    ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
+        : m_codeBlock(codeBlock)
+        , m_index(index)
+    {
+    }
+
+    bool operator==(const ConstantBufferKey& other) const
+    {
+        return m_codeBlock == other.m_codeBlock
+            && m_index == other.m_index;
+    }
+
+    unsigned hash() const
+    {
+        return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return !m_codeBlock && m_index;
+    }
+
+    CodeBlock* codeBlock() const { return m_codeBlock; }
+    unsigned index() const { return m_index; }
+
+private:
+    CodeBlock* m_codeBlock;
+    unsigned m_index;
+};
+
+struct ConstantBufferKeyHash {
+    static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
+    static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
+    {
+        return a == b;
+    }
+
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::DFG
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
+    typedef JSC::DFG::ConstantBufferKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
+
+} // namespace WTF
+
+namespace JSC { namespace DFG {
+
 // === ByteCodeParser ===
 //
 // This class is used to compile the dataflow graph from a CodeBlock.
@@ -142,7 +212,7 @@ private:
             return getJSConstant(constant);
         }
 
-        if (operand == RegisterFile::Callee)
+        if (operand == JSStack::Callee)
             return getCallee();
 
         // Is this an argument?
@@ -227,7 +297,10 @@ private:
             if (nodePtr->op() == GetLocal)
                 nodeIndex = nodePtr->child1().index();
 
-            return injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
+            NodeIndex newGetLocal = injectLazyOperandSpeculation(
+                addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
+            m_currentBlock->variablesAtTail.local(operand) = newGetLocal;
+            return newGetLocal;
         }
 
         if (nodePtr->op() == GetLocal)
@@ -366,11 +439,11 @@ private:
             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
             if (!inlineCallFrame)
                 break;
-            if (operand >= static_cast<int>(inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize))
+            if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
                 continue;
             if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                 continue;
-            if (operand < static_cast<int>(inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
+            if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
                 continue;
             int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
             return stack->m_argumentPositions[argument];
@@ -669,9 +742,9 @@ private:
     {
         HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
         if (result.isNewEntry)
-            result.iterator->second = addToGraph(WeakJSConstant, OpInfo(cell));
+            result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
 
-        return result.iterator->second;
+        return result.iterator->value;
     }
 
     CodeOrigin currentCodeOrigin()
@@ -758,8 +831,8 @@ private:
         addVarArgChild(get(currentInstruction[1].u.operand));
 
         int argCount = currentInstruction[2].u.operand;
-        if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
-            m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
+        if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
+            m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
 
         int registerOffset = currentInstruction[3].u.operand;
         int dummyThisArgument = op == Call ? 0 : 1;
@@ -1049,6 +1122,8 @@ private:
     Vector<PhiStackEntry, 16> m_argumentPhiStack;
     Vector<PhiStackEntry, 16> m_localPhiStack;
 
+    HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
+
     struct InlineStackEntry {
         ByteCodeParser* m_byteCodeParser;
 
@@ -1067,6 +1142,7 @@ private:
         // direct, caller).
         Vector<unsigned> m_identifierRemap;
         Vector<unsigned> m_constantRemap;
+        Vector<unsigned> m_constantBufferRemap;
 
         // Blocks introduced by this code block, which need successor linking.
         // May include up to one basic block that includes the continuation after
@@ -1139,7 +1215,7 @@ private:
                 return result;
             }
 
-            if (operand == RegisterFile::Callee)
+            if (operand == JSStack::Callee)
                 return m_calleeVR;
 
             return operand + m_inlineCallFrame->stackOffset;
@@ -1361,14 +1437,14 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
 
     // FIXME: Don't flush constants!
 
-    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
+    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - JSStack::CallFrameHeaderSize;
 
     // Make sure that the area used by the call frame is reserved.
-    for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
+    for (int arg = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
         m_preservedVars.set(arg);
 
     // Make sure that we have enough locals.
-    unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
+    unsigned newNumLocals = inlineCallFrameStart + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
     if (newNumLocals > m_numLocals) {
         m_numLocals = newNumLocals;
         for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
@@ -1571,6 +1647,8 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         case Array::ArrayWithArrayStorageToHole:
             ASSERT_NOT_REACHED();
 
+        case Array::ArrayWithContiguous:
+        case Array::ArrayWithContiguousOutOfBounds:
         case Array::ArrayWithArrayStorage:
         case Array::ArrayWithArrayStorageOutOfBounds: {
             NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
@@ -1594,6 +1672,8 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
         case Array::ArrayWithArrayStorageToHole:
             ASSERT_NOT_REACHED();
 
+        case Array::ArrayWithContiguous:
+        case Array::ArrayWithContiguousOutOfBounds:
         case Array::ArrayWithArrayStorage:
         case Array::ArrayWithArrayStorageOutOfBounds: {
             NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
@@ -1868,7 +1948,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         }
 
         case op_create_this: {
-            set(currentInstruction[1].u.operand, addToGraph(CreateThis, get(RegisterFile::Callee)));
+            set(currentInstruction[1].u.operand, addToGraph(CreateThis, get(JSStack::Callee)));
             NEXT_OPCODE(op_create_this);
         }
 
@@ -1889,7 +1969,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_new_array_buffer: {
             int startConstant = currentInstruction[2].u.operand;
             int numConstants = currentInstruction[3].u.operand;
-            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
+            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(m_inlineStackTop->m_constantBufferRemap[startConstant]), OpInfo(numConstants)));
             NEXT_OPCODE(op_new_array_buffer);
         }
 
@@ -2753,8 +2833,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             addToGraph(CheckArgumentsNotCreated);
 
             unsigned argCount = m_inlineStackTop->m_inlineCallFrame->arguments.size();
-            if (RegisterFile::CallFrameHeaderSize + argCount > m_parameterSlots)
-                m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;
+            if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots)
+                m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
 
             addVarArgChild(get(currentInstruction[1].u.operand)); // callee
             addVarArgChild(get(currentInstruction[2].u.operand)); // this
@@ -2775,8 +2855,10 @@ bool ByteCodeParser::parseBlock(unsigned limit)
             // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
             // support simmer for a while before making it more general, since it's
             // already gnarly enough as it is.
+            ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
             addToGraph(
-                CheckFunction, OpInfo(currentInstruction[2].u.jsCell.get()),
+                CheckFunction,
+                OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
                 get(currentInstruction[1].u.operand));
             addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
             LAST_OPCODE(op_jneq_ptr);
@@ -3205,7 +3287,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
         InlineCallFrame inlineCallFrame;
         inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
-        inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
+        inlineCallFrame.stackOffset = inlineCallFrameStart + JSStack::CallFrameHeaderSize;
         inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
         inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
         inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
@@ -3242,13 +3324,14 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
 
         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
         m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
+        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
 
         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
             StringImpl* rep = codeBlock->identifier(i).impl();
             IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
             if (result.isNewEntry)
                 byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
-            m_identifierRemap[i] = result.iterator->second;
+            m_identifierRemap[i] = result.iterator->value;
         }
         for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
             JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
@@ -3266,10 +3349,24 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
                 byteCodeParser->m_codeBlock->addConstant(value);
                 byteCodeParser->m_constants.append(ConstantRecord());
             }
-            m_constantRemap[i] = result.iterator->second;
+            m_constantRemap[i] = result.iterator->value;
         }
         for (unsigned i = 0; i < codeBlock->numberOfGlobalResolveInfos(); ++i)
             byteCodeParser->m_codeBlock->addGlobalResolveInfo(std::numeric_limits<unsigned>::max());
+        for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
+            // If we inline the same code block multiple times, we don't want to needlessly
+            // duplicate its constant buffers.
+            HashMap<ConstantBufferKey, unsigned>::iterator iter =
+                byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
+            if (iter != byteCodeParser->m_constantBufferCache.end()) {
+                m_constantBufferRemap[i] = iter->value;
+                continue;
+            }
+            Vector<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
+            unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
+            m_constantBufferRemap[i] = newIndex;
+            byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
+        }
 
         m_callsiteBlockHeadNeedsLinking = true;
     } else {
@@ -3285,11 +3382,14 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
 
         m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
         m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
+        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
 
         for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
             m_identifierRemap[i] = i;
         for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
             m_constantRemap[i] = i + FirstConstantRegisterIndex;
+        for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
+            m_constantBufferRemap[i] = i;
 
         m_callsiteBlockHeadNeedsLinking = false;
     }
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
index 5cd0baab2..4a6024305 100644
--- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -184,6 +184,14 @@ public:
         addCallArgument(arg2);
     }
 
+    ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2)
+    {
+        resetCallArguments();
+        addCallArgument(GPRInfo::callFrameRegister);
+        addCallArgument(arg1);
+        addCallArgument(arg2);
+    }
+
     ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
     {
         resetCallArguments();
@@ -593,6 +601,13 @@ public:
         move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
     }
 
+    ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2)
+    {
+        move(arg1, GPRInfo::argumentGPR1);
+        move(arg2, GPRInfo::argumentGPR2);
+        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+    }
+
     ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
     {
         setupStubArguments(arg1, arg2, arg3);
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
index cea2f3c48..185332921 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
@@ -177,6 +177,7 @@ private:
         if (!m_graph.byValIsPure(node))
             return NoNode;
         switch (node.arrayMode()) {
+        case CONTIGUOUS_TO_TAIL_MODES:
         case ARRAY_STORAGE_TO_HOLE_MODES:
             return NoNode;
         default:
@@ -197,6 +198,8 @@ private:
         for (unsigned i = m_indexInBlock; i--;) {
             NodeIndex index = m_currentBlock->at(i);
             Node& node = m_graph[index];
+            if (!node.shouldGenerate())
+                continue;
             switch (node.op()) {
             case GetGlobalVar:
                 if (node.registerPointer() == registerPointer)
@@ -220,6 +223,8 @@ private:
         for (unsigned i = m_indexInBlock; i--;) {
             NodeIndex index = m_currentBlock->at(i);
             Node& node = m_graph[index];
+            if (!node.shouldGenerate())
+                continue;
             switch (node.op()) {
             case GetScopedVar: {
                 Node& getScopeRegisters = m_graph[node.child1()];
@@ -248,6 +253,8 @@ private:
         for (unsigned i = m_indexInBlock; i--;) {
             NodeIndex index = m_currentBlock->at(i);
             Node& node = m_graph[index];
+            if (!node.shouldGenerate())
+                continue;
             switch (node.op()) {
             case GlobalVarWatchpoint:
                 if (node.registerPointer() == registerPointer)
@@ -334,6 +341,8 @@ private:
                 break;
 
             Node& node = m_graph[index];
+            if (!node.shouldGenerate())
+                continue;
             switch (node.op()) {
             case GetByVal:
                 if (!m_graph.byValIsPure(node))
@@ -359,9 +368,6 @@ private:
                 // for a structure change or a put to property storage to affect
                 // the GetByVal.
                 break;
-            case ArrayPush:
-                // A push cannot affect previously existing elements in the array.
-                break;
             default:
                 if (m_graph.clobbersWorld(index))
                     return NoNode;
@@ -393,6 +399,8 @@ private:
                 break;
 
             Node& node = m_graph[index];
+            if (!node.shouldGenerate())
+                continue;
             switch (node.op()) {
             case CheckStructure:
             case ForwardCheckStructure:
@@ -447,6 +455,8 @@ private:
                 break;
 
             Node& node = m_graph[index];
+            if (!node.shouldGenerate())
+                continue;
             switch (node.op()) {
             case CheckStructure:
             case ForwardCheckStructure:
@@ -549,6 +559,8 @@ private:
                 break;
 
             Node& node = m_graph[index];
+            if (!node.shouldGenerate())
+                continue;
             switch (node.op()) {
             case GetByOffset:
                 if (node.child1() == child1
@@ -1222,9 +1234,7 @@ private:
         case PutByVal: {
             Edge child1 = m_graph.varArgChild(node, 0);
             Edge child2 = m_graph.varArgChild(node, 1);
-            if (isActionableMutableArraySpeculation(m_graph[child1].prediction())
-                && m_graph[child2].shouldSpeculateInteger()
-                && !m_graph[child1].shouldSpeculateArguments()) {
+            if (canCSEStorage(node.arrayMode())) {
                 NodeIndex nodeIndex = getByValLoadElimination(child1.index(), child2.index());
                 if (nodeIndex == NoNode)
                     break;
diff --git a/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h b/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h
new file mode 100644
index 000000000..46d5f44cb
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCallArrayAllocatorSlowPathGenerator.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCallArrayAllocatorSlowPathGenerator_h
+#define DFGCallArrayAllocatorSlowPathGenerator_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGCommon.h"
+#include "DFGSlowPathGenerator.h"
+#include "DFGSpeculativeJIT.h"
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+class CallArrayAllocatorSlowPathGenerator : public JumpingSlowPathGenerator<MacroAssembler::JumpList> {
+public:
+    CallArrayAllocatorSlowPathGenerator(
+        MacroAssembler::JumpList from, SpeculativeJIT* jit, P_DFGOperation_EStZ function,
+        GPRReg resultGPR, GPRReg storageGPR, Structure* structure, size_t size)
+        : JumpingSlowPathGenerator<MacroAssembler::JumpList>(from, jit)
+        , m_function(function)
+        , m_resultGPR(resultGPR)
+        , m_storageGPR(storageGPR)
+        , m_structure(structure)
+        , m_size(size)
+    {
+        ASSERT(size < static_cast<size_t>(std::numeric_limits<int32_t>::max()));
+        jit->silentSpillAllRegistersImpl(false, m_plans, resultGPR);
+    }
+
+protected:
+    void generateInternal(SpeculativeJIT* jit)
+    {
+        linkFrom(jit);
+        for (unsigned i = 0; i < m_plans.size(); ++i)
+            jit->silentSpill(m_plans[i]);
+        jit->callOperation(m_function, m_resultGPR, m_structure, m_size);
+        GPRReg canTrample = SpeculativeJIT::pickCanTrample(m_resultGPR);
+        for (unsigned i = 0; i < m_plans.size(); ++i)
+            jit->silentFill(m_plans[i], canTrample);
+        jit->m_jit.loadPtr(MacroAssembler::Address(m_resultGPR, JSObject::butterflyOffset()), m_storageGPR);
+        jumpTo(jit);
+    }
+
+private:
+    P_DFGOperation_EStZ m_function;
+    GPRReg m_resultGPR;
+    GPRReg m_storageGPR;
+    Structure* m_structure;
+    size_t m_size;
+    Vector<SilentRegisterSavePlan, 2> m_plans;
+};
+
+class CallArrayAllocatorWithVariableSizeSlowPathGenerator : public JumpingSlowPathGenerator<MacroAssembler::JumpList> {
+public:
+    CallArrayAllocatorWithVariableSizeSlowPathGenerator(
+        MacroAssembler::JumpList from, SpeculativeJIT* jit, P_DFGOperation_EStZ function,
+        GPRReg resultGPR, Structure* contiguousStructure, Structure* arrayStorageStructure, GPRReg sizeGPR)
+        : JumpingSlowPathGenerator<MacroAssembler::JumpList>(from, jit)
+        , m_function(function)
+        , m_resultGPR(resultGPR)
+        , m_contiguousStructure(contiguousStructure)
+        , m_arrayStorageStructure(arrayStorageStructure)
+        , m_sizeGPR(sizeGPR)
+    {
+        jit->silentSpillAllRegistersImpl(false, m_plans, resultGPR);
+    }
+
+protected:
+    void generateInternal(SpeculativeJIT* jit)
+    {
+        linkFrom(jit);
+        for (unsigned i = 0; i < m_plans.size(); ++i)
+            jit->silentSpill(m_plans[i]);
+        GPRReg scratchGPR = AssemblyHelpers::selectScratchGPR(m_sizeGPR);
+        MacroAssembler::Jump bigLength = jit->m_jit.branch32(MacroAssembler::AboveOrEqual, m_sizeGPR, MacroAssembler::TrustedImm32(MIN_SPARSE_ARRAY_INDEX));
+        jit->m_jit.move(MacroAssembler::TrustedImmPtr(m_contiguousStructure), scratchGPR);
+        MacroAssembler::Jump done = jit->m_jit.jump();
+        bigLength.link(&jit->m_jit);
+        jit->m_jit.move(MacroAssembler::TrustedImmPtr(m_arrayStorageStructure), scratchGPR);
+        done.link(&jit->m_jit);
+        jit->callOperation(m_function, m_resultGPR, scratchGPR, m_sizeGPR);
+        GPRReg canTrample = SpeculativeJIT::pickCanTrample(m_resultGPR);
+        for (unsigned i = 0; i < m_plans.size(); ++i)
+            jit->silentFill(m_plans[i], canTrample);
+        jumpTo(jit);
+    }
+
+private:
+    P_DFGOperation_EStZ m_function;
+    GPRReg m_resultGPR;
+    Structure* m_contiguousStructure;
+    Structure* m_arrayStorageStructure;
+    GPRReg m_sizeGPR;
+    Vector<SilentRegisterSavePlan, 2> m_plans;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGCallArrayAllocatorSlowPathGenerator_h
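The new file above follows the DFG's deferred slow-path pattern: the fast path records its bail-out jumps, a generator object is constructed up front, and the slow code is emitted later, out of line, after the main code stream. A simplified, self-contained sketch of that idea (ordinary C++ with no real assembler; every name here is illustrative, not JSC API):

    #include <cstdio>
    #include <memory>
    #include <vector>

    // Base class for code that must be emitted after the main (fast) pass.
    struct SlowPathGenerator {
        virtual ~SlowPathGenerator() {}
        virtual void generate() = 0;
    };

    // Models calling out to an allocator when inline allocation fails.
    struct CallAllocatorSlowPath : SlowPathGenerator {
        const char* operation;
        explicit CallAllocatorSlowPath(const char* op) : operation(op) {}
        void generate() override { std::printf("slow path: call %s\n", operation); }
    };

    int main()
    {
        std::vector<std::unique_ptr<SlowPathGenerator>> slowPaths;

        // Fast path: emit inline allocation and register the slow path for later.
        std::printf("fast path: inline array allocation\n");
        slowPaths.push_back(std::make_unique<CallAllocatorSlowPath>("allocateArray"));

        // After the main pass, all slow paths are generated out of line.
        for (auto& generator : slowPaths)
            generator->generate();
    }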
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
index bccde7ca7..e1760699a 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -201,10 +201,6 @@ inline bool canInlineOpcode(OpcodeID opcodeID, CodeBlock* codeBlock, Instruction
     case op_resolve:
     case op_resolve_base:
 
-    // Constant buffers aren't copied correctly. This is easy to fix, but for
-    // now we just disable inlining for functions that use them.
-    case op_new_array_buffer:
-
     // Inlining doesn't correctly remap regular expression operands.
     case op_new_regexp:
 
diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
index dfb62cbc4..8a261ad2b 100644
--- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp
@@ -179,7 +179,8 @@ private:
                 block->variablesAtTail.operand(node.local()) = previousLocalAccess;
             else {
                 ASSERT(m_graph[tailNodeIndex].op() == Flush
-                    || m_graph[tailNodeIndex].op() == SetLocal);
+                    || m_graph[tailNodeIndex].op() == SetLocal
+                    || node.variableAccessData()->isCaptured());
             }
         }
 
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
index aa2d5dff4..49212730c 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
@@ -116,7 +116,7 @@ private:
             m_graph.deref(m_compileIndex);
             nodePtr->setArrayMode(arrayMode);
 
-            NodeIndex storage = checkArray(arrayMode, nodePtr->codeOrigin, nodePtr->child1().index(), lengthNeedsStorage, nodePtr->shouldGenerate());
+            NodeIndex storage = checkArray(arrayMode, nodePtr->codeOrigin, nodePtr->child1().index(), NoNode, lengthNeedsStorage, nodePtr->shouldGenerate());
             if (storage == NoNode)
                 break;
 
@@ -137,17 +137,18 @@ private:
                     m_graph[node.child1()].prediction(),
                     m_graph[node.child2()].prediction()));
 
-            blessArrayOperation(node.child1(), 2);
+            blessArrayOperation(node.child1(), node.child2(), 2);
             break;
         }
 
         case ArrayPush: {
-            blessArrayOperation(node.child1(), 2);
+            blessArrayOperation(node.child1(), node.child2(), 2);
             break;
         }
 
         case ArrayPop: {
-            blessArrayOperation(node.child1(), 1);
+            blessArrayOperation(node.child1(), node.child2(), 1);
+            break;
         }
 
         case ValueToInt32: {
@@ -327,7 +328,7 @@ private:
                     m_graph[child1].prediction(),
                     m_graph[child2].prediction()));
 
-            blessArrayOperation(child1, 3);
+            blessArrayOperation(child1, child2, 3);
 
             Node* nodePtr = &m_graph[m_compileIndex];
 
@@ -375,25 +376,28 @@ private:
         return nodeIndex;
     }
 
-    NodeIndex checkArray(Array::Mode arrayMode, CodeOrigin codeOrigin, NodeIndex array, bool (*storageCheck)(Array::Mode) = canCSEStorage, bool shouldGenerate = true)
+    NodeIndex checkArray(Array::Mode arrayMode, CodeOrigin codeOrigin, NodeIndex array, NodeIndex index, bool (*storageCheck)(Array::Mode) = canCSEStorage, bool shouldGenerate = true)
     {
         ASSERT(modeIsSpecific(arrayMode));
 
         m_graph.ref(array);
 
         if (isEffectful(arrayMode)) {
-            Node arrayify(Arrayify, codeOrigin, OpInfo(arrayMode), array);
+            if (index != NoNode)
+                m_graph.ref(index);
+            Node arrayify(Arrayify, codeOrigin, OpInfo(arrayMode), array, index);
             arrayify.ref(); // Once because it's used as a butterfly.
             arrayify.ref(); // And twice because it's must-generate.
             NodeIndex arrayifyIndex = m_graph.size();
             m_graph.append(arrayify);
             m_insertionSet.append(m_indexInBlock, arrayifyIndex);
 
-            ASSERT(storageCheck == canCSEStorage);
             ASSERT(shouldGenerate);
             ASSERT(canCSEStorage(arrayMode));
             ASSERT(modeUsesButterfly(arrayMode));
-
+
+            if (!storageCheck(arrayMode))
+                return NoNode;
             return arrayifyIndex;
         }
 
@@ -415,14 +419,15 @@ private:
         return addNode(Node(GetIndexedPropertyStorage, codeOrigin, OpInfo(arrayMode), array), shouldGenerate);
     }
 
-    void blessArrayOperation(Edge base, unsigned storageChildIdx)
+    void blessArrayOperation(Edge base, Edge index, unsigned storageChildIdx)
     {
         if (m_graph.m_fixpointState > BeforeFixpoint)
             return;
 
         Node* nodePtr = &m_graph[m_compileIndex];
 
-        if (nodePtr->arrayMode() == Array::ForceExit) {
+        switch (nodePtr->arrayMode()) {
+        case Array::ForceExit: {
             Node forceExit(ForceOSRExit, nodePtr->codeOrigin);
             forceExit.ref();
             NodeIndex forceExitIndex = m_graph.size();
@@ -430,15 +435,23 @@ private:
             m_insertionSet.append(m_indexInBlock, forceExitIndex);
             return;
         }
-
-        if (!modeIsSpecific(nodePtr->arrayMode()))
+
+        case Array::Undecided:
+        case Array::Unprofiled:
+            ASSERT_NOT_REACHED();
             return;
 
-        NodeIndex storage = checkArray(nodePtr->arrayMode(), nodePtr->codeOrigin, base.index());
-        if (storage == NoNode)
+        case Array::Generic:
             return;
 
-        m_graph.child(m_graph[m_compileIndex], storageChildIdx) = Edge(storage);
+        default: {
+            NodeIndex storage = checkArray(nodePtr->arrayMode(), nodePtr->codeOrigin, base.index(), index.indexUnchecked());
+            if (storage == NoNode)
+                return;
+
+            m_graph.child(m_graph[m_compileIndex], storageChildIdx) = Edge(storage);
+            return;
+        }
+        }
     }
 
     void fixIntEdge(Edge& edge)
diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
index 905c5c5fb..227433e0e 100644
--- a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
@@ -201,7 +201,7 @@ public:
     // Get the format of the value in machine registers (or 'none').
     DataFormat registerFormat() { return m_registerFormat; }
-    // Get the format of the value as it is spilled in the RegisterFile (or 'none').
+    // Get the format of the value as it is spilled in the JSStack (or 'none').
     DataFormat spillFormat() { return m_spillFormat; }
 
     bool isJSFormat(DataFormat expectedFormat)
@@ -255,11 +255,11 @@ public:
         // This should only be called on values that are currently in a register.
         ASSERT(m_registerFormat != DataFormatNone);
         // Constants do not need spilling, nor do values that have already been
-        // spilled to the RegisterFile.
+        // spilled to the JSStack.
         return !m_canFill;
     }
 
-    // Called when a VirtualRegister is being spilled to the RegisterFile for the first time.
+    // Called when a VirtualRegister is being spilled to the JSStack for the first time.
     void spill(VariableEventStream& stream, VirtualRegister virtualRegister, DataFormat spillFormat)
     {
         // We shouldn't be spill values that don't need spilling.
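The GenerationInfo hunk above only renames comments, but the invariant those comments describe is worth spelling out: a value tracks both a register format and a spill format, and a store to the stack is needed only when the value lives in a register and has no stack copy yet (constants never need one). A toy model of that bookkeeping (assumed simplification, not JSC code):

    #include <cassert>
    #include <cstdio>

    enum class Format { None, Integer, Double, JS };

    struct GenerationInfo {
        Format registerFormat = Format::None; // format in machine registers, or None
        Format spillFormat = Format::None;    // format in the stack slot, or None
        bool isConstant = false;

        bool needsSpill() const
        {
            assert(registerFormat != Format::None); // only meaningful for register-resident values
            return !isConstant && spillFormat == Format::None;
        }

        void spill(Format format)
        {
            assert(needsSpill());
            spillFormat = format; // first store to the stack slot; later spills are free
        }
    };

    int main()
    {
        GenerationInfo info;
        info.registerFormat = Format::Integer;
        std::printf("needs spill: %d\n", info.needsSpill()); // 1
        info.spill(Format::Integer);
        std::printf("needs spill: %d\n", info.needsSpill()); // 0
    }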
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index b02c9991c..212c8bbd2 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -36,8 +36,8 @@
 #include "DFGBasicBlock.h"
 #include "DFGDominators.h"
 #include "DFGNode.h"
+#include "JSStack.h"
 #include "MethodOfGettingAValueProfile.h"
-#include "RegisterFile.h"
 #include <wtf/BitVector.h>
 #include <wtf/HashMap.h>
 #include <wtf/Vector.h>
@@ -479,8 +479,11 @@ public:
     {
         switch (node.arrayMode()) {
         case Array::Generic:
+        case OUT_OF_BOUNDS_CONTIGUOUS_MODES:
+        case ARRAY_STORAGE_TO_HOLE_MODES:
         case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES:
-        case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+        case SLOW_PUT_ARRAY_STORAGE_MODES:
+        case ALL_EFFECTFUL_MODES:
             return false;
         case Array::String:
             return node.op() == GetByVal;
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index ae28fad3f..c7f941a7a 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -68,14 +68,14 @@ void JITCompiler::compileEntry()
 {
     // This code currently matches the old JIT. In the function header we need to
     // pop the return address (since we do not allow any recursion on the machine
-    // stack), and perform a fast register file check.
+    // stack), and perform a fast stack check.
     // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
-    // We'll need to convert the remaining cti_ style calls (specifically the register file
+    // We'll need to convert the remaining cti_ style calls (specifically the stack
     // check) which will be dependent on stack layout. (We'd need to account for this in
     // both normal return code and when jumping to an exception handler).
     preserveReturnAddressAfterCall(GPRInfo::regT2);
-    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
-    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+    emitPutToCallFrameHeader(GPRInfo::regT2, JSStack::ReturnPC);
+    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 }
 
 void JITCompiler::compileBody(SpeculativeJIT& speculative)
@@ -256,12 +256,12 @@ bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
     // If we needed to perform an arity check we will already have moved the return address,
     // so enter after this.
     Label fromArityCheck(this);
-    // Plant a check that sufficient space is available in the RegisterFile.
+    // Plant a check that sufficient space is available in the JSStack.
     // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
     addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
-    // Return here after register file check.
-    Label fromRegisterFileCheck = label();
+    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
+    // Return here after stack check.
+    Label fromStackCheck = label();
 
     // === Function body code generation ===
 
@@ -271,21 +271,21 @@ bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
     // === Function footer code generation ===
     //
-    // Generate code to perform the slow register file check (if the fast one in
+    // Generate code to perform the slow stack check (if the fast one in
     // the function header fails), and generate the entry point with arity check.
// - // Generate the register file check; if the fast check in the function head fails, + // Generate the stack check; if the fast check in the function head fails, // we need to call out to a helper function to check whether more space is available. // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions). - registerFileCheck.link(this); + stackCheck.link(this); move(stackPointerRegister, GPRInfo::argumentGPR0); poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*)); CallBeginToken token; beginCall(CodeOrigin(0), token); - Call callRegisterFileCheck = call(); - notifyCall(callRegisterFileCheck, CodeOrigin(0), token); - jump(fromRegisterFileCheck); + Call callStackCheck = call(); + notifyCall(callStackCheck, CodeOrigin(0), token); + jump(fromStackCheck); // The fast entry point into a function does not check the correct number of arguments // have been passed to the call (we only use the fast entry point where we can statically @@ -295,7 +295,7 @@ bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi Label arityCheck = label(); compileEntry(); - load32(AssemblyHelpers::payloadFor((VirtualRegister)RegisterFile::ArgumentCount), GPRInfo::regT1); + load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1); branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this); move(stackPointerRegister, GPRInfo::argumentGPR0); poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*)); @@ -322,8 +322,8 @@ bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi link(linkBuffer); speculative.linkOSREntries(linkBuffer); - // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs. - linkBuffer.link(callRegisterFileCheck, cti_register_file_check); + // FIXME: switch the stack check & arity check over to DFGOperation style calls, not JIT stubs. + linkBuffer.link(callStackCheck, cti_stack_check); linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck); if (m_disassembler) diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h index 7ff399f78..c73934832 100644 --- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h +++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h @@ -297,7 +297,7 @@ public: void beginCall(CodeOrigin codeOrigin, CallBeginToken& token) { unsigned index = m_exceptionChecks.size(); - store32(TrustedImm32(index), tagFor(static_cast<VirtualRegister>(RegisterFile::ArgumentCount))); + store32(TrustedImm32(index), tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount))); token.set(codeOrigin, index); } diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp index 9a7bc96cc..b838c4fb4 100644 --- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp @@ -136,7 +136,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn // it seems silly: you'd be diverting the program to error handling when it // would have otherwise just kept running albeit less quickly.
- if (!globalData->interpreter->registerFile().grow(&exec->registers()[codeBlock->m_numCalleeRegisters])) { + if (!globalData->interpreter->stack().grow(&exec->registers()[codeBlock->m_numCalleeRegisters])) { #if ENABLE(JIT_VERBOSE_OSR) dataLog(" OSR failed because stack growth failed.\n"); #endif diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp index 8c8e2f949..cb13dcc50 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp @@ -125,9 +125,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // Int32s, have no FPRs, and have no constants. If there are constants, we // expect most of them to be jsUndefined(); if that's true then we handle that // specially to minimize code size and execution time. - bool haveUnboxedInt32InRegisterFile = false; - bool haveUnboxedCellInRegisterFile = false; - bool haveUnboxedBooleanInRegisterFile = false; + bool haveUnboxedInt32InJSStack = false; + bool haveUnboxedCellInJSStack = false; + bool haveUnboxedBooleanInJSStack = false; bool haveUInt32s = false; bool haveFPRs = false; bool haveConstants = false; @@ -137,10 +137,10 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: - case Int32DisplacedInRegisterFile: - case CellDisplacedInRegisterFile: - case BooleanDisplacedInRegisterFile: + case DisplacedInJSStack: + case Int32DisplacedInJSStack: + case CellDisplacedInJSStack: + case BooleanDisplacedInJSStack: numberOfDisplacedVirtualRegisters++; ASSERT((int)recovery.virtualRegister() >= 0); @@ -174,16 +174,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov haveUInt32s = true; break; - case AlreadyInRegisterFileAsUnboxedInt32: - haveUnboxedInt32InRegisterFile = true; + case AlreadyInJSStackAsUnboxedInt32: + haveUnboxedInt32InJSStack = true; break; - case AlreadyInRegisterFileAsUnboxedCell: - haveUnboxedCellInRegisterFile = true; + case AlreadyInJSStackAsUnboxedCell: + haveUnboxedCellInJSStack = true; break; - case AlreadyInRegisterFileAsUnboxedBoolean: - haveUnboxedBooleanInRegisterFile = true; + case AlreadyInJSStackAsUnboxedBoolean: + haveUnboxedBooleanInJSStack = true; break; case InFPR: @@ -214,19 +214,19 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // 5) Perform all reboxing of integers and cells, except for those in registers. 
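Step 5 below stores bare tag words next to payloads that are already sitting in the stack. On the 32-bit value representation a JSValue slot is a { payload, tag } pair, so "reboxing" an AlreadyInJSStackAsUnboxed* value is a single 32-bit tag store. A sketch with illustrative tag constants (the real values are defined by JSC's JSVALUE32_64 encoding in JSValue.h):

    #include <cstdint>

    // Illustrative tags only; do not rely on these exact values.
    enum Tag : int32_t { Int32Tag = -1, BooleanTag = -2, CellTag = -5 };

    struct Slot {           // one JSStack slot
        int32_t payload;    // written via AssemblyHelpers::payloadFor(...)
        int32_t tag;        // written via AssemblyHelpers::tagFor(...)
    };

    // The payload already holds the raw bits, so reboxing never touches it.
    inline void reboxInt32(Slot& s)   { s.tag = Int32Tag; }
    inline void reboxCell(Slot& s)    { s.tag = CellTag; }
    inline void reboxBoolean(Slot& s) { s.tag = BooleanTag; }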
- if (haveUnboxedInt32InRegisterFile || haveUnboxedCellInRegisterFile || haveUnboxedBooleanInRegisterFile) { + if (haveUnboxedInt32InJSStack || haveUnboxedCellInJSStack || haveUnboxedBooleanInJSStack) { for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case AlreadyInRegisterFileAsUnboxedInt32: + case AlreadyInJSStackAsUnboxedInt32: m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; - case AlreadyInRegisterFileAsUnboxedCell: + case AlreadyInJSStackAsUnboxedCell: m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; - case AlreadyInRegisterFileAsUnboxedBoolean: + case AlreadyInJSStackAsUnboxedBoolean: m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; @@ -327,7 +327,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } } - // 7) Dump all doubles into the register file, or to the scratch storage if the + // 7) Dump all doubles into the stack, or to the scratch storage if the // destination virtual register is poisoned. if (haveFPRs) { for (size_t index = 0; index < operands.size(); ++index) { @@ -360,19 +360,19 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: + case DisplacedInJSStack: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); break; - case Int32DisplacedInRegisterFile: + case Int32DisplacedInJSStack: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), GPRInfo::toRegister(displacementIndex++)); break; - case CellDisplacedInRegisterFile: + case CellDisplacedInJSStack: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::toRegister(displacementIndex++)); break; - case BooleanDisplacedInRegisterFile: + case BooleanDisplacedInJSStack: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); m_jit.move(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), GPRInfo::toRegister(displacementIndex++)); break; @@ -385,10 +385,10 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: - case Int32DisplacedInRegisterFile: - case CellDisplacedInRegisterFile: - case BooleanDisplacedInRegisterFile: + case DisplacedInJSStack: + case Int32DisplacedInJSStack: + case CellDisplacedInJSStack: + case BooleanDisplacedInJSStack: m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); 
m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); break; @@ -418,16 +418,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: + case DisplacedInJSStack: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0); m_jit.load32(AssemblyHelpers::tagFor(recovery.virtualRegister()), GPRInfo::regT1); m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); m_jit.store32(GPRInfo::regT1, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); scratchIndex++; break; - case Int32DisplacedInRegisterFile: - case CellDisplacedInRegisterFile: - case BooleanDisplacedInRegisterFile: + case Int32DisplacedInJSStack: + case CellDisplacedInJSStack: + case BooleanDisplacedInJSStack: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0); m_jit.store32(GPRInfo::regT0, reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); break; @@ -440,24 +440,24 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: + case DisplacedInJSStack: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1); m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); scratchIndex++; break; - case Int32DisplacedInRegisterFile: + case Int32DisplacedInJSStack: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); break; - case CellDisplacedInRegisterFile: + case CellDisplacedInJSStack: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); break; - case BooleanDisplacedInRegisterFile: + case BooleanDisplacedInJSStack: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); m_jit.store32(GPRInfo::regT0, 
AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); @@ -575,7 +575,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // 13) Reify inlined call frames. ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock)); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock)); for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) { InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; @@ -597,15 +597,15 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } else callerFrameGPR = GPRInfo::callFrameRegister; - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock))); - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain))); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain))); - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame))); - m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame))); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC))); - m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount))); - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee))); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame))); + m_jit.storePtr(callerFrameGPR, AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC))); + m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), 
AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee))); } // 14) Create arguments if necessary and place them into the appropriate aliased diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp index fcaf0a4bc..968e56f1a 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp @@ -133,9 +133,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case Int32DisplacedInRegisterFile: - case DoubleDisplacedInRegisterFile: - case DisplacedInRegisterFile: + case Int32DisplacedInJSStack: + case DoubleDisplacedInJSStack: + case DisplacedInJSStack: numberOfDisplacedVirtualRegisters++; ASSERT((int)recovery.virtualRegister() >= 0); @@ -164,11 +164,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov break; case UnboxedInt32InGPR: - case AlreadyInRegisterFileAsUnboxedInt32: + case AlreadyInJSStackAsUnboxedInt32: haveUnboxedInt32s = true; break; - case AlreadyInRegisterFileAsUnboxedDouble: + case AlreadyInJSStackAsUnboxedDouble: haveUnboxedDoubles = true; break; @@ -233,7 +233,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov m_jit.orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr()); break; - case AlreadyInRegisterFileAsUnboxedInt32: + case AlreadyInJSStackAsUnboxedInt32: m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; @@ -321,7 +321,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov m_jit.boxDouble(fpr, gpr); } - // 8) Dump all doubles into the register file, or to the scratch storage if + // 8) Dump all doubles into the stack, or to the scratch storage if // the destination virtual register is poisoned. for (size_t index = 0; index < operands.size(); ++index) { @@ -340,11 +340,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // At this point all GPRs and FPRs are available for scratch use. - // 9) Box all unboxed doubles in the register file. + // 9) Box all unboxed doubles in the stack. 
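The orPtr/subPtr operations against tagTypeNumberRegister in this 64-bit exit compiler are the whole value encoding in two instructions. A standalone model, assuming the TagTypeNumber constant JSC used in this era (0xffff000000000000): boxed int32s carry the tag in their high bits, and double bits are offset by 2^48 so they can never collide with it.

    #include <cstdint>
    #include <cstring>

    constexpr uint64_t TagTypeNumber = 0xffff000000000000ull; // assumed value

    // Int32DisplacedInJSStack: load32 the payload, then orPtr the tag in.
    inline uint64_t boxInt32(int32_t value)
    {
        return static_cast<uint32_t>(value) | TagTypeNumber;
    }

    // DoubleDisplacedInJSStack: take the raw bits, then subPtr TagTypeNumber,
    // which is the same as adding 2^48 modulo 2^64.
    inline uint64_t boxDouble(double value)
    {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits);
        return bits - TagTypeNumber;
    }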
if (haveUnboxedDoubles) { for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; - if (recovery.technique() != AlreadyInRegisterFileAsUnboxedDouble) + if (recovery.technique() != AlreadyInJSStackAsUnboxedDouble) continue; m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0); m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0); @@ -367,18 +367,18 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: + case DisplacedInJSStack: m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); break; - case Int32DisplacedInRegisterFile: { + case Int32DisplacedInJSStack: { GPRReg gpr = GPRInfo::toRegister(displacementIndex++); m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr); m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr); break; } - case DoubleDisplacedInRegisterFile: { + case DoubleDisplacedInJSStack: { GPRReg gpr = GPRInfo::toRegister(displacementIndex++); m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), gpr); m_jit.subPtr(GPRInfo::tagTypeNumberRegister, gpr); @@ -394,9 +394,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: - case Int32DisplacedInRegisterFile: - case DoubleDisplacedInRegisterFile: + case DisplacedInJSStack: + case Int32DisplacedInJSStack: + case DoubleDisplacedInJSStack: m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); break; @@ -427,19 +427,19 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: + case DisplacedInJSStack: m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0); m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++); break; - case Int32DisplacedInRegisterFile: { + case Int32DisplacedInJSStack: { m_jit.load32(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0); m_jit.orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0); m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++); break; } - case DoubleDisplacedInRegisterFile: { + case DoubleDisplacedInJSStack: { m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::regT0); m_jit.subPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0); m_jit.storePtr(GPRInfo::regT0, scratchDataBuffer + scratchIndex++); @@ -455,9 +455,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov for (size_t index = 0; index < operands.size(); ++index) { const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { - case DisplacedInRegisterFile: - case Int32DisplacedInRegisterFile: - case DoubleDisplacedInRegisterFile: + case DisplacedInJSStack: + case Int32DisplacedInJSStack: + case DoubleDisplacedInJSStack: m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0); m_jit.storePtr(GPRInfo::regT0, 
AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); break; @@ -553,7 +553,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov // 14) Reify inlined call frames. ASSERT(m_jit.baselineCodeBlock()->getJITType() == JITCode::BaselineJIT); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)RegisterFile::CodeBlock)); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock)); for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) { InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; @@ -575,12 +575,12 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov } else callerFrameGPR = GPRInfo::callFrameRegister; - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CodeBlock))); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ScopeChain))); - m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::CallerFrame))); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ReturnPC))); - m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::ArgumentCount))); - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + RegisterFile::Callee))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee->scope()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ScopeChain))); + m_jit.storePtr(callerFrameGPR, AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CallerFrame))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ReturnPC))); + m_jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->callee.get()), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee))); } // 15) Create arguments if necessary and place them into the appropriate aliased diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp index eaa0f47f7..db736feeb 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.cpp +++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp @@ -49,7 +49,7 @@ #if ENABLE(DFG_JIT) -#if CPU(X86_64) +#if COMPILER(GCC) && CPU(X86_64) #define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \ asm( \ @@ -64,7 +64,7 @@ #define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) 
FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx) #define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, r8) -#elif CPU(X86) +#elif COMPILER(GCC) && CPU(X86) #define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset) \ asm( \ @@ -556,9 +556,7 @@ void DFG_OPERATION operationPutByValBeyondArrayBoundsStrict(ExecState* exec, JSO NativeCallFrameTracer tracer(globalData, exec); if (index >= 0) { - // We should only get here if index is outside the existing vector. - ASSERT(!array->canSetIndexQuickly(index)); - array->methodTable()->putByIndex(array, exec, index, JSValue::decode(encodedValue), true); + array->putByIndexInline(exec, index, JSValue::decode(encodedValue), true); return; } @@ -573,9 +571,7 @@ void DFG_OPERATION operationPutByValBeyondArrayBoundsNonStrict(ExecState* exec, NativeCallFrameTracer tracer(globalData, exec); if (index >= 0) { - // We should only get here if index is outside the existing vector. - ASSERT(!array->canSetIndexQuickly(index)); - array->methodTable()->putByIndex(array, exec, index, JSValue::decode(encodedValue), false); + array->putByIndexInline(exec, index, JSValue::decode(encodedValue), false); return; } @@ -601,6 +597,16 @@ EncodedJSValue DFG_OPERATION operationArrayPop(ExecState* exec, JSArray* array) return JSValue::encode(array->pop(exec)); } +EncodedJSValue DFG_OPERATION operationArrayPopAndRecoverLength(ExecState* exec, JSArray* array) +{ + JSGlobalData* globalData = &exec->globalData(); + NativeCallFrameTracer tracer(globalData, exec); + + array->butterfly()->setPublicLength(array->butterfly()->publicLength() + 1); + + return JSValue::encode(array->pop(exec)); +} + EncodedJSValue DFG_OPERATION operationRegExpExec(ExecState* exec, JSCell* base, JSCell* argument) { JSGlobalData& globalData = exec->globalData(); @@ -1100,29 +1106,35 @@ EncodedJSValue DFG_OPERATION operationStrCat(ExecState* exec, void* buffer, size return JSValue::encode(jsString(exec, static_cast<Register*>(buffer), size)); } -EncodedJSValue DFG_OPERATION operationNewArray(ExecState* exec, Structure* arrayStructure, void* buffer, size_t size) +char* DFG_OPERATION operationNewArray(ExecState* exec, Structure* arrayStructure, void* buffer, size_t size) { JSGlobalData* globalData = &exec->globalData(); NativeCallFrameTracer tracer(globalData, exec); - - return JSValue::encode(constructArray(exec, arrayStructure, static_cast<JSValue*>(buffer), size)); + + return bitwise_cast<char*>(constructArray(exec, arrayStructure, static_cast<JSValue*>(buffer), size)); } -EncodedJSValue DFG_OPERATION operationNewEmptyArray(ExecState* exec, Structure* arrayStructure) +char* DFG_OPERATION operationNewEmptyArray(ExecState* exec, Structure* arrayStructure) { - return JSValue::encode(JSArray::create(exec->globalData(), arrayStructure)); + JSGlobalData* globalData = &exec->globalData(); + NativeCallFrameTracer tracer(globalData, exec); + + return bitwise_cast<char*>(JSArray::create(*globalData, arrayStructure)); } -EncodedJSValue DFG_OPERATION operationNewArrayWithSize(ExecState* exec, Structure* arrayStructure, int32_t size) +char* DFG_OPERATION operationNewArrayWithSize(ExecState* exec, Structure* arrayStructure, int32_t size) { - return JSValue::encode(JSArray::create(exec->globalData(), arrayStructure, size)); + JSGlobalData* globalData = &exec->globalData(); + NativeCallFrameTracer tracer(globalData, exec); + + return bitwise_cast<char*>(JSArray::create(*globalData, arrayStructure, size)); } -EncodedJSValue DFG_OPERATION 
operationNewArrayBuffer(ExecState* exec, size_t start, size_t size) +char* DFG_OPERATION operationNewArrayBuffer(ExecState* exec, Structure* arrayStructure, size_t start, size_t size) { JSGlobalData& globalData = exec->globalData(); NativeCallFrameTracer tracer(&globalData, exec); - return JSValue::encode(constructArray(exec, exec->codeBlock()->constantBuffer(start), size)); + return bitwise_cast<char*>(constructArray(exec, arrayStructure, exec->codeBlock()->constantBuffer(start), size)); } EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr) @@ -1309,6 +1321,14 @@ char* DFG_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState* return reinterpret_cast<char*>(result); } +char* DFG_OPERATION operationEnsureContiguous(ExecState* exec, JSObject* object) +{ + JSGlobalData& globalData = exec->globalData(); + NativeCallFrameTracer tracer(&globalData, exec); + + return reinterpret_cast<char*>(object->ensureContiguous(globalData)); +} + char* DFG_OPERATION operationEnsureArrayStorage(ExecState* exec, JSObject* object) { JSGlobalData& globalData = exec->globalData(); @@ -1317,6 +1337,16 @@ char* DFG_OPERATION operationEnsureArrayStorage(ExecState* exec, JSObject* objec return reinterpret_cast<char*>(object->ensureArrayStorage(globalData)); } +char* DFG_OPERATION operationEnsureContiguousOrArrayStorage(ExecState* exec, JSObject* object, int32_t index) +{ + JSGlobalData& globalData = exec->globalData(); + NativeCallFrameTracer tracer(&globalData, exec); + + if (static_cast<unsigned>(index) >= MIN_SPARSE_ARRAY_INDEX) + return reinterpret_cast<char*>(object->ensureArrayStorage(globalData)); + return reinterpret_cast<char*>(object->ensureIndexedStorage(globalData)); +} + double DFG_OPERATION operationFModOnInts(int32_t a, int32_t b) { return fmod(a, b); @@ -1425,11 +1455,9 @@ extern "C" void DFG_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock) #endif // ENABLE(DFG_JIT) -#if COMPILER(GCC) - namespace JSC { -#if CPU(X86_64) +#if COMPILER(GCC) && CPU(X86_64) asm ( ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" HIDE_SYMBOL(getHostCallReturnValue) "\n" @@ -1438,7 +1466,7 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n" "mov %r13, %rdi\n" "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif CPU(X86) +#elif COMPILER(GCC) && CPU(X86) asm ( ".text" "\n" \ ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" @@ -1448,7 +1476,7 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n" "mov %edi, 4(%esp)\n" "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif CPU(ARM_THUMB2) +#elif COMPILER(GCC) && CPU(ARM_THUMB2) asm ( ".text" "\n" ".align 2" "\n" @@ -1461,7 +1489,7 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n" "mov r0, r5" "\n" "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif CPU(ARM_TRADITIONAL) +#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL) asm ( ".text" "\n" ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" @@ -1483,6 +1511,4 @@ extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWi } // namespace JSC -#endif // COMPILER(GCC) - #endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h index 3b947ecbf..b6530b755 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.h +++ b/Source/JavaScriptCore/dfg/DFGOperations.h @@ -77,9 +77,6 @@ typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EP)(ExecState*, void*); typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPP)(ExecState*, void*, void*); typedef 
EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPS)(ExecState*, void*, size_t); typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ESS)(ExecState*, size_t, size_t); -typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ESt)(ExecState*, Structure*); -typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EStI)(ExecState*, Structure*, int32_t); -typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EStPS)(ExecState*, Structure*, void*, size_t); typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EZ)(ExecState*, int32_t); typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EZIcfZ)(ExecState*, int32_t, InlineCallFrame*, int32_t); typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EZZ)(ExecState*, int32_t, int32_t); @@ -110,8 +107,13 @@ typedef void DFG_OPERATION (*V_DFGOperation_W)(WatchpointSet*); typedef char* DFG_OPERATION (*P_DFGOperation_E)(ExecState*); typedef char* DFG_OPERATION (*P_DFGOperation_EO)(ExecState*, JSObject*); typedef char* DFG_OPERATION (*P_DFGOperation_EOS)(ExecState*, JSObject*, size_t); +typedef char* DFG_OPERATION (*P_DFGOperation_EOZ)(ExecState*, JSObject*, int32_t); typedef char* DFG_OPERATION (*P_DFGOperation_EPS)(ExecState*, void*, size_t); typedef char* DFG_OPERATION (*P_DFGOperation_ES)(ExecState*, size_t); +typedef char* DFG_OPERATION (*P_DFGOperation_ESt)(ExecState*, Structure*); +typedef char* DFG_OPERATION (*P_DFGOperation_EStPS)(ExecState*, Structure*, void*, size_t); +typedef char* DFG_OPERATION (*P_DFGOperation_EStSS)(ExecState*, Structure*, size_t, size_t); +typedef char* DFG_OPERATION (*P_DFGOperation_EStZ)(ExecState*, Structure*, int32_t); // These routines provide callbacks out to C++ implementations of operations too complex to JIT. JSCell* DFG_OPERATION operationNewObject(ExecState*) WTF_INTERNAL; @@ -135,10 +137,10 @@ EncodedJSValue DFG_OPERATION operationResolveBaseStrictPut(ExecState*, Identifie EncodedJSValue DFG_OPERATION operationResolveGlobal(ExecState*, GlobalResolveInfo*, JSGlobalObject*, Identifier*) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationToPrimitive(ExecState*, EncodedJSValue) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationStrCat(ExecState*, void*, size_t) WTF_INTERNAL; -EncodedJSValue DFG_OPERATION operationNewArray(ExecState*, Structure*, void*, size_t) WTF_INTERNAL; -EncodedJSValue DFG_OPERATION operationNewArrayBuffer(ExecState*, size_t, size_t) WTF_INTERNAL; -EncodedJSValue DFG_OPERATION operationNewEmptyArray(ExecState*, Structure*) WTF_INTERNAL; -EncodedJSValue DFG_OPERATION operationNewArrayWithSize(ExecState*, Structure*, int32_t) WTF_INTERNAL; +char* DFG_OPERATION operationNewArray(ExecState*, Structure*, void*, size_t) WTF_INTERNAL; +char* DFG_OPERATION operationNewArrayBuffer(ExecState*, Structure*, size_t, size_t) WTF_INTERNAL; +char* DFG_OPERATION operationNewEmptyArray(ExecState*, Structure*) WTF_INTERNAL; +char* DFG_OPERATION operationNewArrayWithSize(ExecState*, Structure*, int32_t) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState*, void*) WTF_INTERNAL; void DFG_OPERATION operationPutByValStrict(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue) WTF_INTERNAL; void DFG_OPERATION operationPutByValNonStrict(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedProperty, EncodedJSValue encodedValue) WTF_INTERNAL; @@ -148,6 +150,7 @@ void DFG_OPERATION operationPutByValBeyondArrayBoundsStrict(ExecState*, JSObject void DFG_OPERATION operationPutByValBeyondArrayBoundsNonStrict(ExecState*, JSObject*, int32_t index,
EncodedJSValue encodedValue) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationArrayPush(ExecState*, EncodedJSValue encodedValue, JSArray*) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationArrayPop(ExecState*, JSArray*) WTF_INTERNAL; +EncodedJSValue DFG_OPERATION operationArrayPopAndRecoverLength(ExecState*, JSArray*) WTF_INTERNAL; EncodedJSValue DFG_OPERATION operationRegExpExec(ExecState*, JSCell*, JSCell*) WTF_INTERNAL; void DFG_OPERATION operationPutByIdStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*) WTF_INTERNAL; void DFG_OPERATION operationPutByIdNonStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*) WTF_INTERNAL; @@ -192,7 +195,9 @@ char* DFG_OPERATION operationAllocatePropertyStorageWithInitialCapacity(ExecStat char* DFG_OPERATION operationAllocatePropertyStorage(ExecState*, size_t newSize) WTF_INTERNAL; char* DFG_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState*, JSObject*) WTF_INTERNAL; char* DFG_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState*, JSObject*, size_t newSize) WTF_INTERNAL; +char* DFG_OPERATION operationEnsureContiguous(ExecState*, JSObject*); char* DFG_OPERATION operationEnsureArrayStorage(ExecState*, JSObject*); +char* DFG_OPERATION operationEnsureContiguousOrArrayStorage(ExecState*, JSObject*, int32_t); // This method is used to look up an exception handler, keyed by faultLocation, which is // the return location from one of the calls out to one of the helper operations above. diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp index b05537fdf..6fb185c12 100644 --- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp +++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp @@ -256,7 +256,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSCell::structureOffset()), scratchGPR); stubJit.load8(MacroAssembler::Address(scratchGPR, Structure::indexingTypeOffset()), scratchGPR); failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray))); - failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(HasArrayStorage))); + failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask))); stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR); @@ -448,7 +448,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi // right now!
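The DFGRepatch hunk just above swaps a HasArrayStorage bit test for an IndexingShapeMask test, so the array-length stub now admits any object whose structure records some indexing shape, not only array storage. A sketch of the byte being tested, with illustrative bit assignments (the real layout is in IndexingType.h):

    #include <cstdint>

    using IndexingType = uint8_t;
    constexpr IndexingType IsArray           = 0x01; // illustrative bit
    constexpr IndexingType IndexingShapeMask = 0x1e; // illustrative mask

    // Mirrors the two branchTest32(Zero, ...) failure cases: bail unless the
    // object is an array AND it has some indexed storage shape at all.
    inline bool lengthStubAccepts(IndexingType indexingType)
    {
        return (indexingType & IsArray) && (indexingType & IndexingShapeMask);
    }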
stubJit.store32( MacroAssembler::TrustedImm32(exec->codeOriginIndexForDFG()), - CCallHelpers::tagFor(static_cast<VirtualRegister>(RegisterFile::ArgumentCount))); + CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount))); operationCall = stubJit.call(); #if USE(JSVALUE64) diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp index 05b1e741e..850d5aa74 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "Arguments.h" +#include "DFGCallArrayAllocatorSlowPathGenerator.h" #include "DFGSlowPathGenerator.h" #include "LinkBuffer.h" @@ -56,6 +57,37 @@ SpeculativeJIT::~SpeculativeJIT() WTF::deleteAllValues(m_slowPathGenerators); } +void SpeculativeJIT::emitAllocateJSArray(Structure* structure, GPRReg resultGPR, GPRReg storageGPR, unsigned numElements) +{ + ASSERT(hasContiguous(structure->indexingType())); + + GPRTemporary scratch(this); + GPRReg scratchGPR = scratch.gpr(); + + unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements); + + JITCompiler::JumpList slowCases; + slowCases.append( + emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR)); + m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR); + emitAllocateBasicJSObject<JSArray, MarkedBlock::None>( + TrustedImmPtr(structure), resultGPR, scratchGPR, + storageGPR, sizeof(JSArray), slowCases); + + // I'm assuming that two 32-bit stores are better than a 64-bit store. + // I have no idea if that's true. And it probably doesn't matter anyway. + m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); + + // I want a slow path that also loads out the storage pointer, and that's + // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot + // of work for a very small piece of functionality. 
:-/ + addSlowPathGenerator(adoptPtr( + new CallArrayAllocatorSlowPathGenerator( + slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR, + structure, numElements))); +} + void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail) { if (!m_compileOkay) @@ -70,7 +102,7 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail); } -void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail) +void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, const MacroAssembler::JumpList& jumpsToFail) { ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes); Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps(); @@ -78,7 +110,7 @@ void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource speculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i]); } -void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::JumpList& jumpsToFail) +void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail) { ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes); speculationCheck(kind, jsValueSource, nodeUse.index(), jumpsToFail); @@ -190,7 +222,7 @@ void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValu convertLastOSRExitToForward(valueRecovery); } -void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery) +void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery) { ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes); Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps(); @@ -295,6 +327,66 @@ const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(Array::Mode arr } } +JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, Array::Mode arrayMode) +{ + JITCompiler::JumpList result; + + switch (arrayMode) { + case NON_ARRAY_CONTIGUOUS_MODES: { + m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); + result.append( + m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ContiguousShape))); + break; + } + case ARRAY_WITH_CONTIGUOUS_MODES: { + m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR); + result.append( + m_jit.branch32( + MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ContiguousShape))); + break; + } + case NON_ARRAY_ARRAY_STORAGE_MODES: { + m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); + if (isSlowPutAccess(arrayMode)) { + m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR); + result.append( + m_jit.branch32( + MacroAssembler::Above, tempGPR, + TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape))); + } else { + result.append( + m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape))); + } + break; + } + case Array::ArrayWithArrayStorage: + case Array::ArrayWithArrayStorageToHole: + case Array::ArrayWithArrayStorageOutOfBounds: { + m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR); + 
result.append( + m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape))); + break; + } + case Array::ArrayWithSlowPutArrayStorage: { + result.append( + m_jit.branchTest32( + MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray))); + m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR); + m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR); + result.append( + m_jit.branch32( + MacroAssembler::Above, tempGPR, + TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape))); + break; + } + default: + CRASH(); + break; + } + + return result; +} + void SpeculativeJIT::checkArray(Node& node) { ASSERT(modeIsSpecific(node.arrayMode())); @@ -315,39 +407,18 @@ void SpeculativeJIT::checkArray(Node& node) case Array::String: expectedClassInfo = &JSString::s_info; break; - case NON_ARRAY_ARRAY_STORAGE_MODES: { - GPRTemporary temp(this); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr()); - speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branchTest8( - MacroAssembler::Zero, - MacroAssembler::Address(temp.gpr(), Structure::indexingTypeOffset()), - MacroAssembler::TrustedImm32( - isSlowPutAccess(node.arrayMode()) ? (HasArrayStorage | HasSlowPutArrayStorage) : HasArrayStorage))); - - noResult(m_compileIndex); - return; - } + case NON_ARRAY_CONTIGUOUS_MODES: + case ARRAY_WITH_CONTIGUOUS_MODES: + case NON_ARRAY_ARRAY_STORAGE_MODES: case ARRAY_WITH_ARRAY_STORAGE_MODES: { GPRTemporary temp(this); GPRReg tempGPR = temp.gpr(); m_jit.loadPtr( MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR); m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR); - // FIXME: This can be turned into a single branch. But we currently have no evidence - // that doing so would be profitable, nor do I feel comfortable with the present test - // coverage for this code path. speculationCheck( Uncountable, JSValueRegs(), NoNode, - m_jit.branchTest32( - MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray))); - speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branchTest32( - MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32( - isSlowPutAccess(node.arrayMode()) ? 
(HasArrayStorage | HasSlowPutArrayStorage) : HasArrayStorage))); + jumpSlowForUnwantedArrayMode(tempGPR, node.arrayMode())); noResult(m_compileIndex); return; @@ -384,78 +455,122 @@ void SpeculativeJIT::checkArray(Node& node) noResult(m_compileIndex); } -void SpeculativeJIT::arrayify(Node& node) +void SpeculativeJIT::arrayify(Node& node, GPRReg baseReg, GPRReg propertyReg) { - ASSERT(modeIsSpecific(node.arrayMode())); - ASSERT(!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())); - - SpeculateCellOperand base(this, node.child1()); - GPRReg baseReg = base.gpr(); + Array::Mode desiredArrayMode; switch (node.arrayMode()) { - case EFFECTFUL_NON_ARRAY_ARRAY_STORAGE_MODES: { - GPRTemporary structure(this); - GPRTemporary temp(this); - GPRReg structureGPR = structure.gpr(); - GPRReg tempGPR = temp.gpr(); - - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); + case Array::ToContiguous: + desiredArrayMode = Array::Contiguous; + break; + case Array::ToArrayStorage: + desiredArrayMode = Array::ArrayStorage; + break; + case Array::ToSlowPutArrayStorage: + desiredArrayMode = Array::SlowPutArrayStorage; + break; + case Array::ArrayToArrayStorage: + desiredArrayMode = Array::ArrayWithArrayStorage; + break; + case Array::PossiblyArrayToArrayStorage: + desiredArrayMode = Array::PossiblyArrayWithArrayStorage; + break; + default: + CRASH(); + desiredArrayMode = Array::Undecided; + break; + } + + GPRTemporary structure(this); + GPRTemporary temp(this); + GPRReg structureGPR = structure.gpr(); + GPRReg tempGPR = temp.gpr(); - // We can skip all that comes next if we already have array storage. - IndexingType desiredIndexingTypeMask = - isSlowPutAccess(node.arrayMode()) ? (HasArrayStorage | HasSlowPutArrayStorage) : HasArrayStorage; - MacroAssembler::Jump slowCase = m_jit.branchTest8( - MacroAssembler::Zero, - MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), - MacroAssembler::TrustedImm32(desiredIndexingTypeMask)); + m_jit.loadPtr( + MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); + + m_jit.load8( + MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR); - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSObject::butterflyOffset()), tempGPR); + // We can skip all that comes next if we already have array storage. + MacroAssembler::JumpList slowCases = + jumpSlowForUnwantedArrayMode(tempGPR, desiredArrayMode); - MacroAssembler::Jump done = m_jit.jump(); + m_jit.loadPtr( + MacroAssembler::Address(baseReg, JSObject::butterflyOffset()), tempGPR); - slowCase.link(&m_jit); + MacroAssembler::Jump done = m_jit.jump(); - // Next check that the object does not intercept indexed accesses. If it does, - // then this mode won't work. + slowCases.link(&m_jit); + + // If we're allegedly creating contiguous storage and the index is bogus, then + // just don't. + if (node.arrayMode() == Array::ToContiguous && propertyReg != InvalidGPRReg) { speculationCheck( Uncountable, JSValueRegs(), NoNode, - m_jit.branchTest8( - MacroAssembler::NonZero, - MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), - MacroAssembler::TrustedImm32(InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero))); + m_jit.branch32( + MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(MIN_SPARSE_ARRAY_INDEX))); + } + + // Next check that the object does not intercept indexed accesses. If it does, + // then this mode won't work. 
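arrayify's first job above is mapping each transitional mode onto the concrete shape it should end in, and refusing a contiguous upgrade when the incoming index is sparse. That decision logic as a standalone sketch; the MIN_SPARSE_ARRAY_INDEX threshold is an assumed value:

    #include <cassert>
    #include <cstdint>

    enum class Mode { // illustrative subset of Array::Mode
        ToContiguous, ToArrayStorage, ToSlowPutArrayStorage,
        ArrayToArrayStorage, PossiblyArrayToArrayStorage,
        Contiguous, ArrayStorage, SlowPutArrayStorage,
        ArrayWithArrayStorage, PossiblyArrayWithArrayStorage, Undecided
    };

    constexpr uint32_t MinSparseArrayIndex = 0x10000; // assumed threshold

    inline Mode desiredMode(Mode arrayify)
    {
        switch (arrayify) {
        case Mode::ToContiguous:                return Mode::Contiguous;
        case Mode::ToArrayStorage:              return Mode::ArrayStorage;
        case Mode::ToSlowPutArrayStorage:       return Mode::SlowPutArrayStorage;
        case Mode::ArrayToArrayStorage:         return Mode::ArrayWithArrayStorage;
        case Mode::PossiblyArrayToArrayStorage: return Mode::PossiblyArrayWithArrayStorage;
        default: assert(false);                 return Mode::Undecided;
        }
    }

    // The AboveOrEqual speculation check: contiguous storage cannot represent
    // sparse indices, so a big index forces an OSR exit instead of arrayifying.
    inline bool mustBailForIndex(Mode arrayify, uint32_t index)
    {
        return arrayify == Mode::ToContiguous && index >= MinSparseArrayIndex;
    }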
+ speculationCheck( + Uncountable, JSValueRegs(), NoNode, + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(InterceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero))); - // Now call out to create the array storage. - silentSpillAllRegisters(tempGPR); + // Now call out to create the array storage. + silentSpillAllRegisters(tempGPR); + switch (node.arrayMode()) { + case ALL_EFFECTFUL_CONTIGUOUS_MODES: + callOperation(operationEnsureContiguous, tempGPR, baseReg); + break; + case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: callOperation(operationEnsureArrayStorage, tempGPR, baseReg); - silentFillAllRegisters(tempGPR); - - // Alas, we need to reload the structure because silent spilling does not save - // temporaries. Nor would it be useful for it to do so. Either way we're talking - // about a load. - m_jit.loadPtr( - MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); - - // Finally, check that we have the kind of array storage that we wanted to get. - // Note that this is a backwards speculation check, which will result in the - // bytecode operation corresponding to this arrayification being reexecuted. - // That's fine, since arrayification is not user-visible. - speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branchTest8( - MacroAssembler::Zero, - MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), - MacroAssembler::TrustedImm32(desiredIndexingTypeMask))); - - done.link(&m_jit); - storageResult(tempGPR, m_compileIndex); break; - } default: - ASSERT_NOT_REACHED(); + CRASH(); break; } + silentFillAllRegisters(tempGPR); + + // Alas, we need to reload the structure because silent spilling does not save + // temporaries. Nor would it be useful for it to do so. Either way we're talking + // about a load. + m_jit.loadPtr( + MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR); + + // Finally, check that we have the kind of array storage that we wanted to get. + // Note that this is a backwards speculation check, which will result in the + // bytecode operation corresponding to this arrayification being reexecuted. + // That's fine, since arrayification is not user-visible. 
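The sub32-then-branch32(Above) pairs that jumpSlowForUnwantedArrayMode emits, and that the re-check here reuses, are the classic unsigned range trick: accepting the two adjacent shapes ArrayStorageShape and SlowPutArrayStorageShape costs one subtraction and a single unsigned compare. A sketch with assumed adjacent shape values:

    #include <cstdint>

    constexpr uint32_t ArrayStorageShape        = 8; // assumed adjacent values
    constexpr uint32_t SlowPutArrayStorageShape = 9;

    inline bool isArrayStorageOrSlowPut(uint32_t shape)
    {
        // Shapes below the range wrap around to huge unsigned values, so one
        // "Above" test rejects both too-small and too-large shapes at once.
        return shape - ArrayStorageShape <= SlowPutArrayStorageShape - ArrayStorageShape;
    }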
+ m_jit.load8( + MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), structureGPR); + speculationCheck( + Uncountable, JSValueRegs(), NoNode, + jumpSlowForUnwantedArrayMode(structureGPR, desiredArrayMode)); + + done.link(&m_jit); + storageResult(tempGPR, m_compileIndex); +} + +void SpeculativeJIT::arrayify(Node& node) +{ + ASSERT(modeIsSpecific(node.arrayMode())); + ASSERT(!modeAlreadyChecked(m_state.forNode(node.child1()), node.arrayMode())); + + SpeculateCellOperand base(this, node.child1()); + + if (!node.child2()) { + arrayify(node, base.gpr(), InvalidGPRReg); + return; + } + + SpeculateIntegerOperand property(this, node.child2()); + + arrayify(node, base.gpr(), property.gpr()); } GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex) @@ -1367,7 +1482,7 @@ void SpeculativeJIT::compile(BasicBlock& block) ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments()); for (size_t i = 0; i < m_arguments.size(); ++i) { - ValueSource valueSource = ValueSource(ValueInRegisterFile); + ValueSource valueSource = ValueSource(ValueInJSStack); m_arguments[i] = valueSource; m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat())); } @@ -1384,11 +1499,11 @@ void SpeculativeJIT::compile(BasicBlock& block) else if (at(nodeIndex).variableAccessData()->isArgumentsAlias()) valueSource = ValueSource(ArgumentsSource); else if (at(nodeIndex).variableAccessData()->isCaptured()) - valueSource = ValueSource(ValueInRegisterFile); + valueSource = ValueSource(ValueInJSStack); else if (!at(nodeIndex).refCount()) valueSource = ValueSource(SourceIsDead); else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat()) - valueSource = ValueSource(DoubleInRegisterFile); + valueSource = ValueSource(DoubleInJSStack); else valueSource = ValueSource::forSpeculation(at(nodeIndex).variableAccessData()->argumentAwarePrediction()); m_variables[i] = valueSource; @@ -1440,25 +1555,25 @@ void SpeculativeJIT::compile(BasicBlock& block) for (int i = 0; i < argumentCountIncludingThis; ++i) { ValueRecovery recovery; if (codeBlock->isCaptured(argumentToOperand(i))) - recovery = ValueRecovery::alreadyInRegisterFile(); + recovery = ValueRecovery::alreadyInJSStack(); else { ArgumentPosition& argumentPosition = m_jit.graph().m_argumentPositions[argumentPositionStart + i]; ValueSource valueSource; if (argumentPosition.shouldUseDoubleFormat()) - valueSource = ValueSource(DoubleInRegisterFile); + valueSource = ValueSource(DoubleInJSStack); else if (isInt32Speculation(argumentPosition.prediction())) - valueSource = ValueSource(Int32InRegisterFile); + valueSource = ValueSource(Int32InJSStack); else if (isCellSpeculation(argumentPosition.prediction())) - valueSource = ValueSource(CellInRegisterFile); + valueSource = ValueSource(CellInJSStack); else if (isBooleanSpeculation(argumentPosition.prediction())) - valueSource = ValueSource(BooleanInRegisterFile); + valueSource = ValueSource(BooleanInJSStack); else - valueSource = ValueSource(ValueInRegisterFile); + valueSource = ValueSource(ValueInJSStack); recovery = computeValueRecoveryFor(valueSource); } // The recovery should refer either to something that has already been - // stored into the register file at the right place, or to a constant, + // stored into the stack at the right place, or to a constant, // since the Arguments code isn't smart enough to handle anything else. // The exception is the this argument, which we don't really need to be // able to recover. 
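The ValueSource selections above read most clearly as the small classifier they implement: given what speculation has proven about a variable, record how its bits live in the JSStack so OSR exit can recover it later. A sketch of that mapping; Prediction is a stand-in for JSC's speculation bits, not a real type:

    enum class ValueSourceKind {
        ValueInJSStack,   // fully boxed JSValue
        Int32InJSStack, CellInJSStack, BooleanInJSStack,
        DoubleInJSStack,  // raw double bits
    };

    struct Prediction { bool isDouble, isInt32, isCell, isBoolean; }; // stand-in

    inline ValueSourceKind sourceForPrediction(const Prediction& p)
    {
        if (p.isDouble)  return ValueSourceKind::DoubleInJSStack;
        if (p.isInt32)   return ValueSourceKind::Int32InJSStack;
        if (p.isCell)    return ValueSourceKind::CellInJSStack;
        if (p.isBoolean) return ValueSourceKind::BooleanInJSStack;
        return ValueSourceKind::ValueInJSStack; // know nothing: keep it boxed
    }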
@@ -1550,9 +1665,9 @@ void SpeculativeJIT::checkArgumentTypes()
     m_codeOriginForOSR = CodeOrigin(0);
     
     for (size_t i = 0; i < m_arguments.size(); ++i)
-        m_arguments[i] = ValueSource(ValueInRegisterFile);
+        m_arguments[i] = ValueSource(ValueInJSStack);
     for (size_t i = 0; i < m_variables.size(); ++i)
-        m_variables[i] = ValueSource(ValueInRegisterFile);
+        m_variables[i] = ValueSource(ValueInJSStack);
     
     for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
         NodeIndex nodeIndex = m_jit.graph().m_arguments[i];
@@ -1649,7 +1764,7 @@ void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
 
 ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
 {
-    if (valueSource.isInRegisterFile())
+    if (valueSource.isInJSStack())
         return valueSource.valueRecovery();
     
     ASSERT(valueSource.kind() == HaveNode);
@@ -1942,16 +2057,17 @@ void SpeculativeJIT::compileUInt32ToNumber(Node& node)
     }
     
     IntegerOperand op1(this, node.child1());
-    GPRTemporary result(this, op1);
+    GPRTemporary result(this); // For the benefit of OSR exit, force these to be in different registers. In reality the OSR exit compiler could find cases where you have uint32(%r1) followed by int32(%r1) and then use different registers, but that seems like too much effort.
+    
+    m_jit.move(op1.gpr(), result.gpr());
     
     // Test the operand is positive. This is a very special speculation check - we actually
     // use roll-forward speculation here, where if this fails, we jump to the baseline
     // instruction that follows us, rather than the one we're executing right now. We have
     // to do this because by this point, the original values necessary to compile whatever
     // operation the UInt32ToNumber originated from might be dead.
-    forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(op1.gpr()));
+    forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
 
-    m_jit.move(op1.gpr(), result.gpr());
     integerResult(result.gpr(), m_compileIndex, op1.format());
 }
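The UInt32ToNumber change above is subtle: the operand is now copied into a fresh result register before the sign check, so the roll-forward exit's ValueRecovery can name the result register even after the operand dies. Setting the JIT plumbing aside, the value semantics the node computes are simple; a runnable sketch under made-up names:

    #include <cstdint>
    #include <cstdio>

    // A uint32 read out of a GPR is only representable as an int32 when its
    // sign bit is clear; otherwise the (here hypothetical) slow path must box
    // the value as a double instead.
    static double uint32ToNumber(uint32_t bits, bool& usedFastPath)
    {
        int32_t asInt = static_cast<int32_t>(bits);
        usedFastPath = asInt >= 0;          // the branch32(LessThan, ...) check above
        return usedFastPath ? double(asInt) // fast path: reuse the integer bits
                            : double(bits); // "exit" path: value needs a double
    }

    int main()
    {
        bool fast;
        std::printf("%f fast=%d\n", uint32ToNumber(7u, fast), fast);
        std::printf("%f fast=%d\n", uint32ToNumber(0x80000000u, fast), fast);
    }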
@@ -2140,7 +2256,7 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor&
         m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
         break;
     default:
-        ASSERT_NOT_REACHED();
+        CRASH();
     }
     if (elementSize < 4 || signedness == SignedTypedArray) {
         integerResult(resultReg, m_compileIndex);
@@ -2250,7 +2366,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor&
         m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
         break;
     default:
-        ASSERT_NOT_REACHED();
+        CRASH();
     }
     if (node.op() == PutByVal)
         outOfBounds.link(&m_jit);
@@ -3205,12 +3321,23 @@ void SpeculativeJIT::compileGetArrayLength(Node& node)
     const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node.arrayMode());
 
     switch (node.arrayMode()) {
-    case ARRAY_WITH_ARRAY_STORAGE_MODES: {
+    case ARRAY_WITH_CONTIGUOUS_MODES: {
+        StorageOperand storage(this, node.child2());
+        GPRTemporary result(this, storage);
+        GPRReg storageReg = storage.gpr();
+        GPRReg resultReg = result.gpr();
+        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);
+        
+        integerResult(resultReg, m_compileIndex);
+        break;
+    }
+    case ARRAY_WITH_ARRAY_STORAGE_MODES:
+    case ARRAY_EFFECTFUL_MODES: {
         StorageOperand storage(this, node.child2());
         GPRTemporary result(this, storage);
         GPRReg storageReg = storage.gpr();
         GPRReg resultReg = result.gpr();
-        m_jit.load32(MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()), resultReg);
+        m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);
         
         speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));
@@ -3328,14 +3455,11 @@ void SpeculativeJIT::compileAllocatePropertyStorage(Node& node)
     ASSERT(!node.structureTransitionData().previousStructure->outOfLineCapacity());
     ASSERT(initialOutOfLineCapacity == node.structureTransitionData().newStructure->outOfLineCapacity());
-    size_t newSize = initialOutOfLineCapacity * sizeof(JSValue);
-    CopiedAllocator* copiedAllocator = &m_jit.globalData()->heap.storageAllocator();
-    
-    m_jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR);
-    JITCompiler::Jump slowPath = m_jit.branchSubPtr(JITCompiler::Signed, JITCompiler::TrustedImm32(newSize), scratchGPR);
-    m_jit.storePtr(scratchGPR, &copiedAllocator->m_currentRemaining);
-    m_jit.negPtr(scratchGPR);
-    m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR);
+    
+    JITCompiler::Jump slowPath =
+        emitAllocateBasicStorage(
+            TrustedImm32(initialOutOfLineCapacity * sizeof(JSValue)), scratchGPR);
+    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR);
     
     addSlowPathGenerator(
@@ -3376,15 +3500,9 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node& node)
     GPRReg scratchGPR1 = scratch1.gpr();
     GPRReg scratchGPR2 = scratch2.gpr();
     
-    JITCompiler::Jump slowPath;
-    
-    CopiedAllocator* copiedAllocator = &m_jit.globalData()->heap.storageAllocator();
-    
-    m_jit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR2);
-    slowPath = m_jit.branchSubPtr(JITCompiler::Signed, JITCompiler::TrustedImm32(newSize), scratchGPR2);
-    m_jit.storePtr(scratchGPR2, &copiedAllocator->m_currentRemaining);
-    m_jit.negPtr(scratchGPR2);
-    m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR2);
+    JITCompiler::Jump slowPath =
+        emitAllocateBasicStorage(TrustedImm32(newSize), scratchGPR2);
+    m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR2);
 
     addSlowPathGenerator(
@@ -3399,6 +3517,16 @@ void SpeculativeJIT::compileReallocatePropertyStorage(Node& node)
     storageResult(scratchGPR2, m_compileIndex);
 }
 
+GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, Array::Mode arrayMode)
+{
+    if (!putByValWillNeedExtraRegister(arrayMode))
+        return InvalidGPRReg;
+    
+    GPRTemporary realTemporary(this);
+    temporary.adopt(realTemporary);
+    return temporary.gpr();
+}
+
 } } // namespace JSC::DFG
 
 #endif
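compileAllocatePropertyStorage and compileReallocatePropertyStorage now share their inline allocation sequence through emitAllocateBasicStorage (its body appears in the DFGSpeculativeJIT.h hunk below). The underlying scheme is a bump-down allocator over two CopiedAllocator fields. A plain-C++ model, assuming only what the emitted instruction sequence shows — the field names mirror the diff, the arena in main() and the function name tryBumpAllocate are invented for the example:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct CopiedAllocator {
        intptr_t currentRemaining;
        char* currentPayloadEnd;
    };

    // Subtract the request from the remaining byte count; a signed underflow
    // means "take the slow path". Otherwise the new remaining count, negated
    // and added to the payload end, points at the END of the fresh allocation
    // (the JIT then adds sizeof(JSValue) to step past the indexing header).
    static char* tryBumpAllocate(CopiedAllocator& a, size_t bytes)
    {
        intptr_t remaining = a.currentRemaining - static_cast<intptr_t>(bytes);
        if (remaining < 0)
            return nullptr; // branchSubPtr(Signed, ...) -> slow path
        a.currentRemaining = remaining;
        return a.currentPayloadEnd - remaining; // end of the new allocation
    }

    int main()
    {
        static char arena[1024];
        CopiedAllocator a = { sizeof(arena), arena + sizeof(arena) };
        char* end = tryBumpAllocate(a, 64);
        std::printf("allocation ends %td bytes into the arena\n", end - arena);
    }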
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 15314b2f2..90b6d483a 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -42,6 +42,7 @@
 
 namespace JSC { namespace DFG {
 
+class GPRTemporary;
 class JSValueOperand;
 class SlowPathGenerator;
 class SpeculativeJIT;
@@ -325,7 +326,7 @@ public:
     // These methods are used when generating 'unexpected'
     // calls out from JIT code to C++ helper routines -
     // they spill all live values to the appropriate
-    // slots in the RegisterFile without changing any state
+    // slots in the JSStack without changing any state
     // in the GenerationInfo.
     SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
     {
@@ -704,7 +705,7 @@ public:
     }
 #endif
 
-    // Spill a VirtualRegister to the RegisterFile.
+    // Spill a VirtualRegister to the JSStack.
     void spill(VirtualRegister spillMe)
     {
         GenerationInfo& info = m_generationInfo[spillMe];
@@ -714,7 +715,7 @@ public:
             return;
 #endif
         // Check the GenerationInfo to see if this value need writing
-        // to the RegisterFile - if not, mark it as spilled & return.
+        // to the JSStack - if not, mark it as spilled & return.
         if (!info.needsSpill()) {
             info.setSpilled(*m_stream, spillMe);
             return;
@@ -829,7 +830,7 @@ public:
         return &m_jit.codeBlock()->identifier(index);
     }
 
-    // Spill all VirtualRegisters back to the RegisterFile.
+    // Spill all VirtualRegisters back to the JSStack.
     void flushRegisters()
     {
         for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
@@ -1166,6 +1167,11 @@ public:
         m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
         return appendCallWithExceptionCheckSetResult(operation, result);
     }
+    JITCompiler::Call callOperation(P_DFGOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
+    {
+        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
+        return appendCallWithExceptionCheckSetResult(operation, result);
+    }
     JITCompiler::Call callOperation(P_DFGOperation_EPS operation, GPRReg result, GPRReg old, size_t size)
     {
         m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size));
@@ -1213,21 +1219,36 @@ public:
         m_jit.setupArgumentsWithExecState(arg1, arg2);
         return appendCallWithExceptionCheckSetResult(operation, result);
     }
-    JITCompiler::Call callOperation(J_DFGOperation_ESt operation, GPRReg result, Structure* structure)
+    JITCompiler::Call callOperation(P_DFGOperation_ESt operation, GPRReg result, Structure* structure)
     {
         m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
         return appendCallWithExceptionCheckSetResult(operation, result);
     }
-    JITCompiler::Call callOperation(J_DFGOperation_EStI operation, GPRReg result, Structure* structure, GPRReg arg2)
+    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg arg2)
     {
         m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
         return appendCallWithExceptionCheckSetResult(operation, result);
     }
-    JITCompiler::Call callOperation(J_DFGOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size)
+    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2)
+    {
+        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2));
+        return appendCallWithExceptionCheckSetResult(operation, result);
+    }
+    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+    {
+        m_jit.setupArgumentsWithExecState(arg1, arg2);
+        return appendCallWithExceptionCheckSetResult(operation, result);
+    }
+    JITCompiler::Call callOperation(P_DFGOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size)
     {
         m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size));
         return appendCallWithExceptionCheckSetResult(operation, result);
     }
+    JITCompiler::Call callOperation(P_DFGOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size)
+    {
+        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size));
+        return appendCallWithExceptionCheckSetResult(operation, result);
+    }
result); + } JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size)); @@ -1468,6 +1489,11 @@ public: m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2)); return appendCallWithExceptionCheckSetResult(operation, result); } + JITCompiler::Call callOperation(P_DFGOperation_EOZ operation, GPRReg result, GPRReg arg1, int32_t arg2) + { + m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2)); + return appendCallWithExceptionCheckSetResult(operation, result); + } JITCompiler::Call callOperation(P_DFGOperation_EPS operation, GPRReg result, GPRReg old, size_t size) { m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size)); @@ -1526,20 +1552,35 @@ public: m_jit.setupArgumentsWithExecState(arg1, arg2); return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); } - JITCompiler::Call callOperation(J_DFGOperation_ESt operation, GPRReg resultTag, GPRReg resultPayload, Structure* structure) + JITCompiler::Call callOperation(P_DFGOperation_ESt operation, GPRReg result, Structure* structure) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallWithExceptionCheckSetResult(operation, result); } - JITCompiler::Call callOperation(J_DFGOperation_EStI operation, GPRReg resultTag, GPRReg resultPayload, Structure* structure, GPRReg arg2) + JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg arg2) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallWithExceptionCheckSetResult(operation, result); + } + JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2)); + return appendCallWithExceptionCheckSetResult(operation, result); + } + JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2) + { + m_jit.setupArgumentsWithExecState(arg1, arg2); + return appendCallWithExceptionCheckSetResult(operation, result); } - JITCompiler::Call callOperation(J_DFGOperation_EStPS operation, GPRReg resultTag, GPRReg resultPayload, Structure* structure, void* pointer, size_t size) + JITCompiler::Call callOperation(P_DFGOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size) { m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size)); - return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag); + return appendCallWithExceptionCheckSetResult(operation, result); + } + JITCompiler::Call callOperation(P_DFGOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size) + { + m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size)); + return appendCallWithExceptionCheckSetResult(operation, result); } JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size) { @@ -2149,6 +2190,48 @@ public: void compileAllocatePropertyStorage(Node&); void compileReallocatePropertyStorage(Node&); +#if USE(JSVALUE64) + MacroAssembler::JumpList compileContiguousGetByVal(Node&, 
+    MacroAssembler::JumpList compileArrayStorageGetByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg);
+#else
+    MacroAssembler::JumpList compileContiguousGetByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg);
+    MacroAssembler::JumpList compileArrayStorageGetByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg);
+#endif
+    
+    bool putByValWillNeedExtraRegister(Array::Mode arrayMode)
+    {
+        switch (arrayMode) {
+        // For ArrayStorage, we need an extra reg for stores to holes except if
+        // we're in SlowPut mode.
+        case ARRAY_STORAGE_TO_HOLE_MODES:
+        case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES:
+        case ALL_EFFECTFUL_ARRAY_STORAGE_MODES:
+            return true;
+            
+        // For Contiguous, we need an extra reg for any access that may store
+        // to the tail.
+        case CONTIGUOUS_TO_TAIL_MODES:
+        case OUT_OF_BOUNDS_CONTIGUOUS_MODES:
+        case ALL_EFFECTFUL_CONTIGUOUS_MODES:
+            return true;
+            
+        default:
+            return false;
+        }
+    }
+    GPRReg temporaryRegisterForPutByVal(GPRTemporary&, Array::Mode);
+    GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node& node)
+    {
+        return temporaryRegisterForPutByVal(temporary, node.arrayMode());
+    }
+#if USE(JSVALUE64)
+    MacroAssembler::JumpList compileContiguousPutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg);
+    MacroAssembler::JumpList compileArrayStoragePutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg);
+#else
+    MacroAssembler::JumpList compileContiguousPutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg);
+    MacroAssembler::JumpList compileArrayStoragePutByVal(Node&, GPRReg baseReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg);
+#endif
+    
     void compileGetCharCodeAt(Node&);
     void compileGetByValOnString(Node&);
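putByValWillNeedExtraRegister and temporaryRegisterForPutByVal (whose definition appeared in the .cpp hunk earlier) implement a conditional scratch register: a GPRTemporary is created empty and only adopts a real register when the mode can store to a hole or past the tail. A toy model of that contract, with stand-in types and register numbers — nothing here is the real JSC API:

    #include <cstdio>

    struct GPRTemporary {
        int reg = -1;                       // -1 plays the role of InvalidGPRReg
        void adopt(int r) { reg = r; }
    };

    static bool putByValWillNeedExtraRegister(bool mayStoreToHoleOrTail)
    {
        return mayStoreToHoleOrTail;
    }

    static int temporaryRegisterForPutByVal(GPRTemporary& temp, bool mayStoreToHoleOrTail)
    {
        if (!putByValWillNeedExtraRegister(mayStoreToHoleOrTail))
            return -1;                      // no temp burned on the in-bounds fast case
        temp.adopt(7);                      // pretend register r7 was allocated
        return temp.reg;
    }

    int main()
    {
        GPRTemporary temp;
        std::printf("in-bounds: %d, to-hole: %d\n",
            temporaryRegisterForPutByVal(temp, false),
            temporaryRegisterForPutByVal(temp, true));
    }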
@@ -2170,14 +2253,6 @@ public:
 #endif
     void compileArithMod(Node&);
     void compileSoftModulo(Node&);
-    enum TypedArraySignedness {
-        SignedTypedArray,
-        UnsignedTypedArray
-    };
-    enum TypedArrayRounding {
-        TruncateRounding,
-        ClampRounding
-    };
     void compileGetIndexedPropertyStorage(Node&);
     void compileGetByValOnIntTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySignedness);
     void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySignedness, TypedArrayRounding = TruncateRounding);
@@ -2186,17 +2261,43 @@ public:
     void compileNewFunctionNoCheck(Node&);
     void compileNewFunctionExpression(Node&);
     bool compileRegExpExec(Node&);
-    
+    
+    // size can be an immediate or a register, and must be in bytes. If size is a register,
+    // it must be a different register than resultGPR. Emits code that places a pointer to
+    // the end of the allocation in resultGPR. The returned jump is the jump to the slow path.
+    template<typename SizeType>
+    MacroAssembler::Jump emitAllocateBasicStorage(SizeType size, GPRReg resultGPR)
+    {
+        CopiedAllocator* copiedAllocator = &m_jit.globalData()->heap.storageAllocator();
+        
+        m_jit.loadPtr(&copiedAllocator->m_currentRemaining, resultGPR);
+        MacroAssembler::Jump slowPath = m_jit.branchSubPtr(JITCompiler::Signed, size, resultGPR);
+#if 0
+        MacroAssembler::Jump done = m_jit.jump();
+        slowPath1.link(&m_jit);
+        m_jit.breakpoint();
+        MacroAssembler::Jump slowPath = m_jit.jump();
+        done.link(&m_jit);
+#endif
+        m_jit.storePtr(resultGPR, &copiedAllocator->m_currentRemaining);
+        m_jit.negPtr(resultGPR);
+        m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), resultGPR);
+        
+        return slowPath;
+    }
+    
     // It is NOT okay for the structure and the scratch register to be the same thing because if they are then the Structure will
     // get clobbered.
-    template <typename ClassType, bool destructor, typename StructureType>
-    void emitAllocateBasicJSObject(StructureType structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
+    template <typename ClassType, MarkedBlock::DestructorType destructorType, typename StructureType, typename StorageType>
+    void emitAllocateBasicJSObject(StructureType structure, GPRReg resultGPR, GPRReg scratchGPR, StorageType storage, size_t size, MacroAssembler::JumpList& slowPath)
     {
         MarkedAllocator* allocator = 0;
-        if (destructor)
-            allocator = &m_jit.globalData()->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
+        if (destructorType == MarkedBlock::Normal)
+            allocator = &m_jit.globalData()->heap.allocatorForObjectWithNormalDestructor(size);
+        else if (destructorType == MarkedBlock::ImmortalStructure)
+            allocator = &m_jit.globalData()->heap.allocatorForObjectWithImmortalStructureDestructor(size);
         else
-            allocator = &m_jit.globalData()->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
+            allocator = &m_jit.globalData()->heap.allocatorForObjectWithoutDestructor(size);
 
         m_jit.loadPtr(&allocator->m_freeList.head, resultGPR);
         slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
@@ -2210,14 +2311,16 @@ public:
         m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSCell::structureOffset()));
         
         // Initialize the object's property storage pointer.
-        m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()));
+        m_jit.storePtr(storage, MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()));
     }
 
     template<typename T>
     void emitAllocateJSFinalObject(T structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
     {
-        return emitAllocateBasicJSObject<JSFinalObject, false>(structure, resultGPR, scratchGPR, slowPath);
+        return emitAllocateBasicJSObject<JSFinalObject, MarkedBlock::None>(structure, resultGPR, scratchGPR, TrustedImmPtr(0), JSFinalObject::allocationSize(INLINE_STORAGE_CAPACITY), slowPath);
     }
+    
+    void emitAllocateJSArray(Structure*, GPRReg resultGPR, GPRReg storageGPR, unsigned numElements);
 
 #if USE(JSVALUE64)
     JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
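emitAllocateBasicJSObject above now selects a MarkedAllocator by destructor kind and cell size, then pops the allocator's free list, branching to the slow path when the list is empty. A conceptual sketch of both steps; the enum, the free-list layout, and the space names are simplified stand-ins for illustration only:

    #include <cstdio>

    enum class DestructorType { None, Normal, ImmortalStructure };

    struct FreeCell { FreeCell* next; };
    struct MarkedAllocator { FreeCell* head; };

    // Mirrors the three-way allocator choice in the template above.
    static const char* spaceFor(DestructorType type)
    {
        switch (type) {
        case DestructorType::Normal:            return "normal-destructor space";
        case DestructorType::ImmortalStructure: return "immortal-structure space";
        default:                                return "no-destructor space";
        }
    }

    // Mirrors loadPtr(head) + branchTestPtr(Zero) + free-list pop.
    static void* tryAllocate(MarkedAllocator& allocator)
    {
        FreeCell* cell = allocator.head;
        if (!cell)
            return nullptr;           // slow path: fall back to the full GC allocator
        allocator.head = cell->next;
        return cell;
    }

    int main()
    {
        FreeCell cell = { nullptr };
        MarkedAllocator allocator = { &cell };
        std::printf("%s: got cell %p\n",
            spaceFor(DestructorType::None), tryAllocate(allocator));
    }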
@@ -2229,8 +2332,8 @@ public:
     void speculationCheck(ExitKind, JSValueSource, NodeIndex, MacroAssembler::Jump jumpToFail);
     void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail);
     // Add a set of speculation checks without additional recovery.
-    void speculationCheck(ExitKind, JSValueSource, NodeIndex, MacroAssembler::JumpList& jumpsToFail);
-    void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::JumpList& jumpsToFail);
+    void speculationCheck(ExitKind, JSValueSource, NodeIndex, const MacroAssembler::JumpList& jumpsToFail);
+    void speculationCheck(ExitKind, JSValueSource, Edge, const MacroAssembler::JumpList& jumpsToFail);
     // Add a speculation check with additional recovery.
     void speculationCheck(ExitKind, JSValueSource, NodeIndex, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
     void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
@@ -2251,7 +2354,7 @@ public:
     // Note: not specifying the valueRecovery argument (leaving it as ValueRecovery()) implies
     // that you've ensured that there exists a MovHint prior to your use of forwardSpeculationCheck().
     void forwardSpeculationCheck(ExitKind, JSValueSource, NodeIndex, MacroAssembler::Jump jumpToFail, const ValueRecovery& = ValueRecovery());
-    void forwardSpeculationCheck(ExitKind, JSValueSource, NodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& = ValueRecovery());
+    void forwardSpeculationCheck(ExitKind, JSValueSource, NodeIndex, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& = ValueRecovery());
     void speculationCheckWithConditionalDirection(ExitKind, JSValueSource, NodeIndex, MacroAssembler::Jump jumpToFail, bool isForward);
     // Called when we statically determine that a speculation will fail.
     void terminateSpeculativeExecution(ExitKind, JSValueRegs, NodeIndex);
@@ -2264,7 +2367,9 @@ public:
     
     const TypedArrayDescriptor* typedArrayDescriptor(Array::Mode);
     
+    JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, Array::Mode arrayMode);
     void checkArray(Node&);
+    void arrayify(Node&, GPRReg baseReg, GPRReg propertyReg);
     void arrayify(Node&);
     
     template<bool strict>
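checkArray and arrayify now funnel their shape tests through the new jumpSlowForUnwantedArrayMode, which takes the indexing-type byte already loaded from the Structure and returns the jumps for shapes that do not match the speculated Array::Mode. A sketch of the general idea — the mask and shape constants below are invented for illustration, not JSC's real indexing-type encoding:

    #include <cstdint>
    #include <cstdio>

    static const uint8_t IndexingShapeMask = 0x1c; // hypothetical values
    static const uint8_t ContiguousShape   = 0x08;
    static const uint8_t ArrayStorageShape = 0x10;

    // Mask the byte loaded from the Structure; "true" models appending a jump
    // to the slow-path JumpList because the shape doesn't match the mode.
    static bool jumpSlowForUnwantedArrayMode(uint8_t indexingType, uint8_t wantedShape)
    {
        return (indexingType & IndexingShapeMask) != wantedShape;
    }

    int main()
    {
        std::printf("contiguous ok: %d\n",
            !jumpSlowForUnwantedArrayMode(ContiguousShape, ContiguousShape));
        std::printf("array-storage vs contiguous slow: %d\n",
            jumpSlowForUnwantedArrayMode(ArrayStorageShape, ContiguousShape));
    }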
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index 70709b52f..0396f8696 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
  * Copyright (C) 2011 Intel Corporation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -29,6 +29,7 @@
 
 #if ENABLE(DFG_JIT)
 
+#include "DFGCallArrayAllocatorSlowPathGenerator.h"
 #include "DFGSlowPathGenerator.h"
 #include "JSActivation.h"
 
@@ -1015,10 +1016,10 @@ void SpeculativeJIT::emitCall(Node& node)
     // receiver (method call). subsequent children are the arguments.
     int numPassedArgs = node.numChildren() - 1;
     
-    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount));
-    m_jit.storePtr(GPRInfo::callFrameRegister, callFramePayloadSlot(RegisterFile::CallerFrame));
-    m_jit.store32(calleePayloadGPR, callFramePayloadSlot(RegisterFile::Callee));
-    m_jit.store32(calleeTagGPR, callFrameTagSlot(RegisterFile::Callee));
+    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount));
+    m_jit.storePtr(GPRInfo::callFrameRegister, callFramePayloadSlot(JSStack::CallerFrame));
+    m_jit.store32(calleePayloadGPR, callFramePayloadSlot(JSStack::Callee));
+    m_jit.store32(calleeTagGPR, callFrameTagSlot(JSStack::Callee));
     
     for (int i = 0; i < numPassedArgs; i++) {
         Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
@@ -1049,8 +1050,8 @@ void SpeculativeJIT::emitCall(Node& node)
     slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
     slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag)));
     m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultPayloadGPR);
-    m_jit.storePtr(resultPayloadGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
-    m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+    m_jit.storePtr(resultPayloadGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+    m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
     
     CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
     JITCompiler::Call fastCall = m_jit.nearCall();
@@ -2045,6 +2046,113 @@ void SpeculativeJIT::emitBranch(Node& node)
     }
 }
 
+MacroAssembler::JumpList SpeculativeJIT::compileContiguousGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg)
+{
+    MacroAssembler::JumpList slowCases;
+    slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+    
+    m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
+    m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
+    slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag)));
+    
+    return slowCases;
+}
+
+MacroAssembler::JumpList SpeculativeJIT::compileArrayStorageGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultTagReg, GPRReg resultPayloadReg)
+{
+    MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
+    
+    m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
+    m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
+    MacroAssembler::Jump hole = m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag));
+    
+    MacroAssembler::JumpList slowCases;
+    slowCases.append(outOfBounds);
+    slowCases.append(hole);
+    return slowCases;
+}
+
+MacroAssembler::JumpList SpeculativeJIT::compileContiguousPutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg)
+{
+    Array::Mode arrayMode = node.arrayMode();
+    
+    MacroAssembler::JumpList slowCases;
+    
+    if (!mayStoreToTail(arrayMode)) {
+        speculationCheck(
+            Uncountable, JSValueRegs(), NoNode,
+            m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+    } else {
+        MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
+        
+        slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength())));
+        
+        if (isInBoundsAccess(arrayMode))
+            speculationCheck(Uncountable, JSValueRegs(), NoNode, slowCases);
+        
+        m_jit.add32(TrustedImm32(1), propertyReg);
+        m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
+        m_jit.sub32(TrustedImm32(1), propertyReg);
+        
+        inBounds.link(&m_jit);
+    }
+    
+    m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+    m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+    
+    if (isInBoundsAccess(arrayMode))
+        return MacroAssembler::JumpList();
+    return slowCases;
+}
+
+MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueTagReg, GPRReg valuePayloadReg)
+{
+    Array::Mode arrayMode = node.arrayMode();
+    
+    MacroAssembler::JumpList slowCases;
+    
+    MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
+    if (isInBoundsAccess(arrayMode))
+        speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds);
+    else
+        slowCases.append(beyondArrayBounds);
+    
+    // Check if we're writing to a hole; if so increment m_numValuesInVector.
+    if (!mayStoreToHole(arrayMode)) {
+        // This is uncountable because if we take this exit, then the baseline JIT
+        // will immediately count the hole store. So there is no need for exit
+        // profiling.
+        speculationCheck(
+            Uncountable, JSValueRegs(), NoNode,
+            m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)));
+    } else {
+        MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+        if (isSlowPutAccess(arrayMode)) {
+            // This is sort of strange. If we wanted to optimize this code path, we would invert
+            // the above branch. But it's simply not worth it since this only happens if we're
+            // already having a bad time.
+            slowCases.append(m_jit.jump());
+        } else {
+            m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
+            
+            // If we're writing to a hole we might be growing the array, so update the length.
+            MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
+            m_jit.add32(TrustedImm32(1), propertyReg);
+            m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
+            m_jit.sub32(TrustedImm32(1), propertyReg);
+            
+            lengthDoesNotNeedUpdate.link(&m_jit);
+        }
+        notHoleValue.link(&m_jit);
+    }
+    
+    // Store the value to the array.
+    m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+    m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+    
+    return slowCases;
+}
+
 void SpeculativeJIT::compile(Node& node)
 {
     NodeType op = node.op();
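compileContiguousPutByVal above distinguishes three outcomes: an in-bounds store, a tail store (past publicLength but under vectorLength) that must bump the public length, and a genuinely out-of-bounds store that goes slow. The add/store/sub dance exists only to keep the property register intact. A plain-C++ rendering of that decision tree, with a std::vector standing in for the butterfly — "slow" means the JIT would call out to a C++ operation:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Butterfly {
        uint32_t publicLength;
        uint32_t vectorLength;
        std::vector<uint64_t> slots; // slots.size() == vectorLength
    };

    enum class Put { Fast, GrewLength, Slow };

    static Put contiguousPut(Butterfly& b, uint32_t index, uint64_t value)
    {
        if (index < b.publicLength) {   // in bounds: plain store
            b.slots[index] = value;
            return Put::Fast;
        }
        if (index >= b.vectorLength)    // past the allocated vector: slow path
            return Put::Slow;
        b.slots[index] = value;         // tail store: grow the public length
        b.publicLength = index + 1;
        return Put::GrewLength;
    }

    int main()
    {
        Butterfly b = { 2, 4, std::vector<uint64_t>(4) };
        std::printf("%d %d %d\n", (int)contiguousPut(b, 0, 1),
            (int)contiguousPut(b, 2, 2), (int)contiguousPut(b, 9, 3));
    }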
@@ -2161,7 +2269,7 @@ void SpeculativeJIT::compile(Node& node)
         // SetLocal doubles as a hint as to where a node will be stored and
         // as a speculation point. So before we speculate make sure that we
         // know where the child of this node needs to go in the virtual
-        // register file.
+        // stack.
         compileMovHint(node);
         
         // As far as OSR is concerned, we're on the bytecode index corresponding
@@ -2190,7 +2298,7 @@ void SpeculativeJIT::compile(Node& node)
         // this SetLocal should not have executed. But for op_post_inc, it's just
         // fine, because this SetLocal's local (i.e. the LHS in a x = y++
         // statement) would be dead anyway - so the fact that DFG would have
-        // already made the assignment, and baked it into the register file during
+        // already made the assignment, and baked it into the stack during
         // OSR exit, would not be visible to the old JIT in any way.
         m_codeOriginForOSR = nextNode->codeOrigin;
         
@@ -2200,9 +2308,9 @@ void SpeculativeJIT::compile(Node& node)
             m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
             noResult(m_compileIndex);
             // Indicate that it's no longer necessary to retrieve the value of
-            // this bytecode variable from registers or other locations in the register file,
+            // this bytecode variable from registers or other locations in the stack,
             // but that it is stored as a double.
-            recordSetLocal(node.local(), ValueSource(DoubleInRegisterFile));
+            recordSetLocal(node.local(), ValueSource(DoubleInJSStack));
             break;
         }
         SpeculatedType predictedType = node.variableAccessData()->argumentAwarePrediction();
@@ -2210,14 +2318,14 @@ void SpeculativeJIT::compile(Node& node)
             DoubleOperand value(this, node.child1());
             m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
             noResult(m_compileIndex);
-            recordSetLocal(node.local(), ValueSource(DoubleInRegisterFile));
+            recordSetLocal(node.local(), ValueSource(DoubleInJSStack));
             break;
         }
         if (isInt32Speculation(predictedType)) {
             SpeculateIntegerOperand value(this, node.child1());
             m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
             noResult(m_compileIndex);
-            recordSetLocal(node.local(), ValueSource(Int32InRegisterFile));
+            recordSetLocal(node.local(), ValueSource(Int32InJSStack));
             break;
         }
         if (isCellSpeculation(predictedType)) {
@@ -2225,14 +2333,14 @@ void SpeculativeJIT::compile(Node& node)
             GPRReg cellGPR = cell.gpr();
             m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local()));
             noResult(m_compileIndex);
-            recordSetLocal(node.local(), ValueSource(CellInRegisterFile));
+            recordSetLocal(node.local(), ValueSource(CellInJSStack));
             break;
         }
         if (isBooleanSpeculation(predictedType)) {
             SpeculateBooleanOperand value(this, node.child1());
             m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
             noResult(m_compileIndex);
-            recordSetLocal(node.local(), ValueSource(BooleanInRegisterFile));
+            recordSetLocal(node.local(), ValueSource(BooleanInJSStack));
             break;
         }
     }
@@ -2240,7 +2348,7 @@ void SpeculativeJIT::compile(Node& node)
         m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node.local()));
         m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node.local()));
         noResult(m_compileIndex);
-        recordSetLocal(node.local(), ValueSource(ValueInRegisterFile));
+        recordSetLocal(node.local(), ValueSource(ValueInJSStack));
         
         // If we're storing an arguments object that has been optimized away,
         // our variable event stream for OSR exit now reflects the optimized
@@ -2565,6 +2673,55 @@ void SpeculativeJIT::compile(Node& node)
         jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
         break;
     }
+    case IN_BOUNDS_CONTIGUOUS_MODES: {
+        SpeculateStrictInt32Operand property(this, node.child2());
+        StorageOperand storage(this, node.child3());
+        
+        GPRReg propertyReg = property.gpr();
+        GPRReg storageReg = storage.gpr();
+        
+        if (!m_compileOkay)
+            return;
+        
+        speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+        
+        GPRTemporary resultTag(this);
+        GPRTemporary resultPayload(this);
+        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
+        speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
+        m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
+        
+        jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+        break;
+    }
+    case CONTIGUOUS_TO_TAIL_MODES:
+    case OUT_OF_BOUNDS_CONTIGUOUS_MODES:
+    case ALL_EFFECTFUL_CONTIGUOUS_MODES: {
+        SpeculateCellOperand base(this, node.child1());
+        SpeculateStrictInt32Operand property(this, node.child2());
+        StorageOperand storage(this, node.child3());
+        
+        GPRReg baseReg = base.gpr();
+        GPRReg propertyReg = property.gpr();
+        GPRReg storageReg = storage.gpr();
+        
+        if (!m_compileOkay)
+            return;
+        
+        GPRTemporary resultTag(this);
+        GPRTemporary resultPayload(this);
+        GPRReg resultTagReg = resultTag.gpr();
+        GPRReg resultPayloadReg = resultPayload.gpr();
+        
+        MacroAssembler::JumpList slowCases =
+            compileContiguousGetByVal(node, baseReg, propertyReg, storageReg, resultTagReg, resultPayloadReg);
+        addSlowPathGenerator(
+            slowPathCall(
+                slowCases, this, operationGetByValArrayInt,
+                JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg));
+        
+        jsValueResult(resultTagReg, resultPayloadReg, m_compileIndex);
+        break;
+    }
     case IN_BOUNDS_ARRAY_STORAGE_MODES: {
         SpeculateStrictInt32Operand property(this, node.child2());
         StorageOperand storage(this, node.child3());
@@ -2587,6 +2744,7 @@ void SpeculativeJIT::compile(Node& node)
         break;
     }
     case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES:
+    case SLOW_PUT_ARRAY_STORAGE_MODES:
    case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: {
         SpeculateCellOperand base(this, node.child1());
         SpeculateStrictInt32Operand property(this, node.child2());
@@ -2714,6 +2872,55 @@ void SpeculativeJIT::compile(Node& node)
         GPRReg propertyReg = property.gpr();
         
         switch (arrayMode) {
+        case ALL_CONTIGUOUS_MODES:
+        case ALL_EFFECTFUL_CONTIGUOUS_MODES: {
+            JSValueOperand value(this, child3);
+            
+            GPRReg valueTagReg = value.tagGPR();
+            GPRReg valuePayloadReg = value.payloadGPR();
+            
+            if (!m_compileOkay)
+                return;
+            
+            if (Heap::isWriteBarrierEnabled()) {
+                GPRTemporary scratch(this);
+                writeBarrier(baseReg, valueTagReg, child3, WriteBarrierForPropertyAccess, scratch.gpr());
+            }
+            
+            StorageOperand storage(this, child4);
+            GPRReg storageReg = storage.gpr();
+            
+            if (node.op() == PutByValAlias) {
+                // Store the value to the array.
+                GPRReg propertyReg = property.gpr();
+                m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+                m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+                
+                noResult(m_compileIndex);
+                break;
+            }
+            
+            MacroAssembler::JumpList slowCases =
+                compileContiguousPutByVal(
+                    node, baseReg, propertyReg, storageReg, valueTagReg, valuePayloadReg);
+            
+            base.use();
+            property.use();
+            value.use();
+            storage.use();
+            
+            if (!slowCases.empty()) {
+                addSlowPathGenerator(
+                    slowPathCall(
+                        slowCases, this,
+                        m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
+                        NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg));
+            }
+            
+            noResult(m_compileIndex, UseChildrenCalledExplicitly);
+            break;
+        }
+        
         case ALL_ARRAY_STORAGE_MODES:
         case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: {
             JSValueOperand value(this, child3);
@@ -2743,61 +2950,23 @@ void SpeculativeJIT::compile(Node& node)
                 break;
             }
             
-            MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
-            if (isInBoundsAccess(node.arrayMode()))
-                speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds);
-            
-            // Check if we're writing to a hole; if so increment m_numValuesInVector.
-            MacroAssembler::Jump isHoleValue;
-            if (!mayStoreToHole(arrayMode)) {
-                // This is uncountable because if we take this exit, then the baseline JIT
-                // will immediately count the hole store. So there is no need for exit
-                // profiling.
-                speculationCheck(
-                    Uncountable, JSValueRegs(), NoNode,
-                    m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)));
-            } else {
-                MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
-                if (isSlowPutAccess(arrayMode)) {
-                    // This is sort of strange. If we wanted to optimize this code path, we would invert
-                    // the above branch. But it's simply not worth it since this only happens if we're
-                    // already having a bad time.
-                    isHoleValue = m_jit.jump();
-                } else {
-                    m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
-                    
-                    // If we're writing to a hole we might be growing the array;
-                    MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
-                    m_jit.add32(TrustedImm32(1), propertyReg);
-                    m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
-                    m_jit.sub32(TrustedImm32(1), propertyReg);
-                    
-                    lengthDoesNotNeedUpdate.link(&m_jit);
-                }
-                notHoleValue.link(&m_jit);
-            }
+            MacroAssembler::JumpList slowCases =
+                compileArrayStoragePutByVal(
+                    node, baseReg, propertyReg, storageReg, valueTagReg, valuePayloadReg);
 
             base.use();
             property.use();
             value.use();
             storage.use();
-            
-            // Store the value to the array.
-            m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-            m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-            
-            if (!isInBoundsAccess(arrayMode)) {
-                MacroAssembler::JumpList slowCases;
-                slowCases.append(beyondArrayBounds);
-                if (isSlowPutAccess(arrayMode))
-                    slowCases.append(isHoleValue);
+            
+            if (!slowCases.empty()) {
                 addSlowPathGenerator(
                     slowPathCall(
                         slowCases, this,
                         m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
                         NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg));
             }
-            
+            
             noResult(m_compileIndex, UseChildrenCalledExplicitly);
             break;
         }
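This hunk replaces the open-coded hole handling with the shared compileArrayStoragePutByVal helper and a single deferred slow-path call. The addSlowPathGenerator pattern itself is just "record now, emit later": fast-path code registers what should happen if its guards fail, and the out-of-line stubs are emitted after the main code stream. A small model using deferred callables — the types here are stand-ins, only the shape of the pattern is taken from the diff:

    #include <cstdio>
    #include <functional>
    #include <vector>

    struct CodeStream {
        std::vector<std::function<void()>> slowPaths;
        void addSlowPathGenerator(std::function<void()> gen)
        {
            slowPaths.push_back(std::move(gen)); // record during the main pass
        }
        void emitSlowPaths()
        {
            for (auto& gen : slowPaths)
                gen();                           // emitted after the fast paths
        }
    };

    int main()
    {
        CodeStream jit;
        bool strictMode = true;
        jit.addSlowPathGenerator([strictMode] {
            std::printf("call %s\n", strictMode
                ? "operationPutByValBeyondArrayBoundsStrict"
                : "operationPutByValBeyondArrayBoundsNonStrict");
        });
        jit.emitSlowPaths();
    }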
@@ -2919,25 +3088,55 @@ void SpeculativeJIT::compile(Node& node)
         StorageOperand storage(this, node.child3());
 
         GPRReg storageGPR = storage.gpr();
-        
-        m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
-        
-        // Refuse to handle bizarre lengths.
-        speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
+        switch (node.arrayMode()) {
+        case Array::ArrayWithContiguous:
+        case Array::ArrayWithContiguousOutOfBounds: {
+            m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
+            MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+            m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+            m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+            m_jit.add32(TrustedImm32(1), storageLengthGPR);
+            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+            m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
+            
+            addSlowPathGenerator(
+                slowPathCall(
+                    slowPath, this, operationArrayPush,
+                    JSValueRegs(storageGPR, storageLengthGPR),
+                    valueTagGPR, valuePayloadGPR, baseGPR));
+            
+            jsValueResult(storageGPR, storageLengthGPR, m_compileIndex);
+            break;
+        }
+            
+        case Array::ArrayWithArrayStorage:
+        case Array::ArrayWithArrayStorageOutOfBounds: {
+            m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
-        MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
+            // Refuse to handle bizarre lengths.
+            speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
-        m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-        m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+            MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
-        m_jit.add32(TrustedImm32(1), storageLengthGPR);
-        m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
-        m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
-        m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
+            m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-        m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-        addSlowPathGenerator(slowPathCall(slowPath, this, operationArrayPush, JSValueRegs(storageGPR, storageLengthGPR), valueTagGPR, valuePayloadGPR, baseGPR));
+            m_jit.add32(TrustedImm32(1), storageLengthGPR);
+            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
+            m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR,
OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+            m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
-        jsValueResult(storageGPR, storageLengthGPR, m_compileIndex);
+            addSlowPathGenerator(slowPathCall(slowPath, this, operationArrayPush, JSValueRegs(storageGPR, storageLengthGPR), valueTagGPR, valuePayloadGPR, baseGPR));
+            
+            jsValueResult(storageGPR, storageLengthGPR, m_compileIndex);
+            break;
+        }
+            
+        default:
+            CRASH();
+            break;
+        }
         break;
     }
@@ -2948,46 +3147,88 @@ void SpeculativeJIT::compile(Node& node)
         StorageOperand storage(this, node.child2());
         GPRTemporary valueTag(this);
         GPRTemporary valuePayload(this);
-        GPRTemporary storageLength(this);
 
         GPRReg baseGPR = base.gpr();
         GPRReg valueTagGPR = valueTag.gpr();
         GPRReg valuePayloadGPR = valuePayload.gpr();
         GPRReg storageGPR = storage.gpr();
-        GPRReg storageLengthGPR = storageLength.gpr();
 
-        m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
+        switch (node.arrayMode()) {
+        case Array::ArrayWithContiguous:
+        case Array::ArrayWithContiguousOutOfBounds: {
+            m_jit.load32(
+                MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valueTagGPR);
+            MacroAssembler::Jump undefinedCase =
+                m_jit.branchTest32(MacroAssembler::Zero, valueTagGPR);
+            m_jit.sub32(TrustedImm32(1), valueTagGPR);
+            m_jit.store32(
+                valueTagGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+            m_jit.load32(
+                MacroAssembler::BaseIndex(storageGPR, valueTagGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
+                valuePayloadGPR);
+            m_jit.load32(
+                MacroAssembler::BaseIndex(storageGPR, valueTagGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
+                valueTagGPR);
+            MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
+            
+            addSlowPathGenerator(
+                slowPathMove(
+                    undefinedCase, this,
+                    MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
+                    MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
+            addSlowPathGenerator(
+                slowPathCall(
+                    slowCase, this, operationArrayPopAndRecoverLength,
+                    JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
+            
+            jsValueResult(valueTagGPR, valuePayloadGPR, m_compileIndex);
+            break;
+        }
+            
+        case Array::ArrayWithArrayStorage:
+        case Array::ArrayWithArrayStorageOutOfBounds: {
+            GPRTemporary storageLength(this);
+            GPRReg storageLengthGPR = storageLength.gpr();
+            
+            m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
 
-        JITCompiler::JumpList setUndefinedCases;
-        setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR));
+            JITCompiler::JumpList setUndefinedCases;
+            setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR));
 
-        m_jit.sub32(TrustedImm32(1), storageLengthGPR);
+            m_jit.sub32(TrustedImm32(1), storageLengthGPR);
 
-        MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
+            MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
 
-        m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR,
MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR);
+            m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR);
+            m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR);
 
-        m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
+            m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
 
-        setUndefinedCases.append(m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR));
+            setUndefinedCases.append(m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR));
 
-        m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+            m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
 
-        m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+            m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
 
-        addSlowPathGenerator(
-            slowPathMove(
-                setUndefinedCases, this,
-                MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
-                MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
+            addSlowPathGenerator(
+                slowPathMove(
+                    setUndefinedCases, this,
+                    MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
+                    MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
 
-        addSlowPathGenerator(
-            slowPathCall(
-                slowCase, this, operationArrayPop,
-                JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
+            addSlowPathGenerator(
+                slowPathCall(
+                    slowCase, this, operationArrayPop,
+                    JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
 
-        jsValueResult(valueTagGPR, valuePayloadGPR, m_compileIndex);
+            jsValueResult(valueTagGPR, valuePayloadGPR, m_compileIndex);
+            break;
+        }
+            
+        default:
+            CRASH();
+            break;
+        }
         break;
     }
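The new contiguous ArrayPop fast path above shrinks publicLength first, then loads the last slot; if that slot turns out to be a hole, it bails to operationArrayPopAndRecoverLength, which (as the name suggests) must also undo the premature length decrement. A plain-C++ model of that ordering — 0 stands in for the "empty value" hole here, which is a simplification of how JSC actually tags holes on 32-bit and 64-bit:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Contiguous {
        uint32_t publicLength;
        std::vector<uint64_t> slots;
    };

    enum class Pop { Undefined, Fast, SlowRecoverLength };

    static Pop contiguousPop(Contiguous& a, uint64_t& out)
    {
        if (!a.publicLength)
            return Pop::Undefined;      // empty array: result is undefined
        --a.publicLength;               // shrink first, like the JIT code
        out = a.slots[a.publicLength];
        if (!out)                       // hole: slow path must fix the length back up
            return Pop::SlowRecoverLength;
        return Pop::Fast;
    }

    int main()
    {
        Contiguous a = { 2, { 41, 0 } };
        uint64_t v = 0;
        std::printf("%d\n", (int)contiguousPop(a, v));              // hole -> slow
        std::printf("%d v=%llu\n", (int)contiguousPop(a, v),
            (unsigned long long)v);                                 // fast pop of 41
    }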
@@ -3051,9 +3292,9 @@ void SpeculativeJIT::compile(Node& node)
         }
         
         // Grab the return address.
-        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT2);
+        m_jit.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, GPRInfo::regT2);
         // Restore our caller's "r".
-        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
+        m_jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, GPRInfo::callFrameRegister);
         // Return.
         m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT2);
         m_jit.ret();
@@ -3163,16 +3404,48 @@ void SpeculativeJIT::compile(Node& node)
 
     case NewArray: {
         JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin);
-        if (!globalObject->isHavingABadTime())
+        if (!globalObject->isHavingABadTime()) {
             globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint());
+            
+            ASSERT(hasContiguous(globalObject->arrayStructure()->indexingType()));
+            
+            unsigned numElements = node.numChildren();
+            
+            GPRTemporary result(this);
+            GPRTemporary storage(this);
+            
+            GPRReg resultGPR = result.gpr();
+            GPRReg storageGPR = storage.gpr();
+            
+            emitAllocateJSArray(globalObject->arrayStructure(), resultGPR, storageGPR, numElements);
+            
+            // At this point, one way or another, resultGPR and storageGPR have pointers to
+            // the JSArray and the Butterfly, respectively.
+            
+            for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
+                JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
+                GPRReg opTagGPR = operand.tagGPR();
+                GPRReg opPayloadGPR = operand.payloadGPR();
+                m_jit.store32(opTagGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+                m_jit.store32(opPayloadGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+            }
+            
+            // Yuck, we should *really* have a way of also returning the storageGPR. But
+            // that's the least of what's wrong with this code. We really shouldn't be
+            // allocating the array after having computed - and probably spilled to the
+            // stack - all of the things that will go into the array. The solution to that
+            // bigger problem will also likely fix the redundancy in reloading the storage
+            // pointer that we currently have.
+            
+            cellResult(resultGPR, m_compileIndex);
+            break;
+        }
         
         if (!node.numChildren()) {
             flushRegisters();
             GPRResult result(this);
-            GPRResult2 resultTagIgnored(this);
             callOperation(
-                operationNewEmptyArray, resultTagIgnored.gpr(), result.gpr(),
-                globalObject->arrayStructure());
+                operationNewEmptyArray, result.gpr(), globalObject->arrayStructure());
             cellResult(result.gpr(), m_compileIndex);
             break;
         }
@@ -3201,11 +3474,10 @@ void SpeculativeJIT::compile(Node& node)
             m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
         }
 
-        GPRResult resultPayload(this);
-        GPRResult2 resultTag(this);
+        GPRResult result(this);
         
         callOperation(
-            operationNewArray, resultTag.gpr(), resultPayload.gpr(), globalObject->arrayStructure(),
+            operationNewArray, result.gpr(), globalObject->arrayStructure(),
             static_cast<void *>(buffer), node.numChildren());
 
         if (scratchSize) {
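The guard for all of these inline array fast paths is the "having a bad time" watchpoint: compilation registers itself on it, so the emitted code is only reachable while the global object's array structures still have their fast indexing type. A toy model of that contract — the types and the trigger in main() are invented for illustration:

    #include <cstdio>

    struct Watchpoint {
        bool fired = false;
        void fire() { fired = true; } // e.g. someone installed an indexed accessor
    };

    // Models the compile-time decision above: take the inline path only while
    // the watchpoint is intact; once it fires, code compiled against it is
    // invalidated and allocation goes through the runtime operation instead.
    static const char* newArrayStrategy(const Watchpoint& badTime, unsigned numElements)
    {
        if (badTime.fired)
            return "recompile: call operationNewArray instead";
        return numElements ? "inline emitAllocateJSArray path" : "inline empty array";
    }

    int main()
    {
        Watchpoint badTime;
        std::printf("%s\n", newArrayStrategy(badTime, 3));
        badTime.fire();
        std::printf("%s\n", newArrayStrategy(badTime, 3));
    }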
@@ -3215,37 +3487,94 @@ void SpeculativeJIT::compile(Node& node)
             m_jit.storePtr(TrustedImmPtr(0), scratch.gpr());
         }
 
-        // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
-        cellResult(resultPayload.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
+        cellResult(result.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
         break;
     }
 
     case NewArrayWithSize: {
         JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin);
-        if (!globalObject->isHavingABadTime())
+        if (!globalObject->isHavingABadTime()) {
             globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint());
+            
+            SpeculateStrictInt32Operand size(this, node.child1());
+            GPRTemporary result(this);
+            GPRTemporary storage(this);
+            GPRTemporary scratch(this);
+            
+            GPRReg sizeGPR = size.gpr();
+            GPRReg resultGPR = result.gpr();
+            GPRReg storageGPR = storage.gpr();
+            GPRReg scratchGPR = scratch.gpr();
+            
+            MacroAssembler::JumpList slowCases;
+            slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)));
+            
+            ASSERT((1 << 3) == sizeof(JSValue));
+            m_jit.move(sizeGPR, scratchGPR);
+            m_jit.lshift32(TrustedImm32(3), scratchGPR);
+            m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR);
+            slowCases.append(
+                emitAllocateBasicStorage(resultGPR, storageGPR));
+            m_jit.subPtr(scratchGPR, storageGPR);
+            emitAllocateBasicJSObject<JSArray, MarkedBlock::None>(
+                TrustedImmPtr(globalObject->arrayStructure()), resultGPR, scratchGPR,
+                storageGPR, sizeof(JSArray), slowCases);
+            
+            m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+            m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+            
+            addSlowPathGenerator(adoptPtr(
+                new CallArrayAllocatorWithVariableSizeSlowPathGenerator(
+                    slowCases, this, operationNewArrayWithSize, resultGPR,
+                    globalObject->arrayStructure(),
+                    globalObject->arrayStructureWithArrayStorage(),
+                    sizeGPR)));
+            
+            cellResult(resultGPR, m_compileIndex);
+            break;
+        }
         
         SpeculateStrictInt32Operand size(this, node.child1());
         GPRReg sizeGPR = size.gpr();
         flushRegisters();
         GPRResult result(this);
-        GPRResult2 resultTagIgnored(this);
         callOperation(
-            operationNewArrayWithSize, resultTagIgnored.gpr(), result.gpr(),
-            globalObject->arrayStructure(), sizeGPR);
+            operationNewArrayWithSize, result.gpr(), globalObject->arrayStructure(), sizeGPR);
         cellResult(result.gpr(), m_compileIndex);
         break;
     }
 
     case NewArrayBuffer: {
+        JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin);
+        if (!globalObject->isHavingABadTime()) {
+            globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint());
+            
+            unsigned numElements = node.numConstants();
+            
+            GPRTemporary result(this);
+            GPRTemporary storage(this);
+            
+            GPRReg resultGPR = result.gpr();
+            GPRReg storageGPR = storage.gpr();
+            
+            emitAllocateJSArray(globalObject->arrayStructure(), resultGPR, storageGPR, numElements);
+            
+            int32_t* data = bitwise_cast<int32_t*>(m_jit.codeBlock()->constantBuffer(node.startConstant()));
+            for (unsigned index = 0; index < node.numConstants() * 2; ++index) {
+                m_jit.store32(
+                    Imm32(data[index]), MacroAssembler::Address(storageGPR, sizeof(int32_t) * index));
+            }
+            
+            cellResult(resultGPR, m_compileIndex);
+            break;
+        }
+        
         flushRegisters();
-        GPRResult resultPayload(this);
-        GPRResult2 resultTag(this);
+        GPRResult result(this);
         
-        callOperation(operationNewArrayBuffer, resultTag.gpr(), resultPayload.gpr(), node.startConstant(), node.numConstants());
+        callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructure(), node.startConstant(), node.numConstants());
         
-        // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
jitAssert the tag is a cell tag. - cellResult(resultPayload.gpr(), m_compileIndex); + cellResult(result.gpr(), m_compileIndex); break; } @@ -3366,7 +3695,7 @@ void SpeculativeJIT::compile(Node& node) case GetCallee: { GPRTemporary result(this); - m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr()); + m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(JSStack::Callee)), result.gpr()); cellResult(result.gpr(), m_compileIndex); break; } @@ -3375,7 +3704,7 @@ void SpeculativeJIT::compile(Node& node) GPRTemporary result(this); GPRReg resultGPR = result.gpr(); - m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR); + m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(JSStack::ScopeChain)), resultGPR); bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain(); int skip = node.scopeChainDepth(); ASSERT(skip || !checkTopLevel); @@ -4004,14 +4333,14 @@ void SpeculativeJIT::compile(Node& node) m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultPayloadGPR); m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR); #if DFG_ENABLE(JIT_ASSERT) - JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(inlineStorageCapacity)); + JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset)); m_jit.breakpoint(); isOutOfLine.link(&m_jit); #endif m_jit.neg32(resolveInfoGPR); m_jit.signExtend32ToPtr(resolveInfoGPR, resolveInfoGPR); - m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) + (inlineStorageCapacity - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultTagGPR); - m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) + (inlineStorageCapacity - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultPayloadGPR); + m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) + (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultTagGPR); + m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) + (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultPayloadGPR); addSlowPathGenerator( slowPathCall( @@ -4161,7 +4490,7 @@ void SpeculativeJIT::compile(Node& node) } ASSERT(!node.codeOrigin.inlineCallFrame); - m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR); + m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR); m_jit.sub32(TrustedImm32(1), resultGPR); integerResult(resultGPR, m_compileIndex); break; @@ -4183,7 +4512,7 @@ void SpeculativeJIT::compile(Node& node) Imm32(node.codeOrigin.inlineCallFrame->arguments.size() - 1), resultPayloadGPR); } else { - m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultPayloadGPR); + m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultPayloadGPR); m_jit.sub32(TrustedImm32(1), resultPayloadGPR); } m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR); @@ -4236,7 +4565,7 @@ void 
SpeculativeJIT::compile(Node& node) m_jit.branch32( JITCompiler::AboveOrEqual, resultPayloadGPR, - JITCompiler::payloadFor(RegisterFile::ArgumentCount))); + JITCompiler::payloadFor(JSStack::ArgumentCount))); } JITCompiler::JumpList slowArgument; @@ -4313,7 +4642,7 @@ void SpeculativeJIT::compile(Node& node) m_jit.branch32( JITCompiler::AboveOrEqual, resultPayloadGPR, - JITCompiler::payloadFor(RegisterFile::ArgumentCount))); + JITCompiler::payloadFor(JSStack::ArgumentCount))); } JITCompiler::JumpList slowArgument; diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp index d7cec27c1..0928dfa58 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "Arguments.h" +#include "DFGCallArrayAllocatorSlowPathGenerator.h" #include "DFGSlowPathGenerator.h" namespace JSC { namespace DFG { @@ -72,7 +73,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat } // Since we statically know that we're filling an integer, and values - // in the RegisterFile are boxed, this must be DataFormatJSInteger. + // in the JSStack are boxed, this must be DataFormatJSInteger. // We will check this with a jitAssert below. info.fillJSValue(*m_stream, gpr, DataFormatJSInteger); unlock(gpr); @@ -1005,9 +1006,9 @@ void SpeculativeJIT::emitCall(Node& node) // arguments. int numPassedArgs = node.numChildren() - 1; - m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount)); - m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(RegisterFile::CallerFrame)); - m_jit.storePtr(calleeGPR, callFrameSlot(RegisterFile::Callee)); + m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount)); + m_jit.storePtr(GPRInfo::callFrameRegister, callFrameSlot(JSStack::CallerFrame)); + m_jit.storePtr(calleeGPR, callFrameSlot(JSStack::Callee)); for (int i = 0; i < numPassedArgs; i++) { Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i]; @@ -1033,7 +1034,7 @@ void SpeculativeJIT::emitCall(Node& node) slowPath = m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleeGPR, targetToCheck, MacroAssembler::TrustedImmPtr(JSValue::encode(JSValue()))); m_jit.loadPtr(MacroAssembler::Address(calleeGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultGPR); - m_jit.storePtr(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain)); + m_jit.storePtr(resultGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain)); CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin; JITCompiler::Call fastCall = m_jit.nearCall(); @@ -2109,10 +2110,111 @@ void SpeculativeJIT::emitBranch(Node& node) } } +MacroAssembler::JumpList SpeculativeJIT::compileContiguousGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg) +{ + MacroAssembler::JumpList slowCases; + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), resultReg); + slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultReg)); + + return slowCases; +} + +MacroAssembler::JumpList 
SpeculativeJIT::compileArrayStorageGetByVal(Node&, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg resultReg) +{ + MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); + + m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), resultReg); + MacroAssembler::Jump hole = m_jit.branchTestPtr(MacroAssembler::Zero, resultReg); + + MacroAssembler::JumpList slowCases; + slowCases.append(outOfBounds); + slowCases.append(hole); + return slowCases; +} + +MacroAssembler::JumpList SpeculativeJIT::compileContiguousPutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg) +{ + Array::Mode arrayMode = node.arrayMode(); + + MacroAssembler::JumpList slowCases; + + if (!mayStoreToTail(arrayMode)) { + speculationCheck( + Uncountable, JSValueRegs(), NoNode, + m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + } else { + MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()))); + + if (isInBoundsAccess(arrayMode)) + speculationCheck(Uncountable, JSValueRegs(), NoNode, slowCases); + + m_jit.add32(TrustedImm32(1), propertyReg, tempReg); + m_jit.store32(tempReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + + inBounds.link(&m_jit); + } + + m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr)); + + if (isInBoundsAccess(arrayMode)) + return MacroAssembler::JumpList(); + return slowCases; +} + +MacroAssembler::JumpList SpeculativeJIT::compileArrayStoragePutByVal(Node& node, GPRReg, GPRReg propertyReg, GPRReg storageReg, GPRReg valueReg, GPRReg tempReg) +{ + Array::Mode arrayMode = node.arrayMode(); + + MacroAssembler::JumpList slowCases; + + MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); + if (isInBoundsAccess(arrayMode)) + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds); + else + slowCases.append(beyondArrayBounds); + + // Check if we're writing to a hole; if so increment m_numValuesInVector. + if (!mayStoreToHole(arrayMode)) { + // This is uncountable because if we take this exit, then the baseline JIT + // will immediately count the hole store. So there is no need for exit + // profiling. + speculationCheck( + Uncountable, JSValueRegs(), NoNode, + m_jit.branchTestPtr(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])))); + } else { + MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + if (isSlowPutAccess(arrayMode)) { + // This is sort of strange. If we wanted to optimize this code path, we would invert + // the above branch. But it's simply not worth it since this only happens if we're + // already having a bad time. 
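
Pulling the branches of compileArrayStoragePutByVal together, the hole policy it emits (above and continuing just below) behaves like this self-contained model; the names and the bool-return convention are mine, not JSC's:

    #include <cstdint>
    #include <vector>

    struct ArrayStorageModel {
        uint32_t publicLength = 0;       // ArrayStorage::lengthOffset()
        uint32_t numValuesInVector = 0;  // ArrayStorage::numValuesInVectorOffset()
        std::vector<uint64_t> vector;    // slot == 0 models a hole, as in branchTestPtr(Zero, ...)
    };

    // Returns false where the JIT would bail to its slow cases (or fail speculation).
    bool arrayStoragePutModel(ArrayStorageModel& s, uint32_t i, uint64_t value, bool slowPutMode)
    {
        if (i >= s.vector.size())
            return false;                // beyondArrayBounds
        if (!s.vector[i]) {              // storing into a hole
            if (slowPutMode)
                return false;            // the prototype chain may intercept; punt to C++
            s.numValuesInVector++;
            if (i >= s.publicLength)
                s.publicLength = i + 1;  // a hole store can grow the array's length
        }
        s.vector[i] = value;
        return true;
    }
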
+ slowCases.append(m_jit.jump()); + } else { + m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); + + // If we're writing to a hole we might be growing the array; + MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + m_jit.add32(TrustedImm32(1), propertyReg, tempReg); + m_jit.store32(tempReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + + lengthDoesNotNeedUpdate.link(&m_jit); + } + notHoleValue.link(&m_jit); + } + + // Store the value to the array. + m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + + return slowCases; +} + void SpeculativeJIT::compile(Node& node) { NodeType op = node.op(); - + switch (op) { case JSConstant: initConstantInfo(m_compileIndex); @@ -2202,7 +2304,7 @@ void SpeculativeJIT::compile(Node& node) // SetLocal doubles as a hint as to where a node will be stored and // as a speculation point. So before we speculate make sure that we // know where the child of this node needs to go in the virtual - // register file. + // stack. compileMovHint(node); // As far as OSR is concerned, we're on the bytecode index corresponding @@ -2231,7 +2333,7 @@ void SpeculativeJIT::compile(Node& node) // this SetLocal should not have executed. But for op_post_inc, it's just // fine, because this SetLocal's local (i.e. the LHS in a x = y++ // statement) would be dead anyway - so the fact that DFG would have - // already made the assignment, and baked it into the register file during + // already made the assignment, and baked it into the stack during // OSR exit, would not be visible to the old JIT in any way. m_codeOriginForOSR = nextNode->codeOrigin; @@ -2241,9 +2343,9 @@ void SpeculativeJIT::compile(Node& node) m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local())); noResult(m_compileIndex); // Indicate that it's no longer necessary to retrieve the value of - // this bytecode variable from registers or other locations in the register file, + // this bytecode variable from registers or other locations in the stack, // but that it is stored as a double. 
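
A note on the DoubleInJSStack recording above: the stack slot now holds a raw IEEE double, so OSR exit has to rebox it before the baseline JIT can use it. A sketch under the assumption that this snapshot uses the usual 64-bit NaN-boxing scheme with a 2^48 DoubleEncodeOffset:

    #include <cstdint>
    #include <cstring>

    constexpr uint64_t kDoubleEncodeOffsetSketch = 1ULL << 48; // assumed encoding constant

    // The DFG spilled an unboxed double; recovery turns it back into a JSValue.
    uint64_t reboxDoubleSlotSketch(const void* stackSlot)
    {
        uint64_t rawBits;
        std::memcpy(&rawBits, stackSlot, sizeof rawBits);
        return rawBits + kDoubleEncodeOffsetSketch; // boxed double, distinct from cells and ints
    }
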
- recordSetLocal(node.local(), ValueSource(DoubleInRegisterFile)); + recordSetLocal(node.local(), ValueSource(DoubleInJSStack)); break; } @@ -2252,7 +2354,7 @@ void SpeculativeJIT::compile(Node& node) SpeculateIntegerOperand value(this, node.child1()); m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local())); noResult(m_compileIndex); - recordSetLocal(node.local(), ValueSource(Int32InRegisterFile)); + recordSetLocal(node.local(), ValueSource(Int32InJSStack)); break; } if (isCellSpeculation(predictedType)) { @@ -2260,14 +2362,14 @@ void SpeculativeJIT::compile(Node& node) GPRReg cellGPR = cell.gpr(); m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local())); noResult(m_compileIndex); - recordSetLocal(node.local(), ValueSource(CellInRegisterFile)); + recordSetLocal(node.local(), ValueSource(CellInJSStack)); break; } if (isBooleanSpeculation(predictedType)) { SpeculateBooleanOperand boolean(this, node.child1()); m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local())); noResult(m_compileIndex); - recordSetLocal(node.local(), ValueSource(BooleanInRegisterFile)); + recordSetLocal(node.local(), ValueSource(BooleanInJSStack)); break; } } @@ -2276,7 +2378,7 @@ void SpeculativeJIT::compile(Node& node) m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local())); noResult(m_compileIndex); - recordSetLocal(node.local(), ValueSource(ValueInRegisterFile)); + recordSetLocal(node.local(), ValueSource(ValueInJSStack)); // If we're storing an arguments object that has been optimized away, // our variable event stream for OSR exit now reflects the optimized @@ -2595,6 +2697,51 @@ void SpeculativeJIT::compile(Node& node) jsValueResult(result.gpr(), m_compileIndex); break; } + case IN_BOUNDS_CONTIGUOUS_MODES: { + SpeculateStrictInt32Operand property(this, node.child2()); + StorageOperand storage(this, node.child3()); + + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); + + if (!m_compileOkay) + return; + + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + GPRTemporary result(this); + m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr), result.gpr()); + speculationCheck(OutOfBounds, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr())); + jsValueResult(result.gpr(), m_compileIndex); + break; + } + case CONTIGUOUS_TO_TAIL_MODES: + case OUT_OF_BOUNDS_CONTIGUOUS_MODES: + case ALL_EFFECTFUL_CONTIGUOUS_MODES: { + SpeculateCellOperand base(this, node.child1()); + SpeculateStrictInt32Operand property(this, node.child2()); + StorageOperand storage(this, node.child3()); + + GPRReg baseReg = base.gpr(); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); + + if (!m_compileOkay) + return; + + GPRTemporary result(this); + GPRReg resultReg = result.gpr(); + + MacroAssembler::JumpList slowCases = + compileContiguousGetByVal(node, baseReg, propertyReg, storageReg, resultReg); + addSlowPathGenerator( + slowPathCall( + slowCases, this, operationGetByValArrayInt, + result.gpr(), baseReg, propertyReg)); + + jsValueResult(resultReg, m_compileIndex); + break; + } case IN_BOUNDS_ARRAY_STORAGE_MODES: { SpeculateStrictInt32Operand property(this, node.child2()); StorageOperand storage(this, node.child3()); @@ -2615,6 +2762,7 @@ void SpeculativeJIT::compile(Node& node) break; } case OUT_OF_BOUNDS_ARRAY_STORAGE_MODES: + case SLOW_PUT_ARRAY_STORAGE_MODES: 
case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: { SpeculateCellOperand base(this, node.child1()); SpeculateStrictInt32Operand property(this, node.child2()); @@ -2627,21 +2775,17 @@ void SpeculativeJIT::compile(Node& node) if (!m_compileOkay) return; - MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); - GPRTemporary result(this); - m_jit.loadPtr(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), result.gpr()); - MacroAssembler::Jump hole = m_jit.branchTestPtr(MacroAssembler::Zero, result.gpr()); - - MacroAssembler::JumpList slowCases; - slowCases.append(outOfBounds); - slowCases.append(hole); + GPRReg resultReg = result.gpr(); + + MacroAssembler::JumpList slowCases = + compileArrayStorageGetByVal(node, baseReg, propertyReg, storageReg, resultReg); addSlowPathGenerator( slowPathCall( slowCases, this, operationGetByValArrayInt, result.gpr(), baseReg, propertyReg)); - jsValueResult(result.gpr(), m_compileIndex); + jsValueResult(resultReg, m_compileIndex); break; } case Array::String: @@ -2735,8 +2879,8 @@ void SpeculativeJIT::compile(Node& node) GPRReg propertyReg = property.gpr(); switch (arrayMode) { - case ALL_ARRAY_STORAGE_MODES: - case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: { + case ALL_CONTIGUOUS_MODES: + case ALL_EFFECTFUL_CONTIGUOUS_MODES: { JSValueOperand value(this, child3); GPRReg valueReg = value.gpr(); @@ -2756,59 +2900,76 @@ void SpeculativeJIT::compile(Node& node) // Store the value to the array. GPRReg propertyReg = property.gpr(); GPRReg valueReg = value.gpr(); - m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr)); noResult(m_compileIndex); break; } - MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); - if (isInBoundsAccess(arrayMode)) - speculationCheck(OutOfBounds, JSValueRegs(), NoNode, beyondArrayBounds); - - // Check if we're writing to a hole; if so increment m_numValuesInVector. - MacroAssembler::Jump isHoleValue; - if (!mayStoreToHole(arrayMode)) { - // This is uncountable because if we take this exit, then the baseline JIT - // will immediately count the hole store. So there is no need for exit - // profiling. - speculationCheck( - Uncountable, JSValueRegs(), NoNode, - m_jit.branchTestPtr(MacroAssembler::Zero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])))); - } else { - MacroAssembler::Jump notHoleValue = m_jit.branchTestPtr(MacroAssembler::NonZero, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); - if (isSlowPutAccess(arrayMode)) { - // This is sort of strange. If we wanted to optimize this code path, we would invert - // the above branch. But it's simply not worth it since this only happens if we're - // already having a bad time. 
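
The branchTestPtr(Zero, ...) hole tests used throughout these 64-bit get/put paths lean on a property of the value encoding that is worth spelling out; a hedged summary, assuming the standard JSVALUE64 scheme:

    #include <cstdint>

    // No live JS value encodes to zero: cells are nonzero pointers, doubles gain
    // the 2^48 offset, int32s carry the number tag in their high bits, and
    // undefined/null/true/false use small nonzero immediates. Zero is reserved
    // for the empty value, so a zero slot must be a hole.
    inline bool isHoleSketch(uint64_t slotBits)
    {
        return slotBits == 0;
    }
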
- isHoleValue = m_jit.jump(); - } else { - m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); - - // If we're writing to a hole we might be growing the array; - MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); - m_jit.add32(TrustedImm32(1), propertyReg); - m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); - m_jit.sub32(TrustedImm32(1), propertyReg); + GPRTemporary temporary; + GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node); + + MacroAssembler::JumpList slowCases = + compileContiguousPutByVal( + node, baseReg, propertyReg, storageReg, valueReg, temporaryReg); + + base.use(); + property.use(); + value.use(); + storage.use(); + + if (!slowCases.empty()) { + addSlowPathGenerator( + slowPathCall( + slowCases, this, + m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, + NoResult, baseReg, propertyReg, valueReg)); + } + + noResult(m_compileIndex, UseChildrenCalledExplicitly); + break; + } + + case ALL_ARRAY_STORAGE_MODES: + case ALL_EFFECTFUL_ARRAY_STORAGE_MODES: { + JSValueOperand value(this, child3); + + GPRReg valueReg = value.gpr(); + + if (!m_compileOkay) + return; + + if (Heap::isWriteBarrierEnabled()) { + GPRTemporary scratch(this); + writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratch.gpr()); + } + + StorageOperand storage(this, child4); + GPRReg storageReg = storage.gpr(); + + if (node.op() == PutByValAlias) { + // Store the value to the array. + GPRReg propertyReg = property.gpr(); + GPRReg valueReg = value.gpr(); + m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); - lengthDoesNotNeedUpdate.link(&m_jit); - } - notHoleValue.link(&m_jit); + noResult(m_compileIndex); + break; } + GPRTemporary temporary; + GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node); + + MacroAssembler::JumpList slowCases = + compileArrayStoragePutByVal( + node, baseReg, propertyReg, storageReg, valueReg, temporaryReg); + base.use(); property.use(); value.use(); storage.use(); - - // Store the value to the array. 
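
One small improvement hidden in this refactor: the old inline code being deleted here bumped propertyReg in place to store the new length and then subtracted one to restore it, whereas the new helpers use the three-operand add32 into a scratch register obtained from temporaryRegisterForPutByVal. In plain C++ the new shape is simply:

    #include <cstdint>

    // Mirrors add32(TrustedImm32(1), propertyReg, tempReg) + store32(tempReg, ...):
    // the index survives untouched for the storePtr that follows.
    inline void bumpLengthSketch(uint32_t index, uint32_t& length)
    {
        uint32_t newLength = index + 1; // lands in the temp register
        length = newLength;             // stored to the length slot; index stays intact
    }
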
- m_jit.storePtr(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); - - if (!isInBoundsAccess(arrayMode)) { - MacroAssembler::JumpList slowCases; - slowCases.append(beyondArrayBounds); - if (isSlowPutAccess(arrayMode)) - slowCases.append(isHoleValue); + + if (!slowCases.empty()) { addSlowPathGenerator( slowPathCall( slowCases, this, @@ -2973,26 +3134,54 @@ void SpeculativeJIT::compile(Node& node) StorageOperand storage(this, node.child3()); GPRReg storageGPR = storage.gpr(); - m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); + switch (node.arrayMode()) { + case Array::ArrayWithContiguous: + case Array::ArrayWithContiguousOutOfBounds: { + m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); + MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); + m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr)); + m_jit.add32(TrustedImm32(1), storageLengthGPR); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR); + + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationArrayPush, NoResult, storageLengthGPR, + valueGPR, baseGPR)); - // Refuse to handle bizarre lengths. - speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe))); + jsValueResult(storageLengthGPR, m_compileIndex); + break; + } + + case Array::ArrayWithArrayStorage: + case Array::ArrayWithArrayStorageOutOfBounds: { + m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); - MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); + // Refuse to handle bizarre lengths. 
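
The new contiguous ArrayPush fast path above reduces to this policy; a self-contained model with invented names (the real code boxes the returned length via orPtr with the tag-type-number register):

    #include <cstdint>
    #include <vector>

    struct ButterflyModel {
        uint32_t publicLength = 0;   // Butterfly::offsetOfPublicLength()
        std::vector<uint64_t> slots; // size() stands in for the vector length
    };

    // Returns 0 where the JIT would call operationArrayPush to reallocate.
    uint32_t contiguousPushModel(ButterflyModel& b, uint64_t value)
    {
        if (b.publicLength >= b.slots.size())
            return 0;                // vector is full; the slow path grows the butterfly
        b.slots[b.publicLength] = value;
        return ++b.publicLength;     // the new length is also the JS result
    }
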
+ speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe))); - m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); - m_jit.add32(TrustedImm32(1), storageLengthGPR); - m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); - m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); - m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR); + m_jit.storePtr(valueGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); - addSlowPathGenerator( - slowPathCall( - slowPath, this, operationArrayPush, NoResult, storageLengthGPR, - valueGPR, baseGPR)); + m_jit.add32(TrustedImm32(1), storageLengthGPR); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); + m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); + m_jit.orPtr(GPRInfo::tagTypeNumberRegister, storageLengthGPR); + + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationArrayPush, NoResult, storageLengthGPR, + valueGPR, baseGPR)); - jsValueResult(storageLengthGPR, m_compileIndex); + jsValueResult(storageLengthGPR, m_compileIndex); + break; + } + + default: + CRASH(); + break; + } break; } @@ -3002,41 +3191,77 @@ void SpeculativeJIT::compile(Node& node) SpeculateCellOperand base(this, node.child1()); StorageOperand storage(this, node.child2()); GPRTemporary value(this); - GPRTemporary storageLength(this); GPRReg baseGPR = base.gpr(); GPRReg storageGPR = storage.gpr(); GPRReg valueGPR = value.gpr(); - GPRReg storageLengthGPR = storageLength.gpr(); - m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); + switch (node.arrayMode()) { + case Array::ArrayWithContiguous: + case Array::ArrayWithContiguousOutOfBounds: { + m_jit.load32( + MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valueGPR); + MacroAssembler::Jump undefinedCase = + m_jit.branchTest32(MacroAssembler::Zero, valueGPR); + m_jit.sub32(TrustedImm32(1), valueGPR); + m_jit.store32( + valueGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.loadPtr( + MacroAssembler::BaseIndex(storageGPR, valueGPR, MacroAssembler::ScalePtr), + valueGPR); + MacroAssembler::Jump slowCase = m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR); + + addSlowPathGenerator( + slowPathMove( + undefinedCase, this, + MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR)); + addSlowPathGenerator( + slowPathCall( + slowCase, this, operationArrayPopAndRecoverLength, valueGPR, baseGPR)); + + jsValueResult(valueGPR, m_compileIndex); + break; + } + + case Array::ArrayWithArrayStorage: + case Array::ArrayWithArrayStorageOutOfBounds: { + GPRTemporary storageLength(this); + GPRReg storageLengthGPR = storageLength.gpr(); + m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); - JITCompiler::JumpList setUndefinedCases; - setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR)); + 
JITCompiler::Jump undefinedCase = + m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR); - m_jit.sub32(TrustedImm32(1), storageLengthGPR); + m_jit.sub32(TrustedImm32(1), storageLengthGPR); - MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); + JITCompiler::JumpList slowCases; + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()))); - m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR); + m_jit.loadPtr(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), valueGPR); + slowCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR)); - m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); - - setUndefinedCases.append(m_jit.branchTestPtr(MacroAssembler::Zero, valueGPR)); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); - m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); - m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); + m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); + m_jit.sub32(MacroAssembler::TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); - addSlowPathGenerator( - slowPathMove( - setUndefinedCases, this, - MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR)); + addSlowPathGenerator( + slowPathMove( + undefinedCase, this, + MacroAssembler::TrustedImmPtr(JSValue::encode(jsUndefined())), valueGPR)); - addSlowPathGenerator( - slowPathCall( - slowCase, this, operationArrayPop, valueGPR, baseGPR)); + addSlowPathGenerator( + slowPathCall( + slowCases, this, operationArrayPop, valueGPR, baseGPR)); - jsValueResult(valueGPR, m_compileIndex); + jsValueResult(valueGPR, m_compileIndex); + break; + } + + default: + CRASH(); + break; + } break; } @@ -3087,9 +3312,9 @@ void SpeculativeJIT::compile(Node& node) m_jit.move(op1.gpr(), GPRInfo::returnValueGPR); // Grab the return address. - m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT1); + m_jit.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, GPRInfo::regT1); // Restore our caller's "r". - m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister); + m_jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, GPRInfo::callFrameRegister); // Return. 
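
The two ArrayPop variants above (contiguous and array storage) share one shape, modeled below; this intentionally conflates the two slow paths (operationArrayPopAndRecoverLength vs. operationArrayPop), which differ only in how much state the fast path has already committed before bailing:

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct PopModel {
        uint32_t length = 0;
        std::vector<uint64_t> slots; // slot == 0 models a hole
    };

    // nullopt covers both the pop-of-empty (undefined) and hole/bounds slow cases.
    std::optional<uint64_t> arrayPopModel(PopModel& a)
    {
        if (!a.length)
            return std::nullopt;               // undefinedCase
        uint32_t i = --a.length;
        if (i >= a.slots.size() || !a.slots[i])
            return std::nullopt;               // beyond vector length, or a hole
        uint64_t value = a.slots[i];
        a.slots[i] = 0;                        // the array-storage path clears the slot
        return value;
    }
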
m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT1); m_jit.ret(); @@ -3151,8 +3376,40 @@ void SpeculativeJIT::compile(Node& node) case NewArray: { JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin); - if (!globalObject->isHavingABadTime()) + if (!globalObject->isHavingABadTime()) { globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint()); + + ASSERT(hasContiguous(globalObject->arrayStructure()->indexingType())); + + unsigned numElements = node.numChildren(); + + GPRTemporary result(this); + GPRTemporary storage(this); + + GPRReg resultGPR = result.gpr(); + GPRReg storageGPR = storage.gpr(); + + emitAllocateJSArray(globalObject->arrayStructure(), resultGPR, storageGPR, numElements); + + // At this point, one way or another, resultGPR and storageGPR have pointers to + // the JSArray and the Butterfly, respectively. + + for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) { + JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]); + GPRReg opGPR = operand.gpr(); + m_jit.storePtr(opGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx)); + } + + // Yuck, we should *really* have a way of also returning the storageGPR. But + // that's the least of what's wrong with this code. We really shouldn't be + // allocating the array after having computed - and probably spilled to the + // stack - all of the things that will go into the array. The solution to that + // bigger problem will also likely fix the redundancy in reloading the storage + // pointer that we currently have. + + cellResult(resultGPR, m_compileIndex); + break; + } if (!node.numChildren()) { flushRegisters(); @@ -3203,15 +3460,61 @@ void SpeculativeJIT::compile(Node& node) case NewArrayWithSize: { JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin); - if (!globalObject->isHavingABadTime()) + if (!globalObject->isHavingABadTime()) { globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint()); + + SpeculateStrictInt32Operand size(this, node.child1()); + GPRTemporary result(this); + GPRTemporary storage(this); + GPRTemporary scratch(this); + + GPRReg sizeGPR = size.gpr(); + GPRReg resultGPR = result.gpr(); + GPRReg storageGPR = storage.gpr(); + GPRReg scratchGPR = scratch.gpr(); + + MacroAssembler::JumpList slowCases; + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX))); + + ASSERT((1 << 3) == sizeof(JSValue)); + m_jit.move(sizeGPR, scratchGPR); + m_jit.lshift32(TrustedImm32(3), scratchGPR); + m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR); + slowCases.append( + emitAllocateBasicStorage(resultGPR, storageGPR)); + m_jit.subPtr(scratchGPR, storageGPR); + emitAllocateBasicJSObject<JSArray, MarkedBlock::None>( + TrustedImmPtr(globalObject->arrayStructure()), resultGPR, scratchGPR, + storageGPR, sizeof(JSArray), slowCases); + + m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); + + addSlowPathGenerator(adoptPtr( + new CallArrayAllocatorWithVariableSizeSlowPathGenerator( + slowCases, this, operationNewArrayWithSize, resultGPR, + globalObject->arrayStructure(), + globalObject->arrayStructureWithArrayStorage(), + sizeGPR))); + + cellResult(resultGPR, m_compileIndex); + break; + } SpeculateStrictInt32Operand size(this, node.child1()); GPRReg sizeGPR = size.gpr(); 
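
The inline NewArrayWithSize allocation above is worth decoding: scratchGPR gets size << 3 (the element bytes; the ASSERT pins sizeof(JSValue) to eight), resultGPR briefly holds that plus sizeof(IndexingHeader), and after the bump allocation the subPtr rewinds storageGPR by only the element bytes, leaving the header reachable at negative offsets from the element base. A sketch of the arithmetic, with an invented header mirror:

    #include <cstddef>
    #include <cstdint>

    struct IndexingHeaderSketch {
        uint32_t publicLength; // store32(sizeGPR, offsetOfPublicLength())
        uint32_t vectorLength; // store32(sizeGPR, offsetOfVectorLength())
    };

    constexpr size_t butterflyBytesSketch(uint32_t numElements)
    {
        return (size_t{numElements} << 3) + sizeof(IndexingHeaderSketch);
    }
    // Layout after the subPtr: [header][element 0][element 1]..., with storageGPR
    // pointing at element 0 and the header just below it.
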
flushRegisters(); GPRResult result(this); - callOperation(operationNewArrayWithSize, result.gpr(), globalObject->arrayStructure(), sizeGPR); - cellResult(result.gpr(), m_compileIndex); + GPRReg resultGPR = result.gpr(); + GPRReg structureGPR = selectScratchGPR(sizeGPR); + MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)); + m_jit.move(TrustedImmPtr(globalObject->arrayStructure()), structureGPR); + MacroAssembler::Jump done = m_jit.jump(); + bigLength.link(&m_jit); + m_jit.move(TrustedImmPtr(globalObject->arrayStructureWithArrayStorage()), structureGPR); + done.link(&m_jit); + callOperation(operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR); + cellResult(resultGPR, m_compileIndex); break; } @@ -3254,10 +3557,35 @@ void SpeculativeJIT::compile(Node& node) } case NewArrayBuffer: { + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node.codeOrigin); + if (!globalObject->isHavingABadTime()) { + globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint()); + + unsigned numElements = node.numConstants(); + + GPRTemporary result(this); + GPRTemporary storage(this); + + GPRReg resultGPR = result.gpr(); + GPRReg storageGPR = storage.gpr(); + + emitAllocateJSArray(globalObject->arrayStructure(), resultGPR, storageGPR, numElements); + + JSValue* data = m_jit.codeBlock()->constantBuffer(node.startConstant()); + for (unsigned index = 0; index < node.numConstants(); ++index) { + m_jit.storePtr( + ImmPtr(bitwise_cast<void*>(JSValue::encode(data[index]))), + MacroAssembler::Address(storageGPR, sizeof(JSValue) * index)); + } + + cellResult(resultGPR, m_compileIndex); + break; + } + flushRegisters(); GPRResult result(this); - callOperation(operationNewArrayBuffer, result.gpr(), node.startConstant(), node.numConstants()); + callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructure(), node.startConstant(), node.numConstants()); cellResult(result.gpr(), m_compileIndex); break; @@ -3375,7 +3703,7 @@ void SpeculativeJIT::compile(Node& node) case GetCallee: { GPRTemporary result(this); - m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr()); + m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(JSStack::Callee)), result.gpr()); cellResult(result.gpr(), m_compileIndex); break; } @@ -3384,7 +3712,7 @@ void SpeculativeJIT::compile(Node& node) GPRTemporary result(this); GPRReg resultGPR = result.gpr(); - m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR); + m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(JSStack::ScopeChain)), resultGPR); bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain(); int skip = node.scopeChainDepth(); ASSERT(skip || !checkTopLevel); @@ -3973,14 +4301,14 @@ void SpeculativeJIT::compile(Node& node) // Fast case m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR); #if DFG_ENABLE(JIT_ASSERT) - JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(inlineStorageCapacity)); + JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset)); m_jit.breakpoint(); isOutOfLine.link(&m_jit); #endif m_jit.neg32(resolveInfoGPR); m_jit.signExtend32ToPtr(resolveInfoGPR, resolveInfoGPR); 
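
The neg32/signExtend32ToPtr just above, combined with the scaled BaseIndex load that follows, implements JSC's out-of-line property addressing: property storage grows downward from the butterfly pointer, below the indexing header. As plain pointer math (my reading of the emitted operands, not a JSC API):

    #include <cstdint>

    // Address computed above: butterfly + 8*(-offset) + 8*(firstOutOfLineOffset - 2).
    // For the first out-of-line property (offset == firstOutOfLineOffset) that is
    // butterfly[-2], the slot just below the eight-byte indexing header.
    inline uint64_t loadOutOfLineSketch(const uint64_t* butterfly, int offset, int firstOutOfLineOffset)
    {
        int index = -offset + (firstOutOfLineOffset - 2);
        return butterfly[index];
    }
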
m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultGPR); - m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr, (inlineStorageCapacity - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR); + m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr, (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR); addSlowPathGenerator( slowPathCall( @@ -4106,7 +4434,7 @@ void SpeculativeJIT::compile(Node& node) } ASSERT(!node.codeOrigin.inlineCallFrame); - m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR); + m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR); m_jit.sub32(TrustedImm32(1), resultGPR); integerResult(resultGPR, m_compileIndex); break; @@ -4129,7 +4457,7 @@ void SpeculativeJIT::compile(Node& node) jsNumber(node.codeOrigin.inlineCallFrame->arguments.size() - 1)))), resultGPR); } else { - m_jit.load32(JITCompiler::payloadFor(RegisterFile::ArgumentCount), resultGPR); + m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR); m_jit.sub32(TrustedImm32(1), resultGPR); m_jit.orPtr(GPRInfo::tagTypeNumberRegister, resultGPR); } @@ -4178,7 +4506,7 @@ void SpeculativeJIT::compile(Node& node) m_jit.branch32( JITCompiler::AboveOrEqual, resultGPR, - JITCompiler::payloadFor(RegisterFile::ArgumentCount))); + JITCompiler::payloadFor(JSStack::ArgumentCount))); } JITCompiler::JumpList slowArgument; @@ -4243,7 +4571,7 @@ void SpeculativeJIT::compile(Node& node) m_jit.branch32( JITCompiler::AboveOrEqual, resultGPR, - JITCompiler::payloadFor(RegisterFile::ArgumentCount))); + JITCompiler::payloadFor(JSStack::ArgumentCount))); } JITCompiler::JumpList slowArgument; diff --git a/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp b/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp index 5b0b22963..2e44af2d7 100644 --- a/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGStructureCheckHoistingPhase.cpp @@ -122,14 +122,20 @@ public: if (!subNode.shouldGenerate()) continue; switch (subNode.op()) { - case CheckStructure: - case StructureTransitionWatchpoint: { + case CheckStructure: { if (subNode.child1().index() != source) break; noticeStructureCheck(variable, subNode.structureSet()); break; } + case StructureTransitionWatchpoint: { + if (subNode.child1().index() != source) + break; + + noticeStructureCheck(variable, subNode.structure()); + break; + } default: break; } @@ -162,7 +168,7 @@ public: dataLog("Zeroing the structure to hoist for %s because the ratio is %lf.\n", m_graph.nameOfVariableAccessData(variable), variable->voteRatio()); #endif - iter->second.m_structure = 0; + iter->value.m_structure = 0; } // Disable structure check hoisting for variables that cross the OSR entry that @@ -187,7 +193,7 @@ public: HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) continue; - if (!iter->second.m_structure) + if (!iter->value.m_structure) continue; JSValue value = m_graph.m_mustHandleValues[i]; if (!value || !value.isCell()) { @@ -195,162 +201,34 @@ public: dataLog("Zeroing the structure to hoist for %s because the OSR entry value is not a cell: %s.\n", m_graph.nameOfVariableAccessData(variable), value.description()); #endif - iter->second.m_structure = 0; + iter->value.m_structure = 0; continue; } - if (value.asCell()->structure() != iter->second.m_structure) { + if (value.asCell()->structure() != 
iter->value.m_structure) { #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) dataLog("Zeroing the structure to hoist for %s because the OSR entry value has structure %p and we wanted %p.\n", - m_graph.nameOfVariableAccessData(variable), value.asCell()->structure(), iter->second.m_structure); + m_graph.nameOfVariableAccessData(variable), value.asCell()->structure(), iter->value.m_structure); #endif - iter->second.m_structure = 0; + iter->value.m_structure = 0; continue; } } } - // Identify the set of variables that are live across a structure clobber. - - Operands<VariableAccessData*> live( - m_graph.m_blocks[0]->variablesAtTail.numberOfArguments(), - m_graph.m_blocks[0]->variablesAtTail.numberOfLocals()); - for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { - BasicBlock* block = m_graph.m_blocks[blockIndex].get(); - if (!block) - continue; - ASSERT(live.numberOfArguments() == block->variablesAtTail.numberOfArguments()); - ASSERT(live.numberOfLocals() == block->variablesAtTail.numberOfLocals()); - for (unsigned i = live.size(); i--;) { - NodeIndex indexAtTail = block->variablesAtTail[i]; - VariableAccessData* variable; - if (indexAtTail == NoNode) - variable = 0; - else - variable = m_graph[indexAtTail].variableAccessData(); - live[i] = variable; - } - for (unsigned indexInBlock = block->size(); indexInBlock--;) { - NodeIndex nodeIndex = block->at(indexInBlock); - Node& node = m_graph[nodeIndex]; - if (!node.shouldGenerate()) - continue; - switch (node.op()) { - case GetLocal: - case Flush: - // This is a birth. - live.operand(node.local()) = node.variableAccessData(); - break; - - case SetLocal: - case SetArgument: - ASSERT(live.operand(node.local())); // Must be live. - ASSERT(live.operand(node.local()) == node.variableAccessData()); // Must have the variable we expected. - // This is a death. - live.operand(node.local()) = 0; - break; - - // Use the CFA's notion of what clobbers the world. 
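
For the record, the analysis being deleted in this hunk walked each block backwards, maintaining a live set of locals and marking every local live at a world-clobbering node; its results fed the m_isClobbered flag that also disappears below. A generic reconstruction of that scan's shape (simplified types, not the DFG's):

    #include <cstddef>
    #include <set>
    #include <vector>

    enum class OpSketch { GetLocal, SetLocal, ClobberWorld, Other };
    struct NodeSketch { OpSketch op; int local = -1; };

    // Backwards scan: GetLocal is a birth, SetLocal a death, and every local live
    // at a clobber point is recorded as crossing it.
    std::set<int> localsLiveAcrossClobberSketch(const std::vector<NodeSketch>& block)
    {
        std::set<int> live, crossesClobber;
        for (size_t i = block.size(); i--;) {
            const NodeSketch& node = block[i];
            switch (node.op) {
            case OpSketch::GetLocal:
                live.insert(node.local);
                break;
            case OpSketch::SetLocal:
                live.erase(node.local);
                break;
            case OpSketch::ClobberWorld: // Call, PutById, Arrayify, impure GetByVal, ...
                crossesClobber.insert(live.begin(), live.end());
                break;
            case OpSketch::Other:
                break;
            }
        }
        return crossesClobber;
    }
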
- case ValueAdd: - if (m_graph.addShouldSpeculateInteger(node)) - break; - if (Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()])) - break; - clobber(live); - break; - - case CompareLess: - case CompareLessEq: - case CompareGreater: - case CompareGreaterEq: - case CompareEq: { - Node& left = m_graph[node.child1()]; - Node& right = m_graph[node.child2()]; - if (Node::shouldSpeculateInteger(left, right)) - break; - if (Node::shouldSpeculateNumber(left, right)) - break; - if (node.op() == CompareEq) { - if ((m_graph.isConstant(node.child1().index()) - && m_graph.valueOfJSConstant(node.child1().index()).isNull()) - || (m_graph.isConstant(node.child2().index()) - && m_graph.valueOfJSConstant(node.child2().index()).isNull())) - break; - - if (Node::shouldSpeculateFinalObject(left, right)) - break; - if (Node::shouldSpeculateArray(left, right)) - break; - if (left.shouldSpeculateFinalObject() && right.shouldSpeculateFinalObjectOrOther()) - break; - if (right.shouldSpeculateFinalObject() && left.shouldSpeculateFinalObjectOrOther()) - break; - if (left.shouldSpeculateArray() && right.shouldSpeculateArrayOrOther()) - break; - if (right.shouldSpeculateArray() && left.shouldSpeculateArrayOrOther()) - break; - } - clobber(live); - break; - } - - case GetByVal: - case PutByVal: - case PutByValAlias: - if (m_graph.byValIsPure(node)) - break; - clobber(live); - break; - - case GetMyArgumentsLengthSafe: - case GetMyArgumentByValSafe: - case GetById: - case GetByIdFlush: - case PutStructure: - case PhantomPutStructure: - case PutById: - case PutByIdDirect: - case Call: - case Construct: - case Resolve: - case ResolveBase: - case ResolveBaseStrictPut: - case ResolveGlobal: - case ArrayPush: - case ArrayPop: - case Arrayify: - clobber(live); - break; - - default: - ASSERT(node.op() != Phi); - break; - } - } - } - bool changed = false; #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) for (HashMap<VariableAccessData*, CheckData>::iterator it = m_map.begin(); it != m_map.end(); ++it) { - if (!it->second.m_structure) { - dataLog("Not hoisting checks for %s because of heuristics.\n", m_graph.nameOfVariableAccessData(it->first)); - continue; - } - if (it->second.m_isClobbered && !it->second.m_structure->transitionWatchpointSetIsStillValid()) { - dataLog("Not hoisting checks for %s because the structure is clobbered and has an invalid watchpoint set.\n", m_graph.nameOfVariableAccessData(it->first)); + if (!it->value.m_structure) { + dataLog("Not hoisting checks for %s because of heuristics.\n", m_graph.nameOfVariableAccessData(it->key)); continue; } - dataLog("Hoisting checks for %s\n", m_graph.nameOfVariableAccessData(it->first)); + dataLog("Hoisting checks for %s\n", m_graph.nameOfVariableAccessData(it->key)); } #endif // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) - // Make changes: - // 1) If a variable's live range does not span a clobber, then inject structure - // checks before the SetLocal. - // 2) If a variable's live range spans a clobber but is watchpointable, then - // inject structure checks before the SetLocal and replace all other structure - // checks on that variable with structure transition watchpoints. + // Place CheckStructure's at SetLocal sites. 
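
The InsertionSet used below is the standard trick for mutating a block while iterating it: queue (index, node) pairs during the scan and splice them in afterwards, so the scan never invalidates its own indices. A generic sketch of the idea (not WTF's implementation):

    #include <cstddef>
    #include <utility>
    #include <vector>

    template<typename T>
    struct InsertionSetSketch {
        std::vector<std::pair<size_t, T>> insertions; // appended in nondecreasing index order

        void append(size_t index, T value) { insertions.emplace_back(index, std::move(value)); }

        void execute(std::vector<T>& block)
        {
            // Splice back-to-front so earlier indices remain valid.
            for (auto it = insertions.rbegin(); it != insertions.rend(); ++it)
                block.insert(block.begin() + it->first, std::move(it->second));
            insertions.clear();
        }
    };
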
InsertionSet<NodeIndex> insertionSet; for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { @@ -376,9 +254,7 @@ public: HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) break; - if (!iter->second.m_structure) - break; - if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid()) + if (!iter->value.m_structure) break; node.ref(); @@ -392,7 +268,7 @@ public: m_graph.append(getLocal); insertionSet.append(indexInBlock + 1, getLocalIndex); - Node checkStructure(CheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), getLocalIndex); + Node checkStructure(CheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->value.m_structure)), getLocalIndex); checkStructure.ref(); NodeIndex checkStructureIndex = m_graph.size(); m_graph.append(checkStructure); @@ -412,9 +288,7 @@ public: HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(variable); if (iter == m_map.end()) break; - if (!iter->second.m_structure) - break; - if (iter->second.m_isClobbered && !iter->second.m_structure->transitionWatchpointSetIsStillValid()) + if (!iter->value.m_structure) break; // First insert a dead SetLocal to tell OSR that the child's value should @@ -431,7 +305,7 @@ public: m_graph[child1].ref(); // Use a ForwardCheckStructure to indicate that we should exit to the // next bytecode instruction rather than reexecuting the current one. - Node checkStructure(ForwardCheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->second.m_structure)), child1); + Node checkStructure(ForwardCheckStructure, codeOrigin, OpInfo(m_graph.addStructureSet(iter->value.m_structure)), child1); checkStructure.ref(); NodeIndex checkStructureIndex = m_graph.size(); m_graph.append(checkStructure); @@ -440,28 +314,6 @@ public: break; } - case CheckStructure: { - Node& child = m_graph[node.child1()]; - if (child.op() != GetLocal) - break; - HashMap<VariableAccessData*, CheckData>::iterator iter = m_map.find(child.variableAccessData()); - if (iter == m_map.end()) - break; - if (!iter->second.m_structure) - break; - if (!iter->second.m_isClobbered) { - node.setOpAndDefaultFlags(Phantom); - ASSERT(node.refCount() == 1); - break; - } - if (!iter->second.m_structure->transitionWatchpointSetIsStillValid()) - break; - ASSERT(iter->second.m_structure == node.structureSet().singletonStructure()); - node.convertToStructureTransitionWatchpoint(); - changed = true; - break; - } - default: break; } @@ -476,12 +328,12 @@ private: void noticeStructureCheck(VariableAccessData* variable, Structure* structure) { HashMap<VariableAccessData*, CheckData>::AddResult result = - m_map.add(variable, CheckData(structure, false)); + m_map.add(variable, CheckData(structure)); if (result.isNewEntry) return; - if (result.iterator->second.m_structure == structure) + if (result.iterator->value.m_structure == structure) return; - result.iterator->second.m_structure = 0; + result.iterator->value.m_structure = 0; } void noticeStructureCheck(VariableAccessData* variable, const StructureSet& set) @@ -493,38 +345,16 @@ private: noticeStructureCheck(variable, set.singletonStructure()); } - void noticeClobber(VariableAccessData* variable) - { - HashMap<VariableAccessData*, CheckData>::iterator iter = - m_map.find(variable); - if (iter == m_map.end()) - return; - iter->second.m_isClobbered = true; - } - - void clobber(const Operands<VariableAccessData*>& live) - { - for (size_t i = live.size(); i--;) { - 
VariableAccessData* variable = live[i]; - if (!variable) - continue; - noticeClobber(variable); - } - } - struct CheckData { Structure* m_structure; - bool m_isClobbered; CheckData() : m_structure(0) - , m_isClobbered(false) { } - CheckData(Structure* structure, bool isClobbered) + CheckData(Structure* structure) : m_structure(structure) - , m_isClobbered(isClobbered) { } }; diff --git a/Source/JavaScriptCore/dfg/DFGThunks.cpp b/Source/JavaScriptCore/dfg/DFGThunks.cpp index 546aec256..25fcad10a 100644 --- a/Source/JavaScriptCore/dfg/DFGThunks.cpp +++ b/Source/JavaScriptCore/dfg/DFGThunks.cpp @@ -113,7 +113,7 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(JSGlobalData* glob jit.loadPtr( CCallHelpers::Address( GPRInfo::callFrameRegister, - static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::CallerFrame), + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::CallerFrame), GPRInfo::callFrameRegister); jit.peek(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX); jit.setupArgumentsWithExecState(GPRInfo::nonPreservedNonReturnGPR); @@ -136,7 +136,7 @@ static void slowPathFor( GPRInfo::nonArgGPR2, CCallHelpers::Address( GPRInfo::callFrameRegister, - static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ReturnPC)); + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC)); jit.storePtr(GPRInfo::callFrameRegister, &globalData->topCallFrame); jit.poke(GPRInfo::nonPreservedNonReturnGPR, JITSTACKFRAME_ARGS_INDEX); jit.setupArgumentsExecState(); @@ -151,13 +151,13 @@ static void slowPathFor( jit.loadPtr( CCallHelpers::Address( GPRInfo::callFrameRegister, - static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ReturnPC), + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC), GPRInfo::nonPreservedNonReturnGPR); jit.storePtr( CCallHelpers::TrustedImmPtr(0), CCallHelpers::Address( GPRInfo::callFrameRegister, - static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ReturnPC)); + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ReturnPC)); emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR); jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR); emitPointerValidation(jit, GPRInfo::returnValueGPR); @@ -249,19 +249,19 @@ static MacroAssemblerCodeRef virtualForThunkGenerator( GPRInfo::nonArgGPR1, CCallHelpers::Address( GPRInfo::callFrameRegister, - static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain)); + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain)); #else jit.storePtr( GPRInfo::nonArgGPR1, CCallHelpers::Address( GPRInfo::callFrameRegister, - static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain + + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); jit.store32( CCallHelpers::TrustedImm32(JSValue::CellTag), CCallHelpers::Address( GPRInfo::callFrameRegister, - static_cast<ptrdiff_t>(sizeof(Register)) * RegisterFile::ScopeChain + + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); #endif diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.cpp b/Source/JavaScriptCore/dfg/DFGValueSource.cpp index 25d43ee6b..d59e4842f 100644 --- a/Source/JavaScriptCore/dfg/DFGValueSource.cpp +++ b/Source/JavaScriptCore/dfg/DFGValueSource.cpp @@ -39,19 +39,19 @@ void ValueSource::dump(FILE* out) const case SourceIsDead: fprintf(out, "IsDead"); break; - case ValueInRegisterFile: - fprintf(out, "InRegFile"); + case ValueInJSStack: + fprintf(out, "InStack"); 
break; - case Int32InRegisterFile: + case Int32InJSStack: fprintf(out, "Int32"); break; - case CellInRegisterFile: + case CellInJSStack: fprintf(out, "Cell"); break; - case BooleanInRegisterFile: + case BooleanInJSStack: fprintf(out, "Bool"); break; - case DoubleInRegisterFile: + case DoubleInJSStack: fprintf(out, "Double"); break; case ArgumentsSource: diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.h b/Source/JavaScriptCore/dfg/DFGValueSource.h index f776137d0..8a4c66b37 100644 --- a/Source/JavaScriptCore/dfg/DFGValueSource.h +++ b/Source/JavaScriptCore/dfg/DFGValueSource.h @@ -39,11 +39,11 @@ namespace JSC { namespace DFG { enum ValueSourceKind { SourceNotSet, - ValueInRegisterFile, - Int32InRegisterFile, - CellInRegisterFile, - BooleanInRegisterFile, - DoubleInRegisterFile, + ValueInJSStack, + Int32InJSStack, + CellInJSStack, + BooleanInJSStack, + DoubleInJSStack, ArgumentsSource, SourceIsDead, HaveNode @@ -53,35 +53,35 @@ static inline ValueSourceKind dataFormatToValueSourceKind(DataFormat dataFormat) { switch (dataFormat) { case DataFormatInteger: - return Int32InRegisterFile; + return Int32InJSStack; case DataFormatDouble: - return DoubleInRegisterFile; + return DoubleInJSStack; case DataFormatBoolean: - return BooleanInRegisterFile; + return BooleanInJSStack; case DataFormatCell: - return CellInRegisterFile; + return CellInJSStack; case DataFormatDead: return SourceIsDead; case DataFormatArguments: return ArgumentsSource; default: ASSERT(dataFormat & DataFormatJS); - return ValueInRegisterFile; + return ValueInJSStack; } } static inline DataFormat valueSourceKindToDataFormat(ValueSourceKind kind) { switch (kind) { - case ValueInRegisterFile: + case ValueInJSStack: return DataFormatJS; - case Int32InRegisterFile: + case Int32InJSStack: return DataFormatInteger; - case CellInRegisterFile: + case CellInJSStack: return DataFormatCell; - case BooleanInRegisterFile: + case BooleanInJSStack: return DataFormatBoolean; - case DoubleInRegisterFile: + case DoubleInJSStack: return DataFormatDouble; case ArgumentsSource: return DataFormatArguments; @@ -92,7 +92,7 @@ static inline DataFormat valueSourceKindToDataFormat(ValueSourceKind kind) } } -static inline bool isInRegisterFile(ValueSourceKind kind) +static inline bool isInJSStack(ValueSourceKind kind) { DataFormat format = valueSourceKindToDataFormat(kind); return format != DataFormatNone && format < DataFormatOSRMarker; @@ -129,12 +129,12 @@ public: static ValueSource forSpeculation(SpeculatedType prediction) { if (isInt32Speculation(prediction)) - return ValueSource(Int32InRegisterFile); + return ValueSource(Int32InJSStack); if (isArraySpeculation(prediction) || isCellSpeculation(prediction)) - return ValueSource(CellInRegisterFile); + return ValueSource(CellInJSStack); if (isBooleanSpeculation(prediction)) - return ValueSource(BooleanInRegisterFile); - return ValueSource(ValueInRegisterFile); + return ValueSource(BooleanInJSStack); + return ValueSource(ValueInJSStack); } static ValueSource forDataFormat(DataFormat dataFormat) @@ -152,7 +152,7 @@ public: return kindFromNodeIndex(m_nodeIndex); } - bool isInRegisterFile() const { return JSC::DFG::isInRegisterFile(kind()); } + bool isInJSStack() const { return JSC::DFG::isInJSStack(kind()); } bool isTriviallyRecoverable() const { return JSC::DFG::isTriviallyRecoverable(kind()); } DataFormat dataFormat() const @@ -164,20 +164,20 @@ public: { ASSERT(isTriviallyRecoverable()); switch (kind()) { - case ValueInRegisterFile: - return ValueRecovery::alreadyInRegisterFile(); + case 
ValueInJSStack: + return ValueRecovery::alreadyInJSStack(); - - case Int32InRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedInt32(); + case Int32InJSStack: + return ValueRecovery::alreadyInJSStackAsUnboxedInt32(); - - case CellInRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedCell(); + case CellInJSStack: + return ValueRecovery::alreadyInJSStackAsUnboxedCell(); - - case BooleanInRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedBoolean(); + case BooleanInJSStack: + return ValueRecovery::alreadyInJSStackAsUnboxedBoolean(); - - case DoubleInRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedDouble(); + case DoubleInJSStack: + return ValueRecovery::alreadyInJSStackAsUnboxedDouble(); case SourceIsDead: return ValueRecovery::constant(jsUndefined()); diff --git a/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp index a1152bc2b..fa36ccdb5 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp +++ b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp @@ -93,7 +93,7 @@ void VariableEventStream::reconstruct( if (!index) { valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables); for (size_t i = 0; i < valueRecoveries.size(); ++i) - valueRecoveries[i] = ValueRecovery::alreadyInRegisterFile(); + valueRecoveries[i] = ValueRecovery::alreadyInJSStack(); return; } @@ -280,7 +280,7 @@ void VariableEventStream::reconstruct( } valueRecoveries[i] = - ValueRecovery::displacedInRegisterFile(static_cast<VirtualRegister>(info->u.virtualReg), info->format); + ValueRecovery::displacedInJSStack(static_cast<VirtualRegister>(info->u.virtualReg), info->format); } }
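
Summing up the ValueSource/ValueRecovery renames in these last hunks: every "already in" recovery means OSR exit moves no data at all; it just reinterprets the stack slot according to the recorded format. A compact model of the trivially-recoverable mapping (enumerators abbreviated, not JSC's names):

    enum class SourceKindSketch { Value, Int32, Cell, Boolean, Double, Dead };
    enum class RecoverySketch { AlreadyInJSStack, AsUnboxedInt32, AsUnboxedCell, AsUnboxedBoolean, AsUnboxedDouble, ConstantUndefined };

    RecoverySketch trivialRecoverySketch(SourceKindSketch kind)
    {
        switch (kind) {
        case SourceKindSketch::Value:   return RecoverySketch::AlreadyInJSStack;
        case SourceKindSketch::Int32:   return RecoverySketch::AsUnboxedInt32;
        case SourceKindSketch::Cell:    return RecoverySketch::AsUnboxedCell;
        case SourceKindSketch::Boolean: return RecoverySketch::AsUnboxedBoolean;
        case SourceKindSketch::Double:  return RecoverySketch::AsUnboxedDouble;
        case SourceKindSketch::Dead:    return RecoverySketch::ConstantUndefined; // dead values read back as undefined
        }
        return RecoverySketch::AlreadyInJSStack;
    }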