summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore/dfg
diff options
context:
space:
mode:
authorSimon Hausmann <simon.hausmann@nokia.com>2012-05-07 11:21:11 +0200
committerSimon Hausmann <simon.hausmann@nokia.com>2012-05-07 11:21:11 +0200
commit2cf6c8816a73e0132bd8fa3b509d62d7c51b6e47 (patch)
tree988e8c5b116dd0466244ae2fe5af8ee9be926d76 /Source/JavaScriptCore/dfg
parentdd91e772430dc294e3bf478c119ef8d43c0a3358 (diff)
downloadqtwebkit-2cf6c8816a73e0132bd8fa3b509d62d7c51b6e47.tar.gz
Imported WebKit commit 7e538425aa020340619e927792f3d895061fb54b (http://svn.webkit.org/repository/webkit/trunk@116286)
Diffstat (limited to 'Source/JavaScriptCore/dfg')
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractState.cpp189
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractState.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGAdjacencyList.h (renamed from Source/JavaScriptCore/dfg/DFGNodeReferenceBlob.h)48
-rw-r--r--Source/JavaScriptCore/dfg/DFGArgumentPosition.h75
-rw-r--r--Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp235
-rw-r--r--Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp15
-rw-r--r--Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h25
-rw-r--r--Source/JavaScriptCore/dfg/DFGBasicBlock.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp315
-rw-r--r--Source/JavaScriptCore/dfg/DFGCCallHelpers.h11
-rw-r--r--Source/JavaScriptCore/dfg/DFGCFAPhase.cpp2
-rw-r--r--Source/JavaScriptCore/dfg/DFGCSEPhase.cpp55
-rw-r--r--Source/JavaScriptCore/dfg/DFGCapabilities.cpp25
-rw-r--r--Source/JavaScriptCore/dfg/DFGCapabilities.h6
-rw-r--r--Source/JavaScriptCore/dfg/DFGCommon.h25
-rw-r--r--Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h10
-rw-r--r--Source/JavaScriptCore/dfg/DFGDoubleFormatState.h96
-rw-r--r--Source/JavaScriptCore/dfg/DFGDriver.cpp4
-rw-r--r--Source/JavaScriptCore/dfg/DFGEdge.h (renamed from Source/JavaScriptCore/dfg/DFGNodeUse.h)30
-rw-r--r--Source/JavaScriptCore/dfg/DFGFixupPhase.cpp394
-rw-r--r--Source/JavaScriptCore/dfg/DFGFixupPhase.h (renamed from Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h)19
-rw-r--r--Source/JavaScriptCore/dfg/DFGGPRInfo.h15
-rw-r--r--Source/JavaScriptCore/dfg/DFGGenerationInfo.h1
-rw-r--r--Source/JavaScriptCore/dfg/DFGGraph.cpp55
-rw-r--r--Source/JavaScriptCore/dfg/DFGGraph.h45
-rw-r--r--Source/JavaScriptCore/dfg/DFGInsertionSet.h98
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCompiler.cpp31
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCompiler.h14
-rw-r--r--Source/JavaScriptCore/dfg/DFGNode.h432
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeFlags.cpp (renamed from Source/JavaScriptCore/dfg/DFGNode.cpp)71
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeFlags.h102
-rw-r--r--Source/JavaScriptCore/dfg/DFGNodeType.h237
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSREntry.cpp2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSREntry.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExit.cpp15
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExit.h3
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp61
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp37
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp37
-rw-r--r--Source/JavaScriptCore/dfg/DFGOperands.h165
-rw-r--r--Source/JavaScriptCore/dfg/DFGOperations.cpp118
-rw-r--r--Source/JavaScriptCore/dfg/DFGOperations.h73
-rw-r--r--Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp474
-rw-r--r--Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp8
-rw-r--r--Source/JavaScriptCore/dfg/DFGRepatch.cpp110
-rw-r--r--Source/JavaScriptCore/dfg/DFGScoreBoard.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp1005
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h219
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp515
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp535
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableAccessData.h105
-rw-r--r--Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp14
53 files changed, 3988 insertions, 2198 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index 7ab05f329..6df40ca6f 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -100,7 +100,7 @@ void AbstractState::initialize(Graph& graph)
root->cfaShouldRevisit = true;
for (size_t i = 0; i < root->valuesAtHead.numberOfArguments(); ++i) {
Node& node = graph[root->variablesAtHead.argument(i)];
- ASSERT(node.op == SetArgument);
+ ASSERT(node.op() == SetArgument);
if (!node.shouldGenerate()) {
// The argument is dead. We don't do any checks for such arguments, and so
// for the purpose of the analysis, they contain no value.
@@ -118,8 +118,6 @@ void AbstractState::initialize(Graph& graph)
root->valuesAtHead.argument(i).set(PredictInt32);
else if (isArrayPrediction(prediction))
root->valuesAtHead.argument(i).set(PredictArray);
- else if (isByteArrayPrediction(prediction))
- root->valuesAtHead.argument(i).set(PredictByteArray);
else if (isBooleanPrediction(prediction))
root->valuesAtHead.argument(i).set(PredictBoolean);
else if (isInt8ArrayPrediction(prediction))
@@ -222,7 +220,7 @@ bool AbstractState::execute(unsigned indexInBlock)
if (!node.shouldGenerate())
return true;
- switch (node.op) {
+ switch (node.op()) {
case JSConstant:
case WeakJSConstant: {
JSValue value = m_graph.valueOfJSConstant(nodeIndex);
@@ -253,13 +251,11 @@ bool AbstractState::execute(unsigned indexInBlock)
break;
}
- PredictedType predictedType = node.variableAccessData()->prediction();
+ PredictedType predictedType = node.variableAccessData()->argumentAwarePrediction();
if (isInt32Prediction(predictedType))
forNode(node.child1()).filter(PredictInt32);
else if (isArrayPrediction(predictedType))
forNode(node.child1()).filter(PredictArray);
- else if (isByteArrayPrediction(predictedType))
- forNode(node.child1()).filter(PredictByteArray);
else if (isBooleanPrediction(predictedType))
forNode(node.child1()).filter(PredictBoolean);
@@ -290,15 +286,30 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).set(PredictInt32);
break;
+ case DoubleAsInt32:
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictInt32);
+ break;
+
case ValueToInt32:
- if (m_graph[node.child1()].shouldNotSpeculateInteger()) {
- if (m_graph[node.child1()].shouldSpeculateDouble())
- forNode(node.child1()).filter(PredictNumber);
- } else
+ if (m_graph[node.child1()].shouldSpeculateInteger())
forNode(node.child1()).filter(PredictInt32);
+ else if (m_graph[node.child1()].shouldSpeculateNumber())
+ forNode(node.child1()).filter(PredictNumber);
+ else if (m_graph[node.child1()].shouldSpeculateBoolean())
+ forNode(node.child1()).filter(PredictBoolean);
forNode(nodeIndex).set(PredictInt32);
break;
+
+ case Int32ToDouble:
+ forNode(node.child1()).filter(PredictNumber);
+ forNode(nodeIndex).set(PredictDouble);
+ break;
+
+ case CheckNumber:
+ forNode(node.child1()).filter(PredictNumber);
+ break;
case ValueAdd:
case ArithAdd: {
@@ -314,7 +325,7 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).set(PredictDouble);
break;
}
- if (node.op == ValueAdd) {
+ if (node.op() == ValueAdd) {
clobberStructures(indexInBlock);
forNode(nodeIndex).set(PredictString | PredictInt32 | PredictNumber);
break;
@@ -351,7 +362,8 @@ bool AbstractState::execute(unsigned indexInBlock)
case ArithMul:
case ArithDiv:
case ArithMin:
- case ArithMax: {
+ case ArithMax:
+ case ArithMod: {
if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()) {
forNode(node.child1()).filter(PredictInt32);
forNode(node.child2()).filter(PredictInt32);
@@ -364,19 +376,6 @@ bool AbstractState::execute(unsigned indexInBlock)
break;
}
- case ArithMod: {
- if (m_graph[node.child1()].shouldNotSpeculateInteger() || m_graph[node.child2()].shouldNotSpeculateInteger() || !node.canSpeculateInteger()) {
- forNode(node.child1()).filter(PredictNumber);
- forNode(node.child2()).filter(PredictNumber);
- forNode(nodeIndex).set(PredictDouble);
- break;
- }
- forNode(node.child1()).filter(PredictInt32);
- forNode(node.child2()).filter(PredictInt32);
- forNode(nodeIndex).set(PredictInt32);
- break;
- }
-
case ArithAbs:
if (m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger()) {
forNode(node.child1()).filter(PredictInt32);
@@ -394,7 +393,7 @@ bool AbstractState::execute(unsigned indexInBlock)
case LogicalNot: {
Node& child = m_graph[node.child1()];
- if (isBooleanPrediction(child.prediction()) || !child.prediction())
+ if (isBooleanPrediction(child.prediction()))
forNode(node.child1()).filter(PredictBoolean);
else if (child.shouldSpeculateFinalObjectOrOther())
forNode(node.child1()).filter(PredictFinalObject | PredictOther);
@@ -409,12 +408,24 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).set(PredictBoolean);
break;
}
+
+ case IsUndefined:
+ case IsBoolean:
+ case IsNumber:
+ case IsString:
+ case IsObject:
+ case IsFunction: {
+ forNode(nodeIndex).set(PredictBoolean);
+ break;
+ }
case CompareLess:
case CompareLessEq:
case CompareGreater:
case CompareGreaterEq:
case CompareEq: {
+ forNode(nodeIndex).set(PredictBoolean);
+
Node& left = m_graph[node.child1()];
Node& right = m_graph[node.child2()];
PredictedType filter;
@@ -422,17 +433,45 @@ bool AbstractState::execute(unsigned indexInBlock)
filter = PredictInt32;
else if (Node::shouldSpeculateNumber(left, right))
filter = PredictNumber;
- else if (node.op == CompareEq && Node::shouldSpeculateFinalObject(left, right))
- filter = PredictFinalObject;
- else if (node.op == CompareEq && Node::shouldSpeculateArray(left, right))
- filter = PredictArray;
- else {
+ else if (node.op() == CompareEq) {
+ if ((m_graph.isConstant(node.child1().index())
+ && m_graph.valueOfJSConstant(node.child1().index()).isNull())
+ || (m_graph.isConstant(node.child2().index())
+ && m_graph.valueOfJSConstant(node.child2().index()).isNull())) {
+ // We know that this won't clobber the world. But that's all we know.
+ break;
+ }
+
+ if (Node::shouldSpeculateFinalObject(left, right))
+ filter = PredictFinalObject;
+ else if (Node::shouldSpeculateArray(left, right))
+ filter = PredictArray;
+ else if (left.shouldSpeculateFinalObject() && right.shouldSpeculateFinalObjectOrOther()) {
+ forNode(node.child1()).filter(PredictFinalObject);
+ forNode(node.child2()).filter(PredictFinalObject | PredictOther);
+ break;
+ } else if (right.shouldSpeculateFinalObject() && left.shouldSpeculateFinalObjectOrOther()) {
+ forNode(node.child1()).filter(PredictFinalObject | PredictOther);
+ forNode(node.child2()).filter(PredictFinalObject);
+ break;
+ } else if (left.shouldSpeculateArray() && right.shouldSpeculateArrayOrOther()) {
+ forNode(node.child1()).filter(PredictFinalObject);
+ forNode(node.child2()).filter(PredictFinalObject | PredictOther);
+ break;
+ } else if (right.shouldSpeculateArray() && left.shouldSpeculateArrayOrOther()) {
+ forNode(node.child1()).filter(PredictFinalObject | PredictOther);
+ forNode(node.child2()).filter(PredictFinalObject);
+ break;
+ } else {
+ filter = PredictTop;
+ clobberStructures(indexInBlock);
+ }
+ } else {
filter = PredictTop;
clobberStructures(indexInBlock);
}
forNode(node.child1()).filter(filter);
forNode(node.child2()).filter(filter);
- forNode(nodeIndex).set(PredictBoolean);
break;
}
@@ -468,12 +507,6 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).set(PredictString);
break;
}
- if (m_graph[node.child1()].shouldSpeculateByteArray()) {
- forNode(node.child1()).filter(PredictByteArray);
- forNode(node.child2()).filter(PredictInt32);
- forNode(nodeIndex).set(PredictInt32);
- break;
- }
if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
forNode(node.child1()).filter(PredictInt8Array);
@@ -514,7 +547,10 @@ bool AbstractState::execute(unsigned indexInBlock)
if (m_graph[node.child1()].shouldSpeculateUint32Array()) {
forNode(node.child1()).filter(PredictUint32Array);
forNode(node.child2()).filter(PredictInt32);
- forNode(nodeIndex).set(PredictDouble);
+ if (node.shouldSpeculateInteger())
+ forNode(nodeIndex).set(PredictInt32);
+ else
+ forNode(nodeIndex).set(PredictDouble);
break;
}
if (m_graph[node.child1()].shouldSpeculateFloat32Array()) {
@@ -543,58 +579,73 @@ bool AbstractState::execute(unsigned indexInBlock)
break;
}
if (!m_graph[node.child2()].shouldSpeculateInteger() || !isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())) {
- ASSERT(node.op == PutByVal);
+ ASSERT(node.op() == PutByVal);
clobberStructures(indexInBlock);
forNode(nodeIndex).makeTop();
break;
}
- if (m_graph[node.child1()].shouldSpeculateByteArray()) {
- forNode(node.child1()).filter(PredictByteArray);
- forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
- break;
- }
if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
forNode(node.child1()).filter(PredictInt8Array);
forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ forNode(node.child3()).filter(PredictInt32);
+ else
+ forNode(node.child3()).filter(PredictNumber);
break;
}
if (m_graph[node.child1()].shouldSpeculateInt16Array()) {
forNode(node.child1()).filter(PredictInt16Array);
forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ forNode(node.child3()).filter(PredictInt32);
+ else
+ forNode(node.child3()).filter(PredictNumber);
break;
}
if (m_graph[node.child1()].shouldSpeculateInt32Array()) {
forNode(node.child1()).filter(PredictInt32Array);
forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ forNode(node.child3()).filter(PredictInt32);
+ else
+ forNode(node.child3()).filter(PredictNumber);
break;
}
if (m_graph[node.child1()].shouldSpeculateUint8Array()) {
forNode(node.child1()).filter(PredictUint8Array);
forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ forNode(node.child3()).filter(PredictInt32);
+ else
+ forNode(node.child3()).filter(PredictNumber);
break;
}
if (m_graph[node.child1()].shouldSpeculateUint8ClampedArray()) {
forNode(node.child1()).filter(PredictUint8ClampedArray);
forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ forNode(node.child3()).filter(PredictInt32);
+ else
+ forNode(node.child3()).filter(PredictNumber);
break;
}
if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
forNode(node.child1()).filter(PredictUint16Array);
forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ forNode(node.child3()).filter(PredictInt32);
+ else
+ forNode(node.child3()).filter(PredictNumber);
break;
}
if (m_graph[node.child1()].shouldSpeculateUint32Array()) {
forNode(node.child1()).filter(PredictUint32Array);
forNode(node.child2()).filter(PredictInt32);
- forNode(node.child3()).filter(PredictNumber);
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ forNode(node.child3()).filter(PredictInt32);
+ else
+ forNode(node.child3()).filter(PredictNumber);
break;
}
if (m_graph[node.child1()].shouldSpeculateFloat32Array()) {
@@ -625,6 +676,13 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).makeTop();
break;
+ case RegExpExec:
+ case RegExpTest:
+ forNode(node.child1()).filter(PredictCell);
+ forNode(node.child2()).filter(PredictCell);
+ forNode(nodeIndex).makeTop();
+ break;
+
case Jump:
break;
@@ -633,7 +691,7 @@ bool AbstractState::execute(unsigned indexInBlock)
// propagation, and to take it one step further, where a variable's value
// is specialized on each direction of a branch. For now, we don't do this.
Node& child = m_graph[node.child1()];
- if (isBooleanPrediction(child.prediction()) || !child.prediction())
+ if (child.shouldSpeculateBoolean())
forNode(node.child1()).filter(PredictBoolean);
else if (child.shouldSpeculateFinalObjectOrOther())
forNode(node.child1()).filter(PredictFinalObject | PredictOther);
@@ -787,10 +845,6 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).set(PredictInt32);
break;
- case GetByteArrayLength:
- forNode(node.child1()).filter(PredictByteArray);
- forNode(nodeIndex).set(PredictInt32);
- break;
case GetInt8ArrayLength:
forNode(node.child1()).filter(PredictInt8Array);
forNode(nodeIndex).set(PredictInt32);
@@ -854,11 +908,6 @@ bool AbstractState::execute(unsigned indexInBlock)
forNode(nodeIndex).clear();
break;
}
- if (m_graph[node.child1()].shouldSpeculateByteArray()) {
- forNode(node.child1()).filter(PredictByteArray);
- forNode(nodeIndex).clear();
- break;
- }
if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
forNode(node.child1()).filter(PredictInt8Array);
@@ -974,6 +1023,10 @@ bool AbstractState::execute(unsigned indexInBlock)
case InlineStart:
case Nop:
break;
+
+ case LastNodeType:
+ ASSERT_NOT_REACHED();
+ break;
}
return m_isValid;
@@ -1005,10 +1058,10 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
return false;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" It's live, node @%u.\n", nodeIndex);
+ dataLog(" It's live, node @%u.\n", nodeIndex);
#endif
-
- switch (node.op) {
+
+ switch (node.op()) {
case Phi:
case SetArgument:
case Flush:
@@ -1110,7 +1163,7 @@ inline bool AbstractState::mergeToSuccessors(Graph& graph, BasicBlock* basicBloc
ASSERT(terminal.isTerminal());
- switch (terminal.op) {
+ switch (terminal.op()) {
case Jump:
return merge(basicBlock, graph.m_blocks[terminal.takenBlockIndex()].get());
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.h b/Source/JavaScriptCore/dfg/DFGAbstractState.h
index d9d5cc0f8..3325e0703 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.h
@@ -101,7 +101,7 @@ public:
return m_nodes[nodeIndex];
}
- AbstractValue& forNode(NodeUse nodeUse)
+ AbstractValue& forNode(Edge nodeUse)
{
return forNode(nodeUse.index());
}
diff --git a/Source/JavaScriptCore/dfg/DFGNodeReferenceBlob.h b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
index df3ff5f5f..e2b096bf4 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeReferenceBlob.h
+++ b/Source/JavaScriptCore/dfg/DFGAdjacencyList.h
@@ -23,26 +23,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGNodeReferenceBlob_h
-#define DFGNodeReferenceBlob_h
+#ifndef DFGAdjacencyList_h
+#define DFGAdjacencyList_h
#include <wtf/Platform.h>
#if ENABLE(DFG_JIT)
#include "DFGCommon.h"
-#include "DFGNodeUse.h"
+#include "DFGEdge.h"
namespace JSC { namespace DFG {
-class NodeReferenceBlob {
+class AdjacencyList {
public:
enum Kind {
Fixed,
Variable
};
- NodeReferenceBlob(Kind kind)
+ AdjacencyList(Kind kind)
#if !ASSERT_DISABLED
: m_kind(kind)
#endif
@@ -53,7 +53,7 @@ public:
}
}
- NodeReferenceBlob(Kind kind, NodeIndex child1, NodeIndex child2, NodeIndex child3)
+ AdjacencyList(Kind kind, NodeIndex child1, NodeIndex child2, NodeIndex child3)
#if !ASSERT_DISABLED
: m_kind(Fixed)
#endif
@@ -62,7 +62,7 @@ public:
initialize(child1, child2, child3);
}
- NodeReferenceBlob(Kind kind, unsigned firstChild, unsigned numChildren)
+ AdjacencyList(Kind kind, unsigned firstChild, unsigned numChildren)
#if !ASSERT_DISABLED
: m_kind(Variable)
#endif
@@ -72,42 +72,42 @@ public:
setNumChildren(numChildren);
}
- const NodeUse& child(unsigned i) const
+ const Edge& child(unsigned i) const
{
ASSERT(i < 3);
ASSERT(m_kind == Fixed);
return m_words[i];
}
- NodeUse& child(unsigned i)
+ Edge& child(unsigned i)
{
ASSERT(i < 3);
ASSERT(m_kind == Fixed);
return m_words[i];
}
- void setChild(unsigned i, NodeUse nodeUse)
+ void setChild(unsigned i, Edge nodeUse)
{
ASSERT(i < 30);
ASSERT(m_kind == Fixed);
m_words[i] = nodeUse;
}
- NodeUse child1() const { return child(0); }
- NodeUse child2() const { return child(1); }
- NodeUse child3() const { return child(2); }
+ Edge child1() const { return child(0); }
+ Edge child2() const { return child(1); }
+ Edge child3() const { return child(2); }
- NodeUse& child1() { return child(0); }
- NodeUse& child2() { return child(1); }
- NodeUse& child3() { return child(2); }
+ Edge& child1() { return child(0); }
+ Edge& child2() { return child(1); }
+ Edge& child3() { return child(2); }
- void setChild1(NodeUse nodeUse) { setChild(0, nodeUse); }
- void setChild2(NodeUse nodeUse) { setChild(1, nodeUse); }
- void setChild3(NodeUse nodeUse) { setChild(2, nodeUse); }
+ void setChild1(Edge nodeUse) { setChild(0, nodeUse); }
+ void setChild2(Edge nodeUse) { setChild(1, nodeUse); }
+ void setChild3(Edge nodeUse) { setChild(2, nodeUse); }
- NodeUse child1Unchecked() const { return m_words[0]; }
+ Edge child1Unchecked() const { return m_words[0]; }
- void initialize(NodeUse child1, NodeUse child2, NodeUse child3)
+ void initialize(Edge child1, Edge child2, Edge child3)
{
child(0) = child1;
child(1) = child2;
@@ -116,7 +116,7 @@ public:
void initialize(NodeIndex child1, NodeIndex child2, NodeIndex child3)
{
- initialize(NodeUse(child1), NodeUse(child2), NodeUse(child3));
+ initialize(Edge(child1), Edge(child2), Edge(child3));
}
unsigned firstChild() const
@@ -142,7 +142,7 @@ public:
}
private:
- NodeUse m_words[3];
+ Edge m_words[3];
#if !ASSERT_DISABLED
Kind m_kind;
#endif
@@ -152,4 +152,4 @@ private:
#endif // ENABLE(DFG_JIT)
-#endif // DFGNodeReferenceBlob_h
+#endif // DFGAdjacencyList_h
diff --git a/Source/JavaScriptCore/dfg/DFGArgumentPosition.h b/Source/JavaScriptCore/dfg/DFGArgumentPosition.h
new file mode 100644
index 000000000..ed447ff91
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGArgumentPosition.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGArgumentPosition_h
+#define DFGArgumentPosition_h
+
+#include "DFGDoubleFormatState.h"
+#include "DFGVariableAccessData.h"
+#include "PredictedType.h"
+
+namespace JSC { namespace DFG {
+
+class ArgumentPosition {
+public:
+ ArgumentPosition()
+ : m_prediction(PredictNone)
+ , m_doubleFormatState(EmptyDoubleFormatState)
+ {
+ }
+
+ void addVariable(VariableAccessData* variable)
+ {
+ m_variables.append(variable);
+ }
+
+ bool mergeArgumentAwareness()
+ {
+ bool changed = false;
+ for (unsigned i = 0; i < m_variables.size(); ++i) {
+ changed |= mergePrediction(m_prediction, m_variables[i]->argumentAwarePrediction());
+ changed |= mergeDoubleFormatState(m_doubleFormatState, m_variables[i]->doubleFormatState());
+ }
+ if (!changed)
+ return false;
+ changed = false;
+ for (unsigned i = 0; i < m_variables.size(); ++i) {
+ changed |= m_variables[i]->mergeArgumentAwarePrediction(m_prediction);
+ changed |= m_variables[i]->mergeDoubleFormatState(m_doubleFormatState);
+ }
+ return changed;
+ }
+
+private:
+ PredictedType m_prediction;
+ DoubleFormatState m_doubleFormatState;
+
+ Vector<VariableAccessData*, 2> m_variables;
+};
+
+} } // namespace JSC::DFG
+
+#endif // DFGArgumentPosition_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp b/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp
deleted file mode 100644
index 9a49364dd..000000000
--- a/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp
+++ /dev/null
@@ -1,235 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGArithNodeFlagsInferencePhase.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGGraph.h"
-#include "DFGPhase.h"
-
-namespace JSC { namespace DFG {
-
-class ArithNodeFlagsInferencePhase : public Phase {
-public:
- ArithNodeFlagsInferencePhase(Graph& graph)
- : Phase(graph, "arithmetic node flags inference")
- {
- }
-
- void run()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- m_count = 0;
-#endif
- do {
- m_changed = false;
-
- // Up here we start with a backward pass because we suspect that to be
- // more profitable.
- propagateBackward();
- if (!m_changed)
- break;
-
- m_changed = false;
- propagateForward();
- } while (m_changed);
- }
-
-private:
- bool isNotNegZero(NodeIndex nodeIndex)
- {
- if (!m_graph.isNumberConstant(nodeIndex))
- return false;
- double value = m_graph.valueOfNumberConstant(nodeIndex);
- return !value && 1.0 / value < 0.0;
- }
-
- bool isNotZero(NodeIndex nodeIndex)
- {
- if (!m_graph.isNumberConstant(nodeIndex))
- return false;
- return !!m_graph.valueOfNumberConstant(nodeIndex);
- }
-
- void propagate(Node& node)
- {
- if (!node.shouldGenerate())
- return;
-
- NodeType op = static_cast<NodeType>(node.op);
- NodeFlags flags = node.flags;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" %s @%u: %s ", Graph::opName(op), m_compileIndex, arithNodeFlagsAsString(flags));
-#endif
-
- flags &= NodeUsedAsMask;
-
- bool changed = false;
-
- switch (op) {
- case ValueToInt32:
- case BitAnd:
- case BitOr:
- case BitXor:
- case BitLShift:
- case BitRShift:
- case BitURShift: {
- // These operations are perfectly happy with truncated integers,
- // so we don't want to propagate anything.
- break;
- }
-
- case UInt32ToNumber: {
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithAdd:
- case ValueAdd: {
- if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
- flags &= ~NodeNeedsNegZero;
-
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithSub: {
- if (isNotZero(node.child1().index()) || isNotZero(node.child2().index()))
- flags &= ~NodeNeedsNegZero;
-
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithNegate: {
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithMul:
- case ArithDiv: {
- // As soon as a multiply happens, we can easily end up in the part
- // of the double domain where the point at which you do truncation
- // can change the outcome. So, ArithMul always checks for overflow
- // no matter what, and always forces its inputs to check as well.
-
- flags |= NodeUsedAsNumber | NodeNeedsNegZero;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithMin:
- case ArithMax: {
- flags |= NodeUsedAsNumber;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithAbs: {
- flags &= ~NodeNeedsNegZero;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- break;
- }
-
- case PutByVal: {
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
- changed |= m_graph[node.child3()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
- break;
- }
-
- case GetByVal: {
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
- break;
- }
-
- default:
- flags |= NodeUsedAsNumber | NodeNeedsNegZero;
- if (node.flags & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
- changed |= m_graph[m_graph.m_varArgChildren[childIdx]].mergeArithNodeFlags(flags);
- } else {
- if (!node.child1())
- break;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- if (!node.child2())
- break;
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- if (!node.child3())
- break;
- changed |= m_graph[node.child3()].mergeArithNodeFlags(flags);
- }
- break;
- }
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog("%s\n", changed ? "CHANGED" : "");
-#endif
-
- m_changed |= changed;
- }
-
- void propagateForward()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog("Propagating arithmetic node flags forward [%u]\n", ++m_count);
-#endif
- for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
- propagate(m_graph[m_compileIndex]);
- }
-
- void propagateBackward()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog("Propagating arithmetic node flags backward [%u]\n", ++m_count);
-#endif
- for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
- propagate(m_graph[m_compileIndex]);
- }
-
- NodeIndex m_compileIndex;
- bool m_changed;
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- unsigned m_count;
-#endif
-};
-
-void performArithNodeFlagsInference(Graph& graph)
-{
- runPhase<ArithNodeFlagsInferencePhase>(graph);
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
index 969101e87..15f6d19a5 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp
@@ -38,12 +38,12 @@ Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock*
ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
ASSERT(codeBlock->jitCodeMap());
- std::pair<HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset> >::iterator, bool> result = m_decodedCodeMaps.add(codeBlock, Vector<BytecodeAndMachineOffset>());
+ HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset> >::AddResult result = m_decodedCodeMaps.add(codeBlock, Vector<BytecodeAndMachineOffset>());
- if (result.second)
- codeBlock->jitCodeMap()->decode(result.first->second);
+ if (result.isNewEntry)
+ codeBlock->jitCodeMap()->decode(result.iterator->second);
- return result.first->second;
+ return result.iterator->second;
}
#if ENABLE(SAMPLING_FLAGS)
@@ -140,6 +140,13 @@ void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
checkCell.link(this);
}
#endif // USE(JSVALUE32_64)
+
+void AssemblyHelpers::jitAssertHasValidCallFrame()
+{
+ Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7));
+ breakpoint();
+ checkCFR.link(this);
+}
#endif // DFG_ENABLE(JIT_ASSERT)
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
index 00a226d4c..e7a3132f3 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -150,6 +150,23 @@ public:
return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
+ static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
+ {
+ if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
+ return GPRInfo::regT0;
+
+ if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
+ return GPRInfo::regT1;
+
+ if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
+ return GPRInfo::regT2;
+
+ if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
+ return GPRInfo::regT3;
+
+ return GPRInfo::regT4;
+ }
+
// Add a debug call. This call has no effect on JIT code execution state.
void debugCall(V_DFGDebugOperation_EP function, void* argument)
{
@@ -164,14 +181,16 @@ public:
#if CPU(X86_64) || CPU(ARM_THUMB2)
move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1);
#elif CPU(X86)
poke(GPRInfo::callFrameRegister, 0);
poke(TrustedImmPtr(argument), 1);
+ GPRReg scratch = GPRInfo::regT0;
#else
#error "DFG JIT not supported on this platform."
#endif
- move(TrustedImmPtr(reinterpret_cast<void*>(function)), GPRInfo::regT0);
- call(GPRInfo::regT0);
+ move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
+ call(scratch);
for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
@@ -187,12 +206,14 @@ public:
void jitAssertIsJSNumber(GPRReg);
void jitAssertIsJSDouble(GPRReg);
void jitAssertIsCell(GPRReg);
+ void jitAssertHasValidCallFrame();
#else
void jitAssertIsInt32(GPRReg) { }
void jitAssertIsJSInt32(GPRReg) { }
void jitAssertIsJSNumber(GPRReg) { }
void jitAssertIsJSDouble(GPRReg) { }
void jitAssertIsCell(GPRReg) { }
+ void jitAssertHasValidCallFrame() { }
#endif
// These methods convert between doubles, and doubles boxed and JSValues.
diff --git a/Source/JavaScriptCore/dfg/DFGBasicBlock.h b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
index 1c890b498..92df58d09 100644
--- a/Source/JavaScriptCore/dfg/DFGBasicBlock.h
+++ b/Source/JavaScriptCore/dfg/DFGBasicBlock.h
@@ -30,7 +30,7 @@
#include "DFGAbstractValue.h"
#include "DFGNode.h"
-#include "DFGOperands.h"
+#include "Operands.h"
#include <wtf/OwnPtr.h>
#include <wtf/Vector.h>
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 3a3678d12..7a2d7bdee 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -89,6 +89,8 @@ private:
void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
// Handle inlining. Return true if it succeeded, false if we need to plant a call.
bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
+ // Handle setting the result of an intrinsic.
+ void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
// Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
// Prepare to parse a block.
@@ -162,7 +164,7 @@ private:
NodeIndex injectLazyOperandPrediction(NodeIndex nodeIndex)
{
Node& node = m_graph[nodeIndex];
- ASSERT(node.op == GetLocal);
+ ASSERT(node.op() == GetLocal);
ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
PredictedType prediction =
m_inlineStackTop->m_lazyOperands.prediction(
@@ -182,13 +184,13 @@ private:
if (nodeIndex != NoNode) {
Node* nodePtr = &m_graph[nodeIndex];
- if (nodePtr->op == Flush) {
+ if (nodePtr->op() == Flush) {
// Two possibilities: either the block wants the local to be live
// but has not loaded its value, or it has loaded its value, in
// which case we're done.
nodeIndex = nodePtr->child1().index();
Node& flushChild = m_graph[nodeIndex];
- if (flushChild.op == Phi) {
+ if (flushChild.op() == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
@@ -198,7 +200,7 @@ private:
}
ASSERT(&m_graph[nodeIndex] == nodePtr);
- ASSERT(nodePtr->op != Flush);
+ ASSERT(nodePtr->op() != Flush);
if (m_graph.localIsCaptured(operand)) {
// We wish to use the same variable access data as the previous access,
@@ -206,15 +208,15 @@ private:
// know, at this stage of compilation, the local has been clobbered.
// Make sure we link to the Phi node, not to the GetLocal.
- if (nodePtr->op == GetLocal)
+ if (nodePtr->op() == GetLocal)
nodeIndex = nodePtr->child1().index();
return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
}
- if (nodePtr->op == GetLocal)
+ if (nodePtr->op() == GetLocal)
return nodeIndex;
- ASSERT(nodePtr->op == SetLocal);
+ ASSERT(nodePtr->op() == SetLocal);
return nodePtr->child1().index();
}
@@ -238,7 +240,29 @@ private:
VariableAccessData* variableAccessData = newVariableAccessData(operand);
NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
- if (m_graph.localIsCaptured(operand))
+
+ bool shouldFlush = m_graph.localIsCaptured(operand);
+
+ if (!shouldFlush) {
+ // If this is in argument position, then it should be flushed.
+ for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
+ InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
+ if (!inlineCallFrame)
+ break;
+ if (static_cast<int>(operand) >= inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize)
+ continue;
+ if (static_cast<int>(operand) == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
+ continue;
+ if (operand < inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize - inlineCallFrame->arguments.size())
+ continue;
+ int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
+ stack->m_argumentPositions[argument]->addVariable(variableAccessData);
+ shouldFlush = true;
+ break;
+ }
+ }
+
+ if (shouldFlush)
addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
}
@@ -252,13 +276,13 @@ private:
if (nodeIndex != NoNode) {
Node* nodePtr = &m_graph[nodeIndex];
- if (nodePtr->op == Flush) {
+ if (nodePtr->op() == Flush) {
// Two possibilities: either the block wants the local to be live
// but has not loaded its value, or it has loaded its value, in
// which case we're done.
nodeIndex = nodePtr->child1().index();
Node& flushChild = m_graph[nodeIndex];
- if (flushChild.op == Phi) {
+ if (flushChild.op() == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
@@ -268,9 +292,9 @@ private:
}
ASSERT(&m_graph[nodeIndex] == nodePtr);
- ASSERT(nodePtr->op != Flush);
+ ASSERT(nodePtr->op() != Flush);
- if (nodePtr->op == SetArgument) {
+ if (nodePtr->op() == SetArgument) {
// We're getting an argument in the first basic block; link
// the GetLocal to the SetArgument.
ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
@@ -280,15 +304,15 @@ private:
}
if (m_graph.argumentIsCaptured(argument)) {
- if (nodePtr->op == GetLocal)
+ if (nodePtr->op() == GetLocal)
nodeIndex = nodePtr->child1().index();
return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
}
- if (nodePtr->op == GetLocal)
+ if (nodePtr->op() == GetLocal)
return nodeIndex;
- ASSERT(nodePtr->op == SetLocal);
+ ASSERT(nodePtr->op() == SetLocal);
return nodePtr->child1().index();
}
@@ -309,13 +333,17 @@ private:
ASSERT(argument < m_numArguments);
VariableAccessData* variableAccessData = newVariableAccessData(operand);
+ InlineStackEntry* stack = m_inlineStackTop;
+ while (stack->m_inlineCallFrame) // find the machine stack entry.
+ stack = stack->m_caller;
+ stack->m_argumentPositions[argument]->addVariable(variableAccessData);
NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
- if (m_graph.argumentIsCaptured(argument))
- addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
+ // Always flush arguments.
+ addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
}
- void flushArgument(int operand)
+ VariableAccessData* flushArgument(int operand)
{
// FIXME: This should check if the same operand had already been flushed to
// some other local variable.
@@ -337,16 +365,26 @@ private:
if (nodeIndex != NoNode) {
Node& node = m_graph[nodeIndex];
- if (node.op == Flush)
+ switch (node.op()) {
+ case Flush:
nodeIndex = node.child1().index();
+ break;
+ case GetLocal:
+ nodeIndex = node.child1().index();
+ break;
+ default:
+ break;
+ }
- ASSERT(m_graph[nodeIndex].op != Flush);
+ ASSERT(m_graph[nodeIndex].op() != Flush
+ && m_graph[nodeIndex].op() != GetLocal);
// Emit a Flush regardless of whether we already flushed it.
// This gives us guidance to see that the variable also needs to be flushed
// for arguments, even if it already had to be flushed for other reasons.
- addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
- return;
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
+ return variableAccessData;
}
VariableAccessData* variableAccessData = newVariableAccessData(operand);
@@ -361,6 +399,7 @@ private:
m_currentBlock->variablesAtTail.local(index) = nodeIndex;
m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
}
+ return variableAccessData;
}
// Get an operand, and perform a ToInt32/ToNumber conversion on it.
@@ -377,11 +416,11 @@ private:
if (node.hasInt32Result())
return index;
- if (node.op == UInt32ToNumber)
+ if (node.op() == UInt32ToNumber)
return node.child1().index();
// Check for numeric constants boxed as JSValues.
- if (node.op == JSConstant) {
+ if (node.op() == JSConstant) {
JSValue v = valueOfJSConstant(index);
if (v.isInt32())
return getJSConstant(node.constantNumber());
@@ -427,7 +466,7 @@ private:
// Convenience methods for checking nodes for constants.
bool isJSConstant(NodeIndex index)
{
- return m_graph[index].op == JSConstant;
+ return m_graph[index].op() == JSConstant;
}
bool isInt32Constant(NodeIndex nodeIndex)
{
@@ -551,11 +590,11 @@ private:
NodeIndex cellConstant(JSCell* cell)
{
- pair<HashMap<JSCell*, NodeIndex>::iterator, bool> iter = m_cellConstantNodes.add(cell, NoNode);
- if (iter.second)
- iter.first->second = addToGraph(WeakJSConstant, OpInfo(cell));
+ HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
+ if (result.isNewEntry)
+ result.iterator->second = addToGraph(WeakJSConstant, OpInfo(cell));
- return iter.first->second;
+ return result.iterator->second;
}
CodeOrigin currentCodeOrigin()
@@ -626,7 +665,7 @@ private:
void addVarArgChild(NodeIndex child)
{
- m_graph.m_varArgChildren.append(NodeUse(child));
+ m_graph.m_varArgChildren.append(Edge(child));
m_numPassedVarArgs++;
}
@@ -693,23 +732,27 @@ private:
NodeIndex makeSafe(NodeIndex nodeIndex)
{
- if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
+ Node& node = m_graph[nodeIndex];
+
+ bool likelyToTakeSlowCase;
+ if (!isX86() && node.op() == ArithMod)
+ likelyToTakeSlowCase = false;
+ else
+ likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);
+
+ if (!likelyToTakeSlowCase
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
return nodeIndex;
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(static_cast<NodeType>(m_graph[nodeIndex].op)), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
-#endif
-
- switch (m_graph[nodeIndex].op) {
+ switch (m_graph[nodeIndex].op()) {
case UInt32ToNumber:
case ArithAdd:
case ArithSub:
case ArithNegate:
case ValueAdd:
- case ArithMod: // for ArithMode "MayOverflow" means we tried to divide by zero, or we saw double.
- m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
+ case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
+ m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
break;
case ArithMul:
@@ -718,13 +761,13 @@ private:
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
- m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
+ m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
} else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
- m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
+ m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
}
break;
@@ -738,7 +781,7 @@ private:
NodeIndex makeDivSafe(NodeIndex nodeIndex)
{
- ASSERT(m_graph[nodeIndex].op == ArithDiv);
+ ASSERT(m_graph[nodeIndex].op() == ArithDiv);
// The main slow case counter for op_div in the old JIT counts only when
// the operands are not numbers. We don't care about that since we already
@@ -752,12 +795,12 @@ private:
return nodeIndex;
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(static_cast<NodeType>(m_graph[nodeIndex].op)), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+ dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
// FIXME: It might be possible to make this more granular. The DFG certainly can
// distinguish between negative zero and overflow in its exit profiles.
- m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
+ m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
return nodeIndex;
}
@@ -936,6 +979,9 @@ private:
// Did we have any early returns?
bool m_didEarlyReturn;
+ // Pointers to the argument position trackers for this slice of code.
+ Vector<ArgumentPosition*> m_argumentPositions;
+
InlineStackEntry* m_caller;
InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);
@@ -993,13 +1039,19 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
NodeIndex callTarget = get(currentInstruction[1].u.operand);
enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Slow case count for call at @%zu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
-#endif
-
CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
m_inlineStackTop->m_profiledBlock, m_currentIndex);
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
+ if (callLinkStatus.isSet()) {
+ if (callLinkStatus.couldTakeSlowPath())
+ dataLog("could take slow path, ");
+ dataLog("target = %p\n", callLinkStatus.callTarget());
+ } else
+ dataLog("not set.\n");
+#endif
+
if (m_graph.isFunctionConstant(callTarget))
callType = ConstantFunction;
else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
@@ -1125,14 +1177,15 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// FIXME: Don't flush constants!
+ Vector<VariableAccessData*, 8> arguments;
for (int i = 1; i < argumentCountIncludingThis; ++i)
- flushArgument(registerOffset + argumentToOperand(i));
+ arguments.append(flushArgument(registerOffset + argumentToOperand(i)));
int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;
// Make sure that the area used by the call frame is reserved.
for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
- m_preservedVars.set(m_inlineStackTop->remapOperand(arg));
+ m_preservedVars.set(arg);
// Make sure that we have enough locals.
unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
@@ -1144,6 +1197,10 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);
+ // Link up the argument variable access datas to their argument positions.
+ for (int i = 1; i < argumentCountIncludingThis; ++i)
+ inlineStackEntry.m_argumentPositions[i]->addVariable(arguments[i - 1]);
+
// This is where the actual inlining really happens.
unsigned oldIndex = m_currentIndex;
unsigned oldProfilingIndex = m_currentProfilingIndex;
@@ -1222,7 +1279,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
ASSERT(!block->isLinked);
Node& node = m_graph[block->last()];
- ASSERT(node.op == Jump);
+ ASSERT(node.op() == Jump);
ASSERT(node.takenBlockIndex() == NoBlock);
node.setTakenBlockIndex(m_graph.m_blocks.size());
inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
@@ -1251,23 +1308,30 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
return true;
}
-bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
+void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
{
if (!usesResult)
- return true;
+ return;
+ set(resultOperand, nodeIndex);
+}
+bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
+{
if (argumentCountIncludingThis == 1) { // Math.min()
- set(resultOperand, constantNaN());
+ setIntrinsicResult(usesResult, resultOperand, constantNaN());
return true;
}
if (argumentCountIncludingThis == 2) { // Math.min(x)
- set(resultOperand, get(registerOffset + argumentToOperand(1)));
+ // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
+ NodeIndex result = get(registerOffset + argumentToOperand(1));
+ addToGraph(CheckNumber, result);
+ setIntrinsicResult(usesResult, resultOperand, result);
return true;
}
if (argumentCountIncludingThis == 3) { // Math.min(x, y)
- set(resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
+ setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
return true;
}
@@ -1281,14 +1345,8 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
{
switch (intrinsic) {
case AbsIntrinsic: {
- if (!usesResult) {
- // There is no such thing as executing abs for effect, so this
- // is dead code.
- return true;
- }
-
if (argumentCountIncludingThis == 1) { // Math.abs()
- set(resultOperand, constantNaN());
+ setIntrinsicResult(usesResult, resultOperand, constantNaN());
return true;
}
@@ -1297,8 +1355,8 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
- m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow);
- set(resultOperand, nodeIndex);
+ m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
+ setIntrinsicResult(usesResult, resultOperand, nodeIndex);
return true;
}
@@ -1309,18 +1367,15 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
case SqrtIntrinsic: {
- if (!usesResult)
- return true;
-
if (argumentCountIncludingThis == 1) { // Math.sqrt()
- set(resultOperand, constantNaN());
+ setIntrinsicResult(usesResult, resultOperand, constantNaN());
return true;
}
if (!MacroAssembler::supportsFloatingPointSqrt())
return false;
- set(resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
+ setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
return true;
}
@@ -1379,6 +1434,28 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
return true;
}
+ case RegExpExecIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
+ if (usesResult)
+ set(resultOperand, regExpExec);
+
+ return true;
+ }
+
+ case RegExpTestIntrinsic: {
+ if (argumentCountIncludingThis != 2)
+ return false;
+
+ NodeIndex regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
+ if (usesResult)
+ set(resultOperand, regExpExec);
+
+ return true;
+ }
+
default:
return false;
}
@@ -1449,7 +1526,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_convert_this: {
NodeIndex op1 = getThis();
- if (m_graph[op1].op == ConvertThis)
+ if (m_graph[op1].op() == ConvertThis)
setThis(op1);
else
setThis(addToGraph(ConvertThis, op1));
@@ -1678,6 +1755,42 @@ bool ByteCodeParser::parseBlock(unsigned limit)
set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
NEXT_OPCODE(op_instanceof);
}
+
+ case op_is_undefined: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
+ NEXT_OPCODE(op_is_undefined);
+ }
+
+ case op_is_boolean: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
+ NEXT_OPCODE(op_is_boolean);
+ }
+
+ case op_is_number: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
+ NEXT_OPCODE(op_is_number);
+ }
+
+ case op_is_string: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(IsString, value));
+ NEXT_OPCODE(op_is_string);
+ }
+
+ case op_is_object: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
+ NEXT_OPCODE(op_is_object);
+ }
+
+ case op_is_function: {
+ NodeIndex value = get(currentInstruction[2].u.operand);
+ set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
+ NEXT_OPCODE(op_is_function);
+ }
case op_not: {
NodeIndex value = get(currentInstruction[2].u.operand);
@@ -1857,10 +1970,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Slow case count for GetById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
-#endif
-
if (getByIdStatus.isSimpleDirect()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
ASSERT(getByIdStatus.structureSet().size());
@@ -1871,10 +1980,20 @@ bool ByteCodeParser::parseBlock(unsigned limit)
addToGraph(ForceOSRExit);
addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
- set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), addToGraph(GetPropertyStorage, base)));
+ NodeIndex propertyStorage;
+ size_t offsetOffset;
+ if (getByIdStatus.structureSet().allAreUsingInlinePropertyStorage()) {
+ propertyStorage = base;
+ ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
+ offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
+ } else {
+ propertyStorage = addToGraph(GetPropertyStorage, base);
+ offsetOffset = 0;
+ }
+ set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage));
StorageAccessData storageAccessData;
- storageAccessData.offset = getByIdStatus.offset();
+ storageAccessData.offset = getByIdStatus.offset() + offsetOffset;
storageAccessData.identifierNumber = identifierNumber;
m_graph.m_storageAccessData.append(storageAccessData);
} else
@@ -1899,10 +2018,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("Slow case count for PutById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
-#endif
-
if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);
@@ -1971,9 +2086,8 @@ bool ByteCodeParser::parseBlock(unsigned limit)
case op_get_global_var: {
PredictedType prediction = getPrediction();
- NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand));
+ NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand), OpInfo(prediction));
set(currentInstruction[1].u.operand, getGlobalVar);
- m_graph.predictGlobalVar(currentInstruction[2].u.operand, prediction);
NEXT_OPCODE(op_get_global_var);
}
@@ -2340,7 +2454,7 @@ void ByteCodeParser::processPhiStack()
else
predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
- } else if (m_graph[valueInPredecessor].op == GetLocal) {
+ } else if (m_graph[valueInPredecessor].op() == GetLocal) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Found GetLocal @%u.\n", valueInPredecessor);
#endif
@@ -2357,10 +2471,10 @@ void ByteCodeParser::processPhiStack()
dataLog(" Found @%u.\n", valueInPredecessor);
#endif
}
- ASSERT(m_graph[valueInPredecessor].op == SetLocal
- || m_graph[valueInPredecessor].op == Phi
- || m_graph[valueInPredecessor].op == Flush
- || (m_graph[valueInPredecessor].op == SetArgument
+ ASSERT(m_graph[valueInPredecessor].op() == SetLocal
+ || m_graph[valueInPredecessor].op() == Phi
+ || m_graph[valueInPredecessor].op() == Flush
+ || (m_graph[valueInPredecessor].op() == SetArgument
&& stackType == ArgumentPhiStack));
VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();
@@ -2382,7 +2496,7 @@ void ByteCodeParser::processPhiStack()
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
- phiNode->children.setChild1(NodeUse(valueInPredecessor));
+ phiNode->children.setChild1(Edge(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Children of @%u: ", entry.m_phi);
phiNode->dumpChildren(WTF::dataFile());
@@ -2394,7 +2508,7 @@ void ByteCodeParser::processPhiStack()
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
- phiNode->children.setChild2(NodeUse(valueInPredecessor));
+ phiNode->children.setChild2(Edge(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Children of @%u: ", entry.m_phi);
phiNode->dumpChildren(WTF::dataFile());
@@ -2406,7 +2520,7 @@ void ByteCodeParser::processPhiStack()
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
- phiNode->children.setChild3(NodeUse(valueInPredecessor));
+ phiNode->children.setChild3(Edge(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Children of @%u: ", entry.m_phi);
phiNode->dumpChildren(WTF::dataFile());
@@ -2460,7 +2574,7 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTa
Node& node = m_graph[block->last()];
ASSERT(node.isTerminal());
- switch (node.op) {
+ switch (node.op()) {
case Jump:
node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
@@ -2564,6 +2678,13 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse
, m_didEarlyReturn(false)
, m_caller(byteCodeParser->m_inlineStackTop)
{
+ m_argumentPositions.resize(codeBlock->numParameters());
+ for (unsigned i = codeBlock->numParameters(); i--;) {
+ byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
+ ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
+ m_argumentPositions[i] = argumentPosition;
+ }
+
if (m_caller) {
// Inline case.
ASSERT(codeBlock != byteCodeParser->m_codeBlock);
@@ -2589,10 +2710,10 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse
for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
StringImpl* rep = codeBlock->identifier(i).impl();
- pair<IdentifierMap::iterator, bool> result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
- if (result.second)
+ IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
+ if (result.isNewEntry)
byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
- m_identifierRemap[i] = result.first->second;
+ m_identifierRemap[i] = result.iterator->second;
}
for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
@@ -2605,12 +2726,12 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse
m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
continue;
}
- pair<JSValueMap::iterator, bool> result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
- if (result.second) {
+ JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
+ if (result.isNewEntry) {
byteCodeParser->m_codeBlock->addConstant(value);
byteCodeParser->m_constants.append(ConstantRecord());
}
- m_constantRemap[i] = result.first->second;
+ m_constantRemap[i] = result.iterator->second;
}
m_callsiteBlockHeadNeedsLinking = true;
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
index 16793bb46..256608f0d 100644
--- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -82,6 +82,12 @@ public:
addCallArgument(arg2);
}
+ ALWAYS_INLINE void setupArguments(GPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ }
+
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
{
resetCallArguments();
@@ -386,6 +392,11 @@ public:
}
#endif
+ ALWAYS_INLINE void setupArguments(GPRReg arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
{
setupTwoStubArgs<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
index b4e75f808..6e69c1094 100644
--- a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
@@ -87,7 +87,7 @@ private:
if (!m_graph[nodeIndex].shouldGenerate())
continue;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" %s @%u: ", Graph::opName(static_cast<NodeType>(m_graph[nodeIndex].op)), nodeIndex);
+ dataLog(" %s @%u: ", Graph::opName(m_graph[nodeIndex].op()), nodeIndex);
m_state.dump(WTF::dataFile());
dataLog("\n");
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
index 82e1b4609..020b1cfd2 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
@@ -58,19 +58,19 @@ private:
if (nodeIndex == NoNode)
return NoNode;
- if (m_graph[nodeIndex].op == ValueToInt32)
+ if (m_graph[nodeIndex].op() == ValueToInt32)
nodeIndex = m_graph[nodeIndex].child1().index();
return nodeIndex;
}
- NodeIndex canonicalize(NodeUse nodeUse)
+ NodeIndex canonicalize(Edge nodeUse)
{
return canonicalize(nodeUse.indexUnchecked());
}
unsigned endIndexForPureCSE()
{
- unsigned result = m_lastSeen[m_graph[m_compileIndex].op];
+ unsigned result = m_lastSeen[m_graph[m_compileIndex].op()];
if (result == UINT_MAX)
result = 0;
else
@@ -94,7 +94,7 @@ private:
break;
Node& otherNode = m_graph[index];
- if (node.op != otherNode.op)
+ if (node.op() != otherNode.op())
continue;
if (node.arithNodeFlags() != otherNode.arithNodeFlags())
@@ -139,7 +139,7 @@ private:
bool byValIsPure(Node& node)
{
return m_graph[node.child2()].shouldSpeculateInteger()
- && ((node.op == PutByVal || node.op == PutByValAlias)
+ && ((node.op() == PutByVal || node.op() == PutByValAlias)
? isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())
: isActionableArrayPrediction(m_graph[node.child1()].prediction()));
}
@@ -147,11 +147,11 @@ private:
bool clobbersWorld(NodeIndex nodeIndex)
{
Node& node = m_graph[nodeIndex];
- if (node.flags & NodeClobbersWorld)
+ if (node.flags() & NodeClobbersWorld)
return true;
- if (!(node.flags & NodeMightClobber))
+ if (!(node.flags() & NodeMightClobber))
return false;
- switch (node.op) {
+ switch (node.op()) {
case ValueAdd:
case CompareLess:
case CompareLessEq:
@@ -181,7 +181,7 @@ private:
break;
Node& otherNode = m_graph[index];
- if (node.op == otherNode.op
+ if (node.op() == otherNode.op()
&& node.arithNodeFlags() == otherNode.arithNodeFlags()) {
NodeIndex otherChild = canonicalize(otherNode.child1());
if (otherChild == NoNode)
@@ -210,7 +210,7 @@ private:
for (unsigned i = m_indexInBlock; i--;) {
NodeIndex index = m_currentBlock->at(i);
Node& node = m_graph[index];
- switch (node.op) {
+ switch (node.op()) {
case GetGlobalVar:
if (node.varNumber() == varNumber && codeBlock()->globalObjectFor(node.codeOrigin) == globalObject)
return index;
@@ -236,7 +236,7 @@ private:
break;
Node& node = m_graph[index];
- switch (node.op) {
+ switch (node.op()) {
case GetByVal:
if (!byValIsPure(node))
return NoNode;
@@ -280,7 +280,7 @@ private:
break;
Node& node = m_graph[index];
- if (node.op == CheckFunction && node.child1() == child1 && node.function() == function)
+ if (node.op() == CheckFunction && node.child1() == child1 && node.function() == function)
return true;
}
return false;
@@ -294,7 +294,7 @@ private:
break;
Node& node = m_graph[index];
- switch (node.op) {
+ switch (node.op()) {
case CheckStructure:
if (node.child1() == child1
&& structureSet.isSupersetOf(node.structureSet()))
@@ -340,7 +340,7 @@ private:
break;
Node& node = m_graph[index];
- switch (node.op) {
+ switch (node.op()) {
case GetByOffset:
if (node.child1() == child1
&& m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber)
@@ -386,7 +386,7 @@ private:
break;
Node& node = m_graph[index];
- switch (node.op) {
+ switch (node.op()) {
case GetPropertyStorage:
if (node.child1() == child1)
return index;
@@ -425,7 +425,7 @@ private:
break;
Node& node = m_graph[index];
- switch (node.op) {
+ switch (node.op()) {
case GetIndexedPropertyStorage: {
PredictedType basePrediction = m_graph[node.child2()].prediction();
bool nodeHasIntegerIndexPrediction = !(!(basePrediction & PredictInt32) && basePrediction);
@@ -463,14 +463,14 @@ private:
for (unsigned i = endIndexForPureCSE(); i--;) {
NodeIndex index = m_currentBlock->at(i);
Node& node = m_graph[index];
- if (node.op == GetScopeChain
+ if (node.op() == GetScopeChain
&& node.scopeChainDepth() == depth)
return index;
}
return NoNode;
}
- void performSubstitution(NodeUse& child, bool addRef = true)
+ void performSubstitution(Edge& child, bool addRef = true)
{
// Check if this operand is actually unused.
if (!child)
@@ -529,7 +529,7 @@ private:
{
bool shouldGenerate = node.shouldGenerate();
- if (node.flags & NodeHasVarArgs) {
+ if (node.flags() & NodeHasVarArgs) {
for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
performSubstitution(m_graph.m_varArgChildren[childIdx], shouldGenerate);
} else {
@@ -542,7 +542,7 @@ private:
return;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" %s @%u: ", Graph::opName(static_cast<NodeType>(m_graph[m_compileIndex].op)), m_compileIndex);
+ dataLog(" %s @%u: ", Graph::opName(m_graph[m_compileIndex].op()), m_compileIndex);
#endif
// NOTE: there are some nodes that we deliberately don't CSE even though we
@@ -554,7 +554,7 @@ private:
// ToPrimitive, but we could change that with some speculations if we really
// needed to.
- switch (node.op) {
+ switch (node.op()) {
// Handle the pure nodes. These nodes never have any side-effects.
case BitAnd:
@@ -573,7 +573,6 @@ private:
case ArithMin:
case ArithMax:
case ArithSqrt:
- case GetByteArrayLength:
case GetInt8ArrayLength:
case GetInt16ArrayLength:
case GetInt32ArrayLength:
@@ -587,6 +586,14 @@ private:
case GetStringLength:
case StringCharAt:
case StringCharCodeAt:
+ case Int32ToDouble:
+ case IsUndefined:
+ case IsBoolean:
+ case IsNumber:
+ case IsString:
+ case IsObject:
+ case IsFunction:
+ case DoubleAsInt32:
setReplacement(pureCSE(node));
break;
@@ -636,7 +643,7 @@ private:
case PutByVal:
if (byValIsPure(node) && getByValLoadElimination(node.child1().index(), node.child2().index()) != NoNode)
- node.op = PutByValAlias;
+ node.setOp(PutByValAlias);
break;
case CheckStructure:
@@ -669,7 +676,7 @@ private:
break;
}
- m_lastSeen[node.op] = m_indexInBlock;
+ m_lastSeen[node.op()] = m_indexInBlock;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog("\n");
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
index a8dec067f..450a5d83e 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.cpp
@@ -27,12 +27,23 @@
#include "DFGCapabilities.h"
#include "CodeBlock.h"
+#include "DFGCommon.h"
#include "Interpreter.h"
namespace JSC { namespace DFG {
#if ENABLE(DFG_JIT)
+static inline void debugFail(CodeBlock* codeBlock, OpcodeID opcodeID)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Cannot handle code block %p because of opcode %s.\n", codeBlock, opcodeNames[opcodeID]);
+#else
+ UNUSED_PARAM(codeBlock);
+ UNUSED_PARAM(opcodeID);
+#endif
+}
+
template<bool (*canHandleOpcode)(OpcodeID)>
bool canHandleOpcodes(CodeBlock* codeBlock)
{
@@ -42,11 +53,13 @@ bool canHandleOpcodes(CodeBlock* codeBlock)
for (unsigned bytecodeOffset = 0; bytecodeOffset < instructionCount; ) {
switch (interpreter->getOpcodeID(instructionsBegin[bytecodeOffset].u.opcode)) {
-#define DEFINE_OP(opcode, length) \
- case opcode: \
- if (!canHandleOpcode(opcode)) \
- return false; \
- bytecodeOffset += length; \
+#define DEFINE_OP(opcode, length) \
+ case opcode: \
+ if (!canHandleOpcode(opcode)) { \
+ debugFail(codeBlock, opcode); \
+ return false; \
+ } \
+ bytecodeOffset += length; \
break;
FOR_EACH_OPCODE_ID(DEFINE_OP)
#undef DEFINE_OP
@@ -61,6 +74,8 @@ bool canHandleOpcodes(CodeBlock* codeBlock)
bool canCompileOpcodes(CodeBlock* codeBlock)
{
+ if (!MacroAssembler::supportsFloatingPoint())
+ return false;
return canHandleOpcodes<canCompileOpcode>(codeBlock);
}
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
index 6509dbc3d..b807979ba 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -96,6 +96,12 @@ inline bool canCompileOpcode(OpcodeID opcodeID)
case op_mov:
case op_check_has_instance:
case op_instanceof:
+ case op_is_undefined:
+ case op_is_boolean:
+ case op_is_number:
+ case op_is_string:
+ case op_is_object:
+ case op_is_function:
case op_not:
case op_less:
case op_lesseq:
diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h
index 8ff1e5cdd..828bcb2a3 100644
--- a/Source/JavaScriptCore/dfg/DFGCommon.h
+++ b/Source/JavaScriptCore/dfg/DFGCommon.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -97,9 +97,32 @@ struct NodeIndexTraits {
enum UseKind {
UntypedUse,
+ DoubleUse,
LastUseKind // Must always be the last entry in the enum, as it is used to denote the number of enum elements.
};
+inline const char* useKindToString(UseKind useKind)
+{
+ switch (useKind) {
+ case UntypedUse:
+ return "";
+ case DoubleUse:
+ return "d";
+ default:
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+ return true;
+#else
+ return false;
+#endif
+}
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h b/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h
index 983f479c2..bfa149604 100644
--- a/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h
+++ b/Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h
@@ -39,7 +39,7 @@ namespace JSC { namespace DFG {
// Thus it goes through three states:
//
// 1) Label of unpatchable branch or jump (i.e. MacroAssembler::Jump).
-// 2) Label of patchable jump (i.e. MacroAssembler::Jump).
+// 2) Label of patchable jump (i.e. MacroAssembler::PatchableJump).
// 3) Corrected post-linking label of patchable jump (i.e. CodeLocationJump).
//
// The setting of state (1) corresponds to planting the in-line unpatchable
@@ -66,7 +66,7 @@ public:
#endif
}
- void switchToLateJump(MacroAssembler::Jump check)
+ void switchToLateJump(MacroAssembler::PatchableJump check)
{
#ifndef NDEBUG
ASSERT(m_mode == InitialJump);
@@ -74,12 +74,12 @@ public:
#endif
// Late jumps should only ever be real jumps.
#if CPU(ARM_THUMB2)
- ASSERT(check.m_type == ARMv7Assembler::JumpNoConditionFixedSize);
- ASSERT(check.m_condition == ARMv7Assembler::ConditionInvalid);
+ ASSERT(check.m_jump.m_type == ARMv7Assembler::JumpNoConditionFixedSize);
+ ASSERT(check.m_jump.m_condition == ARMv7Assembler::ConditionInvalid);
m_type = ARMv7Assembler::JumpNoConditionFixedSize;
m_condition = ARMv7Assembler::ConditionInvalid;
#endif
- m_codeOffset = check.m_label.m_offset;
+ m_codeOffset = check.m_jump.m_label.m_offset;
}
void correctInitialJump(LinkBuffer& linkBuffer)
diff --git a/Source/JavaScriptCore/dfg/DFGDoubleFormatState.h b/Source/JavaScriptCore/dfg/DFGDoubleFormatState.h
new file mode 100644
index 000000000..2aa0f3d4d
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGDoubleFormatState.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGDoubleFormatState_h
+#define DFGDoubleFormatState_h
+
+namespace JSC { namespace DFG {
+
+enum DoubleFormatState {
+ EmptyDoubleFormatState, // bottom
+ UsingDoubleFormat,
+ NotUsingDoubleFormat,
+ CantUseDoubleFormat // top
+};
+
+inline DoubleFormatState mergeDoubleFormatStates(DoubleFormatState a, DoubleFormatState b)
+{
+ switch (a) {
+ case EmptyDoubleFormatState:
+ return b;
+ case UsingDoubleFormat:
+ switch (b) {
+ case EmptyDoubleFormatState:
+ case UsingDoubleFormat:
+ return UsingDoubleFormat;
+ case NotUsingDoubleFormat:
+ case CantUseDoubleFormat:
+ return CantUseDoubleFormat;
+ }
+ case NotUsingDoubleFormat:
+ switch (b) {
+ case EmptyDoubleFormatState:
+ case NotUsingDoubleFormat:
+ return NotUsingDoubleFormat;
+ case UsingDoubleFormat:
+ case CantUseDoubleFormat:
+ return CantUseDoubleFormat;
+ }
+ case CantUseDoubleFormat:
+ return CantUseDoubleFormat;
+ }
+ ASSERT_NOT_REACHED();
+ return CantUseDoubleFormat;
+}
+
+inline bool mergeDoubleFormatState(DoubleFormatState& dest, DoubleFormatState src)
+{
+ DoubleFormatState newState = mergeDoubleFormatStates(dest, src);
+ if (newState == dest)
+ return false;
+ dest = newState;
+ return true;
+}
+
+inline const char* doubleFormatStateToString(DoubleFormatState state)
+{
+ switch (state) {
+ case EmptyDoubleFormatState:
+ return "Empty";
+ case UsingDoubleFormat:
+ return "DoubleFormat";
+ case NotUsingDoubleFormat:
+ return "ValueFormat";
+ case CantUseDoubleFormat:
+ return "ForceValue";
+ }
+ ASSERT_NOT_REACHED();
+ return 0;
+}
+
+} } // namespace JSC::DFG
+
+#endif // DFGDoubleFormatState_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp
index a0af3e6ad..205e94e6b 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp
@@ -28,10 +28,10 @@
#if ENABLE(DFG_JIT)
-#include "DFGArithNodeFlagsInferencePhase.h"
#include "DFGByteCodeParser.h"
#include "DFGCFAPhase.h"
#include "DFGCSEPhase.h"
+#include "DFGFixupPhase.h"
#include "DFGJITCompiler.h"
#include "DFGPredictionPropagationPhase.h"
#include "DFGRedundantPhiEliminationPhase.h"
@@ -60,8 +60,8 @@ inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock
dfg.predictArgumentTypes();
performRedundantPhiElimination(dfg);
- performArithNodeFlagsInference(dfg);
performPredictionPropagation(dfg);
+ performFixup(dfg);
performCSE(dfg);
performVirtualRegisterAllocation(dfg);
performCFA(dfg);
diff --git a/Source/JavaScriptCore/dfg/DFGNodeUse.h b/Source/JavaScriptCore/dfg/DFGEdge.h
index 71154997c..7b4b5b8bf 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeUse.h
+++ b/Source/JavaScriptCore/dfg/DFGEdge.h
@@ -23,8 +23,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGNodeUse_h
-#define DFGNodeUse_h
+#ifndef DFGEdge_h
+#define DFGEdge_h
#include <wtf/Platform.h>
@@ -34,21 +34,21 @@
namespace JSC { namespace DFG {
-class NodeReferenceBlob;
+class AdjacencyList;
-class NodeUse {
+class Edge {
public:
- NodeUse()
+ Edge()
: m_encodedWord(makeWord(NoNode, UntypedUse))
{
}
- explicit NodeUse(NodeIndex nodeIndex)
+ explicit Edge(NodeIndex nodeIndex)
: m_encodedWord(makeWord(nodeIndex, UntypedUse))
{
}
- NodeUse(NodeIndex nodeIndex, UseKind useKind)
+ Edge(NodeIndex nodeIndex, UseKind useKind)
: m_encodedWord(makeWord(nodeIndex, useKind))
{
}
@@ -80,17 +80,17 @@ public:
bool isSet() const { return indexUnchecked() != NoNode; }
bool operator!() const { return !isSet(); }
- bool operator==(NodeUse other) const
+ bool operator==(Edge other) const
{
return m_encodedWord == other.m_encodedWord;
}
- bool operator!=(NodeUse other) const
+ bool operator!=(Edge other) const
{
return m_encodedWord != other.m_encodedWord;
}
private:
- friend class NodeReferenceBlob;
+ friend class AdjacencyList;
static uint32_t shift() { return 4; }
@@ -105,19 +105,19 @@ private:
int32_t m_encodedWord;
};
-inline bool operator==(NodeUse nodeUse, NodeIndex nodeIndex)
+inline bool operator==(Edge nodeUse, NodeIndex nodeIndex)
{
return nodeUse.indexUnchecked() == nodeIndex;
}
-inline bool operator==(NodeIndex nodeIndex, NodeUse nodeUse)
+inline bool operator==(NodeIndex nodeIndex, Edge nodeUse)
{
return nodeUse.indexUnchecked() == nodeIndex;
}
-inline bool operator!=(NodeUse nodeUse, NodeIndex nodeIndex)
+inline bool operator!=(Edge nodeUse, NodeIndex nodeIndex)
{
return nodeUse.indexUnchecked() != nodeIndex;
}
-inline bool operator!=(NodeIndex nodeIndex, NodeUse nodeUse)
+inline bool operator!=(NodeIndex nodeIndex, Edge nodeUse)
{
return nodeUse.indexUnchecked() != nodeIndex;
}
@@ -126,5 +126,5 @@ inline bool operator!=(NodeIndex nodeIndex, NodeUse nodeUse)
#endif // ENABLE(DFG_JIT)
-#endif // DFGNodeUse_h
+#endif // DFGEdge_h
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
new file mode 100644
index 000000000..242fdf852
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGFixupPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGGraph.h"
+#include "DFGInsertionSet.h"
+#include "DFGPhase.h"
+
+namespace JSC { namespace DFG {
+
+class FixupPhase : public Phase {
+public:
+ FixupPhase(Graph& graph)
+ : Phase(graph, "fixup")
+ {
+ }
+
+ void run()
+ {
+ for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex)
+ fixupBlock(m_graph.m_blocks[blockIndex].get());
+ }
+
+private:
+ void fixupBlock(BasicBlock* block)
+ {
+ for (m_indexInBlock = 0; m_indexInBlock < block->size(); ++m_indexInBlock) {
+ m_compileIndex = block->at(m_indexInBlock);
+ fixupNode(m_graph[m_compileIndex]);
+ }
+ m_insertionSet.execute(*block);
+ }
+
+ void fixupNode(Node& node)
+ {
+ if (!node.shouldGenerate())
+ return;
+
+ NodeType op = node.op();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
+#endif
+
+ switch (op) {
+ case GetById: {
+ if (!isInt32Prediction(m_graph[m_compileIndex].prediction()))
+ break;
+ if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
+ break;
+ bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
+ bool isString = isStringPrediction(m_graph[node.child1()].prediction());
+ bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
+ bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
+ bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
+ bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
+ bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
+ bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
+ bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
+ bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
+ bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
+ if (!isArray && !isString && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
+ break;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
+#endif
+ if (isArray)
+ node.setOp(GetArrayLength);
+ else if (isString)
+ node.setOp(GetStringLength);
+ else if (isInt8Array)
+ node.setOp(GetInt8ArrayLength);
+ else if (isInt16Array)
+ node.setOp(GetInt16ArrayLength);
+ else if (isInt32Array)
+ node.setOp(GetInt32ArrayLength);
+ else if (isUint8Array)
+ node.setOp(GetUint8ArrayLength);
+ else if (isUint8ClampedArray)
+ node.setOp(GetUint8ClampedArrayLength);
+ else if (isUint16Array)
+ node.setOp(GetUint16ArrayLength);
+ else if (isUint32Array)
+ node.setOp(GetUint32ArrayLength);
+ else if (isFloat32Array)
+ node.setOp(GetFloat32ArrayLength);
+ else if (isFloat64Array)
+ node.setOp(GetFloat64ArrayLength);
+ else
+ ASSERT_NOT_REACHED();
+ // No longer MustGenerate
+ ASSERT(node.flags() & NodeMustGenerate);
+ node.clearFlags(NodeMustGenerate);
+ m_graph.deref(m_compileIndex);
+ break;
+ }
+ case GetIndexedPropertyStorage: {
+ PredictedType basePrediction = m_graph[node.child2()].prediction();
+ if (!(basePrediction & PredictInt32) && basePrediction) {
+ node.setOpAndDefaultFlags(Nop);
+ m_graph.clearAndDerefChild1(node);
+ m_graph.clearAndDerefChild2(node);
+ m_graph.clearAndDerefChild3(node);
+ node.setRefCount(0);
+ }
+ break;
+ }
+ case GetByVal:
+ case StringCharAt:
+ case StringCharCodeAt: {
+ if (!!node.child3() && m_graph[node.child3()].op() == Nop)
+ node.children.child3() = Edge();
+ break;
+ }
+
+ case ValueToInt32: {
+ if (m_graph[node.child1()].shouldSpeculateNumber()) {
+ node.clearFlags(NodeMustGenerate);
+ m_graph.deref(m_compileIndex);
+ }
+ break;
+ }
+
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitRShift:
+ case BitLShift:
+ case BitURShift: {
+ fixIntEdge(node.children.child1());
+ fixIntEdge(node.children.child2());
+ break;
+ }
+
+ case CompareEq:
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareStrictEq: {
+ if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]))
+ break;
+ if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
+ break;
+ fixDoubleEdge(0);
+ fixDoubleEdge(1);
+ break;
+ }
+
+ case LogicalNot: {
+ if (m_graph[node.child1()].shouldSpeculateInteger())
+ break;
+ if (!m_graph[node.child1()].shouldSpeculateNumber())
+ break;
+ fixDoubleEdge(0);
+ break;
+ }
+
+ case Branch: {
+ if (!m_graph[node.child1()].shouldSpeculateInteger()
+ && m_graph[node.child1()].shouldSpeculateNumber())
+ fixDoubleEdge(0);
+
+ Node& myNode = m_graph[m_compileIndex]; // reload because the graph may have changed
+ Edge logicalNotEdge = myNode.child1();
+ Node& logicalNot = m_graph[logicalNotEdge];
+ if (logicalNot.op() == LogicalNot
+ && logicalNot.adjustedRefCount() == 1) {
+ Edge newChildEdge = logicalNot.child1();
+ if (m_graph[newChildEdge].hasBooleanResult()) {
+ m_graph.ref(newChildEdge);
+ m_graph.deref(logicalNotEdge);
+ myNode.children.setChild1(newChildEdge);
+
+ BlockIndex toBeTaken = myNode.notTakenBlockIndex();
+ BlockIndex toBeNotTaken = myNode.takenBlockIndex();
+ myNode.setTakenBlockIndex(toBeTaken);
+ myNode.setNotTakenBlockIndex(toBeNotTaken);
+ }
+ }
+ break;
+ }
+
+ case SetLocal: {
+ if (m_graph.isCaptured(node.local()))
+ break;
+ if (!node.variableAccessData()->shouldUseDoubleFormat())
+ break;
+ fixDoubleEdge(0);
+ break;
+ }
+
+ case ArithAdd:
+ case ValueAdd: {
+ if (m_graph.addShouldSpeculateInteger(node))
+ break;
+ if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
+ break;
+ fixDoubleEdge(0);
+ fixDoubleEdge(1);
+ break;
+ }
+
+ case ArithSub: {
+ if (m_graph.addShouldSpeculateInteger(node)
+ && node.canSpeculateInteger())
+ break;
+ fixDoubleEdge(0);
+ fixDoubleEdge(1);
+ break;
+ }
+
+ case ArithNegate: {
+ if (m_graph.negateShouldSpeculateInteger(node))
+ break;
+ fixDoubleEdge(0);
+ break;
+ }
+
+ case ArithMin:
+ case ArithMax:
+ case ArithMul:
+ case ArithMod: {
+ if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
+ && node.canSpeculateInteger())
+ break;
+ fixDoubleEdge(0);
+ fixDoubleEdge(1);
+ break;
+ }
+
+ case ArithDiv: {
+ if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()])
+ && node.canSpeculateInteger()) {
+ if (isX86())
+ break;
+ fixDoubleEdge(0);
+ fixDoubleEdge(1);
+
+ Node& oldDivision = m_graph[m_compileIndex];
+
+ Node newDivision = oldDivision;
+ newDivision.setRefCount(2);
+ newDivision.predict(PredictDouble);
+ NodeIndex newDivisionIndex = m_graph.size();
+
+ oldDivision.setOp(DoubleAsInt32);
+ oldDivision.children.initialize(Edge(newDivisionIndex, DoubleUse), Edge(), Edge());
+
+ m_graph.append(newDivision);
+ m_insertionSet.append(m_indexInBlock, newDivisionIndex);
+
+ break;
+ }
+ fixDoubleEdge(0);
+ fixDoubleEdge(1);
+ break;
+ }
+
+ case ArithAbs: {
+ if (m_graph[node.child1()].shouldSpeculateInteger()
+ && node.canSpeculateInteger())
+ break;
+ fixDoubleEdge(0);
+ break;
+ }
+
+ case ArithSqrt: {
+ fixDoubleEdge(0);
+ break;
+ }
+
+ case PutByVal: {
+ if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction())
+ break;
+ if (!m_graph[node.child2()].shouldSpeculateInteger())
+ break;
+ if (isActionableIntMutableArrayPrediction(m_graph[node.child1()].prediction())) {
+ if (m_graph[node.child3()].isConstant())
+ break;
+ if (m_graph[node.child3()].shouldSpeculateInteger())
+ break;
+ fixDoubleEdge(2);
+ break;
+ }
+ if (isActionableFloatMutableArrayPrediction(m_graph[node.child1()].prediction())) {
+ fixDoubleEdge(2);
+ break;
+ }
+ break;
+ }
+
+ default:
+ break;
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ if (!(node.flags() & NodeHasVarArgs)) {
+ dataLog("new children: ");
+ node.dumpChildren(WTF::dataFile());
+ }
+ dataLog("\n");
+#endif
+ }
+
+ void fixIntEdge(Edge& edge)
+ {
+ Node& node = m_graph[edge];
+ if (node.op() != ValueToInt32)
+ return;
+
+ if (!m_graph[node.child1()].shouldSpeculateInteger())
+ return;
+
+ Edge oldEdge = edge;
+ Edge newEdge = node.child1();
+
+ m_graph.ref(newEdge);
+ m_graph.deref(oldEdge);
+
+ edge = newEdge;
+ }
+
+ void fixDoubleEdge(unsigned childIndex)
+ {
+ Node& source = m_graph[m_compileIndex];
+ Edge& edge = source.children.child(childIndex);
+
+ if (!m_graph[edge].shouldSpeculateInteger()) {
+ edge.setUseKind(DoubleUse);
+ return;
+ }
+
+ NodeIndex resultIndex = (NodeIndex)m_graph.size();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("(replacing @%u->@%u with @%u->@%u) ",
+ m_compileIndex, edge.index(), m_compileIndex, resultIndex);
+#endif
+
+ // Fix the edge up here because it's a reference that will be clobbered by
+ // the append() below.
+ NodeIndex oldIndex = edge.index();
+ edge = Edge(resultIndex, DoubleUse);
+
+ m_graph.append(Node(Int32ToDouble, source.codeOrigin, oldIndex));
+ m_insertionSet.append(m_indexInBlock, resultIndex);
+
+ Node& int32ToDouble = m_graph[resultIndex];
+ int32ToDouble.predict(PredictDouble);
+ int32ToDouble.ref();
+ }
+
+ unsigned m_indexInBlock;
+ NodeIndex m_compileIndex;
+ InsertionSet<NodeIndex> m_insertionSet;
+};
+
+void performFixup(Graph& graph)
+{
+ runPhase<FixupPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h b/Source/JavaScriptCore/dfg/DFGFixupPhase.h
index 64546e253..1ba85ebfe 100644
--- a/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGArithNodeFlagsInferencePhase_h
-#define DFGArithNodeFlagsInferencePhase_h
+#ifndef DFGFixupPhase_h
+#define DFGFixupPhase_h
#include <wtf/Platform.h>
@@ -34,18 +34,13 @@ namespace JSC { namespace DFG {
class Graph;
-// Determine which arithmetic nodes' results are only used in a context that
-// truncates to integer anyway. This is great for optimizing away checks for
-// overflow and negative zero. NB the way this phase integrates into the rest
-// of the DFG makes it non-optional. Instead of proving that a node is only
-// used in integer context, it actually does the opposite: finds nodes that
-// are used in non-integer contexts. Hence failing to run this phase will make
-// the compiler assume that all nodes are just used as integers!
+// Fix portions of the graph that are inefficient given the predictions that
+// we have. This should run after prediction propagation but before CSE.
-void performArithNodeFlagsInference(Graph&);
+void performFixup(Graph&);
} } // namespace JSC::DFG::Phase
#endif // ENABLE(DFG_JIT)
-#endif // DFGArithNodeFlagsInferencePhase_h
+#endif // DFGFixupPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGGPRInfo.h b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
index f010d8c18..4a250328f 100644
--- a/Source/JavaScriptCore/dfg/DFGGPRInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
@@ -386,18 +386,17 @@ private:
class GPRInfo {
public:
typedef GPRReg RegisterType;
- static const unsigned numberOfRegisters = 9;
+ static const unsigned numberOfRegisters = 8;
// Temporary registers.
static const GPRReg regT0 = ARMRegisters::r0;
static const GPRReg regT1 = ARMRegisters::r1;
static const GPRReg regT2 = ARMRegisters::r2;
static const GPRReg regT3 = ARMRegisters::r4;
- static const GPRReg regT4 = ARMRegisters::r7;
- static const GPRReg regT5 = ARMRegisters::r8;
- static const GPRReg regT6 = ARMRegisters::r9;
- static const GPRReg regT7 = ARMRegisters::r10;
- static const GPRReg regT8 = ARMRegisters::r11;
+ static const GPRReg regT4 = ARMRegisters::r8;
+ static const GPRReg regT5 = ARMRegisters::r9;
+ static const GPRReg regT6 = ARMRegisters::r10;
+ static const GPRReg regT7 = ARMRegisters::r11;
// These registers match the baseline JIT.
static const GPRReg cachedResultRegister = regT0;
static const GPRReg cachedResultRegister2 = regT1;
@@ -418,7 +417,7 @@ public:
static GPRReg toRegister(unsigned index)
{
ASSERT(index < numberOfRegisters);
- static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8 };
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7 };
return registerForIndex[index];
}
@@ -426,7 +425,7 @@ public:
{
ASSERT(reg != InvalidGPRReg);
ASSERT(reg < 16);
- static const unsigned indexForRegister[16] = { 0, 1, 2, InvalidIndex, 3, InvalidIndex, InvalidIndex, 4, 5, 6, 7, 8, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ static const unsigned indexForRegister[16] = { 0, 1, 2, InvalidIndex, 3, InvalidIndex, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
unsigned result = indexForRegister[reg];
ASSERT(result != InvalidIndex);
return result;
diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
index 6f0fe3143..125a5a4f9 100644
--- a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2011 Apple Inc. All rights reserved.
*
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
index 900251e10..3c99e5d4e 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp
+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp
@@ -27,6 +27,7 @@
#include "DFGGraph.h"
#include "CodeBlock.h"
+#include <wtf/BoundsCheckedPointer.h>
#if ENABLE(DFG_JIT)
@@ -83,13 +84,13 @@ static void printWhiteSpace(unsigned amount)
dataLog(" ");
}
-void Graph::dumpCodeOrigin(NodeIndex nodeIndex)
+void Graph::dumpCodeOrigin(NodeIndex prevNodeIndex, NodeIndex nodeIndex)
{
- if (!nodeIndex)
+ if (prevNodeIndex == NoNode)
return;
Node& currentNode = at(nodeIndex);
- Node& previousNode = at(nodeIndex - 1);
+ Node& previousNode = at(prevNodeIndex);
if (previousNode.codeOrigin.inlineCallFrame == currentNode.codeOrigin.inlineCallFrame)
return;
@@ -120,7 +121,7 @@ void Graph::dumpCodeOrigin(NodeIndex nodeIndex)
void Graph::dump(NodeIndex nodeIndex)
{
Node& node = at(nodeIndex);
- NodeType op = static_cast<NodeType>(node.op);
+ NodeType op = node.op();
unsigned refCount = node.refCount();
bool skipped = !refCount;
@@ -130,7 +131,6 @@ void Graph::dump(NodeIndex nodeIndex)
--refCount;
}
- dumpCodeOrigin(nodeIndex);
printWhiteSpace((node.codeOrigin.inlineDepth() - 1) * 2);
// Example/explanation of dataflow dump output
@@ -157,26 +157,41 @@ void Graph::dump(NodeIndex nodeIndex)
dataLog("-");
dataLog(">\t%s(", opName(op));
bool hasPrinted = false;
- if (node.flags & NodeHasVarArgs) {
+ if (node.flags() & NodeHasVarArgs) {
for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++) {
if (hasPrinted)
dataLog(", ");
else
hasPrinted = true;
- dataLog("@%u", m_varArgChildren[childIdx].index());
+ dataLog("%s@%u%s",
+ useKindToString(m_varArgChildren[childIdx].useKind()),
+ m_varArgChildren[childIdx].index(),
+                predictionToAbbreviatedString(at(m_varArgChildren[childIdx]).prediction()));
}
} else {
- if (!!node.child1())
- dataLog("@%u", node.child1().index());
- if (!!node.child2())
- dataLog(", @%u", node.child2().index());
- if (!!node.child3())
- dataLog(", @%u", node.child3().index());
+ if (!!node.child1()) {
+ dataLog("%s@%u%s",
+ useKindToString(node.child1().useKind()),
+ node.child1().index(),
+ predictionToAbbreviatedString(at(node.child1()).prediction()));
+ }
+ if (!!node.child2()) {
+ dataLog(", %s@%u%s",
+ useKindToString(node.child2().useKind()),
+ node.child2().index(),
+ predictionToAbbreviatedString(at(node.child2()).prediction()));
+ }
+ if (!!node.child3()) {
+ dataLog(", %s@%u%s",
+ useKindToString(node.child3().useKind()),
+ node.child3().index(),
+ predictionToAbbreviatedString(at(node.child3()).prediction()));
+ }
hasPrinted = !!node.child1();
}
- if (node.arithNodeFlags()) {
- dataLog("%s%s", hasPrinted ? ", " : "", arithNodeFlagsAsString(node.arithNodeFlags()));
+ if (node.flags()) {
+ dataLog("%s%s", hasPrinted ? ", " : "", nodeFlagsAsString(node.flags()));
hasPrinted = true;
}
if (node.hasVarNumber()) {
@@ -253,8 +268,6 @@ void Graph::dump(NodeIndex nodeIndex)
dataLog(" predicting %s, double ratio %lf%s", predictionToString(node.variableAccessData()->prediction()), node.variableAccessData()->doubleVoteRatio(), node.variableAccessData()->shouldUseDoubleFormat() ? ", forcing double" : "");
else if (node.hasHeapPrediction())
dataLog(" predicting %s", predictionToString(node.getHeapPrediction()));
- else if (node.hasVarNumber())
- dataLog(" predicting %s", predictionToString(getGlobalVarPrediction(node.varNumber())));
}
dataLog("\n");
@@ -262,6 +275,7 @@ void Graph::dump(NodeIndex nodeIndex)
void Graph::dump()
{
+ NodeIndex lastNodeIndex = NoNode;
for (size_t b = 0; b < m_blocks.size(); ++b) {
BasicBlock* block = m_blocks[b].get();
dataLog("Block #%u (bc#%u): %s%s\n", (int)b, block->bytecodeBegin, block->isReachable ? "" : " (skipped)", block->isOSRTarget ? " (OSR target)" : "");
@@ -280,8 +294,11 @@ void Graph::dump()
dataLog(" var links: ");
dumpOperands(block->variablesAtHead, WTF::dataFile());
dataLog("\n");
- for (size_t i = 0; i < block->size(); ++i)
+ for (size_t i = 0; i < block->size(); ++i) {
+ dumpCodeOrigin(lastNodeIndex, block->at(i));
dump(block->at(i));
+ lastNodeIndex = block->at(i);
+ }
dataLog(" vars after: ");
if (block->cfaHasVisited)
dumpOperands(block->valuesAtTail, WTF::dataFile());
@@ -294,7 +311,7 @@ void Graph::dump()
// FIXME: Convert this to be iterative, not recursive.
#define DO_TO_CHILDREN(node, thingToDo) do { \
Node& _node = (node); \
- if (_node.flags & NodeHasVarArgs) { \
+ if (_node.flags() & NodeHasVarArgs) { \
for (unsigned _childIdx = _node.firstChild(); \
_childIdx < _node.firstChild() + _node.numChildren(); \
_childIdx++) \
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index bacbac827..0c8ac2dcf 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -29,11 +29,11 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
+#include "DFGArgumentPosition.h"
#include "DFGAssemblyHelpers.h"
#include "DFGBasicBlock.h"
#include "DFGNode.h"
#include "MethodOfGettingAValueProfile.h"
-#include "PredictionTracker.h"
#include "RegisterFile.h"
#include <wtf/BitVector.h>
#include <wtf/HashMap.h>
@@ -84,11 +84,11 @@ public:
using Vector<Node, 64>::operator[];
using Vector<Node, 64>::at;
- Node& operator[](NodeUse nodeUse) { return at(nodeUse.index()); }
- const Node& operator[](NodeUse nodeUse) const { return at(nodeUse.index()); }
+ Node& operator[](Edge nodeUse) { return at(nodeUse.index()); }
+ const Node& operator[](Edge nodeUse) const { return at(nodeUse.index()); }
- Node& at(NodeUse nodeUse) { return at(nodeUse.index()); }
- const Node& at(NodeUse nodeUse) const { return at(nodeUse.index()); }
+ Node& at(Edge nodeUse) { return at(nodeUse.index()); }
+ const Node& at(Edge nodeUse) const { return at(nodeUse.index()); }
// Mark a node as being referenced.
void ref(NodeIndex nodeIndex)
@@ -98,7 +98,7 @@ public:
if (node.ref())
refChildren(nodeIndex);
}
- void ref(NodeUse nodeUse)
+ void ref(Edge nodeUse)
{
ref(nodeUse.index());
}
@@ -108,7 +108,7 @@ public:
if (at(nodeIndex).deref())
derefChildren(nodeIndex);
}
- void deref(NodeUse nodeUse)
+ void deref(Edge nodeUse)
{
deref(nodeUse.index());
}
@@ -118,7 +118,7 @@ public:
if (!node.child1())
return;
deref(node.child1());
- node.children.child1() = NodeUse();
+ node.children.child1() = Edge();
}
void clearAndDerefChild2(Node& node)
@@ -126,7 +126,7 @@ public:
if (!node.child2())
return;
deref(node.child2());
- node.children.child2() = NodeUse();
+ node.children.child2() = Edge();
}
void clearAndDerefChild3(Node& node)
@@ -134,7 +134,7 @@ public:
if (!node.child3())
return;
deref(node.child3());
- node.children.child3() = NodeUse();
+ node.children.child3() = Edge();
}
// CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
@@ -143,20 +143,10 @@ public:
// Dump the code origin of the given node as a diff from the code origin of the
// preceding node.
- void dumpCodeOrigin(NodeIndex);
+ void dumpCodeOrigin(NodeIndex, NodeIndex);
BlockIndex blockIndexForBytecodeOffset(Vector<BlockIndex>& blocks, unsigned bytecodeBegin);
- bool predictGlobalVar(unsigned varNumber, PredictedType prediction)
- {
- return m_predictions.predictGlobalVar(varNumber, prediction);
- }
-
- PredictedType getGlobalVarPrediction(unsigned varNumber)
- {
- return m_predictions.getGlobalVarPrediction(varNumber);
- }
-
PredictedType getJSConstantPrediction(Node& node)
{
return predictionFromValue(node.valueOfJSConstant(m_codeBlock));
@@ -164,7 +154,7 @@ public:
bool addShouldSpeculateInteger(Node& add)
{
- ASSERT(add.op == ValueAdd || add.op == ArithAdd || add.op == ArithSub);
+ ASSERT(add.op() == ValueAdd || add.op() == ArithAdd || add.op() == ArithSub);
Node& left = at(add.child1());
Node& right = at(add.child2());
@@ -179,7 +169,7 @@ public:
bool negateShouldSpeculateInteger(Node& negate)
{
- ASSERT(negate.op == ArithNegate);
+ ASSERT(negate.op() == ArithNegate);
return at(negate.child1()).shouldSpeculateInteger() && negate.canSpeculateInteger();
}
@@ -242,7 +232,7 @@ public:
{
JSCell* function = getJSFunction(valueOfJSConstant(nodeIndex));
ASSERT(function);
- return asFunction(function);
+ return jsCast<JSFunction*>(function);
}
static const char *opName(NodeType);
@@ -301,7 +291,7 @@ public:
Node& node = at(nodeIndex);
CodeBlock* profiledBlock = baselineCodeBlockFor(node.codeOrigin);
- if (node.op == GetLocal) {
+ if (node.op() == GetLocal) {
return MethodOfGettingAValueProfile::fromLazyOperand(
profiledBlock,
LazyOperandValueProfileKey(
@@ -351,11 +341,12 @@ public:
CodeBlock* m_profiledBlock;
Vector< OwnPtr<BasicBlock> , 8> m_blocks;
- Vector<NodeUse, 16> m_varArgChildren;
+ Vector<Edge, 16> m_varArgChildren;
Vector<StorageAccessData> m_storageAccessData;
Vector<ResolveGlobalData> m_resolveGlobalData;
Vector<NodeIndex, 8> m_arguments;
SegmentedVector<VariableAccessData, 16> m_variableAccessData;
+ SegmentedVector<ArgumentPosition, 8> m_argumentPositions;
SegmentedVector<StructureSet, 16> m_structureSet;
SegmentedVector<StructureTransitionData, 8> m_structureTransitionData;
BitVector m_preservedVars;
@@ -388,8 +379,6 @@ private:
// When a node's refCount goes from 0 to 1, it must (logically) recursively ref all of its children, and vice versa.
void refChildren(NodeIndex);
void derefChildren(NodeIndex);
-
- PredictionTracker m_predictions;
};
class GetBytecodeBeginForBlock {
diff --git a/Source/JavaScriptCore/dfg/DFGInsertionSet.h b/Source/JavaScriptCore/dfg/DFGInsertionSet.h
new file mode 100644
index 000000000..82a6a6fa4
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGInsertionSet.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGInsertionSet_h
+#define DFGInsertionSet_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include <wtf/Vector.h>
+
+namespace JSC { namespace DFG {
+
+template<typename ElementType>
+class Insertion {
+public:
+ Insertion() { }
+
+ Insertion(size_t index, const ElementType& element)
+ : m_index(index)
+ , m_element(element)
+ {
+ }
+
+ size_t index() const { return m_index; }
+ const ElementType& element() const { return m_element; }
+private:
+ size_t m_index;
+ ElementType m_element;
+};
+
+template<typename ElementType>
+class InsertionSet {
+public:
+ InsertionSet() { }
+
+ void append(const Insertion<ElementType>& insertion)
+ {
+ ASSERT(!m_insertions.size() || m_insertions.last().index() <= insertion.index());
+ m_insertions.append(insertion);
+ }
+
+ void append(size_t index, const ElementType& element)
+ {
+ append(Insertion<ElementType>(index, element));
+ }
+
+ template<typename CollectionType>
+ void execute(CollectionType& collection)
+ {
+ if (!m_insertions.size())
+ return;
+ collection.grow(collection.size() + m_insertions.size());
+ size_t lastIndex = collection.size();
+ for (size_t indexInInsertions = m_insertions.size(); indexInInsertions--;) {
+ Insertion<ElementType>& insertion = m_insertions[indexInInsertions];
+ size_t firstIndex = insertion.index() + indexInInsertions;
+ size_t indexOffset = indexInInsertions + 1;
+ for (size_t i = lastIndex; i-- > firstIndex;)
+ collection[i] = collection[i - indexOffset];
+ collection[firstIndex] = insertion.element();
+ lastIndex = firstIndex;
+ }
+ m_insertions.resize(0);
+ }
+private:
+ Vector<Insertion<ElementType>, 8> m_insertions;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGInsertionSet_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index af98f8d7a..56e0d4e18 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -44,10 +44,9 @@ void JITCompiler::linkOSRExits()
for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
OSRExit& exit = codeBlock()->osrExit(i);
exit.m_check.initialJump().link(this);
+ jitAssertHasValidCallFrame();
store32(TrustedImm32(i), &globalData()->osrExitIndex);
- beginUninterruptedSequence();
- exit.m_check.switchToLateJump(jump());
- endUninterruptedSequence();
+ exit.m_check.switchToLateJump(patchableJump());
}
}
@@ -152,25 +151,25 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
info.callReturnLocation = callReturnLocation;
- info.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
- info.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
+ info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
+ info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
#if USE(JSVALUE64)
- info.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
+ info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
#else
- info.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
- info.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
+ info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
+ info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
#endif
- info.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
- info.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
- info.baseGPR = m_propertyAccesses[i].m_baseGPR;
+ info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
+ info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
+ info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
- info.valueGPR = m_propertyAccesses[i].m_valueGPR;
+ info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
- info.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
- info.valueGPR = m_propertyAccesses[i].m_valueGPR;
+ info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
+ info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
- info.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
- info.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
+ info.patch.dfg.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
+ info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
}
m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index 2df2703b0..01a1e7246 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -130,9 +130,9 @@ struct PropertyAccessRecord {
enum RegisterMode { RegistersFlushed, RegistersInUse };
#if USE(JSVALUE64)
- PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
+ PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::PatchableJump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
#elif USE(JSVALUE32_64)
- PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::Jump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToTagLoadOrStore, MacroAssembler::DataLabelCompact deltaCallToPayloadLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
+ PropertyAccessRecord(CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr deltaCheckImmToCall, MacroAssembler::Call functionCall, MacroAssembler::PatchableJump deltaCallToStructCheck, MacroAssembler::DataLabelCompact deltaCallToTagLoadOrStore, MacroAssembler::DataLabelCompact deltaCallToPayloadLoadOrStore, MacroAssembler::Label deltaCallToSlowCase, MacroAssembler::Label deltaCallToDone, int8_t baseGPR, int8_t valueTagGPR, int8_t valueGPR, int8_t scratchGPR, RegisterMode registerMode = RegistersInUse)
#endif
: m_codeOrigin(codeOrigin)
, m_deltaCheckImmToCall(deltaCheckImmToCall)
@@ -159,7 +159,7 @@ struct PropertyAccessRecord {
CodeOrigin m_codeOrigin;
MacroAssembler::DataLabelPtr m_deltaCheckImmToCall;
MacroAssembler::Call m_functionCall;
- MacroAssembler::Jump m_deltaCallToStructCheck;
+ MacroAssembler::PatchableJump m_deltaCallToStructCheck;
#if USE(JSVALUE64)
MacroAssembler::DataLabelCompact m_deltaCallToLoadOrStore;
#elif USE(JSVALUE32_64)
@@ -200,12 +200,6 @@ public:
// Accessors for properties.
Graph& graph() { return m_graph; }
- // Just get a token for beginning a call.
- CallBeginToken beginJSCall()
- {
- return CallBeginToken(m_currentCodeOriginIndex++);
- }
-
// Get a token for beginning a call, and set the current code origin index in
// the call frame.
CallBeginToken beginCall()
@@ -247,7 +241,7 @@ public:
// Helper methods to get predictions
PredictedType getPrediction(Node& node) { return node.prediction(); }
PredictedType getPrediction(NodeIndex nodeIndex) { return getPrediction(graph()[nodeIndex]); }
- PredictedType getPrediction(NodeUse nodeUse) { return getPrediction(nodeUse.index()); }
+ PredictedType getPrediction(Edge nodeUse) { return getPrediction(nodeUse.index()); }
#if USE(JSVALUE32_64)
void* addressOfDoubleConstant(NodeIndex nodeIndex)
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index b672b67c5..f79a93a69 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -32,15 +32,15 @@
#include "CodeBlock.h"
#include "CodeOrigin.h"
+#include "DFGAdjacencyList.h"
#include "DFGCommon.h"
-#include "DFGNodeReferenceBlob.h"
-#include "DFGOperands.h"
+#include "DFGNodeFlags.h"
+#include "DFGNodeType.h"
#include "DFGVariableAccessData.h"
#include "JSValue.h"
+#include "Operands.h"
#include "PredictedType.h"
#include "ValueProfile.h"
-#include <wtf/BoundsCheckedPointer.h>
-#include <wtf/Vector.h>
namespace JSC { namespace DFG {
@@ -57,240 +57,6 @@ struct StructureTransitionData {
}
};
-// Entries in the NodeType enum (below) are composed of an id, a result type (possibly none)
-// and some additional informative flags (must generate, is constant, etc).
-#define NodeResultMask 0xF
-#define NodeResultJS 0x1
-#define NodeResultNumber 0x2
-#define NodeResultInt32 0x3
-#define NodeResultBoolean 0x4
-#define NodeResultStorage 0x5
-#define NodeMustGenerate 0x10 // set on nodes that have side effects, and may not trivially be removed by DCE.
-#define NodeHasVarArgs 0x20
-#define NodeClobbersWorld 0x40
-#define NodeMightClobber 0x80
-#define NodeArithMask 0xF00
-#define NodeUseBottom 0x000
-#define NodeUsedAsNumber 0x100
-#define NodeNeedsNegZero 0x200
-#define NodeUsedAsMask 0x300
-#define NodeMayOverflow 0x400
-#define NodeMayNegZero 0x800
-#define NodeBehaviorMask 0xc00
-
-typedef uint16_t NodeFlags;
-
-static inline bool nodeUsedAsNumber(NodeFlags flags)
-{
- return !!(flags & NodeUsedAsNumber);
-}
-
-static inline bool nodeCanTruncateInteger(NodeFlags flags)
-{
- return !nodeUsedAsNumber(flags);
-}
-
-static inline bool nodeCanIgnoreNegativeZero(NodeFlags flags)
-{
- return !(flags & NodeNeedsNegZero);
-}
-
-static inline bool nodeMayOverflow(NodeFlags flags)
-{
- return !!(flags & NodeMayOverflow);
-}
-
-static inline bool nodeCanSpeculateInteger(NodeFlags flags)
-{
- if (flags & NodeMayOverflow)
- return !nodeUsedAsNumber(flags);
-
- if (flags & NodeMayNegZero)
- return nodeCanIgnoreNegativeZero(flags);
-
- return true;
-}
-
-const char* arithNodeFlagsAsString(NodeFlags);
-
-// This macro defines a set of information about all known node types, used to populate NodeId, NodeType below.
-#define FOR_EACH_DFG_OP(macro) \
- /* A constant in the CodeBlock's constant pool. */\
- macro(JSConstant, NodeResultJS) \
- \
- /* A constant not in the CodeBlock's constant pool. Uses get patched to jumps that exit the */\
- /* code block. */\
- macro(WeakJSConstant, NodeResultJS) \
- \
- /* Nodes for handling functions (both as call and as construct). */\
- macro(ConvertThis, NodeResultJS) \
- macro(CreateThis, NodeResultJS) /* Note this is not MustGenerate since we're returning it anyway. */ \
- macro(GetCallee, NodeResultJS) \
- \
- /* Nodes for local variable access. */\
- macro(GetLocal, NodeResultJS) \
- macro(SetLocal, 0) \
- macro(Phantom, NodeMustGenerate) \
- macro(Nop, 0) \
- macro(Phi, 0) \
- macro(Flush, NodeMustGenerate) \
- \
- /* Marker for arguments being set. */\
- macro(SetArgument, 0) \
- \
- /* Hint that inlining begins here. No code is generated for this node. It's only */\
- /* used for copying OSR data into inline frame data, to support reification of */\
- /* call frames of inlined functions. */\
- macro(InlineStart, 0) \
- \
- /* Nodes for bitwise operations. */\
- macro(BitAnd, NodeResultInt32) \
- macro(BitOr, NodeResultInt32) \
- macro(BitXor, NodeResultInt32) \
- macro(BitLShift, NodeResultInt32) \
- macro(BitRShift, NodeResultInt32) \
- macro(BitURShift, NodeResultInt32) \
- /* Bitwise operators call ToInt32 on their operands. */\
- macro(ValueToInt32, NodeResultInt32 | NodeMustGenerate) \
- /* Used to box the result of URShift nodes (result has range 0..2^32-1). */\
- macro(UInt32ToNumber, NodeResultNumber) \
- \
- /* Nodes for arithmetic operations. */\
- macro(ArithAdd, NodeResultNumber) \
- macro(ArithSub, NodeResultNumber) \
- macro(ArithNegate, NodeResultNumber) \
- macro(ArithMul, NodeResultNumber) \
- macro(ArithDiv, NodeResultNumber) \
- macro(ArithMod, NodeResultNumber) \
- macro(ArithAbs, NodeResultNumber) \
- macro(ArithMin, NodeResultNumber) \
- macro(ArithMax, NodeResultNumber) \
- macro(ArithSqrt, NodeResultNumber) \
- \
- /* Add of values may either be arithmetic, or result in string concatenation. */\
- macro(ValueAdd, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
- \
- /* Property access. */\
- /* PutByValAlias indicates a 'put' aliases a prior write to the same property. */\
- /* Since a put to 'length' may invalidate optimizations here, */\
- /* this must be the directly subsequent property put. */\
- macro(GetByVal, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
- macro(PutByVal, NodeMustGenerate | NodeClobbersWorld) \
- macro(PutByValAlias, NodeMustGenerate | NodeClobbersWorld) \
- macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- macro(GetByIdFlush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
- macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \
- macro(CheckStructure, NodeMustGenerate) \
- macro(PutStructure, NodeMustGenerate | NodeClobbersWorld) \
- macro(GetPropertyStorage, NodeResultStorage) \
- macro(GetIndexedPropertyStorage, NodeMustGenerate | NodeResultStorage) \
- macro(GetByOffset, NodeResultJS) \
- macro(PutByOffset, NodeMustGenerate | NodeClobbersWorld) \
- macro(GetArrayLength, NodeResultInt32) \
- macro(GetStringLength, NodeResultInt32) \
- macro(GetByteArrayLength, NodeResultInt32) \
- macro(GetInt8ArrayLength, NodeResultInt32) \
- macro(GetInt16ArrayLength, NodeResultInt32) \
- macro(GetInt32ArrayLength, NodeResultInt32) \
- macro(GetUint8ArrayLength, NodeResultInt32) \
- macro(GetUint8ClampedArrayLength, NodeResultInt32) \
- macro(GetUint16ArrayLength, NodeResultInt32) \
- macro(GetUint32ArrayLength, NodeResultInt32) \
- macro(GetFloat32ArrayLength, NodeResultInt32) \
- macro(GetFloat64ArrayLength, NodeResultInt32) \
- macro(GetScopeChain, NodeResultJS) \
- macro(GetScopedVar, NodeResultJS | NodeMustGenerate) \
- macro(PutScopedVar, NodeMustGenerate | NodeClobbersWorld) \
- macro(GetGlobalVar, NodeResultJS | NodeMustGenerate) \
- macro(PutGlobalVar, NodeMustGenerate | NodeClobbersWorld) \
- macro(CheckFunction, NodeMustGenerate) \
- \
- /* Optimizations for array mutation. */\
- macro(ArrayPush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- macro(ArrayPop, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- \
- /* Optimizations for string access */ \
- macro(StringCharCodeAt, NodeResultInt32) \
- macro(StringCharAt, NodeResultJS) \
- \
- /* Nodes for comparison operations. */\
- macro(CompareLess, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
- macro(CompareLessEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
- macro(CompareGreater, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
- macro(CompareGreaterEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
- macro(CompareEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
- macro(CompareStrictEq, NodeResultBoolean) \
- \
- /* Calls. */\
- macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
- macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
- \
- /* Allocations. */\
- macro(NewObject, NodeResultJS) \
- macro(NewArray, NodeResultJS | NodeHasVarArgs) \
- macro(NewArrayBuffer, NodeResultJS) \
- macro(NewRegexp, NodeResultJS) \
- \
- /* Resolve nodes. */\
- macro(Resolve, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- macro(ResolveBase, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- macro(ResolveBaseStrictPut, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- macro(ResolveGlobal, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- \
- /* Nodes for misc operations. */\
- macro(Breakpoint, NodeMustGenerate | NodeClobbersWorld) \
- macro(CheckHasInstance, NodeMustGenerate) \
- macro(InstanceOf, NodeResultBoolean) \
- macro(LogicalNot, NodeResultBoolean | NodeMightClobber) \
- macro(ToPrimitive, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
- macro(StrCat, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
- \
- /* Nodes used for activations. Activation support works by having it anchored at */\
- /* epilgoues via TearOffActivation, and all CreateActivation nodes kept alive by */\
- /* being threaded with each other. */\
- macro(CreateActivation, NodeResultJS) \
- macro(TearOffActivation, NodeMustGenerate) \
- \
- /* Nodes for creating functions. */\
- macro(NewFunctionNoCheck, NodeResultJS) \
- macro(NewFunction, NodeResultJS) \
- macro(NewFunctionExpression, NodeResultJS) \
- \
- /* Block terminals. */\
- macro(Jump, NodeMustGenerate) \
- macro(Branch, NodeMustGenerate) \
- macro(Return, NodeMustGenerate) \
- macro(Throw, NodeMustGenerate) \
- macro(ThrowReferenceError, NodeMustGenerate) \
- \
- /* This is a pseudo-terminal. It means that execution should fall out of DFG at */\
- /* this point, but execution does continue in the basic block - just in a */\
- /* different compiler. */\
- macro(ForceOSRExit, NodeMustGenerate)
-
-// This enum generates a monotonically increasing id for all Node types,
-// and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask).
-enum NodeType {
-#define DFG_OP_ENUM(opcode, flags) opcode,
- FOR_EACH_DFG_OP(DFG_OP_ENUM)
-#undef DFG_OP_ENUM
- LastNodeType
-};
-
-// Specifies the default flags for each node.
-inline NodeFlags defaultFlags(NodeType op)
-{
- switch (op) {
-#define DFG_OP_ENUM(opcode, flags) case opcode: return flags;
- FOR_EACH_DFG_OP(DFG_OP_ENUM)
-#undef DFG_OP_ENUM
- default:
- ASSERT_NOT_REACHED();
- return 0;
- }
-}
-
// This type used in passing an immediate argument to Node constructor;
// distinguishes an immediate value (typically an index into a CodeBlock data structure -
// a constant index, argument, or identifier) from a NodeIndex.
@@ -313,32 +79,32 @@ struct Node {
// Construct a node with up to 3 children, no immediate value.
Node(NodeType op, CodeOrigin codeOrigin, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
: codeOrigin(codeOrigin)
- , children(NodeReferenceBlob::Fixed, child1, child2, child3)
+ , children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(InvalidVirtualRegister)
, m_refCount(0)
, m_prediction(PredictNone)
{
setOpAndDefaultFlags(op);
- ASSERT(!(flags & NodeHasVarArgs));
+ ASSERT(!(m_flags & NodeHasVarArgs));
}
// Construct a node with up to 3 children and an immediate value.
Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
: codeOrigin(codeOrigin)
- , children(NodeReferenceBlob::Fixed, child1, child2, child3)
+ , children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(InvalidVirtualRegister)
, m_refCount(0)
, m_opInfo(imm.m_value)
, m_prediction(PredictNone)
{
setOpAndDefaultFlags(op);
- ASSERT(!(flags & NodeHasVarArgs));
+ ASSERT(!(m_flags & NodeHasVarArgs));
}
// Construct a node with up to 3 children and two immediate values.
Node(NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
: codeOrigin(codeOrigin)
- , children(NodeReferenceBlob::Fixed, child1, child2, child3)
+ , children(AdjacencyList::Fixed, child1, child2, child3)
, m_virtualRegister(InvalidVirtualRegister)
, m_refCount(0)
, m_opInfo(imm1.m_value)
@@ -346,13 +112,13 @@ struct Node {
, m_prediction(PredictNone)
{
setOpAndDefaultFlags(op);
- ASSERT(!(flags & NodeHasVarArgs));
+ ASSERT(!(m_flags & NodeHasVarArgs));
}
// Construct a node with a variable number of children and two immediate values.
Node(VarArgTag, NodeType op, CodeOrigin codeOrigin, OpInfo imm1, OpInfo imm2, unsigned firstChild, unsigned numChildren)
: codeOrigin(codeOrigin)
- , children(NodeReferenceBlob::Variable, firstChild, numChildren)
+ , children(AdjacencyList::Variable, firstChild, numChildren)
, m_virtualRegister(InvalidVirtualRegister)
, m_refCount(0)
, m_opInfo(imm1.m_value)
@@ -360,28 +126,64 @@ struct Node {
, m_prediction(PredictNone)
{
setOpAndDefaultFlags(op);
- ASSERT(flags & NodeHasVarArgs);
+ ASSERT(m_flags & NodeHasVarArgs);
+ }
+
+ NodeType op() const { return static_cast<NodeType>(m_op); }
+ NodeFlags flags() const { return m_flags; }
+
+ void setOp(NodeType op)
+ {
+ m_op = op;
+ }
+
+ void setFlags(NodeFlags flags)
+ {
+ m_flags = flags;
+ }
+
+ bool mergeFlags(NodeFlags flags)
+ {
+ NodeFlags newFlags = m_flags | flags;
+ if (newFlags == m_flags)
+ return false;
+ m_flags = newFlags;
+ return true;
+ }
+
+ bool filterFlags(NodeFlags flags)
+ {
+ NodeFlags newFlags = m_flags & flags;
+ if (newFlags == m_flags)
+ return false;
+ m_flags = newFlags;
+ return true;
+ }
+
+ bool clearFlags(NodeFlags flags)
+ {
+ return filterFlags(~flags);
}
void setOpAndDefaultFlags(NodeType op)
{
- this->op = op;
- flags = defaultFlags(op);
+ m_op = op;
+ m_flags = defaultFlags(op);
}
bool mustGenerate()
{
- return flags & NodeMustGenerate;
+ return m_flags & NodeMustGenerate;
}
bool isConstant()
{
- return op == JSConstant;
+ return op() == JSConstant;
}
bool isWeakConstant()
{
- return op == WeakJSConstant;
+ return op() == WeakJSConstant;
}
bool hasConstant()
@@ -402,7 +204,7 @@ struct Node {
JSValue valueOfJSConstant(CodeBlock* codeBlock)
{
- if (op == WeakJSConstant)
+ if (op() == WeakJSConstant)
return JSValue(weakConstant());
return codeBlock->constantRegister(FirstConstantRegisterIndex + constantNumber()).get();
}
@@ -434,7 +236,7 @@ struct Node {
bool hasVariableAccessData()
{
- switch (op) {
+ switch (op()) {
case GetLocal:
case SetLocal:
case Phi:
@@ -464,7 +266,7 @@ struct Node {
bool hasIdentifier()
{
- switch (op) {
+ switch (op()) {
case GetById:
case GetByIdFlush:
case PutById:
@@ -486,13 +288,13 @@ struct Node {
unsigned resolveGlobalDataIndex()
{
- ASSERT(op == ResolveGlobal);
+ ASSERT(op() == ResolveGlobal);
return m_opInfo;
}
bool hasArithNodeFlags()
{
- switch (op) {
+ switch (op()) {
case UInt32ToNumber:
case ArithAdd:
case ArithSub:
@@ -515,33 +317,15 @@ struct Node {
// to know if it can speculate on negative zero.
NodeFlags arithNodeFlags()
{
- NodeFlags result = flags & NodeArithMask;
- if (op == ArithMul)
+ NodeFlags result = m_flags;
+ if (op() == ArithMul || op() == ArithDiv || op() == ArithMod)
return result;
return result & ~NodeNeedsNegZero;
}
- void setArithNodeFlag(NodeFlags newFlags)
- {
- ASSERT(!(newFlags & ~NodeArithMask));
-
- flags &= ~NodeArithMask;
- flags |= newFlags;
- }
-
- bool mergeArithNodeFlags(NodeFlags newFlags)
- {
- ASSERT(!(newFlags & ~NodeArithMask));
- newFlags = flags | newFlags;
- if (newFlags == flags)
- return false;
- flags = newFlags;
- return true;
- }
-
bool hasConstantBuffer()
{
- return op == NewArrayBuffer;
+ return op() == NewArrayBuffer;
}
unsigned startConstant()
@@ -558,7 +342,7 @@ struct Node {
bool hasRegexpIndex()
{
- return op == NewRegexp;
+ return op() == NewRegexp;
}
unsigned regexpIndex()
@@ -569,7 +353,7 @@ struct Node {
bool hasVarNumber()
{
- return op == GetGlobalVar || op == PutGlobalVar || op == GetScopedVar || op == PutScopedVar;
+ return op() == GetGlobalVar || op() == PutGlobalVar || op() == GetScopedVar || op() == PutScopedVar;
}
unsigned varNumber()
@@ -580,7 +364,7 @@ struct Node {
bool hasScopeChainDepth()
{
- return op == GetScopeChain;
+ return op() == GetScopeChain;
}
unsigned scopeChainDepth()
@@ -591,42 +375,42 @@ struct Node {
bool hasResult()
{
- return flags & NodeResultMask;
+ return m_flags & NodeResultMask;
}
bool hasInt32Result()
{
- return (flags & NodeResultMask) == NodeResultInt32;
+ return (m_flags & NodeResultMask) == NodeResultInt32;
}
bool hasNumberResult()
{
- return (flags & NodeResultMask) == NodeResultNumber;
+ return (m_flags & NodeResultMask) == NodeResultNumber;
}
bool hasJSResult()
{
- return (flags & NodeResultMask) == NodeResultJS;
+ return (m_flags & NodeResultMask) == NodeResultJS;
}
bool hasBooleanResult()
{
- return (flags & NodeResultMask) == NodeResultBoolean;
+ return (m_flags & NodeResultMask) == NodeResultBoolean;
}
bool isJump()
{
- return op == Jump;
+ return op() == Jump;
}
bool isBranch()
{
- return op == Branch;
+ return op() == Branch;
}
bool isTerminal()
{
- switch (op) {
+ switch (op()) {
case Jump:
case Branch:
case Return:
@@ -676,7 +460,7 @@ struct Node {
bool hasHeapPrediction()
{
- switch (op) {
+ switch (op()) {
case GetById:
case GetByIdFlush:
case GetByVal:
@@ -690,6 +474,9 @@ struct Node {
case ResolveGlobal:
case ArrayPop:
case ArrayPush:
+ case RegExpExec:
+ case RegExpTest:
+ case GetGlobalVar:
return true;
default:
return false;
@@ -711,7 +498,7 @@ struct Node {
bool hasFunctionCheckData()
{
- return op == CheckFunction;
+ return op() == CheckFunction;
}
JSFunction* function()
@@ -722,7 +509,7 @@ struct Node {
bool hasStructureTransitionData()
{
- return op == PutStructure;
+ return op() == PutStructure;
}
StructureTransitionData& structureTransitionData()
@@ -733,7 +520,7 @@ struct Node {
bool hasStructureSet()
{
- return op == CheckStructure;
+ return op() == CheckStructure;
}
StructureSet& structureSet()
@@ -744,7 +531,7 @@ struct Node {
bool hasStorageAccessData()
{
- return op == GetByOffset || op == PutByOffset;
+ return op() == GetByOffset || op() == PutByOffset;
}
unsigned storageAccessDataIndex()
@@ -755,8 +542,8 @@ struct Node {
bool hasFunctionDeclIndex()
{
- return op == NewFunction
- || op == NewFunctionNoCheck;
+ return op() == NewFunction
+ || op() == NewFunctionNoCheck;
}
unsigned functionDeclIndex()
@@ -767,7 +554,7 @@ struct Node {
bool hasFunctionExprIndex()
{
- return op == NewFunctionExpression;
+ return op() == NewFunctionExpression;
}
unsigned functionExprIndex()
@@ -830,41 +617,41 @@ struct Node {
return !--m_refCount;
}
- NodeUse child1()
+ Edge child1()
{
- ASSERT(!(flags & NodeHasVarArgs));
+ ASSERT(!(m_flags & NodeHasVarArgs));
return children.child1();
}
// This is useful if you want to do a fast check on the first child
// before also doing a check on the opcode. Use this with care and
// avoid it if possible.
- NodeUse child1Unchecked()
+ Edge child1Unchecked()
{
return children.child1Unchecked();
}
- NodeUse child2()
+ Edge child2()
{
- ASSERT(!(flags & NodeHasVarArgs));
+ ASSERT(!(m_flags & NodeHasVarArgs));
return children.child2();
}
- NodeUse child3()
+ Edge child3()
{
- ASSERT(!(flags & NodeHasVarArgs));
+ ASSERT(!(m_flags & NodeHasVarArgs));
return children.child3();
}
unsigned firstChild()
{
- ASSERT(flags & NodeHasVarArgs);
+ ASSERT(m_flags & NodeHasVarArgs);
return children.firstChild();
}
unsigned numChildren()
{
- ASSERT(flags & NodeHasVarArgs);
+ ASSERT(m_flags & NodeHasVarArgs);
return children.numChildren();
}
@@ -890,12 +677,12 @@ struct Node {
bool shouldSpeculateNumber()
{
- return isNumberPrediction(prediction()) || prediction() == PredictNone;
+ return isNumberPrediction(prediction());
}
- bool shouldNotSpeculateInteger()
+ bool shouldSpeculateBoolean()
{
- return !!(prediction() & PredictDouble);
+ return isBooleanPrediction(prediction());
}
bool shouldSpeculateFinalObject()
@@ -913,27 +700,14 @@ struct Node {
return isArrayPrediction(prediction());
}
- bool shouldSpeculateByteArray()
- {
- return !!(prediction() & PredictByteArray);
- }
-
bool shouldSpeculateInt8Array()
{
-#if CPU(X86) || CPU(X86_64)
return isInt8ArrayPrediction(prediction());
-#else
- return false;
-#endif
}
bool shouldSpeculateInt16Array()
{
-#if CPU(X86) || CPU(X86_64)
return isInt16ArrayPrediction(prediction());
-#else
- return false;
-#endif
}
bool shouldSpeculateInt32Array()
@@ -963,11 +737,7 @@ struct Node {
bool shouldSpeculateFloat32Array()
{
-#if CPU(X86) || CPU(X86_64)
return isFloat32ArrayPrediction(prediction());
-#else
- return false;
-#endif
}
bool shouldSpeculateFloat64Array()
@@ -1002,14 +772,12 @@ struct Node {
static bool shouldSpeculateFinalObject(Node& op1, Node& op2)
{
- return (op1.shouldSpeculateFinalObject() && op2.shouldSpeculateObject())
- || (op1.shouldSpeculateObject() && op2.shouldSpeculateFinalObject());
+ return op1.shouldSpeculateFinalObject() && op2.shouldSpeculateFinalObject();
}
static bool shouldSpeculateArray(Node& op1, Node& op2)
{
- return (op1.shouldSpeculateArray() && op2.shouldSpeculateObject())
- || (op1.shouldSpeculateObject() && op2.shouldSpeculateArray());
+ return op1.shouldSpeculateArray() && op2.shouldSpeculateArray();
}
bool canSpeculateInteger()
@@ -1030,14 +798,14 @@ struct Node {
fprintf(out, ", @%u", child3().index());
}
- uint16_t op; // real type is NodeType
- NodeFlags flags;
// Used to look up exception handling information (currently implemented as a bytecode index).
CodeOrigin codeOrigin;
// References to up to 3 children, or links to a variable length set of children.
- NodeReferenceBlob children;
+ AdjacencyList children;
private:
+ uint16_t m_op; // real type is NodeType
+ NodeFlags m_flags;
// The virtual register number (spill location) associated with this .
VirtualRegister m_virtualRegister;
// The number of uses of the result of this operation (+1 for 'must generate' nodes, which have side-effects).
diff --git a/Source/JavaScriptCore/dfg/DFGNode.cpp b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
index c53817ba9..54e6b69b7 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.cpp
+++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.cpp
@@ -24,26 +24,80 @@
*/
#include "config.h"
-#include "DFGNode.h"
+#include "DFGNodeFlags.h"
#if ENABLE(DFG_JIT)
+#include <wtf/BoundsCheckedPointer.h>
+
namespace JSC { namespace DFG {
-const char* arithNodeFlagsAsString(NodeFlags flags)
+const char* nodeFlagsAsString(NodeFlags flags)
{
- flags &= NodeArithMask;
-
if (!flags)
return "<empty>";
- static const int size = 64;
+ static const int size = 128;
static char description[size];
BoundsCheckedPointer<char> ptr(description, size);
bool hasPrinted = false;
+ if (flags & NodeResultMask) {
+ switch (flags & NodeResultMask) {
+ case NodeResultJS:
+ ptr.strcat("ResultJS");
+ break;
+ case NodeResultNumber:
+ ptr.strcat("ResultNumber");
+ break;
+ case NodeResultInt32:
+ ptr.strcat("ResultInt32");
+ break;
+ case NodeResultBoolean:
+ ptr.strcat("ResultBoolean");
+ break;
+ case NodeResultStorage:
+ ptr.strcat("ResultStorage");
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ hasPrinted = true;
+ }
+
+ if (flags & NodeMustGenerate) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("MustGenerate");
+ hasPrinted = true;
+ }
+
+ if (flags & NodeHasVarArgs) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("HasVarArgs");
+ hasPrinted = true;
+ }
+
+ if (flags & NodeClobbersWorld) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("ClobbersWorld");
+ hasPrinted = true;
+ }
+
+ if (flags & NodeMightClobber) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("MightClobber");
+ hasPrinted = true;
+ }
+
if (flags & NodeUsedAsNumber) {
+ if (hasPrinted)
+ ptr.strcat("|");
ptr.strcat("UsedAsNum");
hasPrinted = true;
}
@@ -69,6 +123,13 @@ const char* arithNodeFlagsAsString(NodeFlags flags)
hasPrinted = true;
}
+ if (flags & NodeUsedAsInt) {
+ if (hasPrinted)
+ ptr.strcat("|");
+ ptr.strcat("UsedAsInt");
+ hasPrinted = true;
+ }
+
*ptr++ = 0;
return description;
diff --git a/Source/JavaScriptCore/dfg/DFGNodeFlags.h b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
new file mode 100644
index 000000000..16d76655e
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGNodeFlags.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGNodeFlags_h
+#define DFGNodeFlags_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include <wtf/StdLibExtras.h>
+
+namespace JSC { namespace DFG {
+
+// Entries in the NodeType enum (below) are composed of an id, a result type (possibly none)
+// and some additional informative flags (must generate, is constant, etc).
+#define NodeResultMask 0xF
+#define NodeResultJS 0x1
+#define NodeResultNumber 0x2
+#define NodeResultInt32 0x3
+#define NodeResultBoolean 0x4
+#define NodeResultStorage 0x5
+
+#define NodeMustGenerate 0x10 // set on nodes that have side effects, and may not trivially be removed by DCE.
+#define NodeHasVarArgs 0x20
+#define NodeClobbersWorld 0x40
+#define NodeMightClobber 0x80
+
+#define NodeBehaviorMask 0x300
+#define NodeMayOverflow 0x100
+#define NodeMayNegZero 0x200
+
+#define NodeBackPropMask 0x1C00
+#define NodeUseBottom 0x000
+#define NodeUsedAsNumber 0x400 // The result of this computation may be used in a context that observes fractional results.
+#define NodeNeedsNegZero 0x800 // The result of this computation may be used in a context that observes -0.
+#define NodeUsedAsValue (NodeUsedAsNumber | NodeNeedsNegZero)
+#define NodeUsedAsInt 0x1000 // The result of this computation is known to be used in a context that prefers, but does not require, integer values.
+
+typedef uint16_t NodeFlags;
+
+static inline bool nodeUsedAsNumber(NodeFlags flags)
+{
+ return !!(flags & NodeUsedAsNumber);
+}
+
+static inline bool nodeCanTruncateInteger(NodeFlags flags)
+{
+ return !nodeUsedAsNumber(flags);
+}
+
+static inline bool nodeCanIgnoreNegativeZero(NodeFlags flags)
+{
+ return !(flags & NodeNeedsNegZero);
+}
+
+static inline bool nodeMayOverflow(NodeFlags flags)
+{
+ return !!(flags & NodeMayOverflow);
+}
+
+static inline bool nodeCanSpeculateInteger(NodeFlags flags)
+{
+ if (flags & NodeMayOverflow)
+ return !nodeUsedAsNumber(flags);
+
+ if (flags & NodeMayNegZero)
+ return nodeCanIgnoreNegativeZero(flags);
+
+ return true;
+}
+
+const char* nodeFlagsAsString(NodeFlags);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGNodeFlags_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
new file mode 100644
index 000000000..8a3828c31
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGNodeType_h
+#define DFGNodeType_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGNodeFlags.h"
+
+namespace JSC { namespace DFG {
+
+// This macro defines a set of information about all known node types, used to populate NodeId, NodeType below.
+#define FOR_EACH_DFG_OP(macro) \
+ /* A constant in the CodeBlock's constant pool. */\
+ macro(JSConstant, NodeResultJS) \
+ \
+ /* A constant not in the CodeBlock's constant pool. Uses get patched to jumps that exit the */\
+ /* code block. */\
+ macro(WeakJSConstant, NodeResultJS) \
+ \
+ /* Nodes for handling functions (both as call and as construct). */\
+ macro(ConvertThis, NodeResultJS) \
+ macro(CreateThis, NodeResultJS) /* Note this is not MustGenerate since we're returning it anyway. */ \
+ macro(GetCallee, NodeResultJS) \
+ \
+ /* Nodes for local variable access. */\
+ macro(GetLocal, NodeResultJS) \
+ macro(SetLocal, 0) \
+ macro(Phantom, NodeMustGenerate) \
+ macro(Nop, 0) \
+ macro(Phi, 0) \
+ macro(Flush, NodeMustGenerate) \
+ \
+ /* Marker for arguments being set. */\
+ macro(SetArgument, 0) \
+ \
+ /* Hint that inlining begins here. No code is generated for this node. It's only */\
+ /* used for copying OSR data into inline frame data, to support reification of */\
+ /* call frames of inlined functions. */\
+ macro(InlineStart, 0) \
+ \
+ /* Nodes for bitwise operations. */\
+ macro(BitAnd, NodeResultInt32) \
+ macro(BitOr, NodeResultInt32) \
+ macro(BitXor, NodeResultInt32) \
+ macro(BitLShift, NodeResultInt32) \
+ macro(BitRShift, NodeResultInt32) \
+ macro(BitURShift, NodeResultInt32) \
+ /* Bitwise operators call ToInt32 on their operands. */\
+ macro(ValueToInt32, NodeResultInt32 | NodeMustGenerate) \
+ /* Used to box the result of URShift nodes (result has range 0..2^32-1). */\
+ macro(UInt32ToNumber, NodeResultNumber) \
+ \
+ /* Used to cast known integers to doubles, so as to separate the double form */\
+ /* of the value from the integer form. */\
+ macro(Int32ToDouble, NodeResultNumber) \
+ /* Used to speculate that a double value is actually an integer. */\
+ macro(DoubleAsInt32, NodeResultInt32) \
+ /* Used to record places where we must check if a value is a number. */\
+ macro(CheckNumber, NodeMustGenerate) \
+ \
+ /* Nodes for arithmetic operations. */\
+ macro(ArithAdd, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithSub, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithNegate, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithMul, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithDiv, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithMod, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithAbs, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithMin, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithMax, NodeResultNumber | NodeMustGenerate) \
+ macro(ArithSqrt, NodeResultNumber | NodeMustGenerate) \
+ \
+ /* Add of values may either be arithmetic, or result in string concatenation. */\
+ macro(ValueAdd, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
+ \
+ /* Property access. */\
+ /* PutByValAlias indicates a 'put' aliases a prior write to the same property. */\
+ /* Since a put to 'length' may invalidate optimizations here, */\
+ /* this must be the directly subsequent property put. */\
+ macro(GetByVal, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
+ macro(PutByVal, NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutByValAlias, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetByIdFlush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
+ macro(PutByIdDirect, NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckStructure, NodeMustGenerate) \
+ macro(PutStructure, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetPropertyStorage, NodeResultStorage) \
+ macro(GetIndexedPropertyStorage, NodeMustGenerate | NodeResultStorage) \
+ macro(GetByOffset, NodeResultJS) \
+ macro(PutByOffset, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetArrayLength, NodeResultInt32) \
+ macro(GetStringLength, NodeResultInt32) \
+ macro(GetInt8ArrayLength, NodeResultInt32) \
+ macro(GetInt16ArrayLength, NodeResultInt32) \
+ macro(GetInt32ArrayLength, NodeResultInt32) \
+ macro(GetUint8ArrayLength, NodeResultInt32) \
+ macro(GetUint8ClampedArrayLength, NodeResultInt32) \
+ macro(GetUint16ArrayLength, NodeResultInt32) \
+ macro(GetUint32ArrayLength, NodeResultInt32) \
+ macro(GetFloat32ArrayLength, NodeResultInt32) \
+ macro(GetFloat64ArrayLength, NodeResultInt32) \
+ macro(GetScopeChain, NodeResultJS) \
+ macro(GetScopedVar, NodeResultJS | NodeMustGenerate) \
+ macro(PutScopedVar, NodeMustGenerate | NodeClobbersWorld) \
+ macro(GetGlobalVar, NodeResultJS | NodeMustGenerate) \
+ macro(PutGlobalVar, NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckFunction, NodeMustGenerate) \
+ \
+ /* Optimizations for array mutation. */\
+ macro(ArrayPush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ArrayPop, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ \
+ /* Optimizations for regular expression matching. */\
+ macro(RegExpExec, NodeResultJS | NodeMustGenerate) \
+ macro(RegExpTest, NodeResultJS | NodeMustGenerate) \
+ \
+ /* Optimizations for string access */ \
+ macro(StringCharCodeAt, NodeResultInt32) \
+ macro(StringCharAt, NodeResultJS) \
+ \
+ /* Nodes for comparison operations. */\
+ macro(CompareLess, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareLessEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareGreater, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareGreaterEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareEq, NodeResultBoolean | NodeMustGenerate | NodeMightClobber) \
+ macro(CompareStrictEq, NodeResultBoolean) \
+ \
+ /* Calls. */\
+ macro(Call, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
+ macro(Construct, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
+ \
+ /* Allocations. */\
+ macro(NewObject, NodeResultJS) \
+ macro(NewArray, NodeResultJS | NodeHasVarArgs) \
+ macro(NewArrayBuffer, NodeResultJS) \
+ macro(NewRegexp, NodeResultJS) \
+ \
+ /* Resolve nodes. */\
+ macro(Resolve, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ResolveBase, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ResolveBaseStrictPut, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(ResolveGlobal, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ \
+ /* Nodes for misc operations. */\
+ macro(Breakpoint, NodeMustGenerate | NodeClobbersWorld) \
+ macro(CheckHasInstance, NodeMustGenerate) \
+ macro(InstanceOf, NodeResultBoolean) \
+ macro(IsUndefined, NodeResultBoolean) \
+ macro(IsBoolean, NodeResultBoolean) \
+ macro(IsNumber, NodeResultBoolean) \
+ macro(IsString, NodeResultBoolean) \
+ macro(IsObject, NodeResultBoolean) \
+ macro(IsFunction, NodeResultBoolean) \
+ macro(LogicalNot, NodeResultBoolean | NodeMightClobber) \
+ macro(ToPrimitive, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
+ macro(StrCat, NodeResultJS | NodeMustGenerate | NodeHasVarArgs | NodeClobbersWorld) \
+ \
+ /* Nodes used for activations. Activation support works by having it anchored at */\
+ /* epilgoues via TearOffActivation, and all CreateActivation nodes kept alive by */\
+ /* being threaded with each other. */\
+ macro(CreateActivation, NodeResultJS) \
+ macro(TearOffActivation, NodeMustGenerate) \
+ \
+ /* Nodes for creating functions. */\
+ macro(NewFunctionNoCheck, NodeResultJS) \
+ macro(NewFunction, NodeResultJS) \
+ macro(NewFunctionExpression, NodeResultJS) \
+ \
+ /* Block terminals. */\
+ macro(Jump, NodeMustGenerate) \
+ macro(Branch, NodeMustGenerate) \
+ macro(Return, NodeMustGenerate) \
+ macro(Throw, NodeMustGenerate) \
+ macro(ThrowReferenceError, NodeMustGenerate) \
+ \
+ /* This is a pseudo-terminal. It means that execution should fall out of DFG at */\
+ /* this point, but execution does continue in the basic block - just in a */\
+ /* different compiler. */\
+ macro(ForceOSRExit, NodeMustGenerate)
+
+// This enum generates a monotonically increasing id for all Node types,
+// and is used by the subsequent enum to fill out the id (as accessed via the NodeIdMask).
+enum NodeType {
+#define DFG_OP_ENUM(opcode, flags) opcode,
+ FOR_EACH_DFG_OP(DFG_OP_ENUM)
+#undef DFG_OP_ENUM
+ LastNodeType
+};
+
+// Specifies the default flags for each node.
+inline NodeFlags defaultFlags(NodeType op)
+{
+ switch (op) {
+#define DFG_OP_ENUM(opcode, flags) case opcode: return flags;
+ FOR_EACH_DFG_OP(DFG_OP_ENUM)
+#undef DFG_OP_ENUM
+ default:
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGNodeType_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
index 65f4cfcdd..21c76c6fe 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
@@ -141,13 +141,11 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
dataLog(" OSR should succeed.\n");
#endif
-#if USE(JSVALUE64)
// 3) Perform data format conversions.
for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
if (entry->m_localsForcedDouble.get(local))
*bitwise_cast<double*>(exec->registers() + local) = exec->registers()[local].jsValue().asNumber();
}
-#endif
// 4) Fix the call frame.
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.h b/Source/JavaScriptCore/dfg/DFGOSREntry.h
index e38a6ceb9..a5c264cd6 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.h
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.h
@@ -27,7 +27,7 @@
#define DFGOSREntry_h
#include "DFGAbstractValue.h"
-#include "DFGOperands.h"
+#include "Operands.h"
#include <wtf/BitVector.h>
namespace JSC {
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index 95e4d8bf2..844be2a7c 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -33,17 +33,28 @@
namespace JSC { namespace DFG {
+static unsigned computeNumVariablesForCodeOrigin(
+ CodeBlock* codeBlock, const CodeOrigin& codeOrigin)
+{
+ if (!codeOrigin.inlineCallFrame)
+ return codeBlock->m_numCalleeRegisters;
+ return
+ codeOrigin.inlineCallFrame->stackOffset +
+ baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters;
+}
+
OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
: m_jsValueSource(jsValueSource)
, m_valueProfile(valueProfile)
, m_check(check)
, m_nodeIndex(jit->m_compileIndex)
, m_codeOrigin(jit->m_codeOriginForOSR)
+ , m_codeOriginForExitProfile(m_codeOrigin)
, m_recoveryIndex(recoveryIndex)
, m_kind(kind)
, m_count(0)
, m_arguments(jit->m_arguments.size())
- , m_variables(jit->m_variables.size())
+ , m_variables(computeNumVariablesForCodeOrigin(jit->m_jit.graph().m_profiledBlock, jit->m_codeOriginForOSR))
, m_lastSetOperand(jit->m_lastSetOperand)
{
ASSERT(m_codeOrigin.isSet());
@@ -67,7 +78,7 @@ bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, Code
if (static_cast<double>(m_count) / dfgCodeBlock->speculativeFailCounter() <= Options::osrExitProminenceForFrequentExitSite)
return false;
- return baselineCodeBlockForOriginAndBaselineCodeBlock(m_codeOrigin, profiledCodeBlock)->addFrequentExitSite(FrequentExitSite(m_codeOrigin.bytecodeIndex, m_kind));
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(m_codeOriginForExitProfile, profiledCodeBlock)->addFrequentExitSite(FrequentExitSite(m_codeOriginForExitProfile.bytecodeIndex, m_kind));
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h
index c28f7cbef..841fdddb3 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h
@@ -35,9 +35,9 @@
#include "DFGCorrectableJumpPoint.h"
#include "DFGExitProfile.h"
#include "DFGGPRInfo.h"
-#include "DFGOperands.h"
#include "MacroAssembler.h"
#include "MethodOfGettingAValueProfile.h"
+#include "Operands.h"
#include "ValueProfile.h"
#include "ValueRecovery.h"
#include <wtf/Vector.h>
@@ -93,6 +93,7 @@ struct OSRExit {
CorrectableJumpPoint m_check;
NodeIndex m_nodeIndex;
CodeOrigin m_codeOrigin;
+ CodeOrigin m_codeOriginForExitProfile;
unsigned m_recoveryIndex;
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index a195ee3ba..a63f671bc 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -74,7 +74,8 @@ void compileOSRExit(ExecState* exec)
{
AssemblyHelpers jit(globalData, codeBlock);
OSRExitCompiler exitCompiler(jit);
-
+
+ jit.jitAssertHasValidCallFrame();
exitCompiler.compileExit(exit, recovery);
LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
@@ -95,6 +96,64 @@ void compileOSRExit(ExecState* exec)
} // extern "C"
+void OSRExitCompiler::handleExitCounts(const OSRExit& exit)
+{
+ m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
+
+ AssemblyHelpers::JumpList tooFewFails;
+
+ if (exit.m_kind == InadequateCoverage) {
+ // Proceed based on the assumption that we can profitably optimize this code once
+ // it has executed enough times.
+
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()), GPRInfo::regT2);
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
+ m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
+ m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
+ m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
+
+ tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(Options::forcedOSRExitCountForReoptimization)));
+ } else {
+ // Proceed based on the assumption that we can handle these exits so long as they
+ // don't get too frequent.
+
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
+ m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
+ m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
+ m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
+ m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
+ m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
+
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
+
+ tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold())));
+ m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
+
+ tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1));
+ }
+
+ // Reoptimize as soon as possible.
+ m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
+ AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
+
+ tooFewFails.link(&m_jit);
+
+ // Adjust the execution counter such that the target is to only optimize after a while.
+ int32_t targetValue =
+ ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
+ m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
+ m_jit.baselineCodeBlock());
+ m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
+ m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
+
+ doneAdjusting.link(&m_jit);
+}
+
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
index e08362f22..523644982 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h
@@ -70,6 +70,8 @@ private:
return result;
}
+ void handleExitCounts(const OSRExit&);
+
AssemblyHelpers& m_jit;
Vector<unsigned> m_poisonScratchIndices;
};
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index bd45020d1..3c7f27579 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -562,42 +562,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
// counter to 0; otherwise we set the counter to
// counterValueForOptimizeAfterWarmUp().
- m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
-
- m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
-
- m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
- m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
- m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
- m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
- m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
- m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
-
- m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
-
- AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold()));
- m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
-
- AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
-
- // Reoptimize as soon as possible.
- m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
- m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
- AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
-
- fewFails.link(&m_jit);
- lowFailRate.link(&m_jit);
-
- // Adjust the execution counter such that the target is to only optimize after a while.
- int32_t targetValue =
- ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
- m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
- m_jit.baselineCodeBlock());
- m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
- m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
- m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
-
- doneAdjusting.link(&m_jit);
+ handleExitCounts(exit);
// 12) Load the result of the last bytecode operation into regT0.
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 91a515c48..86d47b90e 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -541,42 +541,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
// counter to 0; otherwise we set the counter to
// counterValueForOptimizeAfterWarmUp().
- m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
-
- m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
-
- m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
- m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
- m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
- m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
- m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
- m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
-
- m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
-
- AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold()));
- m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
-
- AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
-
- // Reoptimize as soon as possible.
- m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
- m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
- AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
-
- fewFails.link(&m_jit);
- lowFailRate.link(&m_jit);
-
- // Adjust the execution counter such that the target is to only optimize after a while.
- int32_t targetValue =
- ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
- m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
- m_jit.baselineCodeBlock());
- m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
- m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
- m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
-
- doneAdjusting.link(&m_jit);
+ handleExitCounts(exit);
// 14) Load the result of the last bytecode operation into regT0.
diff --git a/Source/JavaScriptCore/dfg/DFGOperands.h b/Source/JavaScriptCore/dfg/DFGOperands.h
deleted file mode 100644
index 9ce43119c..000000000
--- a/Source/JavaScriptCore/dfg/DFGOperands.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef DFGOperands_h
-#define DFGOperands_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(DFG_JIT)
-
-#include "CallFrame.h"
-#include <wtf/Vector.h>
-
-namespace JSC { namespace DFG {
-
-// argument 0 is 'this'.
-inline bool operandIsArgument(int operand) { return operand < 0; }
-inline int operandToArgument(int operand) { return -operand + CallFrame::thisArgumentOffset(); }
-inline int argumentToOperand(int argument) { return -argument + CallFrame::thisArgumentOffset(); }
-
-template<typename T> struct OperandValueTraits;
-
-template<typename T>
-struct OperandValueTraits {
- static T defaultValue() { return T(); }
- static void dump(const T& value, FILE* out) { value.dump(out); }
-};
-
-template<typename T, typename Traits = OperandValueTraits<T> >
-class Operands {
-public:
- Operands() { }
-
- explicit Operands(size_t numArguments, size_t numLocals)
- {
- m_arguments.fill(Traits::defaultValue(), numArguments);
- m_locals.fill(Traits::defaultValue(), numLocals);
- }
-
- size_t numberOfArguments() const { return m_arguments.size(); }
- size_t numberOfLocals() const { return m_locals.size(); }
-
- T& argument(size_t idx) { return m_arguments[idx]; }
- const T& argument(size_t idx) const { return m_arguments[idx]; }
-
- T& local(size_t idx) { return m_locals[idx]; }
- const T& local(size_t idx) const { return m_locals[idx]; }
-
- void ensureLocals(size_t size)
- {
- if (size <= m_locals.size())
- return;
-
- size_t oldSize = m_locals.size();
- m_locals.resize(size);
- for (size_t i = oldSize; i < m_locals.size(); ++i)
- m_locals[i] = Traits::defaultValue();
- }
-
- void setLocal(size_t idx, const T& value)
- {
- ensureLocals(idx + 1);
-
- m_locals[idx] = value;
- }
-
- T getLocal(size_t idx)
- {
- if (idx >= m_locals.size())
- return Traits::defaultValue();
- return m_locals[idx];
- }
-
- void setArgumentFirstTime(size_t idx, const T& value)
- {
- ASSERT(m_arguments[idx] == Traits::defaultValue());
- argument(idx) = value;
- }
-
- void setLocalFirstTime(size_t idx, const T& value)
- {
- ASSERT(idx >= m_locals.size() || m_locals[idx] == Traits::defaultValue());
- setLocal(idx, value);
- }
-
- T& operand(int operand)
- {
- if (operandIsArgument(operand)) {
- int argument = operandToArgument(operand);
- return m_arguments[argument];
- }
-
- return m_locals[operand];
- }
-
- const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); }
-
- void setOperand(int operand, const T& value)
- {
- if (operandIsArgument(operand)) {
- int argument = operandToArgument(operand);
- m_arguments[argument] = value;
- return;
- }
-
- setLocal(operand, value);
- }
-
- void clear()
- {
- for (size_t i = 0; i < m_arguments.size(); ++i)
- m_arguments[i] = Traits::defaultValue();
- for (size_t i = 0; i < m_locals.size(); ++i)
- m_locals[i] = Traits::defaultValue();
- }
-
-private:
- Vector<T, 8> m_arguments;
- Vector<T, 16> m_locals;
-};
-
-template<typename T, typename Traits>
-void dumpOperands(Operands<T, Traits>& operands, FILE* out)
-{
- for (size_t argument = 0; argument < operands.numberOfArguments(); ++argument) {
- if (argument)
- fprintf(out, " ");
- Traits::dump(operands.argument(argument), out);
- }
- fprintf(out, " : ");
- for (size_t local = 0; local < operands.numberOfLocals(); ++local) {
- if (local)
- fprintf(out, " ");
- Traits::dump(operands.local(local), out);
- }
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
-#endif // DFGOperands_h
-
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index 304c54d95..0e6e2f972 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -34,7 +34,6 @@
#include <wtf/InlineASM.h>
#include "Interpreter.h"
#include "JSActivation.h"
-#include "JSByteArray.h"
#include "JSGlobalData.h"
#include "JSStaticScopeObject.h"
#include "Operations.h"
@@ -60,6 +59,7 @@
#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset) \
asm( \
+ ".text" "\n" \
".globl " SYMBOL_STRING(function) "\n" \
HIDE_SYMBOL(function) "\n" \
SYMBOL_STRING(function) ":" "\n" \
@@ -83,7 +83,7 @@
".thumb" "\n" \
".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
SYMBOL_STRING(function) ":" "\n" \
- "cpy a2, lr" "\n" \
+ "mov a2, lr" "\n" \
"b " SYMBOL_STRING_RELOCATION(function) "WithReturnAddress" "\n" \
);
@@ -96,7 +96,7 @@
".thumb" "\n" \
".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
SYMBOL_STRING(function) ":" "\n" \
- "cpy a4, lr" "\n" \
+ "mov a4, lr" "\n" \
"b " SYMBOL_STRING_RELOCATION(function) "WithReturnAddress" "\n" \
);
@@ -149,12 +149,13 @@ namespace JSC { namespace DFG {
template<bool strict>
static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index, JSValue value)
{
- JSGlobalData* globalData = &exec->globalData();
-
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
+
if (isJSArray(baseValue)) {
JSArray* array = asArray(baseValue);
if (array->canSetIndex(index)) {
- array->setIndex(*globalData, index, value);
+ array->setIndex(globalData, index, value);
return;
}
@@ -162,20 +163,6 @@ static inline void putByVal(ExecState* exec, JSValue baseValue, uint32_t index,
return;
}
- if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(index)) {
- JSByteArray* byteArray = asByteArray(baseValue);
- // FIXME: the JITstub used to relink this to an optimized form!
- if (value.isInt32()) {
- byteArray->setIndex(index, value.asInt32());
- return;
- }
-
- if (value.isNumber()) {
- byteArray->setIndex(index, value.asNumber());
- return;
- }
- }
-
baseValue.putByIndex(exec, index, value, strict);
}
@@ -230,7 +217,8 @@ inline JSCell* createThis(ExecState* exec, JSCell* prototype, JSFunction* constr
#endif
JSGlobalData& globalData = exec->globalData();
-
+ NativeCallFrameTracer tracer(&globalData, exec);
+
Structure* structure;
if (prototype->isObject())
structure = asObject(prototype)->inheritorID(globalData);
@@ -245,7 +233,7 @@ JSCell* DFG_OPERATION operationCreateThis(ExecState* exec, JSCell* prototype)
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return createThis(exec, prototype, asFunction(exec->callee()));
+ return createThis(exec, prototype, jsCast<JSFunction*>(exec->callee()));
}
JSCell* DFG_OPERATION operationCreateThisInlined(ExecState* exec, JSCell* prototype, JSCell* constructor)
@@ -253,7 +241,7 @@ JSCell* DFG_OPERATION operationCreateThisInlined(ExecState* exec, JSCell* protot
JSGlobalData* globalData = &exec->globalData();
NativeCallFrameTracer tracer(globalData, exec);
- return createThis(exec, prototype, static_cast<JSFunction*>(constructor));
+ return createThis(exec, prototype, jsCast<JSFunction*>(constructor));
}
JSCell* DFG_OPERATION operationNewObject(ExecState* exec)
@@ -293,6 +281,9 @@ EncodedJSValue DFG_OPERATION operationValueAddNotNumber(ExecState* exec, Encoded
static inline EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t index)
{
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
+
// FIXME: the JIT used to handle these in compiled code!
if (isJSArray(base) && asArray(base)->canGetIndex(index))
return JSValue::encode(asArray(base)->getIndex(index));
@@ -301,10 +292,6 @@ static inline EncodedJSValue getByVal(ExecState* exec, JSCell* base, uint32_t in
if (isJSString(base) && asString(base)->canGetIndex(index))
return JSValue::encode(asString(base)->getIndex(exec, index));
- // FIXME: the JITstub used to relink this to an optimized form!
- if (isJSByteArray(base) && asByteArray(base)->canAccessIndex(index))
- return JSValue::encode(asByteArray(base)->getIndex(exec, index));
-
return JSValue::encode(JSValue(base).get(exec, index));
}
@@ -502,6 +489,34 @@ EncodedJSValue DFG_OPERATION operationArrayPush(ExecState* exec, EncodedJSValue
array->push(exec, JSValue::decode(encodedValue));
return JSValue::encode(jsNumber(array->length()));
}
+
+EncodedJSValue DFG_OPERATION operationRegExpExec(ExecState* exec, JSCell* base, JSCell* argument)
+{
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
+
+ if (!base->inherits(&RegExpObject::s_info))
+ return throwVMTypeError(exec);
+
+ ASSERT(argument->isString() || argument->isObject());
+ JSString* input = argument->isString() ? asString(argument) : asObject(argument)->toString(exec);
+ return JSValue::encode(asRegExpObject(base)->exec(exec, input));
+}
+
+size_t DFG_OPERATION operationRegExpTest(ExecState* exec, JSCell* base, JSCell* argument)
+{
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
+
+ if (!base->inherits(&RegExpObject::s_info)) {
+ throwTypeError(exec);
+ return false;
+ }
+
+ ASSERT(argument->isString() || argument->isObject());
+ JSString* input = argument->isString() ? asString(argument) : asObject(argument)->toString(exec);
+ return asRegExpObject(base)->test(exec, input);
+}
EncodedJSValue DFG_OPERATION operationArrayPop(ExecState* exec, JSArray* array)
{
@@ -770,6 +785,8 @@ static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializ
ASSERT(callType != CallTypeJS);
if (callType == CallTypeHost) {
+ NativeCallFrameTracer tracer(globalData, execCallee);
+ execCallee->setCallee(asObject(callee));
globalData->hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
if (globalData->exception)
return 0;
@@ -790,6 +807,8 @@ static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializ
ASSERT(constructType != ConstructTypeJS);
if (constructType == ConstructTypeHost) {
+ NativeCallFrameTracer tracer(globalData, execCallee);
+ execCallee->setCallee(asObject(callee));
globalData->hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
if (globalData->exception)
return 0;
@@ -813,7 +832,7 @@ inline void* linkFor(ExecState* execCallee, ReturnAddressPtr returnAddress, Code
if (!calleeAsFunctionCell)
return handleHostCall(execCallee, calleeAsValue, kind);
- JSFunction* callee = asFunction(calleeAsFunctionCell);
+ JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
execCallee->setScopeChain(callee->scopeUnchecked());
ExecutableBase* executable = callee->executable();
@@ -865,7 +884,7 @@ inline void* virtualFor(ExecState* execCallee, CodeSpecializationKind kind)
if (UNLIKELY(!calleeAsFunctionCell))
return handleHostCall(execCallee, calleeAsValue, kind);
- JSFunction* function = asFunction(calleeAsFunctionCell);
+ JSFunction* function = jsCast<JSFunction*>(calleeAsFunctionCell);
execCallee->setScopeChain(function->scopeUnchecked());
ExecutableBase* executable = function->executable();
if (UNLIKELY(!executable->hasJITCodeFor(kind))) {
@@ -976,11 +995,15 @@ EncodedJSValue DFG_OPERATION operationNewArray(ExecState* exec, void* start, siz
EncodedJSValue DFG_OPERATION operationNewArrayBuffer(ExecState* exec, size_t start, size_t size)
{
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
return JSValue::encode(constructArray(exec, exec->codeBlock()->constantBuffer(start), size));
}
EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr)
{
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
RegExp* regexp = static_cast<RegExp*>(regexpPtr);
if (!regexp->isValid()) {
throwError(exec, createSyntaxError(exec, "Invalid flags supplied to RegExp constructor."));
@@ -993,6 +1016,7 @@ EncodedJSValue DFG_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr
JSCell* DFG_OPERATION operationCreateActivation(ExecState* exec)
{
JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
JSActivation* activation = JSActivation::create(
globalData, exec, static_cast<FunctionExecutable*>(exec->codeBlock()->ownerExecutable()));
exec->setScopeChain(exec->scopeChain()->push(activation));
@@ -1003,12 +1027,16 @@ void DFG_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activatio
{
ASSERT(activation);
ASSERT(activation->inherits(&JSActivation::s_info));
- static_cast<JSActivation*>(activation)->tearOff(exec->globalData());
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
+ jsCast<JSActivation*>(activation)->tearOff(exec->globalData());
}
JSCell* DFG_OPERATION operationNewFunction(ExecState* exec, JSCell* functionExecutable)
{
ASSERT(functionExecutable->inherits(&FunctionExecutable::s_info));
+ JSGlobalData& globalData = exec->globalData();
+ NativeCallFrameTracer tracer(&globalData, exec);
return static_cast<FunctionExecutable*>(functionExecutable)->make(exec, exec->scopeChain());
}
@@ -1027,6 +1055,21 @@ JSCell* DFG_OPERATION operationNewFunctionExpression(ExecState* exec, JSCell* fu
return function;
}
+size_t DFG_OPERATION operationIsObject(EncodedJSValue value)
+{
+ return jsIsObjectType(JSValue::decode(value));
+}
+
+size_t DFG_OPERATION operationIsFunction(EncodedJSValue value)
+{
+ return jsIsFunctionType(JSValue::decode(value));
+}
+
+double DFG_OPERATION operationFModOnInts(int32_t a, int32_t b)
+{
+ return fmod(a, b);
+}
+
DFGHandlerEncoded DFG_OPERATION lookupExceptionHandler(ExecState* exec, uint32_t callIndex)
{
JSGlobalData* globalData = &exec->globalData();
@@ -1096,7 +1139,17 @@ void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void*
SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
CodeBlock* codeBlock = debugInfo->codeBlock;
CodeBlock* alternative = codeBlock->alternative();
- dataLog("Speculation failure in %p at @%u with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, success/fail %u/%u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->jitExecuteCounter() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? alternative->optimizationDelayCounter() : 0, codeBlock->speculativeSuccessCounter(), codeBlock->speculativeFailCounter());
+ dataLog("Speculation failure in %p at @%u with executeCounter = %d, "
+ "reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, "
+ "success/fail %u/(%u+%u)\n",
+ codeBlock,
+ debugInfo->nodeIndex,
+ alternative ? alternative->jitExecuteCounter() : 0,
+ alternative ? alternative->reoptimizationRetryCounter() : 0,
+ alternative ? alternative->optimizationDelayCounter() : 0,
+ codeBlock->speculativeSuccessCounter(),
+ codeBlock->speculativeFailCounter(),
+ codeBlock->forcedOSRExitCounter());
}
#endif
@@ -1120,6 +1173,7 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
);
#elif CPU(X86)
asm (
+".text" "\n" \
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
@@ -1137,7 +1191,7 @@ HIDE_SYMBOL(getHostCallReturnValue) "\n"
".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
"ldr r5, [r5, #-40]" "\n"
- "cpy r0, r5" "\n"
+ "mov r0, r5" "\n"
"b " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
);
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index 4ca58d621..52e99cb95 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -60,36 +60,40 @@ extern "C" {
I: Identifier*
G: GlobalResolveInfo*
*/
-typedef int32_t DFG_OPERATION (*Z_DFGOperation_D)(double);
-typedef JSCell* DFG_OPERATION (*C_DFGOperation_E)(ExecState*);
-typedef JSCell* DFG_OPERATION (*C_DFGOperation_EC)(ExecState*, JSCell*);
-typedef JSCell* DFG_OPERATION (*C_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EA)(ExecState*, JSArray*);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECI)(ExecState*, JSCell*, Identifier*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EGI)(ExecState*, GlobalResolveInfo*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EI)(ExecState*, Identifier*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJ)(ExecState*, EncodedJSValue);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJP)(ExecState*, EncodedJSValue, void*);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ECI)(ExecState*, JSCell*, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJI)(ExecState*, EncodedJSValue, Identifier*);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EJP)(ExecState*, EncodedJSValue, void*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EP)(ExecState*, void*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPP)(ExecState*, void*, void*);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EGI)(ExecState*, GlobalResolveInfo*, Identifier*);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EPS)(ExecState*, void*, size_t);
typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_ESS)(ExecState*, size_t, size_t);
-typedef EncodedJSValue DFG_OPERATION (*J_DFGOperation_EI)(ExecState*, Identifier*);
+typedef JSCell* DFG_OPERATION (*C_DFGOperation_E)(ExecState*);
+typedef JSCell* DFG_OPERATION (*C_DFGOperation_EC)(ExecState*, JSCell*);
+typedef JSCell* DFG_OPERATION (*C_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef double DFG_OPERATION (*D_DFGOperation_DD)(double, double);
+typedef double DFG_OPERATION (*D_DFGOperation_ZZ)(int32_t, int32_t);
+typedef double DFG_OPERATION (*D_DFGOperation_EJ)(ExecState*, EncodedJSValue);
+typedef int32_t DFG_OPERATION (*Z_DFGOperation_D)(double);
+typedef size_t DFG_OPERATION (*S_DFGOperation_ECC)(ExecState*, JSCell*, JSCell*);
typedef size_t DFG_OPERATION (*S_DFGOperation_EJ)(ExecState*, EncodedJSValue);
typedef size_t DFG_OPERATION (*S_DFGOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
-typedef void DFG_OPERATION (*V_DFGOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef size_t DFG_OPERATION (*S_DFGOperation_J)(EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_EAZJ)(ExecState*, JSArray*, int32_t, EncodedJSValue);
typedef void DFG_OPERATION (*V_DFGOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
-typedef void DFG_OPERATION (*V_DFGOperation_EJPP)(ExecState*, EncodedJSValue, EncodedJSValue, void*);
typedef void DFG_OPERATION (*V_DFGOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, Identifier*);
+typedef void DFG_OPERATION (*V_DFGOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef void DFG_OPERATION (*V_DFGOperation_EJPP)(ExecState*, EncodedJSValue, EncodedJSValue, void*);
typedef void DFG_OPERATION (*V_DFGOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
-typedef void DFG_OPERATION (*V_DFGOperation_EAZJ)(ExecState*, JSArray*, int32_t, EncodedJSValue);
-typedef double DFG_OPERATION (*D_DFGOperation_DD)(double, double);
-typedef double DFG_OPERATION (*D_DFGOperation_EJ)(ExecState*, EncodedJSValue);
-typedef void* DFG_OPERATION (*P_DFGOperation_E)(ExecState*);
typedef void DFG_OPERATION (V_DFGOperation_EC)(ExecState*, JSCell*);
+typedef void* DFG_OPERATION (*P_DFGOperation_E)(ExecState*);
// These routines are provide callbacks out to C++ implementations of operations too complex to JIT.
JSCell* DFG_OPERATION operationNewObject(ExecState*);
@@ -123,6 +127,7 @@ void DFG_OPERATION operationPutByValBeyondArrayBoundsStrict(ExecState*, JSArray*
void DFG_OPERATION operationPutByValBeyondArrayBoundsNonStrict(ExecState*, JSArray*, int32_t index, EncodedJSValue encodedValue);
EncodedJSValue DFG_OPERATION operationArrayPush(ExecState*, EncodedJSValue encodedValue, JSArray*);
EncodedJSValue DFG_OPERATION operationArrayPop(ExecState*, JSArray*);
+EncodedJSValue DFG_OPERATION operationRegExpExec(ExecState*, JSCell*, JSCell*);
void DFG_OPERATION operationPutByIdStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
void DFG_OPERATION operationPutByIdNonStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
void DFG_OPERATION operationPutByIdDirectStrict(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
@@ -136,6 +141,7 @@ void DFG_OPERATION operationPutByIdNonStrictBuildList(ExecState*, EncodedJSValue
void DFG_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
void DFG_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
// These comparisons return a boolean within a size_t such that the value is zero extended to fill the register.
+size_t DFG_OPERATION operationRegExpTest(ExecState*, JSCell*, JSCell*);
size_t DFG_OPERATION operationCompareLess(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
size_t DFG_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
size_t DFG_OPERATION operationCompareGreater(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
@@ -151,24 +157,17 @@ JSCell* DFG_OPERATION operationCreateActivation(ExecState*);
void DFG_OPERATION operationTearOffActivation(ExecState*, JSCell*);
JSCell* DFG_OPERATION operationNewFunction(ExecState*, JSCell*);
JSCell* DFG_OPERATION operationNewFunctionExpression(ExecState*, JSCell*);
+double DFG_OPERATION operationFModOnInts(int32_t, int32_t);
+size_t DFG_OPERATION operationIsObject(EncodedJSValue);
+size_t DFG_OPERATION operationIsFunction(EncodedJSValue);
// This method is used to lookup an exception hander, keyed by faultLocation, which is
// the return location from one of the calls out to one of the helper operations above.
-struct DFGHandler {
- DFGHandler(ExecState* exec, void* handler)
- {
- u.s.exec = exec;
- u.s.handler = handler;
- }
-
-#if !CPU(X86_64)
- uint64_t encoded()
- {
- COMPILE_ASSERT(sizeof(Union) == sizeof(uint64_t), DFGHandler_Union_is_64bit);
- return u.encoded;
- }
-#endif
+// According to C++ rules, a type used for the return signature of function with C linkage (i.e.
+// 'extern "C"') needs to be POD; hence putting any constructors into it could cause either compiler
+// warnings, or worse, a change in the ABI used to return these types.
+struct DFGHandler {
union Union {
struct Struct {
ExecState* exec;
@@ -177,17 +176,27 @@ struct DFGHandler {
uint64_t encoded;
} u;
};
+
+inline DFGHandler createDFGHandler(ExecState* exec, void* handler)
+{
+ DFGHandler result;
+ result.u.s.exec = exec;
+ result.u.s.handler = handler;
+ return result;
+}
+
#if CPU(X86_64)
typedef DFGHandler DFGHandlerEncoded;
inline DFGHandlerEncoded dfgHandlerEncoded(ExecState* exec, void* handler)
{
- return DFGHandler(exec, handler);
+ return createDFGHandler(exec, handler);
}
#else
typedef uint64_t DFGHandlerEncoded;
inline DFGHandlerEncoded dfgHandlerEncoded(ExecState* exec, void* handler)
{
- return DFGHandler(exec, handler).encoded();
+ COMPILE_ASSERT(sizeof(DFGHandler::Union) == sizeof(uint64_t), DFGHandler_Union_is_64bit);
+ return createDFGHandler(exec, handler).u.encoded;
}
#endif
DFGHandlerEncoded DFG_OPERATION lookupExceptionHandler(ExecState*, uint32_t);
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
index 98bdaac06..53174604a 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,8 +45,8 @@ public:
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
m_count = 0;
#endif
- // Two stage process: first propagate predictions, then propagate while doing double voting.
-
+ // 1) propagate predictions
+
do {
m_changed = false;
@@ -64,6 +64,8 @@ public:
propagateBackward();
} while (m_changed);
+ // 2) repropagate predictions while doing double voting.
+
do {
m_changed = false;
doRoundOfDoubleVoting();
@@ -75,8 +77,6 @@ public:
doRoundOfDoubleVoting();
propagateBackward();
} while (m_changed);
-
- fixup();
}
private:
@@ -100,15 +100,31 @@ private:
return m_graph[m_compileIndex].predict(prediction);
}
+ bool isNotNegZero(NodeIndex nodeIndex)
+ {
+ if (!m_graph.isNumberConstant(nodeIndex))
+ return false;
+ double value = m_graph.valueOfNumberConstant(nodeIndex);
+ return !value && 1.0 / value < 0.0;
+ }
+
+ bool isNotZero(NodeIndex nodeIndex)
+ {
+ if (!m_graph.isNumberConstant(nodeIndex))
+ return false;
+ return !!m_graph.valueOfNumberConstant(nodeIndex);
+ }
+
void propagate(Node& node)
{
if (!node.shouldGenerate())
return;
- NodeType op = static_cast<NodeType>(node.op);
+ NodeType op = node.op();
+ NodeFlags flags = node.flags() & NodeBackPropMask;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
+ dataLog(" %s @%u: %s ", Graph::opName(op), m_compileIndex, nodeFlagsAsString(flags));
#endif
bool changed = false;
@@ -121,14 +137,26 @@ private:
}
case GetLocal: {
- PredictedType prediction = node.variableAccessData()->prediction();
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ PredictedType prediction = variableAccessData->prediction();
if (prediction)
changed |= mergePrediction(prediction);
+
+ changed |= variableAccessData->mergeFlags(flags);
break;
}
case SetLocal: {
- changed |= node.variableAccessData()->predict(m_graph[node.child1()].prediction());
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ changed |= variableAccessData->predict(m_graph[node.child1()].prediction());
+ changed |= m_graph[node.child1()].mergeFlags(variableAccessData->flags());
+ break;
+ }
+
+ case Flush: {
+ // Make sure that the analysis knows that flushed locals escape.
+ VariableAccessData* variableAccessData = node.variableAccessData();
+ changed |= variableAccessData->mergeFlags(NodeUsedAsValue);
break;
}
@@ -137,21 +165,47 @@ private:
case BitXor:
case BitRShift:
case BitLShift:
- case BitURShift:
+ case BitURShift: {
+ changed |= setPrediction(PredictInt32);
+ flags |= NodeUsedAsInt;
+ flags &= ~(NodeUsedAsNumber | NodeNeedsNegZero);
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
+ break;
+ }
+
case ValueToInt32: {
changed |= setPrediction(PredictInt32);
+ flags |= NodeUsedAsInt;
+ flags &= ~(NodeUsedAsNumber | NodeNeedsNegZero);
+ changed |= m_graph[node.child1()].mergeFlags(flags);
break;
}
- case ArrayPop:
+ case ArrayPop: {
+ changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergeDefaultFlags(node);
+ break;
+ }
+
case ArrayPush: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergePrediction(node.getHeapPrediction());
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsValue);
+ break;
+ }
+
+ case RegExpExec:
+ case RegExpTest: {
+ changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergeDefaultFlags(node);
break;
}
case StringCharCodeAt: {
changed |= mergePrediction(PredictInt32);
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
break;
}
@@ -160,19 +214,26 @@ private:
PredictedType right = m_graph[node.child2()].prediction();
if (left && right) {
- if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ if (isInt32Prediction(mergePredictions(left, right))
+ && nodeCanSpeculateInteger(node.arithNodeFlags()))
changed |= mergePrediction(PredictInt32);
else
changed |= mergePrediction(PredictDouble);
}
+
+ flags |= NodeUsedAsValue;
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
break;
}
case UInt32ToNumber: {
if (nodeCanSpeculateInteger(node.arithNodeFlags()))
- changed |= setPrediction(PredictInt32);
+ changed |= mergePrediction(PredictInt32);
else
- changed |= setPrediction(PredictNumber);
+ changed |= mergePrediction(PredictNumber);
+
+ changed |= m_graph[node.child1()].mergeFlags(flags);
break;
}
@@ -192,10 +253,34 @@ private:
} else
changed |= mergePrediction(PredictString | PredictInt32 | PredictDouble);
}
+
+ if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
+ flags &= ~NodeNeedsNegZero;
+
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
+ break;
+ }
+
+ case ArithAdd: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (m_graph.addShouldSpeculateInteger(node))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+
+ if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
+ flags &= ~NodeNeedsNegZero;
+
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
break;
}
- case ArithAdd:
case ArithSub: {
PredictedType left = m_graph[node.child1()].prediction();
PredictedType right = m_graph[node.child2()].prediction();
@@ -206,6 +291,12 @@ private:
else
changed |= mergePrediction(PredictDouble);
}
+
+ if (isNotZero(node.child1().index()) || isNotZero(node.child2().index()))
+ flags &= ~NodeNeedsNegZero;
+
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
break;
}
@@ -216,37 +307,68 @@ private:
else
changed |= mergePrediction(PredictDouble);
}
+
+ changed |= m_graph[node.child1()].mergeFlags(flags);
break;
- case ArithMul:
case ArithMin:
- case ArithMax:
+ case ArithMax: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (isInt32Prediction(mergePredictions(left, right))
+ && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+
+ flags |= NodeUsedAsNumber;
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
+ break;
+ }
+
+ case ArithMul:
case ArithDiv: {
PredictedType left = m_graph[node.child1()].prediction();
PredictedType right = m_graph[node.child2()].prediction();
if (left && right) {
- if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ if (isInt32Prediction(mergePredictions(left, right))
+ && nodeCanSpeculateInteger(node.arithNodeFlags()))
changed |= mergePrediction(PredictInt32);
else
changed |= mergePrediction(PredictDouble);
}
+
+ // As soon as a multiply happens, we can easily end up in the part
+ // of the double domain where the point at which you do truncation
+ // can change the outcome. So, ArithMul always checks for overflow
+ // no matter what, and always forces its inputs to check as well.
+
+ flags |= NodeUsedAsNumber | NodeNeedsNegZero;
+ changed |= m_graph[node.child1()].mergeFlags(flags);
+ changed |= m_graph[node.child2()].mergeFlags(flags);
break;
}
case ArithSqrt: {
changed |= setPrediction(PredictDouble);
+ changed |= m_graph[node.child1()].mergeFlags(flags | NodeUsedAsValue);
break;
}
case ArithAbs: {
PredictedType child = m_graph[node.child1()].prediction();
- if (child) {
- if (nodeCanSpeculateInteger(node.arithNodeFlags()))
- changed |= mergePrediction(child);
- else
- changed |= setPrediction(PredictDouble);
- }
+ if (nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(child);
+ else
+ changed |= setPrediction(PredictDouble);
+
+ flags &= ~NodeNeedsNegZero;
+ changed |= m_graph[node.child1()].mergeFlags(flags);
break;
}
@@ -257,64 +379,63 @@ private:
case CompareGreaterEq:
case CompareEq:
case CompareStrictEq:
- case InstanceOf: {
+ case InstanceOf:
+ case IsUndefined:
+ case IsBoolean:
+ case IsNumber:
+ case IsString:
+ case IsObject:
+ case IsFunction: {
changed |= setPrediction(PredictBoolean);
+ changed |= mergeDefaultFlags(node);
break;
}
case GetById: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
- else if (codeBlock()->identifier(node.identifierNumber()) == globalData().propertyNames->length) {
- // If there is no prediction from value profiles, check if we might be
- // able to infer the type ourselves.
- bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
- bool isString = isStringPrediction(m_graph[node.child1()].prediction());
- bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
- bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
- bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
- bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
- bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
- bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
- bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
- bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
- bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
- bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
- if (isArray || isString || isByteArray || isInt8Array || isInt16Array || isInt32Array || isUint8Array || isUint8ClampedArray || isUint16Array || isUint32Array || isFloat32Array || isFloat64Array)
- changed |= mergePrediction(PredictInt32);
- }
+ changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergeDefaultFlags(node);
break;
}
case GetByIdFlush:
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergeDefaultFlags(node);
break;
case GetByVal: {
- if (m_graph[node.child1()].shouldSpeculateUint32Array() || m_graph[node.child1()].shouldSpeculateFloat32Array() || m_graph[node.child1()].shouldSpeculateFloat64Array())
+ if (m_graph[node.child1()].shouldSpeculateFloat32Array()
+ || m_graph[node.child1()].shouldSpeculateFloat64Array())
changed |= mergePrediction(PredictDouble);
- else if (node.getHeapPrediction())
+ else
changed |= mergePrediction(node.getHeapPrediction());
+
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
break;
}
case GetPropertyStorage:
case GetIndexedPropertyStorage: {
changed |= setPrediction(PredictOther);
+ changed |= mergeDefaultFlags(node);
break;
}
case GetByOffset: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergeDefaultFlags(node);
break;
}
case Call:
case Construct: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
+ changed |= mergePrediction(node.getHeapPrediction());
+ for (unsigned childIdx = node.firstChild();
+ childIdx < node.firstChild() + node.numChildren();
+ ++childIdx) {
+ Edge edge = m_graph.m_varArgChildren[childIdx];
+ changed |= m_graph[edge].mergeFlags(NodeUsedAsValue);
+ }
break;
}
@@ -327,18 +448,17 @@ private:
}
changed |= mergePrediction(prediction);
}
+ changed |= mergeDefaultFlags(node);
break;
}
case GetGlobalVar: {
- PredictedType prediction = m_graph.getGlobalVarPrediction(node.varNumber());
- if (prediction)
- changed |= mergePrediction(prediction);
+ changed |= mergePrediction(node.getHeapPrediction());
break;
}
case PutGlobalVar: {
- changed |= m_graph.predictGlobalVar(node.varNumber(), m_graph[node.child1()].prediction());
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
break;
}
@@ -348,8 +468,7 @@ private:
case ResolveBaseStrictPut:
case ResolveGlobal: {
PredictedType prediction = node.getHeapPrediction();
- if (prediction)
- changed |= mergePrediction(prediction);
+ changed |= mergePrediction(prediction);
break;
}
@@ -366,10 +485,21 @@ private:
case CreateThis:
case NewObject: {
changed |= setPrediction(PredictFinalObject);
+ changed |= mergeDefaultFlags(node);
+ break;
+ }
+
+ case NewArray: {
+ changed |= setPrediction(PredictArray);
+ for (unsigned childIdx = node.firstChild();
+ childIdx < node.firstChild() + node.numChildren();
+ ++childIdx) {
+ Edge edge = m_graph.m_varArgChildren[childIdx];
+ changed |= m_graph[edge].mergeFlags(NodeUsedAsValue);
+ }
break;
}
- case NewArray:
case NewArrayBuffer: {
changed |= setPrediction(PredictArray);
break;
@@ -380,9 +510,19 @@ private:
break;
}
- case StringCharAt:
+ case StringCharAt: {
+ changed |= setPrediction(PredictString);
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
+ break;
+ }
+
case StrCat: {
changed |= setPrediction(PredictString);
+ for (unsigned childIdx = node.firstChild();
+ childIdx < node.firstChild() + node.numChildren();
+ ++childIdx)
+ changed |= m_graph[m_graph.m_varArgChildren[childIdx]].mergeFlags(NodeUsedAsNumber);
break;
}
@@ -399,10 +539,12 @@ private:
} else if (child & PredictObjectMask) {
// Objects get turned into strings. So if the input has hints of objectness,
// the output will have hinsts of stringiness.
- changed |= mergePrediction(mergePredictions(child & ~PredictObjectMask, PredictString));
+ changed |= mergePrediction(
+ mergePredictions(child & ~PredictObjectMask, PredictString));
} else
changed |= mergePrediction(child);
}
+ changed |= m_graph[node.child1()].mergeFlags(flags);
break;
}
@@ -418,8 +560,8 @@ private:
break;
}
+ case PutByValAlias:
case GetArrayLength:
- case GetByteArrayLength:
case GetInt8ArrayLength:
case GetInt16ArrayLength:
case GetInt32ArrayLength:
@@ -429,38 +571,56 @@ private:
case GetUint32ArrayLength:
case GetFloat32ArrayLength:
case GetFloat64ArrayLength:
- case GetStringLength: {
+ case GetStringLength:
+ case Int32ToDouble:
+ case DoubleAsInt32: {
// This node should never be visible at this stage of compilation. It is
// inserted by fixup(), which follows this phase.
ASSERT_NOT_REACHED();
break;
}
- case Flush:
+ case PutByVal:
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
+ changed |= m_graph[node.child3()].mergeFlags(NodeUsedAsValue);
+ break;
+
+ case PutScopedVar:
+ case Return:
+ case Throw:
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ break;
+
+ case PutById:
+ case PutByIdDirect:
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsValue);
+ break;
+
+ case PutByOffset:
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[node.child3()].mergeFlags(NodeUsedAsValue);
+ break;
+
+ case Phi:
break;
#ifndef NDEBUG
// These get ignored because they don't return anything.
- case PutScopedVar:
case DFG::Jump:
case Branch:
case Breakpoint:
- case Return:
case CheckHasInstance:
- case Phi:
- case Throw:
case ThrowReferenceError:
case ForceOSRExit:
case SetArgument:
- case PutByVal:
- case PutByValAlias:
- case PutById:
- case PutByIdDirect:
case CheckStructure:
case CheckFunction:
case PutStructure:
- case PutByOffset:
case TearOffActivation:
+ case CheckNumber:
+ changed |= mergeDefaultFlags(node);
break;
// These gets ignored because it doesn't do anything.
@@ -474,6 +634,7 @@ private:
break;
#else
default:
+ changed |= mergeDefaultFlags(node);
break;
#endif
}
@@ -484,6 +645,28 @@ private:
m_changed |= changed;
}
+
+ bool mergeDefaultFlags(Node& node)
+ {
+ bool changed = false;
+ if (node.flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild();
+ childIdx < node.firstChild() + node.numChildren();
+ childIdx++)
+ changed |= m_graph[m_graph.m_varArgChildren[childIdx]].mergeFlags(NodeUsedAsValue);
+ } else {
+ if (!node.child1())
+ return changed;
+ changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
+ if (!node.child2())
+ return changed;
+ changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsValue);
+ if (!node.child3())
+ return changed;
+ changed |= m_graph[node.child3()].mergeFlags(NodeUsedAsValue);
+ }
+ return changed;
+ }
void propagateForward()
{
@@ -502,10 +685,10 @@ private:
for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
propagate(m_graph[m_compileIndex]);
}
-
- void vote(NodeUse nodeUse, VariableAccessData::Ballot ballot)
+
+ void vote(Edge nodeUse, VariableAccessData::Ballot ballot)
{
- switch (m_graph[nodeUse].op) {
+ switch (m_graph[nodeUse].op()) {
case ValueToInt32:
case UInt32ToNumber:
nodeUse = m_graph[nodeUse].child1();
@@ -514,14 +697,16 @@ private:
break;
}
- if (m_graph[nodeUse].op == GetLocal)
+ if (m_graph[nodeUse].op() == GetLocal)
m_graph[nodeUse].variableAccessData()->vote(ballot);
}
void vote(Node& node, VariableAccessData::Ballot ballot)
{
- if (node.flags & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ if (node.flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild();
+ childIdx < node.firstChild() + node.numChildren();
+ childIdx++)
vote(m_graph.m_varArgChildren[childIdx], ballot);
return;
}
@@ -546,7 +731,7 @@ private:
m_graph.m_variableAccessData[i].find()->clearVotes();
for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex) {
Node& node = m_graph[m_compileIndex];
- switch (node.op) {
+ switch (node.op()) {
case ValueAdd:
case ArithAdd:
case ArithSub: {
@@ -576,7 +761,9 @@ private:
VariableAccessData::Ballot ballot;
- if (isNumberPrediction(left) && isNumberPrediction(right) && !(Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child1()]) && node.canSpeculateInteger()))
+ if (isNumberPrediction(left) && isNumberPrediction(right)
+ && !(Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child1()])
+ && node.canSpeculateInteger()))
ballot = VariableAccessData::VoteDouble;
else
ballot = VariableAccessData::VoteValue;
@@ -588,7 +775,8 @@ private:
case ArithAbs:
VariableAccessData::Ballot ballot;
- if (!(m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger()))
+ if (!(m_graph[node.child1()].shouldSpeculateInteger()
+ && node.canSpeculateInteger()))
ballot = VariableAccessData::VoteDouble;
else
ballot = VariableAccessData::VoteValue;
@@ -615,115 +803,25 @@ private:
}
}
for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
- VariableAccessData* variableAccessData = m_graph.m_variableAccessData[i].find();
+ VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i];
+ if (!variableAccessData->isRoot())
+ continue;
if (operandIsArgument(variableAccessData->local())
|| m_graph.isCaptured(variableAccessData->local()))
continue;
m_changed |= variableAccessData->tallyVotesForShouldUseDoubleFormat();
}
- }
-
- void fixupNode(Node& node)
- {
- if (!node.shouldGenerate())
- return;
-
- NodeType op = static_cast<NodeType>(node.op);
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
-#endif
-
- switch (op) {
- case GetById: {
- if (!isInt32Prediction(m_graph[m_compileIndex].prediction()))
- break;
- if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
- break;
- bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
- bool isString = isStringPrediction(m_graph[node.child1()].prediction());
- bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
- bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
- bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
- bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
- bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
- bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
- bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
- bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
- bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
- bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
- if (!isArray && !isString && !isByteArray && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
- break;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
-#endif
- if (isArray)
- node.op = GetArrayLength;
- else if (isString)
- node.op = GetStringLength;
- else if (isByteArray)
- node.op = GetByteArrayLength;
- else if (isInt8Array)
- node.op = GetInt8ArrayLength;
- else if (isInt16Array)
- node.op = GetInt16ArrayLength;
- else if (isInt32Array)
- node.op = GetInt32ArrayLength;
- else if (isUint8Array)
- node.op = GetUint8ArrayLength;
- else if (isUint8ClampedArray)
- node.op = GetUint8ClampedArrayLength;
- else if (isUint16Array)
- node.op = GetUint16ArrayLength;
- else if (isUint32Array)
- node.op = GetUint32ArrayLength;
- else if (isFloat32Array)
- node.op = GetFloat32ArrayLength;
- else if (isFloat64Array)
- node.op = GetFloat64ArrayLength;
- else
- ASSERT_NOT_REACHED();
- // No longer MustGenerate
- ASSERT(node.flags & NodeMustGenerate);
- node.flags &= ~NodeMustGenerate;
- m_graph.deref(m_compileIndex);
- break;
- }
- case GetIndexedPropertyStorage: {
- PredictedType basePrediction = m_graph[node.child2()].prediction();
- if (!(basePrediction & PredictInt32) && basePrediction) {
- node.setOpAndDefaultFlags(Nop);
- m_graph.clearAndDerefChild1(node);
- m_graph.clearAndDerefChild2(node);
- m_graph.clearAndDerefChild3(node);
- node.setRefCount(0);
- }
- break;
- }
- case GetByVal:
- case StringCharAt:
- case StringCharCodeAt: {
- if (!!node.child3() && m_graph[node.child3()].op == Nop)
- node.children.child3() = NodeUse();
- break;
- }
- default:
- break;
+ for (unsigned i = 0; i < m_graph.m_argumentPositions.size(); ++i)
+ m_changed |= m_graph.m_argumentPositions[i].mergeArgumentAwareness();
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
+ VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i];
+ if (!variableAccessData->isRoot())
+ continue;
+ if (operandIsArgument(variableAccessData->local())
+ || m_graph.isCaptured(variableAccessData->local()))
+ continue;
+ m_changed |= variableAccessData->makePredictionForDoubleFormat();
}
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog("\n");
-#endif
- }
-
- void fixup()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- dataLog("Performing Fixup\n");
-#endif
- for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
- fixupNode(m_graph[m_compileIndex]);
}
NodeIndex m_compileIndex;
diff --git a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp
index fb30de742..b16a72a7e 100644
--- a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp
@@ -55,7 +55,7 @@ public:
if (!node.shouldGenerate())
continue;
- switch (node.op) {
+ switch (node.op()) {
case GetLocal:
replacePhiChild(node, 0);
break;
@@ -95,7 +95,7 @@ private:
bool replaced = false;
NodeIndex child = node.children.child(childIndex).indexUnchecked();
- if (child != NoNode && m_graph[child].op == Phi) {
+ if (child != NoNode && m_graph[child].op() == Phi) {
NodeIndex childReplacement = getRedundantReplacement(child);
if (childReplacement != NoNode) {
node.children.child(childIndex).setIndex(childReplacement);
@@ -138,7 +138,7 @@ private:
for (size_t arg = 0; arg < basicBlock->variablesAtHead.numberOfArguments(); ++arg) {
NodeIndex nodeIndex = basicBlock->variablesAtHead.argument(arg);
- if (nodeIndex != NoNode && m_graph[nodeIndex].op == Phi && !m_graph[nodeIndex].refCount()) {
+ if (nodeIndex != NoNode && m_graph[nodeIndex].op() == Phi && !m_graph[nodeIndex].refCount()) {
NodeIndex replacement = getRedundantReplacement(nodeIndex);
if (replacement != NoNode) {
// This argument must be unused in this block.
@@ -151,7 +151,7 @@ private:
for (size_t local = 0; local < basicBlock->variablesAtHead.numberOfLocals(); ++local) {
NodeIndex nodeIndex = basicBlock->variablesAtHead.local(local);
- if (nodeIndex != NoNode && m_graph[nodeIndex].op == Phi && !m_graph[nodeIndex].refCount()) {
+ if (nodeIndex != NoNode && m_graph[nodeIndex].op() == Phi && !m_graph[nodeIndex].refCount()) {
NodeIndex replacement = getRedundantReplacement(nodeIndex);
if (replacement != NoNode) {
// This local variable must be unused in this block.
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
index edf3c9505..794538184 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -51,19 +51,19 @@ static void dfgRepatchByIdSelfAccess(CodeBlock* codeBlock, StructureStubInfo& st
repatchBuffer.relink(stubInfo.callReturnLocation, slowPathFunction);
// Patch the structure check & the offset of the load.
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.deltaCheckImmToCall), structure);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.dfg.deltaCheckImmToCall), structure);
#if USE(JSVALUE64)
if (compact)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToLoadOrStore), sizeof(JSValue) * offset);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), sizeof(JSValue) * offset);
else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToLoadOrStore), sizeof(JSValue) * offset);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), sizeof(JSValue) * offset);
#elif USE(JSVALUE32_64)
if (compact) {
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
} else {
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
}
#endif
}
@@ -102,7 +102,7 @@ static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratc
static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, StructureStubInfo& stubInfo, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases)
{
- linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase));
}
static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, size_t offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, MacroAssemblerCodeRef& stubRoutine)
@@ -111,12 +111,12 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
MacroAssembler stubJit;
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.dfg.baseGPR);
#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
bool needToRestoreScratch = false;
if (scratchGPR == InvalidGPRReg) {
@@ -167,12 +167,12 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
JSGlobalData* globalData = &exec->globalData();
if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.dfg.baseGPR);
#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
bool needToRestoreScratch = false;
MacroAssembler stubJit;
@@ -209,7 +209,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
stubInfo.stubRoutine = patchBuffer.finalizeCode();
RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
repatchBuffer.relink(stubInfo.callReturnLocation, operationGetById);
return true;
@@ -255,10 +255,10 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
ASSERT(slot.slotBase().isObject());
- generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase), stubInfo.stubRoutine);
+ generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase), stubInfo.stubRoutine);
RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
repatchBuffer.relink(stubInfo.callReturnLocation, operationGetByIdProtoBuildList);
stubInfo.initGetByIdChain(*globalData, codeBlock->ownerExecutable(), structure, prototypeChain);
@@ -280,7 +280,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
|| slot.slotBase() != baseValue)
return false;
- if (!stubInfo.registersFlushed) {
+ if (!stubInfo.patch.dfg.registersFlushed) {
// We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
// non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
// if registers were not flushed, don't do non-Value caching.
@@ -305,7 +305,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
listIndex = 0;
} else if (stubInfo.accessType == access_get_by_id_self) {
ASSERT(!stubInfo.stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), MacroAssemblerCodeRef::createSelfManagedCodeRef(stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase)), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
+ polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), MacroAssemblerCodeRef::createSelfManagedCodeRef(stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase)), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
stubInfo.initGetByIdSelfList(polymorphicStructureList, 1);
listIndex = 1;
} else {
@@ -316,12 +316,12 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
stubInfo.u.getByIdSelfList.listSize++;
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.dfg.baseGPR);
#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
CCallHelpers stubJit(globalData, codeBlock);
@@ -395,11 +395,11 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
if (listIndex)
lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code());
else
- lastProtoBegin = stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase);
+ lastProtoBegin = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase);
ASSERT(!!lastProtoBegin);
patchBuffer.link(wrongStruct, lastProtoBegin);
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone));
if (!isDirect) {
patchBuffer.link(operationCall, operationFunction);
patchBuffer.link(handlerCall, lookupExceptionHandlerInStub);
@@ -409,7 +409,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
- CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck);
+ CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck);
RepatchBuffer repatchBuffer(codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
@@ -471,11 +471,11 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I
MacroAssemblerCodeRef stubRoutine;
- generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone), lastProtoBegin, stubRoutine);
+ generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone), lastProtoBegin, stubRoutine);
polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure, true);
- CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck);
+ CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck);
RepatchBuffer repatchBuffer(codeBlock);
repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
@@ -540,12 +540,12 @@ static void emitPutReplaceStub(
MacroAssemblerCodeRef& stubRoutine)
{
JSGlobalData* globalData = &exec->globalData();
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.dfg.baseGPR);
#if USE(JSVALUE32_64)
- GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
- GPRReg valueGPR = static_cast<GPRReg>(stubInfo.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
bool needToRestoreScratch = false;
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
GPRReg scratchGPR2;
@@ -608,7 +608,7 @@ static void emitPutReplaceStub(
}
LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone));
patchBuffer.link(failure, failureLabel);
stubRoutine = patchBuffer.finalizeCode();
@@ -629,12 +629,12 @@ static void emitPutTransitionStub(
{
JSGlobalData* globalData = &exec->globalData();
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.dfg.baseGPR);
#if USE(JSVALUE32_64)
- GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
- GPRReg valueGPR = static_cast<GPRReg>(stubInfo.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
bool needToRestoreScratch = false;
ASSERT(scratchGPR != baseGPR);
@@ -699,7 +699,7 @@ static void emitPutTransitionStub(
success = stubJit.jump();
LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone));
if (needToRestoreScratch)
patchBuffer.link(failure, failureLabel);
else
@@ -741,11 +741,11 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier
emitPutTransitionStub(
exec, baseValue, ident, slot, stubInfo, putKind,
structure, oldStructure, prototypeChain,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase),
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase),
stubInfo.stubRoutine);
RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
repatchBuffer.relink(stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
stubInfo.initPutByIdTransition(*globalData, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
@@ -804,7 +804,7 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi
// We're now committed to creating the stub. Mogrify the meta-data accordingly.
list = PolymorphicPutByIdList::from(
putKind, stubInfo,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase));
emitPutTransitionStub(
exec, baseValue, propertyName, slot, stubInfo, putKind,
@@ -821,7 +821,7 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi
// We're now committed to creating the stub. Mogrify the meta-data accordingly.
list = PolymorphicPutByIdList::from(
putKind, stubInfo,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase));
emitPutReplaceStub(
exec, baseValue, propertyName, slot, stubInfo, putKind,
@@ -834,7 +834,7 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi
}
RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubRoutine.code()));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubRoutine.code()));
if (list->isFull())
repatchBuffer.relink(stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
@@ -877,14 +877,14 @@ void dfgLinkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCo
void dfgResetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
repatchBuffer.relink(stubInfo.callReturnLocation, operationGetByIdOptimize);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(uintptr_t)stubInfo.deltaCheckImmToCall), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(uintptr_t)stubInfo.patch.dfg.deltaCheckImmToCall), reinterpret_cast<void*>(-1));
#if USE(JSVALUE64)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), 0);
#else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToTagLoadOrStore), 0);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.deltaCallToPayloadLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), 0);
#endif
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase));
}
void dfgResetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
@@ -902,14 +902,14 @@ void dfgResetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
repatchBuffer.relink(stubInfo.callReturnLocation, optimizedFunction);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(uintptr_t)stubInfo.deltaCheckImmToCall), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(uintptr_t)stubInfo.patch.dfg.deltaCheckImmToCall), reinterpret_cast<void*>(-1));
#if USE(JSVALUE64)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), 0);
#else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToTagLoadOrStore), 0);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.deltaCallToPayloadLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), 0);
#endif
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase));
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
index 140de185b..578f2b147 100644
--- a/Source/JavaScriptCore/dfg/DFGScoreBoard.h
+++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
@@ -122,7 +122,7 @@ public:
m_free.append(index);
}
}
- void use(NodeUse child)
+ void use(Edge child)
{
use(child.indexUnchecked());
}
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 7bcb44576..18db85c22 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -28,7 +28,6 @@
#if ENABLE(DFG_JIT)
-#include "JSByteArray.h"
#include "LinkBuffer.h"
namespace JSC { namespace DFG {
@@ -60,12 +59,16 @@ GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex)
switch (info.registerFormat()) {
case DataFormatNone: {
- GPRReg gpr = allocate();
- ASSERT(info.spillFormat() == DataFormatStorage);
- m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
- info.fillStorage(gpr);
- return gpr;
+ if (info.spillFormat() == DataFormatStorage) {
+ GPRReg gpr = allocate();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillStorage(gpr);
+ return gpr;
+ }
+
+ // Must be a cell; fill it as a cell and then return the pointer.
+ return fillSpeculateCell(nodeIndex);
}
case DataFormatStorage: {
@@ -75,33 +78,31 @@ GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex)
}
default:
- ASSERT_NOT_REACHED();
+ return fillSpeculateCell(nodeIndex);
}
-
- return InvalidGPRReg;
}
void SpeculativeJIT::useChildren(Node& node)
{
- if (node.flags & NodeHasVarArgs) {
+ if (node.flags() & NodeHasVarArgs) {
for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
use(m_jit.graph().m_varArgChildren[childIdx]);
} else {
- NodeUse child1 = node.child1();
+ Edge child1 = node.child1();
if (!child1) {
ASSERT(!node.child2() && !node.child3());
return;
}
use(child1);
- NodeUse child2 = node.child2();
+ Edge child2 = node.child2();
if (!child2) {
ASSERT(!node.child3());
return;
}
use(child2);
- NodeUse child3 = node.child3();
+ Edge child3 = node.child3();
if (!child3)
return;
use(child3);
@@ -184,31 +185,6 @@ bool SpeculativeJIT::isKnownNotNumber(NodeIndex nodeIndex)
|| (node.hasConstant() && !isNumberConstant(nodeIndex));
}
-bool SpeculativeJIT::isKnownBoolean(NodeIndex nodeIndex)
-{
- Node& node = m_jit.graph()[nodeIndex];
- if (node.hasBooleanResult())
- return true;
-
- if (isBooleanConstant(nodeIndex))
- return true;
-
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
-
- return info.isJSBoolean();
-}
-
-bool SpeculativeJIT::isKnownNotBoolean(NodeIndex nodeIndex)
-{
- Node& node = m_jit.graph()[nodeIndex];
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
- if (node.hasConstant() && !valueOfJSConstant(nodeIndex).isBoolean())
- return true;
- return !(info.isJSBoolean() || info.isUnknownJS());
-}
-
void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
UNUSED_PARAM(jit);
@@ -249,7 +225,7 @@ void SpeculativeJIT::markCellCard(MacroAssembler& jit, GPRReg owner, GPRReg scra
#endif
}
-void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeUse valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
UNUSED_PARAM(ownerGPR);
UNUSED_PARAM(valueGPR);
@@ -325,7 +301,7 @@ void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUs
#endif
}
-void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, NodeUse valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
+void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
{
UNUSED_PARAM(owner);
UNUSED_PARAM(valueGPR);
@@ -774,6 +750,9 @@ void ValueSource::dump(FILE* out) const
case SourceNotSet:
fprintf(out, "NotSet");
break;
+ case SourceIsDead:
+ fprintf(out, "IsDead");
+ break;
case ValueInRegisterFile:
fprintf(out, "InRegFile");
break;
@@ -882,25 +861,52 @@ bool SpeculativeJIT::compilePeepHoleBranch(Node& node, MacroAssembler::Relationa
// so can be no intervening nodes to also reference the compare.
ASSERT(node.adjustedRefCount() == 1);
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))) {
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())))
compilePeepHoleIntegerBranch(node, branchNodeIndex, condition);
- use(node.child1());
- use(node.child2());
- } else if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
+ else if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2())))
compilePeepHoleDoubleBranch(node, branchNodeIndex, doubleCondition);
- use(node.child1());
- use(node.child2());
- } else if (node.op == CompareEq && Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2()))) {
- compilePeepHoleObjectEquality(node, branchNodeIndex, &JSFinalObject::s_info, isFinalObjectPrediction);
- use(node.child1());
- use(node.child2());
- } else if (node.op == CompareEq && Node::shouldSpeculateArray(at(node.child1()), at(node.child2()))) {
- compilePeepHoleObjectEquality(node, branchNodeIndex, &JSArray::s_info, isArrayPrediction);
- use(node.child1());
- use(node.child2());
- } else
+ else if (node.op() == CompareEq) {
+ if (Node::shouldSpeculateFinalObject(
+ at(node.child1()), at(node.child2()))) {
+ compilePeepHoleObjectEquality(
+ node, branchNodeIndex, &JSFinalObject::s_info,
+ isFinalObjectPrediction);
+ } else if (Node::shouldSpeculateArray(
+ at(node.child1()), at(node.child2()))) {
+ compilePeepHoleObjectEquality(
+ node, branchNodeIndex, &JSArray::s_info,
+ isArrayPrediction);
+ } else if (at(node.child1()).shouldSpeculateFinalObject()
+ && at(node.child2()).shouldSpeculateFinalObjectOrOther()) {
+ compilePeepHoleObjectToObjectOrOtherEquality(
+ node.child1(), node.child2(), branchNodeIndex,
+ &JSFinalObject::s_info, isFinalObjectPrediction);
+ } else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()
+ && at(node.child2()).shouldSpeculateFinalObject()) {
+ compilePeepHoleObjectToObjectOrOtherEquality(
+ node.child2(), node.child1(), branchNodeIndex,
+ &JSFinalObject::s_info, isFinalObjectPrediction);
+ } else if (at(node.child1()).shouldSpeculateArray()
+ && at(node.child2()).shouldSpeculateArrayOrOther()) {
+ compilePeepHoleObjectToObjectOrOtherEquality(
+ node.child1(), node.child2(), branchNodeIndex,
+ &JSArray::s_info, isArrayPrediction);
+ } else if (at(node.child1()).shouldSpeculateArrayOrOther()
+ && at(node.child2()).shouldSpeculateArray()) {
+ compilePeepHoleObjectToObjectOrOtherEquality(
+ node.child2(), node.child1(), branchNodeIndex,
+ &JSArray::s_info, isArrayPrediction);
+ } else {
+ nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
+ return true;
+ }
+ } else {
nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
+ return true;
+ }
+ use(node.child1());
+ use(node.child2());
m_indexInBlock = branchIndexInBlock;
m_compileIndex = branchNodeIndex;
return true;
@@ -910,7 +916,7 @@ bool SpeculativeJIT::compilePeepHoleBranch(Node& node, MacroAssembler::Relationa
void SpeculativeJIT::compileMovHint(Node& node)
{
- ASSERT(node.op == SetLocal);
+ ASSERT(node.op() == SetLocal);
setNodeIndexForOperand(node.child1().index(), node.local());
m_lastSetOperand = node.local();
@@ -927,6 +933,8 @@ void SpeculativeJIT::compile(BasicBlock& block)
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
m_jit.breakpoint();
#endif
+
+ m_jit.jitAssertHasValidCallFrame();
ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
for (size_t i = 0; i < m_arguments.size(); ++i) {
@@ -943,7 +951,9 @@ void SpeculativeJIT::compile(BasicBlock& block)
ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
for (size_t i = 0; i < m_variables.size(); ++i) {
NodeIndex nodeIndex = block.variablesAtHead.local(i);
- if (nodeIndex == NoNode || m_jit.graph().localIsCaptured(i))
+ if ((nodeIndex == NoNode || !at(nodeIndex).refCount()) && !m_jit.graph().localIsCaptured(i))
+ m_variables[i] = ValueSource(SourceIsDead);
+ else if (m_jit.graph().localIsCaptured(i))
m_variables[i] = ValueSource(ValueInRegisterFile);
else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat())
m_variables[i] = ValueSource(DoubleInRegisterFile);
@@ -969,7 +979,7 @@ void SpeculativeJIT::compile(BasicBlock& block)
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
- switch (node.op) {
+ switch (node.op()) {
case SetLocal:
compileMovHint(node);
break;
@@ -979,10 +989,16 @@ void SpeculativeJIT::compile(BasicBlock& block)
int argumentCountIncludingThis = inlineCallFrame->arguments.size();
for (int i = 0; i < argumentCountIncludingThis; ++i) {
ValueRecovery recovery = computeValueRecoveryFor(m_variables[inlineCallFrame->stackOffset + CallFrame::argumentOffsetIncludingThis(i)]);
- // The recovery cannot point to registers, since the call frame reification isn't
- // as smart as OSR, so it can't handle that. The exception is the this argument,
- // which we don't really need to be able to recover.
- ASSERT(!i || !recovery.isInRegisters());
+ // The recovery should refer either to something that has already been
+ // stored into the register file at the right place, or to a constant,
+ // since the Arguments code isn't smart enough to handle anything else.
+ // The exception is the this argument, which we don't really need to be
+ // able to recover.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("\nRecovery for argument %d: ", i);
+ recovery.dump(WTF::dataFile());
+#endif
+ ASSERT(!i || (recovery.isAlreadyInRegisterFile() || recovery.isConstant()));
inlineCallFrame->arguments[i] = recovery;
}
break;
@@ -1076,7 +1092,7 @@ void SpeculativeJIT::checkArgumentTypes()
for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
NodeIndex nodeIndex = m_jit.graph().m_arguments[i];
Node& node = at(nodeIndex);
- ASSERT(node.op == SetArgument);
+ ASSERT(node.op() == SetArgument);
if (!node.shouldGenerate()) {
// The argument is dead. We don't do any checks for such arguments.
continue;
@@ -1096,11 +1112,6 @@ void SpeculativeJIT::checkArgumentTypes()
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- } else if (isByteArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
} else if (isBooleanPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
@@ -1161,12 +1172,6 @@ void SpeculativeJIT::checkArgumentTypes()
speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- } else if (isByteArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
} else if (isBooleanPrediction(predictedType))
speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
else if (isInt8ArrayPrediction(predictedType)) {
@@ -1278,6 +1283,9 @@ void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
{
switch (valueSource.kind()) {
+ case SourceIsDead:
+ return ValueRecovery::constant(jsUndefined());
+
case ValueInRegisterFile:
return ValueRecovery::alreadyInRegisterFile();
@@ -1309,6 +1317,10 @@ ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSo
// Try to see if there is an alternate node that would contain the value we want.
// There are four possibilities:
//
+ // Int32ToDouble: We can use this in place of the original node, but
+ // we'd rather not; so we use it only if it is the only remaining
+ // live version.
+ //
// ValueToInt32: If the only remaining live version of the value is
// ValueToInt32, then we can use it.
//
@@ -1319,10 +1331,13 @@ ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSo
// The reverse of the above: This node could be a UInt32ToNumber, but its
// alternative is still alive. This means that the only remaining uses of
// the number would be fine with a UInt32 intermediate.
+ //
+ // DoubleAsInt32: Same as UInt32ToNumber.
+ //
bool found = false;
- if (nodePtr->op == UInt32ToNumber) {
+ if (nodePtr->op() == UInt32ToNumber || nodePtr->op() == DoubleAsInt32) {
NodeIndex nodeIndex = nodePtr->child1().index();
nodePtr = &at(nodeIndex);
infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
@@ -1331,8 +1346,10 @@ ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSo
}
if (!found) {
+ NodeIndex int32ToDoubleIndex = NoNode;
NodeIndex valueToInt32Index = NoNode;
NodeIndex uint32ToNumberIndex = NoNode;
+ NodeIndex doubleAsInt32Index = NoNode;
for (unsigned virtualRegister = 0; virtualRegister < m_generationInfo.size(); ++virtualRegister) {
GenerationInfo& info = m_generationInfo[virtualRegister];
@@ -1343,20 +1360,29 @@ ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSo
Node& node = at(info.nodeIndex());
if (node.child1Unchecked() != valueSource.nodeIndex())
continue;
- switch (node.op) {
+ switch (node.op()) {
+ case Int32ToDouble:
+ int32ToDoubleIndex = info.nodeIndex();
+ break;
case ValueToInt32:
valueToInt32Index = info.nodeIndex();
break;
case UInt32ToNumber:
uint32ToNumberIndex = info.nodeIndex();
break;
+ case DoubleAsInt32:
+ doubleAsInt32Index = info.nodeIndex();
default:
break;
}
}
NodeIndex nodeIndexToUse;
- if (valueToInt32Index != NoNode)
+ if (doubleAsInt32Index != NoNode)
+ nodeIndexToUse = doubleAsInt32Index;
+ else if (int32ToDoubleIndex != NoNode)
+ nodeIndexToUse = int32ToDoubleIndex;
+ else if (valueToInt32Index != NoNode)
nodeIndexToUse = valueToInt32Index;
else if (uint32ToNumberIndex != NoNode)
nodeIndexToUse = uint32ToNumberIndex;
@@ -1488,33 +1514,179 @@ void SpeculativeJIT::compileGetByValOnString(Node& node)
cellResult(scratchReg, m_compileIndex);
}
+GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(NodeIndex nodeIndex)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("checkGeneratedTypeForToInt32@%d ", nodeIndex);
+#endif
+ Node& node = at(nodeIndex);
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ if (info.registerFormat() == DataFormatNone) {
+ if (node.hasConstant()) {
+ if (isInt32Constant(nodeIndex))
+ return GeneratedOperandInteger;
+
+ if (isNumberConstant(nodeIndex))
+ return GeneratedOperandDouble;
+
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return GeneratedOperandTypeUnknown;
+ }
+
+ if (info.spillFormat() == DataFormatDouble)
+ return GeneratedOperandDouble;
+ }
+
+ switch (info.registerFormat()) {
+ case DataFormatBoolean: // This type never occurs.
+ case DataFormatStorage:
+ ASSERT_NOT_REACHED();
+
+ case DataFormatCell:
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ return GeneratedOperandTypeUnknown;
+
+ case DataFormatNone:
+ case DataFormatJSCell:
+ case DataFormatJS:
+ case DataFormatJSBoolean:
+ return GeneratedOperandJSValue;
+
+ case DataFormatJSInteger:
+ case DataFormatInteger:
+ return GeneratedOperandInteger;
+
+ case DataFormatJSDouble:
+ case DataFormatDouble:
+ return GeneratedOperandDouble;
+ }
+
+ ASSERT_NOT_REACHED();
+ return GeneratedOperandTypeUnknown;
+}
+
void SpeculativeJIT::compileValueToInt32(Node& node)
{
- if (at(node.child1()).shouldNotSpeculateInteger()) {
- if (at(node.child1()).shouldSpeculateDouble()) {
- SpeculateDoubleOperand op1(this, node.child1());
+ if (at(node.child1()).shouldSpeculateInteger()) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+ return;
+ }
+
+ if (at(node.child1()).shouldSpeculateNumber()) {
+ switch (checkGeneratedTypeForToInt32(node.child1().index())) {
+ case GeneratedOperandInteger: {
+ SpeculateIntegerOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+ m_jit.move(op1.gpr(), result.gpr());
+ integerResult(result.gpr(), m_compileIndex, op1.format());
+ return;
+ }
+ case GeneratedOperandDouble: {
GPRTemporary result(this);
+ DoubleOperand op1(this, node.child1());
FPRReg fpr = op1.fpr();
GPRReg gpr = result.gpr();
JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
-
+
silentSpillAllRegisters(gpr);
callOperation(toInt32, gpr, fpr);
silentFillAllRegisters(gpr);
-
+
truncatedToInteger.link(&m_jit);
integerResult(gpr, m_compileIndex);
return;
}
- // Do it the safe way.
- nonSpeculativeValueToInt32(node);
+ case GeneratedOperandJSValue: {
+ GPRTemporary result(this);
+#if USE(JSVALUE64)
+ JSValueOperand op1(this, node.child1());
+
+ GPRReg gpr = op1.gpr();
+ GPRReg resultGpr = result.gpr();
+ FPRTemporary tempFpr(this);
+ FPRReg fpr = tempFpr.fpr();
+
+ JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+
+ speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
+
+ // First, if we get here we have a double encoded as a JSValue
+ m_jit.move(gpr, resultGpr);
+ unboxDouble(resultGpr, fpr);
+
+ silentSpillAllRegisters(resultGpr);
+ callOperation(toInt32, resultGpr, fpr);
+ silentFillAllRegisters(resultGpr);
+
+ JITCompiler::Jump converted = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.zeroExtend32ToPtr(gpr, resultGpr);
+
+ converted.link(&m_jit);
+#else
+ Node& childNode = at(node.child1().index());
+ VirtualRegister virtualRegister = childNode.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ JSValueOperand op1(this, node.child1());
+
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg resultGpr = result.gpr();
+
+ if (info.registerFormat() == DataFormatJSInteger)
+ m_jit.move(payloadGPR, resultGpr);
+ else {
+ GPRReg tagGPR = op1.tagGPR();
+ FPRTemporary tempFpr(this);
+ FPRReg fpr = tempFpr.fpr();
+ FPRTemporary scratch(this);
+
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), node.child1().index(), m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
+
+ unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
+
+ silentSpillAllRegisters(resultGpr);
+ callOperation(toInt32, resultGpr, fpr);
+ silentFillAllRegisters(resultGpr);
+
+ JITCompiler::Jump converted = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.move(payloadGPR, resultGpr);
+
+ converted.link(&m_jit);
+ }
+#endif
+ integerResult(resultGpr, m_compileIndex);
+ return;
+ }
+ case GeneratedOperandTypeUnknown:
+ ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ if (at(node.child1()).shouldSpeculateBoolean()) {
+ SpeculateBooleanOperand op1(this, node.child1());
+ GPRTemporary result(this, op1);
+
+ m_jit.and32(JITCompiler::TrustedImm32(1), op1.gpr());
+
+ integerResult(op1.gpr(), m_compileIndex);
return;
}
- SpeculateIntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
- m_jit.move(op1.gpr(), result.gpr());
- integerResult(result.gpr(), m_compileIndex, op1.format());
+ // Do it the safe way.
+ nonSpeculativeValueToInt32(node);
+ return;
}
void SpeculativeJIT::compileUInt32ToNumber(Node& node)
@@ -1547,26 +1719,105 @@ void SpeculativeJIT::compileUInt32ToNumber(Node& node)
// instruction that follows us, rather than the one we're executing right now. We have
// to do this because by this point, the original values necessary to compile whatever
// operation the UInt32ToNumber originated from might be dead.
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)));
-
- // Verify that we can do roll forward.
- ASSERT(at(m_compileIndex + 1).op == SetLocal);
- ASSERT(at(m_compileIndex + 1).codeOrigin == node.codeOrigin);
- ASSERT(at(m_compileIndex + 2).codeOrigin != node.codeOrigin);
-
- // Now do the magic.
- OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
- Node& setLocal = at(m_compileIndex + 1);
- exit.m_codeOrigin = at(m_compileIndex + 2).codeOrigin;
- exit.m_lastSetOperand = setLocal.local();
-
- // Create the value recovery, and stuff it into the right place.
- exit.valueRecoveryForOperand(setLocal.local()) = ValueRecovery::uint32InGPR(op1.gpr());
+ forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(op1.gpr()));
m_jit.move(op1.gpr(), result.gpr());
integerResult(result.gpr(), m_compileIndex, op1.format());
}
+void SpeculativeJIT::compileDoubleAsInt32(Node& node)
+{
+ SpeculateDoubleOperand op1(this, node.child1());
+ FPRTemporary scratch(this);
+ GPRTemporary result(this);
+
+ FPRReg valueFPR = op1.fpr();
+ FPRReg scratchFPR = scratch.fpr();
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::JumpList failureCases;
+ m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR);
+ forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, failureCases, ValueRecovery::inFPR(valueFPR));
+
+ integerResult(resultGPR, m_compileIndex);
+}
+
+void SpeculativeJIT::compileInt32ToDouble(Node& node)
+{
+#if USE(JSVALUE64)
+ // On JSVALUE64 we have a way of loading double constants in a more direct manner
+ // than a int->double conversion. On 32_64, unfortunately, we currently don't have
+ // any such mechanism - though we could have it, if we just provisioned some memory
+ // in CodeBlock for the double form of integer constants.
+ if (at(node.child1()).hasConstant()) {
+ ASSERT(isInt32Constant(node.child1().index()));
+ FPRTemporary result(this);
+ GPRTemporary temp(this);
+ m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(node.child1().index())))), temp.gpr());
+ m_jit.movePtrToDouble(temp.gpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ return;
+ }
+#endif
+
+ if (isInt32Prediction(m_state.forNode(node.child1()).m_type)) {
+ SpeculateIntegerOperand op1(this, node.child1());
+ FPRTemporary result(this);
+ m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
+ doubleResult(result.fpr(), m_compileIndex);
+ return;
+ }
+
+ JSValueOperand op1(this, node.child1());
+ FPRTemporary result(this);
+
+#if USE(JSVALUE64)
+ GPRTemporary temp(this);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg tempGPR = temp.gpr();
+ FPRReg resultFPR = result.fpr();
+
+ JITCompiler::Jump isInteger = m_jit.branchPtr(
+ MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
+
+ speculationCheck(
+ BadType, JSValueRegs(op1GPR), node.child1(),
+ m_jit.branchTestPtr(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
+
+ m_jit.move(op1GPR, tempGPR);
+ unboxDouble(tempGPR, resultFPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op1GPR, resultFPR);
+ done.link(&m_jit);
+#else
+ FPRTemporary temp(this);
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ FPRReg tempFPR = temp.fpr();
+ FPRReg resultFPR = result.fpr();
+
+ JITCompiler::Jump isInteger = m_jit.branch32(
+ MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ speculationCheck(
+ BadType, JSValueRegs(op1TagGPR, op1PayloadGPR), node.child1(),
+ m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
+
+ unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
+ done.link(&m_jit);
+#endif
+
+ doubleResult(resultFPR, m_compileIndex);
+}
+
static double clampDoubleToByte(double d)
{
d += 0.5;
@@ -1619,85 +1870,6 @@ static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg sou
}
-void SpeculativeJIT::compilePutByValForByteArray(GPRReg base, GPRReg property, Node& node)
-{
- NodeUse baseUse = node.child1();
- NodeUse valueUse = node.child3();
-
- if (!isByteArrayPrediction(m_state.forNode(baseUse).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(base), baseUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
- GPRTemporary value;
- GPRReg valueGPR;
-
- if (at(valueUse).isConstant()) {
- JSValue jsValue = valueOfJSConstant(valueUse.index());
- if (!jsValue.isNumber()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
- return;
- }
- int clampedValue = clampDoubleToByte(jsValue.asNumber());
- GPRTemporary scratch(this);
- GPRReg scratchReg = scratch.gpr();
- m_jit.move(Imm32(clampedValue), scratchReg);
- value.adopt(scratch);
- valueGPR = scratchReg;
- } else if (!at(valueUse).shouldNotSpeculateInteger()) {
- SpeculateIntegerOperand valueOp(this, valueUse);
- GPRTemporary scratch(this);
- GPRReg scratchReg = scratch.gpr();
- m_jit.move(valueOp.gpr(), scratchReg);
- compileClampIntegerToByte(m_jit, scratchReg);
- value.adopt(scratch);
- valueGPR = scratchReg;
- } else {
- SpeculateDoubleOperand valueOp(this, valueUse);
- GPRTemporary result(this);
- FPRTemporary floatScratch(this);
- FPRReg fpr = valueOp.fpr();
- GPRReg gpr = result.gpr();
- compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
- value.adopt(result);
- valueGPR = gpr;
- }
- ASSERT_UNUSED(valueGPR, valueGPR != property);
- ASSERT(valueGPR != base);
- GPRTemporary storage(this);
- GPRReg storageReg = storage.gpr();
- ASSERT(valueGPR != storageReg);
- m_jit.loadPtr(MacroAssembler::Address(base, JSByteArray::offsetOfStorage()), storageReg);
- MacroAssembler::Jump outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(storageReg, ByteArray::offsetOfSize()));
- m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne, ByteArray::offsetOfData()));
- outOfBounds.link(&m_jit);
- noResult(m_compileIndex);
-}
-
-void SpeculativeJIT::compileGetByValOnByteArray(Node& node)
-{
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
-
- GPRReg baseReg = base.gpr();
- GPRReg propertyReg = property.gpr();
-
- if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type)) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
- return;
- }
-
- // Load the character into scratchReg
- GPRTemporary storage(this);
- GPRReg storageReg = storage.gpr();
- m_jit.loadPtr(MacroAssembler::Address(baseReg, JSByteArray::offsetOfStorage()), storageReg);
-
- // unsigned comparison so we can filter out negative indices and indices that are too large
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ByteArray::offsetOfSize())));
-
- m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, ByteArray::offsetOfData()), storageReg);
- integerResult(storageReg, m_compileIndex);
-}
-
void SpeculativeJIT::compileGetTypedArrayLength(const TypedArrayDescriptor& descriptor, Node& node, bool needsSpeculationCheck)
{
SpeculateCellOperand base(this, node.child1());
@@ -1758,22 +1930,30 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor&
ASSERT_NOT_REACHED();
}
outOfBounds.link(&m_jit);
- if (elementSize < 4 || signedness == SignedTypedArray)
+ if (elementSize < 4 || signedness == SignedTypedArray) {
integerResult(resultReg, m_compileIndex);
- else {
- FPRTemporary fresult(this);
- m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
- JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
- m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
- positive.link(&m_jit);
- doubleResult(fresult.fpr(), m_compileIndex);
+ return;
}
+
+ ASSERT(elementSize == 4 && signedness == UnsignedTypedArray);
+ if (node.shouldSpeculateInteger()) {
+ forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
+ integerResult(resultReg, m_compileIndex);
+ return;
+ }
+
+ FPRTemporary fresult(this);
+ m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
+ m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
+ positive.link(&m_jit);
+ doubleResult(fresult.fpr(), m_compileIndex);
}
void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
- NodeUse baseUse = node.child1();
- NodeUse valueUse = node.child3();
+ Edge baseUse = node.child1();
+ Edge valueUse = node.child3();
if (speculationRequirements != NoTypedArrayTypeSpecCheck)
speculationCheck(BadType, JSValueSource::unboxedCell(base), baseUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
@@ -1797,7 +1977,7 @@ void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor&
m_jit.move(Imm32(static_cast<int>(d)), scratchReg);
value.adopt(scratch);
valueGPR = scratchReg;
- } else if (!at(valueUse).shouldNotSpeculateInteger()) {
+ } else if (at(valueUse).shouldSpeculateInteger()) {
SpeculateIntegerOperand valueOp(this, valueUse);
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
@@ -1918,8 +2098,8 @@ void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor
void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements)
{
- NodeUse baseUse = node.child1();
- NodeUse valueUse = node.child3();
+ Edge baseUse = node.child1();
+ Edge valueUse = node.child3();
SpeculateDoubleOperand valueOp(this, valueUse);
@@ -1999,8 +2179,12 @@ void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg p
void SpeculativeJIT::compileInstanceOf(Node& node)
{
- if (!!(at(node.child1()).prediction() & ~PredictCell) && !!(m_state.forNode(node.child1()).m_type & ~PredictCell)) {
+ if ((!!(at(node.child1()).prediction() & ~PredictCell)
+ && !!(m_state.forNode(node.child1()).m_type & ~PredictCell))
+ || at(node.child1()).adjustedRefCount() == 1) {
// It might not be a cell. Speculate less aggressively.
+ // Or: it might only be used once (i.e. by us), so we get zero benefit
+ // from speculating any more aggressively than we absolutely need to.
JSValueOperand value(this, node.child1());
SpeculateCellOperand prototype(this, node.child3());
@@ -2055,172 +2239,139 @@ void SpeculativeJIT::compileInstanceOf(Node& node)
#endif
}
-static bool isPowerOfTwo(int32_t num)
-{
- return num && !(num & (num - 1));
-}
-
void SpeculativeJIT::compileSoftModulo(Node& node)
{
- bool shouldGeneratePowerOfTwoCheck = true;
-
// In the fast path, the dividend value could be the final result
// (in case of |dividend| < |divisor|), so we speculate it as strict int32.
SpeculateStrictInt32Operand op1(this, node.child1());
- GPRReg op1Gpr = op1.gpr();
-
+#if CPU(X86) || CPU(X86_64)
if (isInt32Constant(node.child2().index())) {
int32_t divisor = valueOfInt32Constant(node.child2().index());
- if (divisor < 0)
- divisor = -divisor;
-
- if (isPowerOfTwo(divisor)) {
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
- m_jit.move(op1Gpr, resultGPR);
- JITCompiler::Jump positiveDividend = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1Gpr, TrustedImm32(0));
- m_jit.neg32(resultGPR);
- m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
- m_jit.neg32(resultGPR);
- JITCompiler::Jump done = m_jit.jump();
-
- positiveDividend.link(&m_jit);
- m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
-
- done.link(&m_jit);
- integerResult(resultGPR, m_compileIndex);
- return;
- }
-#if CPU(X86) || CPU(X86_64)
if (divisor) {
+ GPRReg op1Gpr = op1.gpr();
+
GPRTemporary eax(this, X86Registers::eax);
GPRTemporary edx(this, X86Registers::edx);
GPRTemporary scratch(this);
GPRReg scratchGPR = scratch.gpr();
+ GPRReg op1SaveGPR;
+ if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
+ op1SaveGPR = allocate();
+ ASSERT(op1Gpr != op1SaveGPR);
+ m_jit.move(op1Gpr, op1SaveGPR);
+ } else
+ op1SaveGPR = op1Gpr;
+ ASSERT(op1SaveGPR != X86Registers::eax);
+ ASSERT(op1SaveGPR != X86Registers::edx);
+
m_jit.move(op1Gpr, eax.gpr());
m_jit.move(TrustedImm32(divisor), scratchGPR);
+ if (divisor == -1)
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(JITCompiler::Equal, eax.gpr(), TrustedImm32(-2147483647-1)));
m_jit.assembler().cdq();
m_jit.assembler().idivl_r(scratchGPR);
+ // Check that we're not about to create negative zero.
+ // FIXME: if the node use doesn't care about neg zero, we can do this more easily.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
+ numeratorPositive.link(&m_jit);
+
+ if (op1SaveGPR != op1Gpr)
+ unlock(op1SaveGPR);
+
integerResult(edx.gpr(), m_compileIndex);
return;
}
-#endif
- // Fallback to non-constant case but avoid unnecessary checks.
- shouldGeneratePowerOfTwoCheck = false;
}
+#endif
SpeculateIntegerOperand op2(this, node.child2());
- GPRReg op2Gpr = op2.gpr();
-
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2Gpr));
-
#if CPU(X86) || CPU(X86_64)
GPRTemporary eax(this, X86Registers::eax);
GPRTemporary edx(this, X86Registers::edx);
- GPRReg temp2 = InvalidGPRReg;
- if (op2Gpr == X86Registers::eax || op2Gpr == X86Registers::edx) {
- temp2 = allocate();
- m_jit.move(op2Gpr, temp2);
- op2Gpr = temp2;
- }
- GPRReg resultGPR = edx.gpr();
- GPRReg scratchGPR = eax.gpr();
-#else
- GPRTemporary result(this);
- GPRTemporary scratch(this);
- GPRTemporary scratch3(this);
- GPRReg scratchGPR3 = scratch3.gpr();
- GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR = scratch.gpr();
-#endif
-
- GPRTemporary scratch2(this);
- GPRReg scratchGPR2 = scratch2.gpr();
- JITCompiler::JumpList exitBranch;
-
- // resultGPR is to hold the ABS value of the dividend before final result is produced
- m_jit.move(op1Gpr, resultGPR);
- // scratchGPR2 is to hold the ABS value of the divisor
- m_jit.move(op2Gpr, scratchGPR2);
-
- // Check for negative result remainder
- // According to ECMA-262, the sign of the result equals the sign of the dividend
- JITCompiler::Jump positiveDividend = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1Gpr, TrustedImm32(0));
- m_jit.neg32(resultGPR);
- m_jit.move(TrustedImm32(1), scratchGPR);
- JITCompiler::Jump saveCondition = m_jit.jump();
-
- positiveDividend.link(&m_jit);
- m_jit.move(TrustedImm32(0), scratchGPR);
-
- // Save the condition for negative remainder
- saveCondition.link(&m_jit);
- m_jit.push(scratchGPR);
-
- JITCompiler::Jump positiveDivisor = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op2Gpr, TrustedImm32(0));
- m_jit.neg32(scratchGPR2);
-
- positiveDivisor.link(&m_jit);
- exitBranch.append(m_jit.branch32(JITCompiler::LessThan, resultGPR, scratchGPR2));
-
- // Power of two fast case
- if (shouldGeneratePowerOfTwoCheck) {
- m_jit.move(scratchGPR2, scratchGPR);
- m_jit.sub32(TrustedImm32(1), scratchGPR);
- JITCompiler::Jump notPowerOfTwo = m_jit.branchTest32(JITCompiler::NonZero, scratchGPR, scratchGPR2);
- m_jit.and32(scratchGPR, resultGPR);
- exitBranch.append(m_jit.jump());
-
- notPowerOfTwo.link(&m_jit);
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ GPRReg op2TempGPR;
+ GPRReg temp;
+ GPRReg op1SaveGPR;
+
+ if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
+ op2TempGPR = allocate();
+ temp = op2TempGPR;
+ } else {
+ op2TempGPR = InvalidGPRReg;
+ if (op1GPR == X86Registers::eax)
+ temp = X86Registers::edx;
+ else
+ temp = X86Registers::eax;
}
-
-#if CPU(X86) || CPU(X86_64)
- m_jit.move(resultGPR, eax.gpr());
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(scratchGPR2);
-#elif CPU(ARM_THUMB2)
- m_jit.countLeadingZeros32(scratchGPR2, scratchGPR);
- m_jit.countLeadingZeros32(resultGPR, scratchGPR3);
- m_jit.sub32(scratchGPR3, scratchGPR);
-
- JITCompiler::Jump useFullTable = m_jit.branch32(JITCompiler::Equal, scratchGPR, TrustedImm32(31));
-
- m_jit.neg32(scratchGPR);
- m_jit.add32(TrustedImm32(31), scratchGPR);
-
- int elementSizeByShift = -1;
- elementSizeByShift = 3;
- m_jit.relativeTableJump(scratchGPR, elementSizeByShift);
-
- useFullTable.link(&m_jit);
- // Modulo table
- for (int i = 31; i > 0; --i) {
- ShiftTypeAndAmount shift(SRType_LSL, i);
- m_jit.assembler().sub_S(scratchGPR, resultGPR, scratchGPR2, shift);
- m_jit.assembler().it(ARMv7Assembler::ConditionCS);
- m_jit.assembler().mov(resultGPR, scratchGPR);
+
+ if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
+ op1SaveGPR = allocate();
+ ASSERT(op1GPR != op1SaveGPR);
+ m_jit.move(op1GPR, op1SaveGPR);
+ } else
+ op1SaveGPR = op1GPR;
+
+ ASSERT(temp != op1GPR);
+ ASSERT(temp != op2GPR);
+ ASSERT(op1SaveGPR != X86Registers::eax);
+ ASSERT(op1SaveGPR != X86Registers::edx);
+
+ m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
+
+ JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
+
+ JITCompiler::Jump done;
+ // FIXME: if the node is not used as number then we can do this more easily.
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
+
+ safeDenominator.link(&m_jit);
+
+ if (op2TempGPR != InvalidGPRReg) {
+ m_jit.move(op2GPR, op2TempGPR);
+ op2GPR = op2TempGPR;
}
+
+ m_jit.move(op1GPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2GPR);
+
+ if (op2TempGPR != InvalidGPRReg)
+ unlock(op2TempGPR);
- JITCompiler::Jump lower = m_jit.branch32(JITCompiler::Below, resultGPR, scratchGPR2);
- m_jit.sub32(scratchGPR2, resultGPR);
- lower.link(&m_jit);
+ // Check that we're not about to create negative zero.
+ // FIXME: if the node use doesn't care about neg zero, we can do this more easily.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
+ numeratorPositive.link(&m_jit);
+
+ if (op1SaveGPR != op1GPR)
+ unlock(op1SaveGPR);
+
+ integerResult(edx.gpr(), m_compileIndex);
+#else // CPU(X86) || CPU(X86_64) --> so not X86
+ // Do this the *safest* way possible: call out to a C function that will do the modulo,
+ // and then attempt to convert back.
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ FPRResult result(this);
+
+ flushRegisters();
+ callOperation(operationFModOnInts, result.fpr(), op1GPR, op2GPR);
+
+ FPRTemporary scratch(this);
+ GPRTemporary intResult(this);
+ JITCompiler::JumpList failureCases;
+ m_jit.branchConvertDoubleToInt32(result.fpr(), intResult.gpr(), failureCases, scratch.fpr());
+ speculationCheck(Overflow, JSValueRegs(), NoNode, failureCases);
+
+ integerResult(intResult.gpr(), m_compileIndex);
#endif // CPU(X86) || CPU(X86_64)
-
- exitBranch.link(&m_jit);
-
- // Check for negative remainder
- m_jit.pop(scratchGPR);
- JITCompiler::Jump positiveResult = m_jit.branch32(JITCompiler::Equal, scratchGPR, TrustedImm32(0));
- m_jit.neg32(resultGPR);
- positiveResult.link(&m_jit);
-
- integerResult(resultGPR, m_compileIndex);
-
-#if CPU(X86) || CPU(X86_64)
- if (temp2 != InvalidGPRReg)
- unlock(temp2);
-#endif
}
void SpeculativeJIT::compileAdd(Node& node)
@@ -2299,7 +2450,7 @@ void SpeculativeJIT::compileAdd(Node& node)
return;
}
- if (node.op == ValueAdd) {
+ if (node.op() == ValueAdd) {
compileValueAdd(node);
return;
}
@@ -2444,9 +2595,85 @@ void SpeculativeJIT::compileArithMul(Node& node)
doubleResult(result.fpr(), m_compileIndex);
}
+#if CPU(X86) || CPU(X86_64)
+void SpeculativeJIT::compileIntegerArithDivForX86(Node& node)
+{
+ SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op2(this, node.child2());
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
+ GPRReg op2TempGPR;
+ GPRReg temp;
+ if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
+ op2TempGPR = allocate();
+ temp = op2TempGPR;
+ } else {
+ op2TempGPR = InvalidGPRReg;
+ if (op1GPR == X86Registers::eax)
+ temp = X86Registers::edx;
+ else
+ temp = X86Registers::eax;
+ }
+
+ ASSERT(temp != op1GPR);
+ ASSERT(temp != op2GPR);
+
+ m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
+
+ JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
+
+ JITCompiler::Jump done;
+ if (nodeUsedAsNumber(node.arithNodeFlags())) {
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
+ } else {
+ JITCompiler::Jump zero = m_jit.branchTest32(JITCompiler::Zero, op2GPR);
+ JITCompiler::Jump notNeg2ToThe31 = m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1));
+ zero.link(&m_jit);
+ m_jit.move(TrustedImm32(0), eax.gpr());
+ done = m_jit.jump();
+ notNeg2ToThe31.link(&m_jit);
+ }
+
+ safeDenominator.link(&m_jit);
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
+ if (op2TempGPR != InvalidGPRReg) {
+ m_jit.move(op2GPR, op2TempGPR);
+ op2GPR = op2TempGPR;
+ }
+
+ m_jit.move(op1GPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2GPR);
+
+ if (op2TempGPR != InvalidGPRReg)
+ unlock(op2TempGPR);
+
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
+ if (nodeUsedAsNumber(node.arithNodeFlags()))
+ speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
+ else
+ done.link(&m_jit);
+
+ integerResult(eax.gpr(), m_compileIndex);
+}
+#endif // CPU(X86) || CPU(X86_64)
+
void SpeculativeJIT::compileArithMod(Node& node)
{
- if (!at(node.child1()).shouldNotSpeculateInteger() && !at(node.child2()).shouldNotSpeculateInteger()
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))
&& node.canSpeculateInteger()) {
compileSoftModulo(node);
return;
@@ -2473,21 +2700,65 @@ bool SpeculativeJIT::compare(Node& node, MacroAssembler::RelationalCondition con
if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
return true;
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())))
+ if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))) {
compileIntegerCompare(node, condition);
- else if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2())))
+ return false;
+ }
+
+ if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
compileDoubleCompare(node, doubleCondition);
- else if (node.op == CompareEq && Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2())))
- compileObjectEquality(node, &JSFinalObject::s_info, isFinalObjectPrediction);
- else if (node.op == CompareEq && Node::shouldSpeculateArray(at(node.child1()), at(node.child2())))
- compileObjectEquality(node, &JSArray::s_info, isArrayPrediction);
- else
- nonSpeculativeNonPeepholeCompare(node, condition, operation);
+ return false;
+ }
+ if (node.op() == CompareEq) {
+ if (Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2()))) {
+ compileObjectEquality(node, &JSFinalObject::s_info, isFinalObjectPrediction);
+ return false;
+ }
+
+ if (Node::shouldSpeculateArray(at(node.child1()), at(node.child2()))) {
+ compileObjectEquality(node, &JSArray::s_info, isArrayPrediction);
+ return false;
+ }
+
+ if (at(node.child1()).shouldSpeculateFinalObject()
+ && at(node.child2()).shouldSpeculateFinalObjectOrOther()) {
+ compileObjectToObjectOrOtherEquality(
+ node.child1(), node.child2(), &JSFinalObject::s_info,
+ isFinalObjectPrediction);
+ return false;
+ }
+
+ if (at(node.child1()).shouldSpeculateFinalObjectOrOther()
+ && at(node.child2()).shouldSpeculateFinalObject()) {
+ compileObjectToObjectOrOtherEquality(
+ node.child2(), node.child1(), &JSFinalObject::s_info,
+ isFinalObjectPrediction);
+ return false;
+ }
+
+ if (at(node.child1()).shouldSpeculateArray()
+ && at(node.child2()).shouldSpeculateArrayOrOther()) {
+ compileObjectToObjectOrOtherEquality(
+ node.child1(), node.child2(), &JSArray::s_info,
+ isArrayPrediction);
+ return false;
+ }
+
+ if (at(node.child1()).shouldSpeculateArrayOrOther()
+ && at(node.child2()).shouldSpeculateArray()) {
+ compileObjectToObjectOrOtherEquality(
+ node.child2(), node.child1(), &JSArray::s_info,
+ isArrayPrediction);
+ return false;
+ }
+ }
+
+ nonSpeculativeNonPeepholeCompare(node, condition, operation);
return false;
}
-bool SpeculativeJIT::compileStrictEqForConstant(Node& node, NodeUse value, JSValue constant)
+bool SpeculativeJIT::compileStrictEqForConstant(Node& node, Edge value, JSValue constant)
{
JSValueOperand op1(this, value);
@@ -2652,7 +2923,7 @@ bool SpeculativeJIT::compileStrictEq(Node& node)
void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
{
if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
return;
}
@@ -2679,10 +2950,6 @@ void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest32(MacroAssembler::Zero, storageReg));
m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
- } else if (at(node.child1()).shouldSpeculateByteArray()) {
- if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, JSByteArray::offsetOfStorage()), storageReg);
} else if (at(node.child1()).shouldSpeculateInt8Array()) {
const TypedArrayDescriptor& descriptor = m_jit.globalData()->int8ArrayDescriptor();
if (!isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type))
@@ -2758,6 +3025,46 @@ void SpeculativeJIT::compileNewFunctionExpression(Node& node)
cellResult(resultGPR, m_compileIndex);
}
+bool SpeculativeJIT::compileRegExpExec(Node& node)
+{
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock == UINT_MAX)
+ return false;
+ NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ ASSERT(node.adjustedRefCount() == 1);
+
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ bool invert = false;
+ if (taken == (m_block + 1)) {
+ invert = true;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand argument(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
+
+ branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
+ jump(notTaken);
+
+ use(node.child1());
+ use(node.child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_compileIndex = branchNodeIndex;
+
+ return true;
+}
+
} } // namespace JSC::DFG
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 1744a03f3..dbfaec4f8 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -54,6 +54,7 @@ enum ValueSourceKind {
CellInRegisterFile,
BooleanInRegisterFile,
DoubleInRegisterFile,
+ SourceIsDead,
HaveNode
};
@@ -81,7 +82,7 @@ public:
{
if (isInt32Prediction(prediction))
return ValueSource(Int32InRegisterFile);
- if (isArrayPrediction(prediction) || isByteArrayPrediction(prediction))
+ if (isArrayPrediction(prediction))
return ValueSource(CellInRegisterFile);
if (isBooleanPrediction(prediction))
return ValueSource(BooleanInRegisterFile);
@@ -123,7 +124,10 @@ private:
NodeIndex m_nodeIndex;
};
-
+
+
+enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue};
+
// === SpeculativeJIT ===
//
// The SpeculativeJIT is used to generate a fast, but potentially
@@ -181,7 +185,7 @@ public:
{
return m_jit.graph()[nodeIndex];
}
- Node& at(NodeUse nodeUse)
+ Node& at(Edge nodeUse)
{
return at(nodeUse.index());
}
@@ -221,7 +225,7 @@ public:
GenerationInfo& info = m_generationInfo[virtualRegister];
return info.canReuse();
}
- bool canReuse(NodeUse nodeUse)
+ bool canReuse(Edge nodeUse)
{
return canReuse(nodeUse.index());
}
@@ -325,7 +329,7 @@ public:
m_gprs.release(info.gpr());
#endif
}
- void use(NodeUse nodeUse)
+ void use(Edge nodeUse)
{
use(nodeUse.index());
}
@@ -333,25 +337,13 @@ public:
static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);
- void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, NodeUse valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
+ void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
- void writeBarrier(JSCell* owner, GPRReg valueGPR, NodeUse valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);
+ void writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);
static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
{
- if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
- return GPRInfo::regT0;
-
- if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
- return GPRInfo::regT1;
-
- if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
- return GPRInfo::regT2;
-
- if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
- return GPRInfo::regT3;
-
- return GPRInfo::regT4;
+ return AssemblyHelpers::selectScratchGPR(preserve1, preserve2, preserve3, preserve4);
}
// Called by the speculative operand types, below, to fill operand to
@@ -361,6 +353,7 @@ public:
FPRReg fillSpeculateDouble(NodeIndex);
GPRReg fillSpeculateCell(NodeIndex);
GPRReg fillSpeculateBoolean(NodeIndex);
+ GeneratedOperandType checkGeneratedTypeForToInt32(NodeIndex);
private:
void compile(Node&);
@@ -730,9 +723,6 @@ private:
bool isKnownNotInteger(NodeIndex);
bool isKnownNotNumber(NodeIndex);
- bool isKnownBoolean(NodeIndex);
- bool isKnownNotBoolean(NodeIndex);
-
bool isKnownNotCell(NodeIndex);
// Checks/accessors for constant values.
@@ -890,7 +880,7 @@ private:
// Check if the lastNode is a branch on this node.
Node& lastNode = at(block->last());
- return lastNode.op == Branch && lastNode.child1().index() == m_compileIndex ? block->size() - 1 : UINT_MAX;
+ return lastNode.op() == Branch && lastNode.child1().index() == m_compileIndex ? block->size() - 1 : UINT_MAX;
}
void nonSpeculativeValueToNumber(Node&);
@@ -900,15 +890,15 @@ private:
enum SpillRegistersMode { NeedToSpill, DontSpill };
#if USE(JSVALUE64)
JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
- void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, NodeUse valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
- void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeUse valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif
- void nonSpeculativeNonPeepholeCompareNull(NodeUse operand, bool invert = false);
- void nonSpeculativePeepholeBranchNull(NodeUse operand, NodeIndex branchNodeIndex, bool invert = false);
- bool nonSpeculativeCompareNull(Node&, NodeUse operand, bool invert = false);
+ void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false);
+ void nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert = false);
+ bool nonSpeculativeCompareNull(Node&, Edge operand, bool invert = false);
void nonSpeculativePeepholeBranch(Node&, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
void nonSpeculativeNonPeepholeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
@@ -1174,6 +1164,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArguments(arg1);
+ return appendCallSetResult(operation, result);
+ }
JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
@@ -1184,6 +1179,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
@@ -1199,6 +1199,11 @@ private:
m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
@@ -1244,6 +1249,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArguments(arg1, arg2);
+ return appendCallSetResult(operation, result);
+ }
JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
{
m_jit.setupArguments(arg1, arg2);
@@ -1252,6 +1262,7 @@ private:
#else
JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
{
+ prepareForExternalCall();
m_jit.setupArguments(arg1);
JITCompiler::Call call = m_jit.appendCall(operation);
m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
@@ -1352,11 +1363,21 @@ private:
m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ m_jit.setupArguments(arg1Payload, arg1Tag);
+ return appendCallSetResult(operation, result);
+ }
JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
{
m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
@@ -1382,6 +1403,11 @@ private:
m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
+ JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
JITCompiler::Call callOperation(V_DFGOperation_EC operation, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
@@ -1419,16 +1445,32 @@ private:
return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArguments(arg1, arg2);
+ return appendCallSetResult(operation, result);
+ }
JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
{
m_jit.setupArguments(arg1, arg2);
return appendCallSetResult(operation, result);
}
#endif
+
+#ifndef NDEBUG
+ void prepareForExternalCall()
+ {
+ for (unsigned i = 0; i < sizeof(void*) / 4; i++)
+ m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.globalData()->topCallFrame) + i * 4);
+ }
+#else
+ void prepareForExternalCall() { }
+#endif
// These methods add call instructions, with optional exception checks & setting results.
JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
{
+ prepareForExternalCall();
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
CallBeginToken token = m_jit.beginCall();
JITCompiler::Call call = m_jit.appendCall(function);
@@ -1441,6 +1483,13 @@ private:
m_jit.move(GPRInfo::returnValueGPR, result);
return call;
}
+ JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result)
+ {
+ prepareForExternalCall();
+ JITCompiler::Call call = m_jit.appendCall(function);
+ m_jit.move(GPRInfo::returnValueGPR, result);
+ return call;
+ }
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
{
JITCompiler::Call call = appendCallWithExceptionCheck(function);
@@ -1693,17 +1742,21 @@ private:
void compilePeepHoleIntegerBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition);
void compilePeepHoleDoubleBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition);
void compilePeepHoleObjectEquality(Node&, NodeIndex branchNodeIndex, const ClassInfo*, PredictionChecker);
+ void compilePeepHoleObjectToObjectOrOtherEquality(
+ Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex, const ClassInfo*, PredictionChecker);
void compileObjectEquality(Node&, const ClassInfo*, PredictionChecker);
+ void compileObjectToObjectOrOtherEquality(
+ Edge leftChild, Edge rightChild, const ClassInfo*, PredictionChecker);
void compileValueAdd(Node&);
- void compileObjectOrOtherLogicalNot(NodeUse value, const ClassInfo*, bool needSpeculationCheck);
+ void compileObjectOrOtherLogicalNot(Edge value, const ClassInfo*, bool needSpeculationCheck);
void compileLogicalNot(Node&);
- void emitObjectOrOtherBranch(NodeUse value, BlockIndex taken, BlockIndex notTaken, const ClassInfo*, bool needSpeculationCheck);
+ void emitObjectOrOtherBranch(Edge value, BlockIndex taken, BlockIndex notTaken, const ClassInfo*, bool needSpeculationCheck);
void emitBranch(Node&);
void compileIntegerCompare(Node&, MacroAssembler::RelationalCondition);
void compileDoubleCompare(Node&, MacroAssembler::DoubleCondition);
- bool compileStrictEqForConstant(Node&, NodeUse value, JSValue constant);
+ bool compileStrictEqForConstant(Node&, Edge value, JSValue constant);
bool compileStrictEq(Node&);
@@ -1711,12 +1764,15 @@ private:
void compileGetByValOnString(Node&);
void compileValueToInt32(Node&);
void compileUInt32ToNumber(Node&);
- void compileGetByValOnByteArray(Node&);
- void compilePutByValForByteArray(GPRReg base, GPRReg property, Node&);
+ void compileDoubleAsInt32(Node&);
+ void compileInt32ToDouble(Node&);
void compileAdd(Node&);
void compileArithSub(Node&);
void compileArithNegate(Node&);
void compileArithMul(Node&);
+#if CPU(X86) || CPU(X86_64)
+ void compileIntegerArithDivForX86(Node&);
+#endif
void compileArithMod(Node&);
void compileSoftModulo(Node&);
void compileGetTypedArrayLength(const TypedArrayDescriptor&, Node&, bool needsSpeculationCheck);
@@ -1738,8 +1794,9 @@ private:
void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness, TypedArrayRounding = TruncateRounding);
void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements);
void compilePutByValForFloatTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements);
- void compileNewFunctionNoCheck(Node& node);
- void compileNewFunctionExpression(Node& node);
+ void compileNewFunctionNoCheck(Node&);
+ void compileNewFunctionExpression(Node&);
+ bool compileRegExpExec(Node&);
template <typename ClassType, bool destructor, typename StructureType>
void emitAllocateBasicJSObject(StructureType structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
@@ -1750,7 +1807,7 @@ private:
else
allocator = &m_jit.globalData()->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
- m_jit.loadPtr(&allocator->m_firstFreeCell, resultGPR);
+ m_jit.loadPtr(&allocator->m_freeList.head, resultGPR);
slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
// The object is half-allocated: we have what we know is a fresh object, but
@@ -1762,7 +1819,7 @@ private:
// Now that we have scratchGPR back, remove the object from the free list
m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
- m_jit.storePtr(scratchGPR, &allocator->m_firstFreeCell);
+ m_jit.storePtr(scratchGPR, &allocator->m_freeList.head);
// Initialize the object's classInfo pointer
m_jit.storePtr(MacroAssembler::TrustedImmPtr(&ClassType::s_info), MacroAssembler::Address(resultGPR, JSCell::classInfoOffset()));
@@ -1796,18 +1853,18 @@ private:
return;
m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this));
}
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeUse nodeUse, MacroAssembler::Jump jumpToFail)
+ void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail);
}
// Add a set of speculation checks without additional recovery.
void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail)
{
- Vector<MacroAssembler::Jump, 16> JumpVector = jumpsToFail.jumps();
- for (unsigned i = 0; i < JumpVector.size(); ++i)
- speculationCheck(kind, jsValueSource, nodeIndex, JumpVector[i]);
+ Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
+ for (unsigned i = 0; i < jumpVector.size(); ++i)
+ speculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i]);
}
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeUse nodeUse, MacroAssembler::JumpList& jumpsToFail)
+ void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::JumpList& jumpsToFail)
{
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpsToFail);
}
@@ -1819,10 +1876,47 @@ private:
m_jit.codeBlock()->appendSpeculationRecovery(recovery);
m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries()));
}
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeUse nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+ void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail, recovery);
}
+ void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
+ {
+ speculationCheck(kind, jsValueSource, nodeIndex, jumpToFail);
+
+ unsigned setLocalIndexInBlock = m_indexInBlock + 1;
+
+ Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock));
+
+ if (setLocal->op() == Int32ToDouble) {
+ setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
+ ASSERT(at(setLocal->child1()).child1() == m_compileIndex);
+ } else
+ ASSERT(setLocal->child1() == m_compileIndex);
+
+ ASSERT(setLocal->op() == SetLocal);
+ ASSERT(setLocal->codeOrigin == at(m_compileIndex).codeOrigin);
+
+ Node* nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1));
+ if (nextNode->codeOrigin == at(m_compileIndex).codeOrigin) {
+ ASSERT(nextNode->op() == Flush);
+ nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 2));
+ ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin); // duplicate the same assertion as below so that if we fail, we'll know we came down this path.
+ }
+ ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin);
+
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+ exit.m_codeOrigin = nextNode->codeOrigin;
+ exit.m_lastSetOperand = setLocal->local();
+
+ exit.valueRecoveryForOperand(setLocal->local()) = valueRecovery;
+ }
+ void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
+ {
+ Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
+ for (unsigned i = 0; i < jumpVector.size(); ++i)
+ forwardSpeculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i], valueRecovery);
+ }
// Called when we statically determine that a speculation will fail.
void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex)
@@ -1835,7 +1929,7 @@ private:
speculationCheck(kind, jsValueRegs, nodeIndex, m_jit.jump());
m_compileOkay = false;
}
- void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeUse nodeUse)
+ void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.index());
}
@@ -1934,7 +2028,7 @@ private:
class IntegerOperand {
public:
- explicit IntegerOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit IntegerOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_gprOrInvalid(InvalidGPRReg)
@@ -1943,6 +2037,7 @@ public:
#endif
{
ASSERT(m_jit);
+ ASSERT(use.useKind() != DoubleUse);
if (jit->isFilled(m_index))
gpr();
}
@@ -1986,12 +2081,21 @@ private:
class DoubleOperand {
public:
- explicit DoubleOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit DoubleOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_fprOrInvalid(InvalidFPRReg)
{
ASSERT(m_jit);
+
+ // This is counter-intuitive but correct. DoubleOperand is intended to
+ // be used only when you're a node that is happy to accept an untyped
+ // value, but will special-case for doubles (using DoubleOperand) if the
+ // value happened to already be represented as a double. The implication
+ // is that you will not try to force the value to become a double if it
+ // is not one already.
+ ASSERT(use.useKind() != DoubleUse);
+
if (jit->isFilledDouble(m_index))
fpr();
}
@@ -2027,7 +2131,7 @@ private:
class JSValueOperand {
public:
- explicit JSValueOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit JSValueOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
#if USE(JSVALUE64)
@@ -2037,6 +2141,7 @@ public:
#endif
{
ASSERT(m_jit);
+ ASSERT(use.useKind() != DoubleUse);
#if USE(JSVALUE64)
if (jit->isFilled(m_index))
gpr();
@@ -2141,12 +2246,13 @@ private:
class StorageOperand {
public:
- explicit StorageOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit StorageOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
+ ASSERT(use.useKind() != DoubleUse);
if (jit->isFilled(m_index))
gpr();
}
@@ -2310,7 +2416,7 @@ private:
class SpeculateIntegerOperand {
public:
- explicit SpeculateIntegerOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit SpeculateIntegerOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_gprOrInvalid(InvalidGPRReg)
@@ -2319,6 +2425,7 @@ public:
#endif
{
ASSERT(m_jit);
+ ASSERT(use.useKind() != DoubleUse);
if (jit->isFilled(m_index))
gpr();
}
@@ -2357,12 +2464,13 @@ private:
class SpeculateStrictInt32Operand {
public:
- explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, NodeUse use)
+ explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
+ ASSERT(use.useKind() != DoubleUse);
if (jit->isFilled(m_index))
gpr();
}
@@ -2398,12 +2506,13 @@ private:
class SpeculateDoubleOperand {
public:
- explicit SpeculateDoubleOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_fprOrInvalid(InvalidFPRReg)
{
ASSERT(m_jit);
+ ASSERT(use.useKind() == DoubleUse);
if (jit->isFilled(m_index))
fpr();
}
@@ -2434,12 +2543,13 @@ private:
class SpeculateCellOperand {
public:
- explicit SpeculateCellOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
+ ASSERT(use.useKind() != DoubleUse);
if (jit->isFilled(m_index))
gpr();
}
@@ -2475,12 +2585,13 @@ private:
class SpeculateBooleanOperand {
public:
- explicit SpeculateBooleanOperand(SpeculativeJIT* jit, NodeUse use)
+ explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge use)
: m_jit(jit)
, m_index(use.index())
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
+ ASSERT(use.useKind() != DoubleUse);
if (jit->isFilled(m_index))
gpr();
}
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index b6814229c..c156e81d0 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -29,8 +29,6 @@
#if ENABLE(DFG_JIT)
-#include "JSByteArray.h"
-
namespace JSC { namespace DFG {
#if USE(JSVALUE32_64)
@@ -495,10 +493,8 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
- m_jit.beginUninterruptedSequence();
JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
- m_jit.endUninterruptedSequence();
+ JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
@@ -506,7 +502,7 @@ JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg ba
JITCompiler::Jump done = m_jit.jump();
- structureCheck.link(&m_jit);
+ structureCheck.m_jump.link(&m_jit);
if (slowPathTarget.isSet())
slowPathTarget.link(&m_jit);
@@ -532,12 +528,10 @@ JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg ba
return functionCall;
}
-void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, NodeUse valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
- m_jit.beginUninterruptedSequence();
JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
- m_jit.endUninterruptedSequence();
+ JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
writeBarrier(basePayloadGPR, valueTagGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);
@@ -547,7 +541,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
JITCompiler::Jump done = m_jit.jump();
- structureCheck.link(&m_jit);
+ structureCheck.m_jump.link(&m_jit);
if (slowPathTarget.isSet())
slowPathTarget.link(&m_jit);
@@ -576,7 +570,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR)));
}
-void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeUse operand, bool invert)
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
{
JSValueOperand arg(this, operand);
GPRReg argTagGPR = arg.tagGPR();
@@ -608,7 +602,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeUse operand, bool
booleanResult(resultPayloadGPR, m_compileIndex);
}
-void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeUse operand, NodeIndex branchNodeIndex, bool invert)
+void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert)
{
Node& branchNode = at(branchNodeIndex);
BlockIndex taken = branchNode.takenBlockIndex();
@@ -650,7 +644,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeUse operand, NodeIndex
jump(notTaken);
}
-bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeUse operand, bool invert)
+bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, Edge operand, bool invert)
{
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
@@ -739,6 +733,9 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
}
jump(notTaken);
+
+ m_indexInBlock = m_jit.graph().m_blocks[m_block]->size() - 1;
+ m_compileIndex = branchNodeIndex;
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
@@ -895,24 +892,24 @@ void SpeculativeJIT::emitCall(Node& node)
{
P_DFGOperation_E slowCallFunction;
- if (node.op == Call)
+ if (node.op() == Call)
slowCallFunction = operationLinkCall;
else {
- ASSERT(node.op == Construct);
+ ASSERT(node.op() == Construct);
slowCallFunction = operationLinkConstruct;
}
// For constructors, the this argument is not passed but we have to make space
// for it.
- int dummyThisArgument = node.op == Call ? 0 : 1;
+ int dummyThisArgument = node.op() == Call ? 0 : 1;
- CallLinkInfo::CallType callType = node.op == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
+ CallLinkInfo::CallType callType = node.op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
- NodeUse calleeNodeUse = m_jit.graph().m_varArgChildren[node.firstChild()];
- JSValueOperand callee(this, calleeNodeUse);
+ Edge calleeEdge = m_jit.graph().m_varArgChildren[node.firstChild()];
+ JSValueOperand callee(this, calleeEdge);
GPRReg calleeTagGPR = callee.tagGPR();
GPRReg calleePayloadGPR = callee.payloadGPR();
- use(calleeNodeUse);
+ use(calleeEdge);
// The call instruction's first child is either the function (normal call) or the
// receiver (method call). subsequent children are the arguments.
@@ -924,11 +921,11 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.store32(calleeTagGPR, callFrameTagSlot(RegisterFile::Callee));
for (int i = 0; i < numPassedArgs; i++) {
- NodeUse argNodeUse = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
- JSValueOperand arg(this, argNodeUse);
+ Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
+ JSValueOperand arg(this, argEdge);
GPRReg argTagGPR = arg.tagGPR();
GPRReg argPayloadGPR = arg.payloadGPR();
- use(argNodeUse);
+ use(argEdge);
m_jit.store32(argTagGPR, argumentTagSlot(i + dummyThisArgument));
m_jit.store32(argPayloadGPR, argumentPayloadSlot(i + dummyThisArgument));
@@ -953,7 +950,7 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
- CallBeginToken token = m_jit.beginJSCall();
+ CallBeginToken token = m_jit.beginCall();
JITCompiler::Call fastCall = m_jit.nearCall();
m_jit.notifyCall(fastCall, codeOrigin, token);
@@ -967,7 +964,7 @@ void SpeculativeJIT::emitCall(Node& node)
JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- token = m_jit.beginJSCall();
+ token = m_jit.beginCall();
JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
m_jit.notifyCall(theCall, codeOrigin, token);
@@ -1285,15 +1282,15 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("SpecBool@%d ", nodeIndex);
#endif
- if (isKnownNotBoolean(nodeIndex)) {
+ Node& node = m_jit.graph()[nodeIndex];
+ VirtualRegister virtualRegister = node.virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if ((node.hasConstant() && !valueOfJSConstant(nodeIndex).isBoolean())
+ || !(info.isJSBoolean() || info.isUnknownJS())) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
return allocate();
}
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
-
switch (info.registerFormat()) {
case DataFormatNone: {
@@ -1399,6 +1396,147 @@ void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInf
booleanResult(resultPayloadGPR, m_compileIndex);
}
+void SpeculativeJIT::compileObjectToObjectOrOtherEquality(
+ Edge leftChild, Edge rightChild,
+ const ClassInfo* classInfo, PredictionChecker predictionCheck)
+{
+ SpeculateCellOperand op1(this, leftChild);
+ JSValueOperand op2(this, rightChild);
+ GPRTemporary result(this);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2TagGPR = op2.tagGPR();
+ GPRReg op2PayloadGPR = op2.payloadGPR();
+ GPRReg resultGPR = result.gpr();
+
+ if (!predictionCheck(m_state.forNode(leftChild).m_type)) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op1GPR), leftChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // It seems that most of the time when programs do a == b where b may be either null/undefined
+ // or an object, b is usually an object. Balance the branches to make that case fast.
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag));
+
+ // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
+ // proof, when filtered on cell, demonstrates that we have an object of the desired type
+ // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
+ // speculation.
+ if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) {
+ speculationCheck(
+ BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op2PayloadGPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // At this point we know that we can perform a straight-forward equality comparison on pointer
+ // values because both left and right are pointers to objects that have no special equality
+ // protocols.
+ MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2PayloadGPR);
+ MacroAssembler::Jump trueCase = m_jit.jump();
+
+ rightNotCell.link(&m_jit);
+
+ // We know that within this branch, rightChild must not be a cell. Check if that is enough to
+ // prove that it is either null or undefined.
+ if (!isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell)) {
+ m_jit.move(op2TagGPR, resultGPR);
+ m_jit.or32(TrustedImm32(1), resultGPR);
+
+ speculationCheck(
+ BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
+ m_jit.branch32(
+ MacroAssembler::NotEqual, resultGPR,
+ MacroAssembler::TrustedImm32(JSValue::NullTag)));
+ }
+
+ falseCase.link(&m_jit);
+ m_jit.move(TrustedImm32(0), resultGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+ trueCase.link(&m_jit);
+ m_jit.move(TrustedImm32(1), resultGPR);
+ done.link(&m_jit);
+
+ booleanResult(resultGPR, m_compileIndex);
+}
+
+void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(
+ Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex,
+ const ClassInfo* classInfo, PredictionChecker predictionCheck)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ SpeculateCellOperand op1(this, leftChild);
+ JSValueOperand op2(this, rightChild);
+ GPRTemporary result(this);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2TagGPR = op2.tagGPR();
+ GPRReg op2PayloadGPR = op2.payloadGPR();
+ GPRReg resultGPR = result.gpr();
+
+ if (!predictionCheck(m_state.forNode(leftChild).m_type)) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op1GPR), leftChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // It seems that most of the time when programs do a == b where b may be either null/undefined
+ // or an object, b is usually an object. Balance the branches to make that case fast.
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag));
+
+ // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
+ // proof, when filtered on cell, demonstrates that we have an object of the desired type
+ // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
+ // speculation.
+ if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) {
+ speculationCheck(
+ BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op2PayloadGPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // At this point we know that we can perform a straight-forward equality comparison on pointer
+ // values because both left and right are pointers to objects that have no special equality
+ // protocols.
+ branch32(MacroAssembler::Equal, op1GPR, op2PayloadGPR, taken);
+
+ // We know that within this branch, rightChild must not be a cell. Check if that is enough to
+ // prove that it is either null or undefined.
+ if (isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell))
+ rightNotCell.link(&m_jit);
+ else {
+ jump(notTaken, ForceJump);
+
+ rightNotCell.link(&m_jit);
+ m_jit.move(op2TagGPR, resultGPR);
+ m_jit.or32(TrustedImm32(1), resultGPR);
+
+ speculationCheck(
+ BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
+ m_jit.branch32(
+ MacroAssembler::NotEqual, resultGPR,
+ MacroAssembler::TrustedImm32(JSValue::NullTag)));
+ }
+
+ jump(notTaken);
+}
+
void SpeculativeJIT::compileIntegerCompare(Node& node, MacroAssembler::RelationalCondition condition)
{
SpeculateIntegerOperand op1(this, node.child1());
@@ -1447,7 +1585,7 @@ void SpeculativeJIT::compileValueAdd(Node& node)
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
}
-void SpeculativeJIT::compileObjectOrOtherLogicalNot(NodeUse nodeUse, const ClassInfo* classInfo, bool needSpeculationCheck)
+void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse, const ClassInfo* classInfo, bool needSpeculationCheck)
{
JSValueOperand value(this, nodeUse);
GPRTemporary resultPayload(this);
@@ -1478,7 +1616,7 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(NodeUse nodeUse, const Class
void SpeculativeJIT::compileLogicalNot(Node& node)
{
- if (isKnownBoolean(node.child1().index()) || isBooleanPrediction(m_jit.getPrediction(node.child1().index()))) {
+ if (at(node.child1()).shouldSpeculateBoolean()) {
SpeculateBooleanOperand value(this, node.child1());
GPRTemporary result(this, value);
m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr());
@@ -1535,7 +1673,7 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::emitObjectOrOtherBranch(NodeUse nodeUse, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck)
+void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck)
{
JSValueOperand value(this, nodeUse);
GPRTemporary scratch(this);
@@ -1567,7 +1705,7 @@ void SpeculativeJIT::emitBranch(Node& node)
BlockIndex taken = node.takenBlockIndex();
BlockIndex notTaken = node.notTakenBlockIndex();
- if (isKnownBoolean(node.child1().index())) {
+ if (at(node.child1()).shouldSpeculateBoolean()) {
SpeculateBooleanOperand value(this, node.child1());
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
@@ -1640,7 +1778,7 @@ void SpeculativeJIT::emitBranch(Node& node)
void SpeculativeJIT::compile(Node& node)
{
- NodeType op = static_cast<NodeType>(node.op);
+ NodeType op = node.op();
switch (op) {
case JSConstant:
@@ -1658,7 +1796,7 @@ void SpeculativeJIT::compile(Node& node)
// If we have no prediction for this local, then don't attempt to compile.
if (prediction == PredictNone || value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -1684,7 +1822,7 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- if (isArrayPrediction(prediction) || isByteArrayPrediction(prediction)) {
+ if (isArrayPrediction(prediction)) {
GPRTemporary result(this);
m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
@@ -1742,9 +1880,30 @@ void SpeculativeJIT::compile(Node& node)
// SetLocal and whatever other DFG Nodes are associated with the same
// bytecode index as the SetLocal.
ASSERT(m_codeOriginForOSR == node.codeOrigin);
- Node& nextNode = at(m_compileIndex + 1);
-
- m_codeOriginForOSR = nextNode.codeOrigin;
+ Node* nextNode = &at(block()->at(m_indexInBlock + 1));
+
+ // But even more oddly, we need to be super careful about the following
+ // sequence:
+ //
+ // a: Foo()
+ // b: SetLocal(@a)
+ // c: Flush(@b)
+ //
+ // This next piece of crazy takes care of this.
+ if (nextNode->op() == Flush && nextNode->child1() == m_compileIndex)
+ nextNode = &at(block()->at(m_indexInBlock + 2));
+
+ // Oddly, it's possible for the bytecode index for the next node to be
+ // equal to ours. This will happen for op_post_inc. And, even more oddly,
+ // this is just fine. Ordinarily, this wouldn't be fine, since if the
+ // next node failed OSR then we'd be OSR-ing with this SetLocal's local
+ // variable already set even though from the standpoint of the old JIT,
+ // this SetLocal should not have executed. But for op_post_inc, it's just
+ // fine, because this SetLocal's local (i.e. the LHS in a x = y++
+ // statement) would be dead anyway - so the fact that DFG would have
+ // already made the assignment, and baked it into the register file during
+ // OSR exit, would not be visible to the old JIT in any way.
+ m_codeOriginForOSR = nextNode->codeOrigin;
if (!m_jit.graph().isCaptured(node.local())) {
if (node.variableAccessData()->shouldUseDoubleFormat()) {
@@ -1757,7 +1916,7 @@ void SpeculativeJIT::compile(Node& node)
valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile);
break;
}
- PredictedType predictedType = node.variableAccessData()->prediction();
+ PredictedType predictedType = node.variableAccessData()->argumentAwarePrediction();
if (m_generationInfo[at(node.child1()).virtualRegister()].registerFormat() == DataFormatDouble) {
DoubleOperand value(this, node.child1());
m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
@@ -1782,16 +1941,6 @@ void SpeculativeJIT::compile(Node& node)
valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile);
break;
}
- if (isByteArrayPrediction(predictedType)) {
- SpeculateCellOperand cell(this, node.child1());
- GPRReg cellGPR = cell.gpr();
- if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
- m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local()));
- noResult(m_compileIndex);
- valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile);
- break;
- }
if (isBooleanPrediction(predictedType)) {
SpeculateBooleanOperand value(this, node.child1());
m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
@@ -1873,11 +2022,34 @@ void SpeculativeJIT::compile(Node& node)
compileUInt32ToNumber(node);
break;
}
+
+ case DoubleAsInt32: {
+ compileDoubleAsInt32(node);
+ break;
+ }
case ValueToInt32: {
compileValueToInt32(node);
break;
}
+
+ case Int32ToDouble: {
+ compileInt32ToDouble(node);
+ break;
+ }
+
+ case CheckNumber: {
+ if (!isNumberPrediction(m_state.forNode(node.child1()).m_type)) {
+ JSValueOperand op1(this, node.child1());
+ JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, op1.tagGPR(), TrustedImm32(JSValue::Int32Tag));
+ speculationCheck(
+ BadType, JSValueRegs(op1.tagGPR(), op1.payloadGPR()), node.child1().index(),
+ m_jit.branch32(MacroAssembler::AboveOrEqual, op1.tagGPR(), TrustedImm32(JSValue::LowestTag)));
+ isInteger.link(&m_jit);
+ }
+ noResult(m_compileIndex);
+ break;
+ }
case ValueAdd:
case ArithAdd:
@@ -1899,63 +2071,9 @@ void SpeculativeJIT::compile(Node& node)
case ArithDiv: {
if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
#if CPU(X86)
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.gpr();
-
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
-
- // If the user cares about negative zero, then speculate that we're not about
- // to produce negative zero.
- if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
- MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
- speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
- numeratorNonZero.link(&m_jit);
- }
-
- GPRTemporary eax(this, X86Registers::eax);
- GPRTemporary edx(this, X86Registers::edx);
-
- GPRReg temp2 = InvalidGPRReg;
- if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
- temp2 = allocate();
- m_jit.move(op2GPR, temp2);
- op2GPR = temp2;
- }
-
- m_jit.move(op1GPR, eax.gpr());
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(op2GPR);
-
- if (temp2 != InvalidGPRReg)
- unlock(temp2);
-
- // Check that there was no remainder. If there had been, then we'd be obligated to
- // produce a double result instead.
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
-
- integerResult(eax.gpr(), m_compileIndex);
+ compileIntegerArithDivForX86(node);
#else // CPU(X86) -> so non-X86 code follows
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this);
- FPRTemporary scratch(this);
- GPRTemporary intResult(this);
-
- FPRReg op1FPR = op1.fpr();
- FPRReg op2FPR = op2.fpr();
- FPRReg resultFPR = result.fpr();
- FPRReg scratchFPR = scratch.fpr();
- GPRReg resultGPR = intResult.gpr();
-
- m_jit.divDouble(op1FPR, op2FPR, resultFPR);
-
- JITCompiler::JumpList failureCases;
- m_jit.branchConvertDoubleToInt32(resultFPR, resultGPR, failureCases, scratchFPR);
- speculationCheck(Overflow, JSValueRegs(), NoNode, failureCases);
-
- integerResult(resultGPR, m_compileIndex);
+        ASSERT_NOT_REACHED(); // should have been converted into a double divide.
#endif // CPU(X86)
break;
}
@@ -2121,7 +2239,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByVal: {
if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -2148,13 +2266,6 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- if (at(node.child1()).shouldSpeculateByteArray()) {
- compileGetByValOnByteArray(node);
- if (!m_compileOkay)
- return;
- break;
- }
-
if (at(node.child1()).shouldSpeculateInt8Array()) {
compileGetByValOnIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
@@ -2254,7 +2365,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByVal: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -2277,15 +2388,6 @@ void SpeculativeJIT::compile(Node& node)
SpeculateCellOperand base(this, node.child1());
SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateByteArray()) {
- compilePutByValForByteArray(base.gpr(), property.gpr(), node);
- break;
- }
- if (at(node.child1()).shouldSpeculateByteArray()) {
- compilePutByValForByteArray(base.gpr(), property.gpr(), node);
- break;
- }
-
if (at(node.child1()).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
@@ -2414,7 +2516,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByValAlias: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -2424,11 +2526,6 @@ void SpeculativeJIT::compile(Node& node)
SpeculateCellOperand base(this, node.child1());
SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateByteArray()) {
- compilePutByValForByteArray(base.gpr(), property.gpr(), node);
- break;
- }
-
if (at(node.child1()).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
@@ -2515,6 +2612,55 @@ void SpeculativeJIT::compile(Node& node)
break;
}
+ case RegExpExec: {
+ if (compileRegExpExec(node))
+ return;
+
+ if (!node.adjustedRefCount()) {
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand argument(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
+
+            // Must use booleanResult because otherwise we screw up register
+            // allocation, which thinks that this node has a result.
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand argument(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult2 resultTag(this);
+ GPRResult resultPayload(this);
+ callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR);
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ break;
+ }
+
+ case RegExpTest: {
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand argument(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
+
+ // If we add a DataFormatBool, we should use it here.
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
case ArrayPush: {
SpeculateCellOperand base(this, node.child1());
JSValueOperand value(this, node.child2());
@@ -2716,8 +2862,6 @@ void SpeculativeJIT::compile(Node& node)
// FIXME: Add string speculation here.
- bool wasPrimitive = isKnownNumeric(node.child1().index()) || isKnownBoolean(node.child1().index());
-
JSValueOperand op1(this, node.child1());
GPRTemporary resultTag(this, op1);
GPRTemporary resultPayload(this, op1, false);
@@ -2729,7 +2873,7 @@ void SpeculativeJIT::compile(Node& node)
op1.use();
- if (wasPrimitive) {
+ if (!(m_state.forNode(node.child1()).m_type & ~(PredictNumber | PredictBoolean))) {
m_jit.move(op1TagGPR, resultTagGPR);
m_jit.move(op1PayloadGPR, resultPayloadGPR);
} else {
@@ -3014,7 +3158,7 @@ void SpeculativeJIT::compile(Node& node)
case GetById: {
if (!node.prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -3068,7 +3212,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByIdFlush: {
if (!node.prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -3152,22 +3296,6 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- case GetByteArrayLength: {
- SpeculateCellOperand base(this, node.child1());
- GPRReg baseGPR = base.gpr();
-
- if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
-
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSByteArray::offsetOfStorage()), resultGPR);
- m_jit.load32(MacroAssembler::Address(resultGPR, ByteArray::offsetOfSize()), resultGPR);
-
- integerResult(resultGPR, m_compileIndex);
- break;
- }
case GetInt8ArrayLength: {
compileGetTypedArrayLength(m_jit.globalData()->int8ArrayDescriptor(), node, !isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type));
break;
@@ -3411,6 +3539,85 @@ void SpeculativeJIT::compile(Node& node)
break;
}
+ case IsUndefined: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this);
+
+ JITCompiler::Jump isCell = m_jit.branch32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));
+
+ m_jit.compare32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::UndefinedTag), result.gpr());
+ JITCompiler::Jump done = m_jit.jump();
+
+ isCell.link(&m_jit);
+ m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
+ m_jit.test8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), result.gpr());
+
+ done.link(&m_jit);
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case IsBoolean: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ m_jit.compare32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag), result.gpr());
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case IsNumber: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ m_jit.add32(TrustedImm32(1), value.tagGPR(), result.gpr());
+ m_jit.compare32(JITCompiler::Below, result.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag + 1), result.gpr());
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case IsString: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));
+
+ m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
+ m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr());
+ JITCompiler::Jump done = m_jit.jump();
+
+ isNotCell.link(&m_jit);
+ m_jit.move(TrustedImm32(0), result.gpr());
+
+ done.link(&m_jit);
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case IsObject: {
+ JSValueOperand value(this, node.child1());
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsObject, resultGPR, valueTagGPR, valuePayloadGPR);
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case IsFunction: {
+ JSValueOperand value(this, node.child1());
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsFunction, resultGPR, valueTagGPR, valuePayloadGPR);
+ booleanResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
case Phi:
case Flush:
break;
@@ -3567,7 +3774,7 @@ void SpeculativeJIT::compile(Node& node)
break;
case ForceOSRExit: {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index 1597b1674..a46f8f262 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -26,8 +26,6 @@
#include "config.h"
#include "DFGSpeculativeJIT.h"
-#include "JSByteArray.h"
-
#if ENABLE(DFG_JIT)
namespace JSC { namespace DFG {
@@ -76,7 +74,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat
info.fillJSValue(gpr, DataFormatJSInteger);
unlock(gpr);
}
-
+
switch (info.registerFormat()) {
case DataFormatNone:
// Should have filled, above.
@@ -409,7 +407,7 @@ void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
if (isKnownInteger(node.child1().index())) {
IntegerOperand op1(this, node.child1());
GPRTemporary result(this, op1);
- m_jit.move(op1.gpr(), result.gpr());
+ m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
integerResult(result.gpr(), m_compileIndex);
return;
}
@@ -480,14 +478,14 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+ JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR);
JITCompiler::Jump done = m_jit.jump();
- structureCheck.link(&m_jit);
+ structureCheck.m_jump.link(&m_jit);
if (slowPathTarget.isSet())
slowPathTarget.link(&m_jit);
@@ -512,11 +510,11 @@ JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg ba
return functionCall;
}
-void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, NodeUse valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::Jump structureCheck = m_jit.branchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
+ JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
writeBarrier(baseGPR, valueGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);
@@ -525,7 +523,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
JITCompiler::Jump done = m_jit.jump();
- structureCheck.link(&m_jit);
+ structureCheck.m_jump.link(&m_jit);
if (slowPathTarget.isSet())
slowPathTarget.link(&m_jit);
@@ -554,7 +552,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
}
-void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeUse operand, bool invert)
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
{
JSValueOperand arg(this, operand);
GPRReg argGPR = arg.gpr();
@@ -586,7 +584,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(NodeUse operand, bool
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
}
-void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeUse operand, NodeIndex branchNodeIndex, bool invert)
+void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert)
{
Node& branchNode = at(branchNodeIndex);
BlockIndex taken = branchNode.takenBlockIndex();
@@ -626,7 +624,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeUse operand, NodeIndex
jump(notTaken);
}
-bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeUse operand, bool invert)
+bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, Edge operand, bool invert)
{
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
@@ -713,6 +711,9 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
}
jump(notTaken);
+
+ m_indexInBlock = m_jit.graph().m_blocks[m_block]->size() - 1;
+ m_compileIndex = branchNodeIndex;
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
@@ -916,23 +917,23 @@ void SpeculativeJIT::emitCall(Node& node)
{
P_DFGOperation_E slowCallFunction;
- if (node.op == Call)
+ if (node.op() == Call)
slowCallFunction = operationLinkCall;
else {
- ASSERT(node.op == Construct);
+ ASSERT(node.op() == Construct);
slowCallFunction = operationLinkConstruct;
}
// For constructors, the this argument is not passed but we have to make space
// for it.
- int dummyThisArgument = node.op == Call ? 0 : 1;
+ int dummyThisArgument = node.op() == Call ? 0 : 1;
- CallLinkInfo::CallType callType = node.op == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
+ CallLinkInfo::CallType callType = node.op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
- NodeUse calleeNodeUse = m_jit.graph().m_varArgChildren[node.firstChild()];
- JSValueOperand callee(this, calleeNodeUse);
+ Edge calleeEdge = m_jit.graph().m_varArgChildren[node.firstChild()];
+ JSValueOperand callee(this, calleeEdge);
GPRReg calleeGPR = callee.gpr();
- use(calleeNodeUse);
+ use(calleeEdge);
// The call instruction's first child is either the function (normal call) or the
// receiver (method call). subsequent children are the arguments.
@@ -943,10 +944,10 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.storePtr(calleeGPR, callFrameSlot(RegisterFile::Callee));
for (int i = 0; i < numPassedArgs; i++) {
- NodeUse argNodeUse = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
- JSValueOperand arg(this, argNodeUse);
+ Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
+ JSValueOperand arg(this, argEdge);
GPRReg argGPR = arg.gpr();
- use(argNodeUse);
+ use(argEdge);
m_jit.storePtr(argGPR, argumentSlot(i + dummyThisArgument));
}
@@ -966,7 +967,7 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
- CallBeginToken token = m_jit.beginJSCall();
+ CallBeginToken token = m_jit.beginCall();
JITCompiler::Call fastCall = m_jit.nearCall();
m_jit.notifyCall(fastCall, codeOrigin, token);
@@ -979,7 +980,7 @@ void SpeculativeJIT::emitCall(Node& node)
JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- token = m_jit.beginJSCall();
+ token = m_jit.beginCall();
JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
m_jit.notifyCall(theCall, codeOrigin, token);
@@ -1485,6 +1486,145 @@ void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInf
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
}
+void SpeculativeJIT::compileObjectToObjectOrOtherEquality(
+ Edge leftChild, Edge rightChild,
+ const ClassInfo* classInfo, PredictionChecker predictionCheck)
+{
+ SpeculateCellOperand op1(this, leftChild);
+ JSValueOperand op2(this, rightChild);
+ GPRTemporary result(this);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!predictionCheck(m_state.forNode(leftChild).m_type)) {
+ speculationCheck(
+ BadType, JSValueRegs(op1GPR), leftChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // It seems that most of the time when programs do a == b where b may be either null/undefined
+ // or an object, b is usually an object. Balance the branches to make that case fast.
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
+
+ // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
+ // proof, when filtered on cell, demonstrates that we have an object of the desired type
+ // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
+ // speculation.
+ if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) {
+ speculationCheck(
+ BadType, JSValueRegs(op2GPR), rightChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // At this point we know that we can perform a straight-forward equality comparison on pointer
+ // values because both left and right are pointers to objects that have no special equality
+ // protocols.
+ MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
+ MacroAssembler::Jump trueCase = m_jit.jump();
+
+ rightNotCell.link(&m_jit);
+
+ // We know that within this branch, rightChild must not be a cell. Check if that is enough to
+ // prove that it is either null or undefined.
+ if (!isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell)) {
+ m_jit.move(op2GPR, resultGPR);
+ m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+
+ speculationCheck(
+ BadType, JSValueRegs(op2GPR), rightChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual, resultGPR,
+ MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ }
+
+ falseCase.link(&m_jit);
+ m_jit.move(TrustedImm32(ValueFalse), resultGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+ trueCase.link(&m_jit);
+ m_jit.move(TrustedImm32(ValueTrue), resultGPR);
+ done.link(&m_jit);
+
+ jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
+}
+
+void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(
+ Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex,
+ const ClassInfo* classInfo, PredictionChecker predictionCheck)
+{
+ Node& branchNode = at(branchNodeIndex);
+ BlockIndex taken = branchNode.takenBlockIndex();
+ BlockIndex notTaken = branchNode.notTakenBlockIndex();
+
+ SpeculateCellOperand op1(this, leftChild);
+ JSValueOperand op2(this, rightChild);
+ GPRTemporary result(this);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ if (!predictionCheck(m_state.forNode(leftChild).m_type)) {
+ speculationCheck(
+ BadType, JSValueRegs(op1GPR), leftChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // It seems that most of the time when programs do a == b where b may be either null/undefined
+ // or an object, b is usually an object. Balance the branches to make that case fast.
+ MacroAssembler::Jump rightNotCell =
+ m_jit.branchTestPtr(MacroAssembler::NonZero, op2GPR, GPRInfo::tagMaskRegister);
+
+ // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
+ // proof, when filtered on cell, demonstrates that we have an object of the desired type
+ // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
+ // speculation.
+ if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) {
+ speculationCheck(
+ BadType, JSValueRegs(op2GPR), rightChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(classInfo)));
+ }
+
+ // At this point we know that we can perform a straight-forward equality comparison on pointer
+ // values because both left and right are pointers to objects that have no special equality
+ // protocols.
+ branchPtr(MacroAssembler::Equal, op1GPR, op2GPR, taken);
+
+ // We know that within this branch, rightChild must not be a cell. Check if that is enough to
+ // prove that it is either null or undefined.
+ if (isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell))
+ rightNotCell.link(&m_jit);
+ else {
+ jump(notTaken, ForceJump);
+
+ rightNotCell.link(&m_jit);
+ m_jit.move(op2GPR, resultGPR);
+ m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), resultGPR);
+
+ speculationCheck(
+ BadType, JSValueRegs(op2GPR), rightChild.index(),
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual, resultGPR,
+ MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
+ }
+
+ jump(notTaken);
+}
+
void SpeculativeJIT::compileIntegerCompare(Node& node, MacroAssembler::RelationalCondition condition)
{
SpeculateIntegerOperand op1(this, node.child1());
@@ -1531,7 +1671,7 @@ void SpeculativeJIT::compileValueAdd(Node& node)
jsValueResult(result.gpr(), m_compileIndex);
}
-void SpeculativeJIT::compileObjectOrOtherLogicalNot(NodeUse nodeUse, const ClassInfo* classInfo, bool needSpeculationCheck)
+void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse, const ClassInfo* classInfo, bool needSpeculationCheck)
{
JSValueOperand value(this, nodeUse);
GPRTemporary result(this);
@@ -1560,16 +1700,6 @@ void SpeculativeJIT::compileObjectOrOtherLogicalNot(NodeUse nodeUse, const Class
void SpeculativeJIT::compileLogicalNot(Node& node)
{
- if (isKnownBoolean(node.child1().index())) {
- SpeculateBooleanOperand value(this, node.child1());
- GPRTemporary result(this, value);
-
- m_jit.move(value.gpr(), result.gpr());
- m_jit.xorPtr(TrustedImm32(true), result.gpr());
-
- jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
- return;
- }
if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
compileObjectOrOtherLogicalNot(node.child1(), &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
return;
@@ -1599,7 +1729,18 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
}
PredictedType prediction = m_jit.getPrediction(node.child1());
- if (isBooleanPrediction(prediction) || !prediction) {
+ if (isBooleanPrediction(prediction)) {
+ if (isBooleanPrediction(m_state.forNode(node.child1()).m_type)) {
+ SpeculateBooleanOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ m_jit.move(value.gpr(), result.gpr());
+ m_jit.xorPtr(TrustedImm32(true), result.gpr());
+
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ return;
+ }
+
JSValueOperand value(this, node.child1());
GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
@@ -1635,7 +1776,7 @@ void SpeculativeJIT::compileLogicalNot(Node& node)
jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::emitObjectOrOtherBranch(NodeUse nodeUse, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck)
+void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck)
{
JSValueOperand value(this, nodeUse);
GPRTemporary scratch(this);
@@ -1661,27 +1802,10 @@ void SpeculativeJIT::emitObjectOrOtherBranch(NodeUse nodeUse, BlockIndex taken,
void SpeculativeJIT::emitBranch(Node& node)
{
- JSValueOperand value(this, node.child1());
- GPRReg valueGPR = value.gpr();
-
BlockIndex taken = node.takenBlockIndex();
BlockIndex notTaken = node.notTakenBlockIndex();
- if (isKnownBoolean(node.child1().index())) {
- MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
-
- if (taken == (m_block + 1)) {
- condition = MacroAssembler::Zero;
- BlockIndex tmp = taken;
- taken = notTaken;
- notTaken = tmp;
- }
-
- branchTest32(condition, valueGPR, TrustedImm32(true), taken);
- jump(notTaken);
-
- noResult(m_compileIndex);
- } else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
+ if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
} else if (at(node.child1()).shouldSpeculateArrayOrOther()) {
emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type));
@@ -1708,18 +1832,35 @@ void SpeculativeJIT::emitBranch(Node& node)
noResult(m_compileIndex);
} else {
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
+ JSValueOperand value(this, node.child1());
+ GPRReg valueGPR = value.gpr();
bool predictBoolean = isBooleanPrediction(m_jit.getPrediction(node.child1()));
if (predictBoolean) {
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
- branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken);
-
- speculationCheck(BadType, JSValueRegs(valueGPR), node.child1(), m_jit.jump());
+ if (isBooleanPrediction(m_state.forNode(node.child1()).m_type)) {
+ MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
+
+ if (taken == (m_block + 1)) {
+ condition = MacroAssembler::Zero;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ branchTest32(condition, valueGPR, TrustedImm32(true), taken);
+ jump(notTaken);
+ } else {
+ branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
+ branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), taken);
+
+ speculationCheck(BadType, JSValueRegs(valueGPR), node.child1(), m_jit.jump());
+ }
value.use();
} else {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::TrustedImmPtr(JSValue::encode(jsNumber(0))), notTaken);
branchPtr(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
@@ -1744,7 +1885,7 @@ void SpeculativeJIT::emitBranch(Node& node)
void SpeculativeJIT::compile(Node& node)
{
- NodeType op = static_cast<NodeType>(node.op);
+ NodeType op = node.op();
switch (op) {
case JSConstant:
@@ -1762,7 +1903,7 @@ void SpeculativeJIT::compile(Node& node)
// If we have no prediction for this local, then don't attempt to compile.
if (prediction == PredictNone || value.isClear()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -1823,7 +1964,18 @@ void SpeculativeJIT::compile(Node& node)
// SetLocal and whatever other DFG Nodes are associated with the same
// bytecode index as the SetLocal.
ASSERT(m_codeOriginForOSR == node.codeOrigin);
- Node& nextNode = at(m_compileIndex + 1);
+ Node* nextNode = &at(block()->at(m_indexInBlock + 1));
+
+ // But even more oddly, we need to be super careful about the following
+ // sequence:
+ //
+ // a: Foo()
+ // b: SetLocal(@a)
+ // c: Flush(@b)
+ //
+ // This next piece of crazy takes care of this.
+ if (nextNode->op() == Flush && nextNode->child1() == m_compileIndex)
+ nextNode = &at(block()->at(m_indexInBlock + 2));
// Oddly, it's possible for the bytecode index for the next node to be
// equal to ours. This will happen for op_post_inc. And, even more oddly,
@@ -1835,7 +1987,7 @@ void SpeculativeJIT::compile(Node& node)
// statement) would be dead anyway - so the fact that DFG would have
// already made the assignment, and baked it into the register file during
// OSR exit, would not be visible to the old JIT in any way.
- m_codeOriginForOSR = nextNode.codeOrigin;
+ m_codeOriginForOSR = nextNode->codeOrigin;
if (!m_jit.graph().isCaptured(node.local())) {
if (node.variableAccessData()->shouldUseDoubleFormat()) {
@@ -1849,7 +2001,7 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- PredictedType predictedType = node.variableAccessData()->prediction();
+ PredictedType predictedType = node.variableAccessData()->argumentAwarePrediction();
if (isInt32Prediction(predictedType)) {
SpeculateIntegerOperand value(this, node.child1());
m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
@@ -1867,16 +2019,6 @@ void SpeculativeJIT::compile(Node& node)
valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile);
break;
}
- if (isByteArrayPrediction(predictedType)) {
- SpeculateCellOperand cell(this, node.child1());
- GPRReg cellGPR = cell.gpr();
- if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueRegs(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
- m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local()));
- noResult(m_compileIndex);
- valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile);
- break;
- }
if (isBooleanPrediction(predictedType)) {
SpeculateBooleanOperand boolean(this, node.child1());
m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local()));
@@ -1960,10 +2102,33 @@ void SpeculativeJIT::compile(Node& node)
break;
}
+ case DoubleAsInt32: {
+ compileDoubleAsInt32(node);
+ break;
+ }
+
case ValueToInt32: {
compileValueToInt32(node);
break;
}
+
+ case Int32ToDouble: {
+ compileInt32ToDouble(node);
+ break;
+ }
+
+ case CheckNumber: {
+ if (!isNumberPrediction(m_state.forNode(node.child1()).m_type)) {
+ JSValueOperand op1(this, node.child1());
+ JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, op1.gpr(), GPRInfo::tagTypeNumberRegister);
+ speculationCheck(
+ BadType, JSValueRegs(op1.gpr()), node.child1().index(),
+ m_jit.branchTestPtr(MacroAssembler::Zero, op1.gpr(), GPRInfo::tagTypeNumberRegister));
+ isInteger.link(&m_jit);
+ }
+ noResult(m_compileIndex);
+ break;
+ }
case ValueAdd:
case ArithAdd:
@@ -1984,42 +2149,7 @@ void SpeculativeJIT::compile(Node& node)
case ArithDiv: {
if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary eax(this, X86Registers::eax);
- GPRTemporary edx(this, X86Registers::edx);
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.gpr();
-
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
-
- // If the user cares about negative zero, then speculate that we're not about
- // to produce negative zero.
- if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
- MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
- speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
- numeratorNonZero.link(&m_jit);
- }
-
- GPRReg temp2 = InvalidGPRReg;
- if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
- temp2 = allocate();
- m_jit.move(op2GPR, temp2);
- op2GPR = temp2;
- }
-
- m_jit.move(op1GPR, eax.gpr());
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(op2GPR);
-
- if (temp2 != InvalidGPRReg)
- unlock(temp2);
-
- // Check that there was no remainder. If there had been, then we'd be obligated to
- // produce a double result instead.
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
-
- integerResult(eax.gpr(), m_compileIndex);
+ compileIntegerArithDivForX86(node);
break;
}
@@ -2184,7 +2314,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByVal: {
if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -2209,13 +2339,6 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- if (at(node.child1()).shouldSpeculateByteArray()) {
- compileGetByValOnByteArray(node);
- if (!m_compileOkay)
- return;
- break;
- }
-
if (at(node.child1()).shouldSpeculateInt8Array()) {
compileGetByValOnIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
@@ -2309,7 +2432,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByVal: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -2330,11 +2453,6 @@ void SpeculativeJIT::compile(Node& node)
SpeculateCellOperand base(this, node.child1());
SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateByteArray()) {
- compilePutByValForByteArray(base.gpr(), property.gpr(), node);
- break;
- }
-
if (at(node.child1()).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
@@ -2459,7 +2577,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByValAlias: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -2468,11 +2586,6 @@ void SpeculativeJIT::compile(Node& node)
SpeculateCellOperand base(this, node.child1());
SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateByteArray()) {
- compilePutByValForByteArray(base.gpr(), property.gpr(), node);
- break;
- }
-
if (at(node.child1()).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
@@ -2559,6 +2672,54 @@ void SpeculativeJIT::compile(Node& node)
break;
}
+ case RegExpExec: {
+ if (compileRegExpExec(node))
+ return;
+ if (!node.adjustedRefCount()) {
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand argument(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
+
+ // Must use jsValueResult because otherwise we screw up register
+ // allocation, which thinks that this node has a result.
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand argument(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationRegExpExec, result.gpr(), baseGPR, argumentGPR);
+
+ jsValueResult(result.gpr(), m_compileIndex);
+ break;
+ }
+
+ case RegExpTest: {
+ SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand argument(this, node.child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
+
+ // If we add a DataFormatBool, we should use it here.
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
case ArrayPush: {
SpeculateCellOperand base(this, node.child1());
JSValueOperand value(this, node.child2());
@@ -2741,8 +2902,6 @@ void SpeculativeJIT::compile(Node& node)
// FIXME: Add string speculation here.
- bool wasPrimitive = isKnownNumeric(node.child1().index()) || isKnownBoolean(node.child1().index());
-
JSValueOperand op1(this, node.child1());
GPRTemporary result(this, op1);
@@ -2751,7 +2910,7 @@ void SpeculativeJIT::compile(Node& node)
op1.use();
- if (wasPrimitive)
+ if (!(m_state.forNode(node.child1()).m_type & ~(PredictNumber | PredictBoolean)))
m_jit.move(op1GPR, resultGPR);
else {
MacroAssembler::JumpList alreadyPrimitive;
@@ -3018,7 +3177,7 @@ void SpeculativeJIT::compile(Node& node)
}
case GetById: {
if (!node.prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -3068,7 +3227,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByIdFlush: {
if (!node.prediction()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
@@ -3147,22 +3306,6 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- case GetByteArrayLength: {
- SpeculateCellOperand base(this, node.child1());
- GPRTemporary result(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg resultGPR = result.gpr();
-
- if (!isByteArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueRegs(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
-
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSByteArray::offsetOfStorage()), resultGPR);
- m_jit.load32(MacroAssembler::Address(resultGPR, ByteArray::offsetOfSize()), resultGPR);
-
- integerResult(resultGPR, m_compileIndex);
- break;
- }
case GetInt8ArrayLength: {
compileGetTypedArrayLength(m_jit.globalData()->int8ArrayDescriptor(), node, !isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type));
break;
@@ -3393,6 +3536,90 @@ void SpeculativeJIT::compile(Node& node)
compileInstanceOf(node);
break;
}
+
+ case IsUndefined: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this);
+
+ JITCompiler::Jump isCell = m_jit.branchTestPtr(JITCompiler::Zero, value.gpr(), GPRInfo::tagMaskRegister);
+
+ m_jit.comparePtr(JITCompiler::Equal, value.gpr(), TrustedImm32(ValueUndefined), result.gpr());
+ JITCompiler::Jump done = m_jit.jump();
+
+ isCell.link(&m_jit);
+ m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr());
+ m_jit.test8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), result.gpr());
+
+ done.link(&m_jit);
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
+ case IsBoolean: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ m_jit.move(value.gpr(), result.gpr());
+ m_jit.xorPtr(JITCompiler::TrustedImm32(ValueFalse), result.gpr());
+ m_jit.testPtr(JITCompiler::Zero, result.gpr(), JITCompiler::TrustedImm32(static_cast<int32_t>(~1)), result.gpr());
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
+ case IsNumber: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ m_jit.testPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagTypeNumberRegister, result.gpr());
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
+ case IsString: {
+ JSValueOperand value(this, node.child1());
+ GPRTemporary result(this, value);
+
+ JITCompiler::Jump isNotCell = m_jit.branchTestPtr(JITCompiler::NonZero, value.gpr(), GPRInfo::tagMaskRegister);
+
+ m_jit.loadPtr(JITCompiler::Address(value.gpr(), JSCell::structureOffset()), result.gpr());
+ m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr());
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ JITCompiler::Jump done = m_jit.jump();
+
+ isNotCell.link(&m_jit);
+ m_jit.move(TrustedImm32(ValueFalse), result.gpr());
+
+ done.link(&m_jit);
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
+ case IsObject: {
+ JSValueOperand value(this, node.child1());
+ GPRReg valueGPR = value.gpr();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsObject, resultGPR, valueGPR);
+ m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
+
+ case IsFunction: {
+ JSValueOperand value(this, node.child1());
+ GPRReg valueGPR = value.gpr();
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(operationIsFunction, resultGPR, valueGPR);
+ m_jit.or32(TrustedImm32(ValueFalse), resultGPR);
+ jsValueResult(result.gpr(), m_compileIndex, DataFormatJSBoolean);
+ break;
+ }
case Flush:
case Phi:
@@ -3539,7 +3766,7 @@ void SpeculativeJIT::compile(Node& node)
break;
case ForceOSRExit: {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
index bd626f9fb..1d99ed516 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
+++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,11 +26,14 @@
#ifndef DFGVariableAccessData_h
#define DFGVariableAccessData_h
-#include "DFGOperands.h"
+#include "DFGDoubleFormatState.h"
+#include "DFGNodeFlags.h"
+#include "Operands.h"
#include "PredictedType.h"
#include "VirtualRegister.h"
#include <wtf/Platform.h>
#include <wtf/UnionFind.h>
+#include <wtf/Vector.h>
namespace JSC { namespace DFG {
@@ -41,7 +44,9 @@ public:
VariableAccessData()
: m_local(static_cast<VirtualRegister>(std::numeric_limits<int>::min()))
, m_prediction(PredictNone)
- , m_shouldUseDoubleFormat(false)
+ , m_argumentAwarePrediction(PredictNone)
+ , m_flags(0)
+ , m_doubleFormatState(EmptyDoubleFormatState)
{
clearVotes();
}
@@ -49,7 +54,9 @@ public:
VariableAccessData(VirtualRegister local)
: m_local(local)
, m_prediction(PredictNone)
- , m_shouldUseDoubleFormat(false)
+ , m_argumentAwarePrediction(PredictNone)
+ , m_flags(0)
+ , m_doubleFormatState(EmptyDoubleFormatState)
{
clearVotes();
}
@@ -67,7 +74,11 @@ public:
bool predict(PredictedType prediction)
{
- return mergePrediction(find()->m_prediction, prediction);
+ VariableAccessData* self = find();
+ bool result = mergePrediction(self->m_prediction, prediction);
+ if (result)
+ mergePrediction(m_argumentAwarePrediction, m_prediction);
+ return result;
}
PredictedType nonUnifiedPrediction()
@@ -80,6 +91,16 @@ public:
return find()->m_prediction;
}
+ PredictedType argumentAwarePrediction()
+ {
+ return find()->m_argumentAwarePrediction;
+ }
+
+ bool mergeArgumentAwarePrediction(PredictedType prediction)
+ {
+ return mergePrediction(find()->m_argumentAwarePrediction, prediction);
+ }
+
void clearVotes()
{
ASSERT(find() == this);
@@ -101,19 +122,51 @@ public:
bool shouldUseDoubleFormatAccordingToVote()
{
+ // We don't support this facility for arguments, yet.
// FIXME: make this work for arguments.
- return !operandIsArgument(operand()) && ((isNumberPrediction(prediction()) && doubleVoteRatio() >= Options::doubleVoteRatioForDoubleFormat) || isDoublePrediction(prediction()));
+ if (operandIsArgument(operand()))
+ return false;
+
+ // If the variable is not a number prediction, then this doesn't
+ // make any sense.
+ if (!isNumberPrediction(prediction()))
+ return false;
+
+ // If the variable is predicted to hold only doubles, then it's a
+ // no-brainer: it should be formatted as a double.
+ if (isDoublePrediction(prediction()))
+ return true;
+
+ // If the variable is known to be used as an integer, then be safe -
+ // don't force it to be a double.
+ if (flags() & NodeUsedAsInt)
+ return false;
+
+ // If the variable has been voted to become a double, then make it a
+ // double.
+ if (doubleVoteRatio() >= Options::doubleVoteRatioForDoubleFormat)
+ return true;
+
+ return false;
+ }
+
+ DoubleFormatState doubleFormatState()
+ {
+ return find()->m_doubleFormatState;
}
bool shouldUseDoubleFormat()
{
- ASSERT(find() == this);
- return m_shouldUseDoubleFormat;
+ ASSERT(isRoot());
+ return m_doubleFormatState == UsingDoubleFormat;
}
bool tallyVotesForShouldUseDoubleFormat()
{
- ASSERT(find() == this);
+ ASSERT(isRoot());
+
+ if (m_doubleFormatState == CantUseDoubleFormat)
+ return false;
bool newValueOfShouldUseDoubleFormat = shouldUseDoubleFormatAccordingToVote();
if (!newValueOfShouldUseDoubleFormat) {
@@ -122,11 +175,35 @@ public:
return false;
}
- if (m_shouldUseDoubleFormat)
+ if (m_doubleFormatState == UsingDoubleFormat)
return false;
- m_shouldUseDoubleFormat = true;
- mergePrediction(m_prediction, PredictDouble);
+ return DFG::mergeDoubleFormatState(m_doubleFormatState, UsingDoubleFormat);
+ }
+
+ bool mergeDoubleFormatState(DoubleFormatState doubleFormatState)
+ {
+ return DFG::mergeDoubleFormatState(find()->m_doubleFormatState, doubleFormatState);
+ }
+
+ bool makePredictionForDoubleFormat()
+ {
+ ASSERT(isRoot());
+
+ if (m_doubleFormatState != UsingDoubleFormat)
+ return false;
+
+ return mergePrediction(m_prediction, PredictDouble);
+ }
+
+ NodeFlags flags() const { return m_flags; }
+
+ bool mergeFlags(NodeFlags newFlags)
+ {
+ newFlags |= m_flags;
+ if (newFlags == m_flags)
+ return false;
+ m_flags = newFlags;
return true;
}
@@ -138,9 +215,11 @@ private:
VirtualRegister m_local;
PredictedType m_prediction;
+ PredictedType m_argumentAwarePrediction;
+ NodeFlags m_flags;
float m_votes[2];
- bool m_shouldUseDoubleFormat;
+ DoubleFormatState m_doubleFormatState;
};
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
index 255003612..11ac69524 100644
--- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
@@ -64,17 +64,17 @@ public:
#endif
Node& node = m_graph[nodeIndex];
- if (!node.shouldGenerate() || node.op == Phi || node.op == Flush)
+ if (!node.shouldGenerate() || node.op() == Phi || node.op() == Flush)
continue;
// GetLocal nodes are effectively phi nodes in the graph, referencing
// results from prior blocks.
- if (node.op != GetLocal) {
+ if (node.op() != GetLocal) {
// First, call use on all of the current node's children, then
// allocate a VirtualRegister for this node. We do so in this
// order so that if a child is on its last use, and a
// VirtualRegister is freed, then it may be reused for node.
- if (node.flags & NodeHasVarArgs) {
+ if (node.flags() & NodeHasVarArgs) {
for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
scoreBoard.use(m_graph.m_varArgChildren[childIdx]);
} else {
@@ -109,6 +109,14 @@ public:
// for the function (and checked for on entry). Since we perform a new and
// different allocation of temporaries, more registers may now be required.
unsigned calleeRegisters = scoreBoard.highWatermark() + m_graph.m_parameterSlots;
+ size_t inlineCallFrameCount = codeBlock()->inlineCallFrames().size();
+ for (size_t i = 0; i < inlineCallFrameCount; i++) {
+ InlineCallFrame& inlineCallFrame = codeBlock()->inlineCallFrames()[i];
+ CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(&inlineCallFrame);
+ unsigned requiredCalleeRegisters = inlineCallFrame.stackOffset + codeBlock->m_numCalleeRegisters;
+ if (requiredCalleeRegisters > calleeRegisters)
+ calleeRegisters = requiredCalleeRegisters;
+ }
if ((unsigned)codeBlock()->m_numCalleeRegisters < calleeRegisters)
codeBlock()->m_numCalleeRegisters = calleeRegisters;
#if DFG_ENABLE(DEBUG_VERBOSE)