author | Simon Hausmann <simon.hausmann@nokia.com> | 2012-06-25 13:35:59 +0200 |
---|---|---|
committer | Simon Hausmann <simon.hausmann@nokia.com> | 2012-06-25 13:35:59 +0200 |
commit | 79ad030d505ccf79cf10aa9f8189ca3e2f61f6f4 (patch) | |
tree | 0287b1a69d84492c901e8bc820e635e7133809a0 /Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp | |
parent | 682ab87480e7757346802ce7f54cfdbdfeb2339e (diff) | |
download | qtwebkit-79ad030d505ccf79cf10aa9f8189ca3e2f61f6f4.tar.gz | |
Imported WebKit commit c4b613825abd39ac739a47d7b4410468fcef66dc (http://svn.webkit.org/repository/webkit/trunk@121147)
New snapshot that includes Win32 debug build fix (use SVGAllInOne)
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp')
-rw-r--r-- | Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp | 144 |
1 file changed, 122 insertions, 22 deletions
```diff
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 317a08504..75611972e 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -28,6 +28,7 @@
 
 #if ENABLE(DFG_JIT)
 
+#include "ArrayConstructor.h"
 #include "CallLinkStatus.h"
 #include "CodeBlock.h"
 #include "DFGByteCodeCache.h"
@@ -35,6 +36,7 @@
 #include "GetByIdStatus.h"
 #include "MethodCallLinkStatus.h"
 #include "PutByIdStatus.h"
+#include "ResolveGlobalStatus.h"
 #include <wtf/HashMap.h>
 #include <wtf/MathExtras.h>
 
@@ -94,6 +96,10 @@ private:
     void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
     // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
     bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
+    bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
+    void handleGetByOffset(
+        int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
+        bool useInlineStorage, size_t offset);
     void handleGetById(
         int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber,
         const GetByIdStatus&);
@@ -1124,7 +1130,12 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
     ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
 
     NodeIndex callTarget = get(currentInstruction[1].u.operand);
-    enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
+    enum {
+        ConstantFunction,
+        ConstantInternalFunction,
+        LinkedFunction,
+        UnknownFunction
+    } callType;
 
     CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
         m_inlineStackTop->m_profiledBlock, m_currentIndex);
@@ -1147,6 +1158,13 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
                 m_graph.valueOfFunctionConstant(callTarget),
                 m_graph.valueOfFunctionConstant(callTarget)->executable());
 #endif
+    } else if (m_graph.isInternalFunctionConstant(callTarget)) {
+        callType = ConstantInternalFunction;
+#if DFG_ENABLE(DEBUG_VERBOSE)
+        dataLog("Call at [@%lu, bc#%u] has an internal function constant: %p.\n",
+                m_graph.size(), m_currentIndex,
+                m_graph.valueOfInternalFunctionConstant(callTarget));
+#endif
     } else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
         callType = LinkedFunction;
@@ -1179,6 +1197,16 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
         prediction = getPrediction();
         nextOffset += OPCODE_LENGTH(op_call_put_result);
     }
+
+    if (callType == ConstantInternalFunction) {
+        if (handleConstantInternalFunction(usesResult, resultOperand, m_graph.valueOfInternalFunctionConstant(callTarget), registerOffset, argumentCountIncludingThis, prediction, kind))
+            return;
+
+        // Can only handle this using the generic call handler.
+        addCall(interpreter, currentInstruction, op);
+        return;
+    }
+
     JSFunction* expectedFunction;
     Intrinsic intrinsic;
     bool certainAboutExpectedFunction;
@@ -1210,7 +1238,7 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
         } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
             return;
     }
-            
+    
     addCall(interpreter, currentInstruction, op);
 }
 
@@ -1567,6 +1595,60 @@ bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrins
     }
 }
 
+bool ByteCodeParser::handleConstantInternalFunction(
+    bool usesResult, int resultOperand, InternalFunction* function, int registerOffset,
+    int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
+{
+    // If we ever find that we have a lot of internal functions that we specialize for,
+    // then we should probably have some sort of hashtable dispatch, or maybe even
+    // dispatch straight through the MethodTable of the InternalFunction. But for now,
+    // it seems that this case is hit infrequently enough, and the number of functions
+    // we know about is small enough, that having just a linear cascade of if statements
+    // is good enough.
+
+    UNUSED_PARAM(registerOffset); // Remove this once we do more things to the arguments.
+    UNUSED_PARAM(prediction); // Remove this once we do more things.
+    UNUSED_PARAM(kind); // Remove this once we do more things.
+
+    if (function->classInfo() == &ArrayConstructor::s_info) {
+        // We could handle this but don't for now.
+        if (argumentCountIncludingThis != 1)
+            return false;
+
+        setIntrinsicResult(
+            usesResult, resultOperand,
+            addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
+        return true;
+    }
+
+    return false;
+}
+
+void ByteCodeParser::handleGetByOffset(
+    int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
+    bool useInlineStorage, size_t offset)
+{
+    NodeIndex propertyStorage;
+    size_t offsetOffset;
+    if (useInlineStorage) {
+        propertyStorage = base;
+        ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
+        offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
+    } else {
+        propertyStorage = addToGraph(GetPropertyStorage, base);
+        offsetOffset = 0;
+    }
+    set(destinationOperand,
+        addToGraph(
+            GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction),
+            propertyStorage));
+
+    StorageAccessData storageAccessData;
+    storageAccessData.offset = offset + offsetOffset;
+    storageAccessData.identifierNumber = identifierNumber;
+    m_graph.m_storageAccessData.append(storageAccessData);
+}
+
 void ByteCodeParser::handleGetById(
     int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber,
     const GetByIdStatus& getByIdStatus)
@@ -1620,25 +1702,9 @@ void ByteCodeParser::handleGetById(
         return;
     }
 
-    NodeIndex propertyStorage;
-    size_t offsetOffset;
-    if (useInlineStorage) {
-        propertyStorage = base;
-        ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
-        offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
-    } else {
-        propertyStorage = addToGraph(GetPropertyStorage, base);
-        offsetOffset = 0;
-    }
-    set(destinationOperand,
-        addToGraph(
-            GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction),
-            propertyStorage));
-
-    StorageAccessData storageAccessData;
-    storageAccessData.offset = getByIdStatus.offset() + offsetOffset;
-    storageAccessData.identifierNumber = identifierNumber;
-    m_graph.m_storageAccessData.append(storageAccessData);
+    handleGetByOffset(
+        destinationOperand, prediction, base, identifierNumber, useInlineStorage,
+        getByIdStatus.offset());
 }
 
 void ByteCodeParser::prepareToParseBlock()
@@ -2648,10 +2714,39 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         case op_resolve_global: {
             SpeculatedType prediction = getPrediction();
 
+            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[
+                currentInstruction[2].u.operand];
+
+            ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(
+                m_inlineStackTop->m_profiledBlock, m_currentIndex,
+                m_codeBlock->identifier(identifierNumber));
+            if (status.isSimple()) {
+                ASSERT(status.structure());
+
+                NodeIndex globalObject = addStructureTransitionCheck(
+                    m_inlineStackTop->m_codeBlock->globalObject(), status.structure());
+
+                if (status.specificValue()) {
+                    ASSERT(status.specificValue().isCell());
+
+                    set(currentInstruction[1].u.operand,
+                        cellConstant(status.specificValue().asCell()));
+                } else {
+                    handleGetByOffset(
+                        currentInstruction[1].u.operand, prediction, globalObject,
+                        identifierNumber, status.structure()->isUsingInlineStorage(),
+                        status.offset());
+                }
+
+                m_globalResolveNumber++; // Skip over the unused global resolve info.
+
+                NEXT_OPCODE(op_resolve_global);
+            }
+
             NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
             m_graph.m_resolveGlobalData.append(ResolveGlobalData());
             ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
-            data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
+            data.identifierNumber = identifierNumber;
             data.resolveInfoIndex = m_globalResolveNumber++;
             set(currentInstruction[1].u.operand, resolve);
 
@@ -3077,6 +3172,8 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
             }
            m_constantRemap[i] = result.iterator->second;
         }
+        for (unsigned i = 0; i < codeBlock->numberOfGlobalResolveInfos(); ++i)
+            byteCodeParser->m_codeBlock->addGlobalResolveInfo(std::numeric_limits<unsigned>::max());
 
         m_callsiteBlockHeadNeedsLinking = true;
     } else {
@@ -3160,6 +3257,9 @@ void ByteCodeParser::parseCodeBlock()
             ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
             m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
             m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
+            // The first block is definitely an OSR target.
+            if (!m_graph.m_blocks.size())
+                block->isOSRTarget = true;
             m_graph.m_blocks.append(block.release());
             prepareToParseBlock();
         }
```
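For readers unfamiliar with the storage layout, the inline-storage bias that the new `handleGetByOffset` helper computes can be illustrated with a small standalone sketch. This is not WebKit code: `FakeJSObject`, `storageSlotIndex`, and the assumed header size are stand-ins chosen for illustration; only the arithmetic (slot index biased by `sizeof(JSObject) / sizeof(EncodedJSValue)` when the property lives inline) mirrors what the patch does.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-ins for the real WebKit types; the sizes are assumptions for illustration only.
using EncodedJSValue = std::uint64_t;                         // 8-byte value slots
struct FakeJSObject { void* structure; void* inheritorID; };  // assume a 16-byte object header

// Mirrors the offset math in the patch's handleGetByOffset: inline properties sit directly
// after the object header, so their slot index is biased by the number of value-sized slots
// the header occupies; out-of-line properties index the separate storage buffer directly.
size_t storageSlotIndex(bool useInlineStorage, size_t propertyOffset)
{
    size_t offsetOffset = 0;
    if (useInlineStorage) {
        static_assert(sizeof(FakeJSObject) % sizeof(EncodedJSValue) == 0,
                      "header must be a whole number of value slots");
        offsetOffset = sizeof(FakeJSObject) / sizeof(EncodedJSValue);
    }
    return propertyOffset + offsetOffset;
}

int main()
{
    // With the assumed 16-byte header, inline property #3 lands in slot 2 + 3 = 5.
    std::printf("inline slot: %zu\n", storageSlotIndex(true, 3));
    // The same property held out-of-line is simply slot 3 of the property storage.
    std::printf("out-of-line slot: %zu\n", storageSlotIndex(false, 3));
    return 0;
}
```

Factoring this arithmetic into one helper is what lets the patch reuse it from both `handleGetById` and the new `op_resolve_global` fast path.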