summaryrefslogtreecommitdiff
path: root/Source/JavaScriptCore
diff options
context:
space:
mode:
Diffstat (limited to 'Source/JavaScriptCore')
-rw-r--r--Source/JavaScriptCore/API/JSClassRef.cpp2
-rw-r--r--Source/JavaScriptCore/API/WebKitAvailability.h19
-rw-r--r--Source/JavaScriptCore/CMakeLists.txt18
-rw-r--r--Source/JavaScriptCore/ChangeLog3735
-rw-r--r--Source/JavaScriptCore/Configurations/Base.xcconfig5
-rw-r--r--Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig5
-rw-r--r--Source/JavaScriptCore/Configurations/Version.xcconfig2
-rw-r--r--Source/JavaScriptCore/GNUmakefile.am1
-rw-r--r--Source/JavaScriptCore/GNUmakefile.list.am46
-rw-r--r--Source/JavaScriptCore/JSCTypedArrayStubs.h202
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.gypi10
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.order3
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.pri2
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def12
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj52
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops2
-rwxr-xr-xSource/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd1
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj8
-rw-r--r--Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj512
-rw-r--r--Source/JavaScriptCore/Target.pri14
-rw-r--r--Source/JavaScriptCore/assembler/LinkBuffer.h59
-rw-r--r--Source/JavaScriptCore/assembler/MacroAssembler.h61
-rw-r--r--Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h6
-rw-r--r--Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h12
-rw-r--r--Source/JavaScriptCore/assembler/SH4Assembler.h3
-rw-r--r--Source/JavaScriptCore/bytecode/BytecodeConventions.h36
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkStatus.cpp29
-rw-r--r--Source/JavaScriptCore/bytecode/CallLinkStatus.h6
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlock.cpp518
-rw-r--r--Source/JavaScriptCore/bytecode/CodeBlock.h150
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdStatus.cpp40
-rw-r--r--Source/JavaScriptCore/bytecode/GetByIdStatus.h8
-rw-r--r--Source/JavaScriptCore/bytecode/Instruction.h12
-rw-r--r--Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h66
-rw-r--r--Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp100
-rw-r--r--Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h189
-rw-r--r--Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp5
-rw-r--r--Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp69
-rw-r--r--Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h99
-rw-r--r--Source/JavaScriptCore/bytecode/Opcode.cpp28
-rw-r--r--Source/JavaScriptCore/bytecode/Opcode.h9
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp148
-rw-r--r--Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h190
-rw-r--r--Source/JavaScriptCore/bytecode/PredictedType.cpp1
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdStatus.cpp46
-rw-r--r--Source/JavaScriptCore/bytecode/PutByIdStatus.h2
-rw-r--r--Source/JavaScriptCore/bytecode/PutKind.h (renamed from Source/JavaScriptCore/heap/BumpBlock.h)29
-rw-r--r--Source/JavaScriptCore/bytecode/SamplingTool.cpp68
-rw-r--r--Source/JavaScriptCore/bytecode/SamplingTool.h2
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubInfo.cpp16
-rw-r--r--Source/JavaScriptCore/bytecode/StructureStubInfo.h17
-rw-r--r--Source/JavaScriptCore/bytecode/ValueProfile.cpp52
-rw-r--r--Source/JavaScriptCore/bytecode/ValueProfile.h69
-rw-r--r--Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp57
-rw-r--r--Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h3
-rw-r--r--Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp6
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractState.cpp58
-rw-r--r--Source/JavaScriptCore/dfg/DFGAbstractState.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp230
-rw-r--r--Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h51
-rw-r--r--Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h10
-rw-r--r--Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp198
-rw-r--r--Source/JavaScriptCore/dfg/DFGByteCodeParser.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGCFAPhase.cpp132
-rw-r--r--Source/JavaScriptCore/dfg/DFGCFAPhase.h49
-rw-r--r--Source/JavaScriptCore/dfg/DFGCSEPhase.cpp733
-rw-r--r--Source/JavaScriptCore/dfg/DFGCSEPhase.h49
-rw-r--r--Source/JavaScriptCore/dfg/DFGCapabilities.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGCommon.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGDriver.cpp29
-rw-r--r--Source/JavaScriptCore/dfg/DFGGraph.cpp132
-rw-r--r--Source/JavaScriptCore/dfg/DFGGraph.h106
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCompiler.cpp40
-rw-r--r--Source/JavaScriptCore/dfg/DFGJITCompiler.h47
-rw-r--r--Source/JavaScriptCore/dfg/DFGNode.h2
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSREntry.cpp28
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExit.cpp4
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExit.h5
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp19
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp26
-rw-r--r--Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp42
-rw-r--r--Source/JavaScriptCore/dfg/DFGOperations.cpp165
-rw-r--r--Source/JavaScriptCore/dfg/DFGOperations.h9
-rw-r--r--Source/JavaScriptCore/dfg/DFGPhase.cpp (renamed from Source/JavaScriptCore/dfg/DFGPropagator.h)28
-rw-r--r--Source/JavaScriptCore/dfg/DFGPhase.h87
-rw-r--r--Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp709
-rw-r--r--Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h52
-rw-r--r--Source/JavaScriptCore/dfg/DFGPropagator.cpp1743
-rw-r--r--Source/JavaScriptCore/dfg/DFGRegisterBank.h6
-rw-r--r--Source/JavaScriptCore/dfg/DFGRepatch.cpp380
-rw-r--r--Source/JavaScriptCore/dfg/DFGRepatch.h7
-rw-r--r--Source/JavaScriptCore/dfg/DFGScoreBoard.h16
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp250
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h196
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp77
-rw-r--r--Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp91
-rw-r--r--Source/JavaScriptCore/dfg/DFGVariableAccessData.h5
-rw-r--r--Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp104
-rw-r--r--Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h52
-rw-r--r--Source/JavaScriptCore/heap/ConservativeRoots.cpp14
-rw-r--r--Source/JavaScriptCore/heap/ConservativeRoots.h4
-rw-r--r--Source/JavaScriptCore/heap/CopiedAllocator.h106
-rw-r--r--Source/JavaScriptCore/heap/CopiedBlock.h67
-rw-r--r--Source/JavaScriptCore/heap/CopiedSpace.cpp (renamed from Source/JavaScriptCore/heap/BumpSpaceInlineMethods.h)388
-rw-r--r--Source/JavaScriptCore/heap/CopiedSpace.h (renamed from Source/JavaScriptCore/heap/BumpSpace.h)41
-rw-r--r--Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h184
-rw-r--r--Source/JavaScriptCore/heap/GCAssertions.h56
-rw-r--r--Source/JavaScriptCore/heap/Heap.cpp17
-rw-r--r--Source/JavaScriptCore/heap/Heap.h31
-rw-r--r--Source/JavaScriptCore/heap/HeapBlock.h3
-rw-r--r--Source/JavaScriptCore/heap/MarkStack.cpp33
-rw-r--r--Source/JavaScriptCore/heap/MarkStack.h6
-rw-r--r--Source/JavaScriptCore/heap/MarkedAllocator.cpp4
-rw-r--r--Source/JavaScriptCore/heap/MarkedAllocator.h20
-rw-r--r--Source/JavaScriptCore/heap/MarkedBlock.cpp40
-rw-r--r--Source/JavaScriptCore/heap/MarkedBlock.h25
-rw-r--r--Source/JavaScriptCore/heap/MarkedSpace.cpp28
-rw-r--r--Source/JavaScriptCore/heap/MarkedSpace.h56
-rw-r--r--Source/JavaScriptCore/heap/PassWeak.h147
-rw-r--r--Source/JavaScriptCore/heap/SlotVisitor.h4
-rw-r--r--Source/JavaScriptCore/heap/Weak.h100
-rw-r--r--Source/JavaScriptCore/interpreter/AbstractPC.cpp3
-rw-r--r--Source/JavaScriptCore/interpreter/AbstractPC.h2
-rw-r--r--Source/JavaScriptCore/interpreter/CallFrame.cpp47
-rw-r--r--Source/JavaScriptCore/interpreter/CallFrame.h36
-rw-r--r--Source/JavaScriptCore/interpreter/Interpreter.cpp324
-rw-r--r--Source/JavaScriptCore/interpreter/Interpreter.h99
-rw-r--r--Source/JavaScriptCore/interpreter/RegisterFile.h3
-rw-r--r--Source/JavaScriptCore/jit/ExecutableAllocator.h8
-rw-r--r--Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp2
-rw-r--r--Source/JavaScriptCore/jit/HostCallReturnValue.cpp (renamed from Source/JavaScriptCore/heap/BumpSpace.cpp)27
-rw-r--r--Source/JavaScriptCore/jit/HostCallReturnValue.h67
-rw-r--r--Source/JavaScriptCore/jit/JIT.cpp22
-rw-r--r--Source/JavaScriptCore/jit/JIT.h6
-rw-r--r--Source/JavaScriptCore/jit/JITCode.h14
-rw-r--r--Source/JavaScriptCore/jit/JITDriver.h12
-rw-r--r--Source/JavaScriptCore/jit/JITExceptions.cpp2
-rw-r--r--Source/JavaScriptCore/jit/JITInlineMethods.h22
-rw-r--r--Source/JavaScriptCore/jit/JITOpcodes.cpp8
-rw-r--r--Source/JavaScriptCore/jit/JITOpcodes32_64.cpp16
-rw-r--r--Source/JavaScriptCore/jit/JITPropertyAccess.cpp12
-rw-r--r--Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp10
-rw-r--r--Source/JavaScriptCore/jit/JITStubs.cpp88
-rw-r--r--Source/JavaScriptCore/jit/JITStubs.h8
-rw-r--r--Source/JavaScriptCore/jit/JSInterfaceJIT.h2
-rw-r--r--Source/JavaScriptCore/jsc.cpp37
-rw-r--r--Source/JavaScriptCore/llint/LLIntCommon.h49
-rw-r--r--Source/JavaScriptCore/llint/LLIntData.cpp116
-rw-r--r--Source/JavaScriptCore/llint/LLIntData.h93
-rw-r--r--Source/JavaScriptCore/llint/LLIntEntrypoints.cpp86
-rw-r--r--Source/JavaScriptCore/llint/LLIntEntrypoints.h64
-rw-r--r--Source/JavaScriptCore/llint/LLIntExceptions.cpp80
-rw-r--r--Source/JavaScriptCore/llint/LLIntExceptions.h66
-rw-r--r--Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h90
-rw-r--r--Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp84
-rw-r--r--Source/JavaScriptCore/llint/LLIntSlowPaths.cpp1558
-rw-r--r--Source/JavaScriptCore/llint/LLIntSlowPaths.h171
-rw-r--r--Source/JavaScriptCore/llint/LLIntThunks.cpp81
-rw-r--r--Source/JavaScriptCore/llint/LLIntThunks.h52
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter.asm2390
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter.cpp38
-rw-r--r--Source/JavaScriptCore/llint/LowLevelInterpreter.h53
-rw-r--r--Source/JavaScriptCore/offlineasm/armv7.rb1032
-rw-r--r--Source/JavaScriptCore/offlineasm/asm.rb176
-rw-r--r--Source/JavaScriptCore/offlineasm/ast.rb1039
-rw-r--r--Source/JavaScriptCore/offlineasm/backends.rb96
-rw-r--r--Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb146
-rw-r--r--Source/JavaScriptCore/offlineasm/instructions.rb217
-rw-r--r--Source/JavaScriptCore/offlineasm/offsets.rb173
-rw-r--r--Source/JavaScriptCore/offlineasm/opt.rb134
-rw-r--r--Source/JavaScriptCore/offlineasm/parser.rb586
-rw-r--r--Source/JavaScriptCore/offlineasm/registers.rb60
-rw-r--r--Source/JavaScriptCore/offlineasm/self_hash.rb46
-rw-r--r--Source/JavaScriptCore/offlineasm/settings.rb205
-rw-r--r--Source/JavaScriptCore/offlineasm/transform.rb342
-rw-r--r--Source/JavaScriptCore/offlineasm/x86.rb681
-rw-r--r--Source/JavaScriptCore/os-win32/inttypes.h155
-rw-r--r--Source/JavaScriptCore/parser/ASTBuilder.h6
-rw-r--r--Source/JavaScriptCore/parser/Keywords.table11
-rw-r--r--Source/JavaScriptCore/parser/Lexer.cpp7
-rw-r--r--Source/JavaScriptCore/parser/Nodes.h19
-rw-r--r--Source/JavaScriptCore/parser/Parser.cpp3
-rw-r--r--Source/JavaScriptCore/parser/Parser.h13
-rw-r--r--Source/JavaScriptCore/profiler/Profile.cpp11
-rw-r--r--Source/JavaScriptCore/profiler/ProfileNode.cpp19
-rw-r--r--Source/JavaScriptCore/runtime/Arguments.cpp73
-rw-r--r--Source/JavaScriptCore/runtime/Arguments.h1
-rw-r--r--Source/JavaScriptCore/runtime/CodeSpecializationKind.h36
-rw-r--r--Source/JavaScriptCore/runtime/CommonIdentifiers.h1
-rw-r--r--Source/JavaScriptCore/runtime/CommonSlowPaths.h33
-rw-r--r--Source/JavaScriptCore/runtime/DatePrototype.cpp4
-rw-r--r--Source/JavaScriptCore/runtime/Error.cpp29
-rw-r--r--Source/JavaScriptCore/runtime/Error.h5
-rw-r--r--Source/JavaScriptCore/runtime/Executable.cpp80
-rw-r--r--Source/JavaScriptCore/runtime/Executable.h30
-rw-r--r--Source/JavaScriptCore/runtime/ExecutionHarness.h72
-rw-r--r--Source/JavaScriptCore/runtime/JSActivation.h2
-rw-r--r--Source/JavaScriptCore/runtime/JSArray.cpp108
-rw-r--r--Source/JavaScriptCore/runtime/JSArray.h35
-rw-r--r--Source/JavaScriptCore/runtime/JSCell.h30
-rw-r--r--Source/JavaScriptCore/runtime/JSFunction.cpp8
-rw-r--r--Source/JavaScriptCore/runtime/JSFunction.h5
-rw-r--r--Source/JavaScriptCore/runtime/JSGlobalData.cpp26
-rw-r--r--Source/JavaScriptCore/runtime/JSGlobalData.h34
-rw-r--r--Source/JavaScriptCore/runtime/JSGlobalObject.cpp6
-rw-r--r--Source/JavaScriptCore/runtime/JSGlobalObject.h7
-rw-r--r--Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp40
-rw-r--r--Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.h2
-rw-r--r--Source/JavaScriptCore/runtime/JSObject.cpp158
-rw-r--r--Source/JavaScriptCore/runtime/JSObject.h51
-rw-r--r--Source/JavaScriptCore/runtime/JSPropertyNameIterator.h3
-rw-r--r--Source/JavaScriptCore/runtime/JSString.cpp6
-rw-r--r--Source/JavaScriptCore/runtime/JSString.h3
-rw-r--r--Source/JavaScriptCore/runtime/JSTypeInfo.h4
-rw-r--r--Source/JavaScriptCore/runtime/JSValue.cpp6
-rw-r--r--Source/JavaScriptCore/runtime/JSValue.h7
-rw-r--r--Source/JavaScriptCore/runtime/JSVariableObject.h2
-rw-r--r--Source/JavaScriptCore/runtime/LiteralParser.cpp5
-rw-r--r--Source/JavaScriptCore/runtime/ObjectConstructor.cpp126
-rw-r--r--Source/JavaScriptCore/runtime/ObjectPrototype.cpp18
-rw-r--r--Source/JavaScriptCore/runtime/ObjectPrototype.h1
-rw-r--r--Source/JavaScriptCore/runtime/Options.cpp10
-rw-r--r--Source/JavaScriptCore/runtime/Options.h4
-rw-r--r--Source/JavaScriptCore/runtime/PropertyDescriptor.cpp4
-rw-r--r--Source/JavaScriptCore/runtime/RegExp.cpp12
-rw-r--r--Source/JavaScriptCore/runtime/RegExpCache.cpp2
-rw-r--r--Source/JavaScriptCore/runtime/SamplingCounter.cpp6
-rw-r--r--Source/JavaScriptCore/runtime/SamplingCounter.h2
-rw-r--r--Source/JavaScriptCore/runtime/ScopeChain.cpp6
-rw-r--r--Source/JavaScriptCore/runtime/ScopeChain.h3
-rw-r--r--Source/JavaScriptCore/runtime/Structure.cpp46
-rw-r--r--Source/JavaScriptCore/runtime/Structure.h16
-rw-r--r--Source/JavaScriptCore/runtime/StructureChain.h3
-rw-r--r--Source/JavaScriptCore/runtime/StructureTransitionTable.h19
-rw-r--r--Source/JavaScriptCore/runtime/WriteBarrier.h8
-rw-r--r--Source/JavaScriptCore/shell/CMakeLists.txt7
-rw-r--r--Source/JavaScriptCore/tests/mozilla/ecma/String/15.5.4.11-2.js44
-rw-r--r--Source/JavaScriptCore/tools/CodeProfile.cpp12
-rw-r--r--Source/JavaScriptCore/tools/CodeProfiling.cpp19
-rw-r--r--Source/JavaScriptCore/tools/ProfileTreeNode.h4
-rw-r--r--Source/JavaScriptCore/wtf/Assertions.h29
-rw-r--r--Source/JavaScriptCore/wtf/CMakeLists.txt15
-rw-r--r--Source/JavaScriptCore/wtf/Compiler.h3
-rw-r--r--Source/JavaScriptCore/wtf/DataLog.cpp99
-rw-r--r--Source/JavaScriptCore/wtf/DataLog.h46
-rw-r--r--Source/JavaScriptCore/wtf/DoublyLinkedList.h7
-rw-r--r--Source/JavaScriptCore/wtf/HashTable.cpp14
-rw-r--r--Source/JavaScriptCore/wtf/HashTraits.h8
-rw-r--r--Source/JavaScriptCore/wtf/InlineASM.h7
-rw-r--r--Source/JavaScriptCore/wtf/MainThread.cpp40
-rw-r--r--Source/JavaScriptCore/wtf/MainThread.h5
-rw-r--r--Source/JavaScriptCore/wtf/MetaAllocator.cpp2
-rw-r--r--Source/JavaScriptCore/wtf/NullPtr.cpp2
-rw-r--r--Source/JavaScriptCore/wtf/NullPtr.h4
-rw-r--r--Source/JavaScriptCore/wtf/OSAllocatorPosix.cpp2
-rw-r--r--Source/JavaScriptCore/wtf/ParallelJobsGeneric.cpp3
-rw-r--r--Source/JavaScriptCore/wtf/ParallelJobsGeneric.h2
-rw-r--r--Source/JavaScriptCore/wtf/Platform.h40
-rw-r--r--Source/JavaScriptCore/wtf/PlatformEfl.cmake21
-rw-r--r--Source/JavaScriptCore/wtf/SentinelLinkedList.h2
-rw-r--r--Source/JavaScriptCore/wtf/StdLibExtras.h6
-rw-r--r--Source/JavaScriptCore/wtf/ThreadFunctionInvocation.h2
-rw-r--r--Source/JavaScriptCore/wtf/ThreadIdentifierDataPthreads.cpp4
-rw-r--r--Source/JavaScriptCore/wtf/ThreadSpecific.h8
-rw-r--r--Source/JavaScriptCore/wtf/Threading.cpp56
-rw-r--r--Source/JavaScriptCore/wtf/Threading.h4
-rw-r--r--Source/JavaScriptCore/wtf/ThreadingPthreads.cpp32
-rw-r--r--Source/JavaScriptCore/wtf/ThreadingWin.cpp6
-rw-r--r--Source/JavaScriptCore/wtf/Vector.h5
-rw-r--r--Source/JavaScriptCore/wtf/dtoa.cpp534
-rw-r--r--Source/JavaScriptCore/wtf/dtoa.h5
-rw-r--r--Source/JavaScriptCore/wtf/dtoa/utils.h2
-rw-r--r--Source/JavaScriptCore/wtf/gobject/GOwnPtr.cpp5
-rw-r--r--Source/JavaScriptCore/wtf/gobject/GOwnPtr.h1
-rw-r--r--Source/JavaScriptCore/wtf/gobject/GTypedefs.h1
-rw-r--r--Source/JavaScriptCore/wtf/mac/MainThreadMac.mm39
-rw-r--r--Source/JavaScriptCore/wtf/text/StringImpl.h8
-rw-r--r--Source/JavaScriptCore/wtf/text/WTFString.cpp30
-rw-r--r--Source/JavaScriptCore/wtf/text/WTFString.h2
-rw-r--r--Source/JavaScriptCore/wtf/url/api/ParsedURL.cpp94
-rw-r--r--Source/JavaScriptCore/wtf/url/api/ParsedURL.h66
-rw-r--r--Source/JavaScriptCore/wtf/url/api/URLString.h59
-rw-r--r--Source/JavaScriptCore/wtf/url/src/RawURLBuffer.h74
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLBuffer.h140
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.cpp177
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.h65
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLComponent.h81
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLEscape.cpp43
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLEscape.h53
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLParser.h579
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLQueryCanonicalizer.h109
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLSegments.cpp114
-rw-r--r--Source/JavaScriptCore/wtf/url/src/URLSegments.h109
-rw-r--r--Source/JavaScriptCore/yarr/YarrInterpreter.cpp7
-rw-r--r--Source/JavaScriptCore/yarr/YarrJIT.cpp92
295 files changed, 24785 insertions, 4798 deletions
diff --git a/Source/JavaScriptCore/API/JSClassRef.cpp b/Source/JavaScriptCore/API/JSClassRef.cpp
index 0909e9975..298c734ea 100644
--- a/Source/JavaScriptCore/API/JSClassRef.cpp
+++ b/Source/JavaScriptCore/API/JSClassRef.cpp
@@ -222,7 +222,7 @@ JSObject* OpaqueJSClass::prototype(ExecState* exec)
if (!jsClassData.cachedPrototype) {
// Recursive, but should be good enough for our purposes
- jsClassData.cachedPrototype.set(exec->globalData(), JSCallbackObject<JSNonFinalObject>::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->callbackObjectStructure(), prototypeClass, &jsClassData), 0); // set jsClassData as the object's private data, so it can clear our reference on destruction
+ jsClassData.cachedPrototype = PassWeak<JSObject>(exec->globalData(), JSCallbackObject<JSNonFinalObject>::create(exec, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->callbackObjectStructure(), prototypeClass, &jsClassData), 0); // set jsClassData as the object's private data, so it can clear our reference on destruction
if (parentClass) {
if (JSObject* prototype = parentClass->prototype(exec))
jsClassData.cachedPrototype->setPrototype(exec->globalData(), prototype);
diff --git a/Source/JavaScriptCore/API/WebKitAvailability.h b/Source/JavaScriptCore/API/WebKitAvailability.h
index 0e4f091cc..7846058fa 100644
--- a/Source/JavaScriptCore/API/WebKitAvailability.h
+++ b/Source/JavaScriptCore/API/WebKitAvailability.h
@@ -901,4 +901,23 @@
#endif
+
+
+
+
+/*
+ * AVAILABLE_AFTER_WEBKIT_VERSION_5_1
+ *
+ * Used on functions introduced after WebKit 5.1
+ */
+#define AVAILABLE_AFTER_WEBKIT_VERSION_5_1
+
+/* AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_5_1
+ *
+ * Used on declarations introduced in WebKit 1.3,
+ * but later deprecated after WebKit 5.1
+ */
+#define AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_5_1
+
+
#endif /* __WebKitAvailability__ */
diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt
index eaa395e15..2afcbdc65 100644
--- a/Source/JavaScriptCore/CMakeLists.txt
+++ b/Source/JavaScriptCore/CMakeLists.txt
@@ -11,6 +11,7 @@ SET(JavaScriptCore_INCLUDE_DIRECTORIES
"${JAVASCRIPTCORE_DIR}/debugger"
"${JAVASCRIPTCORE_DIR}/interpreter"
"${JAVASCRIPTCORE_DIR}/jit"
+ "${JAVASCRIPTCORE_DIR}/llint"
"${JAVASCRIPTCORE_DIR}/parser"
"${JAVASCRIPTCORE_DIR}/profiler"
"${JAVASCRIPTCORE_DIR}/runtime"
@@ -41,23 +42,28 @@ SET(JavaScriptCore_SOURCES
bytecode/DFGExitProfile.cpp
bytecode/GetByIdStatus.cpp
bytecode/JumpTable.cpp
+ bytecode/LazyOperandValueProfile.cpp
bytecode/MethodCallLinkInfo.cpp
bytecode/MethodCallLinkStatus.cpp
+ bytecode/MethodOfGettingAValueProfile.cpp
bytecode/Opcode.cpp
+ bytecode/PolymorphicPutByIdList.cpp
bytecode/PredictedType.cpp
bytecode/PutByIdStatus.cpp
bytecode/SamplingTool.cpp
bytecode/StructureStubInfo.cpp
- bytecode/ValueProfile.cpp
bytecompiler/BytecodeGenerator.cpp
bytecompiler/NodesCodegen.cpp
dfg/DFGAbstractState.cpp
+ dfg/DFGArithNodeFlagsInferencePhase.cpp
dfg/DFGAssemblyHelpers.cpp
dfg/DFGByteCodeParser.cpp
dfg/DFGCapabilities.cpp
+ dfg/DFGCFAPhase.cpp
dfg/DFGCorrectableJumpPoint.cpp
+ dfg/DFGCSEPhase.cpp
dfg/DFGDriver.cpp
dfg/DFGGraph.cpp
dfg/DFGJITCompiler.cpp
@@ -67,14 +73,16 @@ SET(JavaScriptCore_SOURCES
dfg/DFGOSRExitCompiler32_64.cpp
dfg/DFGOSRExitCompiler64.cpp
dfg/DFGOperations.cpp
- dfg/DFGPropagator.cpp
+ dfg/DFGPhase.cpp
+ dfg/DFGPredictionPropagationPhase.cpp
dfg/DFGRepatch.cpp
dfg/DFGSpeculativeJIT.cpp
dfg/DFGSpeculativeJIT32_64.cpp
dfg/DFGSpeculativeJIT64.cpp
dfg/DFGThunks.cpp
+ dfg/DFGVirtualRegisterAllocationPhase.cpp
- heap/BumpSpace.cpp
+ heap/CopiedSpace.cpp
heap/DFGCodeBlocks.cpp
heap/Heap.cpp
heap/HandleHeap.cpp
@@ -96,6 +104,7 @@ SET(JavaScriptCore_SOURCES
interpreter/RegisterFile.cpp
jit/ExecutableAllocator.cpp
+ jit/HostCallReturnValue.cpp
jit/JITArithmetic32_64.cpp
jit/JITArithmetic.cpp
jit/JITCall32_64.cpp
@@ -210,7 +219,6 @@ SET(JavaScriptCore_SOURCES
yarr/YarrJIT.cpp
yarr/YarrSyntaxChecker.cpp
)
-SET(JavaScriptCore_HEADERS )
SET(JavaScriptCore_LUT_FILES
runtime/ArrayConstructor.cpp
@@ -281,7 +289,7 @@ ELSE ()
ENDIF ()
-INCLUDE_IF_EXISTS(${JAVASCRIPTCORE_DIR}/Platform${PORT}.cmake)
+WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS()
ADD_SUBDIRECTORY(wtf)
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index bbef23628..87b921f0f 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,3 +1,3738 @@
+2012-02-24 Zoltan Herczeg <zherczeg@webkit.org>
+
+ [Qt] Buildfix for "Zero out CopiedBlocks on initialization".
+ https://bugs.webkit.org/show_bug.cgi?id=79199
+
+    Rubber stamped by Csaba Osztrogonác.
+
+ Temporary fix since the new member wastes a little space on
+ 64 bit systems. Although it is harmless, it is only needed
+ for 32 bit systems.
+
+ * heap/CopiedBlock.h:
+ (CopiedBlock):
+
+2012-02-24 Han Hojong <hojong.han@samsung.com>
+
+ Remove useless jump instructions for short circuit
+ https://bugs.webkit.org/show_bug.cgi?id=75602
+
+ Reviewed by Michael Saboff.
+
+ Jump instruction is inserted to make short circuit,
+ however it does nothing but moving to the next instruction.
+ Therefore useless jump instructions are removed,
+ and jump list is moved into the case not for a short circuit,
+ so that only necessary instructions are added to JIT code
+ unless it has a 16 bit pattern character and an 8 bit string.
+
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::generatePatternCharacterGreedy):
+ (JSC::Yarr::YarrGenerator::backtrackPatternCharacterNonGreedy):
+
+2012-02-24 Sheriff Bot <webkit.review.bot@gmail.com>
+
+ Unreviewed, rolling out r108731.
+ http://trac.webkit.org/changeset/108731
+ https://bugs.webkit.org/show_bug.cgi?id=79464
+
+ Broke Chromium Win tests (Requested by bashi on #webkit).
+
+ * wtf/Platform.h:
+
+2012-02-24 Andrew Lo <anlo@rim.com>
+
+ [BlackBerry] Enable requestAnimationFrame
+ https://bugs.webkit.org/show_bug.cgi?id=79408
+
+ Use timer implementation of requestAnimationFrame on BlackBerry.
+
+ Reviewed by Rob Buis.
+
+ * wtf/Platform.h:
+
+2012-02-24 Mathias Bynens <mathias@qiwi.be>
+
+ `\u200c` and `\u200d` should be allowed in IdentifierPart, as per ES5
+ https://bugs.webkit.org/show_bug.cgi?id=78908
+
+ Add additional checks for zero-width non-joiner (0x200C) and
+ zero-width joiner (0x200D) characters.
+
+ Reviewed by Michael Saboff.
+
+ * parser/Lexer.cpp:
+ (JSC::isNonASCIIIdentPart)
+ * runtime/LiteralParser.cpp:
+ (JSC::::Lexer::lexIdentifier)
+
+2012-02-23 Kenichi Ishibashi <bashi@chromium.org>
+
+ Adding WebSocket per-frame DEFLATE extension
+ https://bugs.webkit.org/show_bug.cgi?id=77522
+
+ Added USE(ZLIB) flag.
+
+ Reviewed by Kent Tamura.
+
+ * wtf/Platform.h:
+
+2012-02-23 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Zero out CopiedBlocks on initialization
+ https://bugs.webkit.org/show_bug.cgi?id=79199
+
+ Reviewed by Filip Pizlo.
+
+ Made CopyBlocks zero their payloads during construction. This allows
+ JSArray to avoid having to manually clear its backing store upon allocation
+ and also alleviates any future pain with regard to the garbage collector trying
+ to mark what it thinks are values in what is actually uninitialized memory.
+
+ * heap/CopiedBlock.h:
+ (JSC::CopiedBlock::CopiedBlock):
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::finishCreation):
+ (JSC::JSArray::tryFinishCreationUninitialized):
+ (JSC::JSArray::increaseVectorLength):
+ (JSC::JSArray::unshiftCountSlowCase):
+
+2012-02-23 Oliver Hunt <oliver@apple.com>
+
+ Make Interpreter::getStackTrace be able to generate the line number for the top callframe if none is provided
+ https://bugs.webkit.org/show_bug.cgi?id=79407
+
+ Reviewed by Gavin Barraclough.
+
+ Outside of exception handling, we don't know what our source line number is. This
+ change allows us to pass -1 is as the initial line number, and get the correct line
+ number in the resultant stack trace. We can't completely elide the initial line
+ number (yet) due to some idiosyncrasies of the exception handling machinery.
+
+ * interpreter/Interpreter.cpp:
+ (JSC::getLineNumberForCallFrame):
+ (JSC):
+ (JSC::Interpreter::getStackTrace):
+
+2012-02-22 Filip Pizlo <fpizlo@apple.com>
+
+ DFG OSR exit value profiling should have graceful handling of local variables and arguments
+ https://bugs.webkit.org/show_bug.cgi?id=79310
+
+ Reviewed by Gavin Barraclough.
+
+ Previously, if we OSR exited because a prediction in a local was wrong, we'd
+ only realize what the true type of the local was if the regular value profiling
+ kicked in and told us. Unless the local was block-locally copy propagated, in
+ which case we'd know from an OSR exit profile.
+
+ This patch adds OSR exit profiling to all locals and arguments. Now, if we OSR
+ exit because of a mispredicted local or argument type, we'll know what the type of
+ the local or argument should be immediately upon exiting.
+
+ The way that local variable OSR exit profiling works is that we now have a lazily
+ added set of OSR-exit-only value profiles for exit sites that are BadType and that
+ cited a GetLocal as their value source. The value profiles are only added if the
+ OSR exit is taken, and are keyed by CodeBlock, bytecode index of the GetLocal, and
+ operand. The look-up is performed by querying the
+ CompressedLazyOperandValueProfileHolder in the CodeBlock, using a key that contains
+ the bytecode index and the operand. Because the value profiles are added at random
+ times, they are not sorted; instead they are just stored in an arbitrarily-ordered
+ SegmentedVector. Look-ups are made fast by "decompressing": the DFG::ByteCodeParser
+ creates a LazyOperandValueProfileParser, which turns the
+ CompressedLazyOperandValueProfileHolder's contents into a HashMap for the duration
+ of DFG parsing.
+
+ Previously, OSR exits had a pointer to the ValueProfile that had the specFailBucket
+ into which values observed during OSR exit would be placed. Now it uses a lazy
+ thunk for a ValueProfile. I call this the MethodOfGettingAValueProfile. It may
+ either contain a ValueProfile inside it (which works for previous uses of OSR exit
+ profiling) or it may just have knowledge of how to go about creating the
+ LazyOperandValueProfile in the case that the OSR exit is actually taken. This
+ ensures that we never have to create NumOperands*NumBytecodeIndices*NumCodeBlocks
+ value profiling buckets unless we actually did OSR exit on every single operand,
+ in every single instruction, in each code block (that's probably unlikely).
+
+ This appears to be neutral on the major benchmarks, but is a double-digit speed-up
+ on code deliberately written to have data flow that spans basic blocks and where
+ the code exhibits post-optimization polymorphism in a local variable.
+
+ * CMakeLists.txt:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Target.pri:
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::stronglyVisitStrongReferences):
+ * bytecode/CodeBlock.h:
+ (CodeBlock):
+ (JSC::CodeBlock::lazyOperandValueProfiles):
+ * bytecode/LazyOperandValueProfile.cpp: Added.
+ (JSC):
+ (JSC::CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder):
+ (JSC::CompressedLazyOperandValueProfileHolder::~CompressedLazyOperandValueProfileHolder):
+ (JSC::CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions):
+ (JSC::CompressedLazyOperandValueProfileHolder::add):
+ (JSC::LazyOperandValueProfileParser::LazyOperandValueProfileParser):
+ (JSC::LazyOperandValueProfileParser::~LazyOperandValueProfileParser):
+ (JSC::LazyOperandValueProfileParser::getIfPresent):
+ (JSC::LazyOperandValueProfileParser::prediction):
+ * bytecode/LazyOperandValueProfile.h: Added.
+ (JSC):
+ (LazyOperandValueProfileKey):
+ (JSC::LazyOperandValueProfileKey::LazyOperandValueProfileKey):
+ (JSC::LazyOperandValueProfileKey::operator!):
+ (JSC::LazyOperandValueProfileKey::operator==):
+ (JSC::LazyOperandValueProfileKey::hash):
+ (JSC::LazyOperandValueProfileKey::bytecodeOffset):
+ (JSC::LazyOperandValueProfileKey::operand):
+ (JSC::LazyOperandValueProfileKey::isHashTableDeletedValue):
+ (JSC::LazyOperandValueProfileKeyHash::hash):
+ (JSC::LazyOperandValueProfileKeyHash::equal):
+ (LazyOperandValueProfileKeyHash):
+ (WTF):
+ (JSC::LazyOperandValueProfile::LazyOperandValueProfile):
+ (LazyOperandValueProfile):
+ (JSC::LazyOperandValueProfile::key):
+ (CompressedLazyOperandValueProfileHolder):
+ (LazyOperandValueProfileParser):
+ * bytecode/MethodOfGettingAValueProfile.cpp: Added.
+ (JSC):
+ (JSC::MethodOfGettingAValueProfile::fromLazyOperand):
+ (JSC::MethodOfGettingAValueProfile::getSpecFailBucket):
+ * bytecode/MethodOfGettingAValueProfile.h: Added.
+ (JSC):
+ (MethodOfGettingAValueProfile):
+ (JSC::MethodOfGettingAValueProfile::MethodOfGettingAValueProfile):
+ (JSC::MethodOfGettingAValueProfile::operator!):
+ * bytecode/ValueProfile.cpp: Removed.
+ * bytecode/ValueProfile.h:
+ (JSC):
+ (ValueProfileBase):
+ (JSC::ValueProfileBase::ValueProfileBase):
+ (JSC::ValueProfileBase::dump):
+ (JSC::ValueProfileBase::computeUpdatedPrediction):
+ (JSC::MinimalValueProfile::MinimalValueProfile):
+ (ValueProfileWithLogNumberOfBuckets):
+ (JSC::ValueProfileWithLogNumberOfBuckets::ValueProfileWithLogNumberOfBuckets):
+ (JSC::ValueProfile::ValueProfile):
+ (JSC::getValueProfileBytecodeOffset):
+ (JSC::getRareCaseProfileBytecodeOffset):
+ * dfg/DFGByteCodeParser.cpp:
+ (ByteCodeParser):
+ (JSC::DFG::ByteCodeParser::injectLazyOperandPrediction):
+ (JSC::DFG::ByteCodeParser::getLocal):
+ (JSC::DFG::ByteCodeParser::getArgument):
+ (InlineStackEntry):
+ (JSC::DFG::ByteCodeParser::fixVariableAccessPredictions):
+ (DFG):
+ (JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
+ (JSC::DFG::ByteCodeParser::parse):
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compile):
+ * dfg/DFGGraph.h:
+ (JSC::DFG::Graph::valueProfileFor):
+ (JSC::DFG::Graph::methodOfGettingAValueProfileFor):
+ (Graph):
+ * dfg/DFGNode.h:
+ (Node):
+ * dfg/DFGOSRExit.cpp:
+ (JSC::DFG::OSRExit::OSRExit):
+ * dfg/DFGOSRExit.h:
+ (OSRExit):
+ * dfg/DFGOSRExitCompiler32_64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+ * dfg/DFGOSRExitCompiler64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+ * dfg/DFGPhase.cpp:
+ (JSC::DFG::Phase::beginPhase):
+ (JSC::DFG::Phase::endPhase):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::checkArgumentTypes):
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::speculationCheck):
+ * dfg/DFGVariableAccessData.h:
+ (JSC::DFG::VariableAccessData::nonUnifiedPrediction):
+ (VariableAccessData):
+
+2012-02-23 Filip Pizlo <fpizlo@apple.com>
+
+ Build fix.
+
+ * llint/LLIntOffsetsExtractor.cpp:
+
+2012-02-23 Kevin Ollivier <kevino@theolliviers.com>
+
+ [wx] Build fix, disable LLINT for now and fix ENABLE defines for it.
+
+ * llint/LLIntOffsetsExtractor.cpp:
+ * wtf/Platform.h:
+
+2012-02-23 Kevin Ollivier <kevino@theolliviers.com>
+
+ [wx] Build fix for non-Mac wx builds.
+
+ * runtime/DatePrototype.cpp:
+
+2012-02-22 Filip Pizlo <fpizlo@apple.com>
+
+ DFG's logic for emitting a Flush is too convoluted and contains an inaccurate comment
+ https://bugs.webkit.org/show_bug.cgi?id=79334
+
+ Reviewed by Oliver Hunt.
+
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::getLocal):
+ (JSC::DFG::ByteCodeParser::getArgument):
+ (JSC::DFG::ByteCodeParser::flush):
+
+2012-02-23 Gavin Barraclough <barraclough@apple.com>
+
+ Object.isSealed / Object.isFrozen don't work for native objects
+ https://bugs.webkit.org/show_bug.cgi?id=79331
+
+ Reviewed by Sam Weinig.
+
+ Need to inspect all properties, including static ones.
+ This exposes a couple of bugs in Array & Arguments:
+ - getOwnPropertyDescriptor doesn't correctly report the writable attribute of array length.
+ - Arguments object's defineOwnProperty does not handle callee/caller/length correctly.
+
+ * runtime/Arguments.cpp:
+ (JSC::Arguments::defineOwnProperty):
+ - Add handling for callee/caller/length.
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::getOwnPropertyDescriptor):
+ - report length's writability correctly.
+ * runtime/ObjectConstructor.cpp:
+ (JSC::objectConstructorSeal):
+ (JSC::objectConstructorFreeze):
+ (JSC::objectConstructorIsSealed):
+ (JSC::objectConstructorIsFrozen):
+ - Add spec-based implementation for non-final objects.
+
+2012-02-23 Gavin Barraclough <barraclough@apple.com>
+
+ pop of array hole should get from the prototype chain
+ https://bugs.webkit.org/show_bug.cgi?id=79338
+
+ Reviewed by Sam Weinig.
+
+ * runtime/JSArray.cpp:
+ (JSC::JSArray::pop):
+ - If the fast vector case fails, more closely follow the spec.
+
+2012-02-23 Yong Li <yoli@rim.com>
+
+ JSString::outOfMemory() should ASSERT(isRope()) rather than !isRope()
+ https://bugs.webkit.org/show_bug.cgi?id=79268
+
+ Reviewed by Michael Saboff.
+
+ resolveRope() is the only caller of outOfMemory(), and it calls outOfMemory()
+ after it fails to allocate a buffer for m_value. So outOfMemory() should assert
+ isRope() rather than !isRope().
+
+ * runtime/JSString.cpp:
+ (JSC::JSString::outOfMemory):
+
+2012-02-23 Patrick Gansterer <paroga@webkit.org>
+
+ [CMake] Add WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS macro
+ https://bugs.webkit.org/show_bug.cgi?id=79371
+
+ Reviewed by Daniel Bates.
+
+ * CMakeLists.txt:
+ * shell/CMakeLists.txt:
+ * wtf/CMakeLists.txt:
+
+2012-02-23 Aron Rosenberg <arosenberg@logitech.com>
+
+ Fix the PRI macros used in WTF::String formatters to be compatible with Qt and Visual Studio 2005 and newer.
+ https://bugs.webkit.org/show_bug.cgi?id=76210
+
+ Add compile time check for Visual Studio 2005 or newer.
+
+ Reviewed by Simon Hausmann.
+
+ * os-win32/inttypes.h:
+
+2012-02-22 Gavin Barraclough <barraclough@apple.com>
+
+ Implement [[DefineOwnProperty]] for the arguments object
+ https://bugs.webkit.org/show_bug.cgi?id=79309
+
+ Reviewed by Sam Weinig.
+
+ * runtime/Arguments.cpp:
+ (JSC::Arguments::deletePropertyByIndex):
+ (JSC::Arguments::deleteProperty):
+ - Deleting an argument should also delete the copy on the object, if any.
+ (JSC::Arguments::defineOwnProperty):
+ - Defining a property may override the live mapping.
+ * runtime/Arguments.h:
+ (Arguments):
+
+2012-02-22 Gavin Barraclough <barraclough@apple.com>
+
+ Fix Object.freeze for non-final objects.
+ https://bugs.webkit.org/show_bug.cgi?id=79286
+
+ Reviewed by Oliver Hunt.
+
+ For vanilla objects we implement this with a single transition, for objects
+ with special properties we should just follow the spec defined algorithm.
+
+ * runtime/JSArray.cpp:
+ (JSC::SparseArrayValueMap::put):
+ - this does need to handle inextensible objects.
+ * runtime/ObjectConstructor.cpp:
+ (JSC::objectConstructorSeal):
+ (JSC::objectConstructorFreeze):
+ - Implement spec defined algorithm for non-final objects.
+ * runtime/Structure.cpp:
+ (JSC::Structure::Structure):
+ (JSC::Structure::freezeTransition):
+ - freeze should set m_hasReadOnlyOrGetterSetterPropertiesExcludingProto.
+ * runtime/Structure.h:
+ (JSC::Structure::hasReadOnlyOrGetterSetterPropertiesExcludingProto):
+ (JSC::Structure::setHasGetterSetterProperties):
+ (JSC::Structure::setContainsReadOnlyProperties):
+ (Structure):
+ - renamed m_hasReadOnlyOrGetterSetterPropertiesExcludingProto.
+
+2012-02-22 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Allocations from CopiedBlocks should always be 8-byte aligned
+ https://bugs.webkit.org/show_bug.cgi?id=79271
+
+ Reviewed by Geoffrey Garen.
+
+ * heap/CopiedAllocator.h:
+ (JSC::CopiedAllocator::allocate):
+ * heap/CopiedBlock.h: Changed to add padding so that the start of the payload is always
+ guaranteed to be 8 byte aligned on both 64- and 32-bit platforms.
+ (CopiedBlock):
+ * heap/CopiedSpace.cpp: Changed all assertions of isPointerAligned to is8ByteAligned.
+ (JSC::CopiedSpace::tryAllocateOversize):
+ (JSC::CopiedSpace::getFreshBlock):
+ * heap/CopiedSpaceInlineMethods.h:
+ (JSC::CopiedSpace::allocateFromBlock):
+ * runtime/JSArray.h:
+ (ArrayStorage): Added padding for ArrayStorage to make sure that it is always 8 byte
+ aligned on both 64- and 32-bit platforms.
+ * wtf/StdLibExtras.h:
+ (WTF::is8ByteAligned): Added new utility function that functions similarly to the
+ way isPointerAligned does, but it just always checks for 8 byte alignment.
+ (WTF):
+
+2012-02-22 Sheriff Bot <webkit.review.bot@gmail.com>
+
+ Unreviewed, rolling out r108456.
+ http://trac.webkit.org/changeset/108456
+ https://bugs.webkit.org/show_bug.cgi?id=79223
+
+ Broke fast/regex/pcre-test-4.html and cannot find anyone on
+ IRC (Requested by zherczeg on #webkit).
+
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::backtrackPatternCharacterGreedy):
+
+2012-02-22 Sheriff Bot <webkit.review.bot@gmail.com>
+
+ Unreviewed, rolling out r108468.
+ http://trac.webkit.org/changeset/108468
+ https://bugs.webkit.org/show_bug.cgi?id=79219
+
+ Broke Chromium Win release build (Requested by bashi on
+ #webkit).
+
+ * wtf/Platform.h:
+
+2012-02-22 Kenichi Ishibashi <bashi@chromium.org>
+
+ Adding WebSocket per-frame DEFLATE extension
+ https://bugs.webkit.org/show_bug.cgi?id=77522
+
+ Added USE(ZLIB) flag.
+
+ Reviewed by Kent Tamura.
+
+ * wtf/Platform.h:
+
+2012-02-22 Hojong Han <hojong.han@samsung.com>
+
+ Short circuit fixed for a 16 bit pattern character and an 8 bit string.
+ https://bugs.webkit.org/show_bug.cgi?id=75602
+
+ Reviewed by Gavin Barraclough.
+
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::backtrackPatternCharacterGreedy):
+
+2012-02-21 Filip Pizlo <fpizlo@apple.com>
+
+ Build fix for systems with case sensitive disks.
+
+ * llint/LLIntOfflineAsmConfig.h:
+
+2012-02-21 Filip Pizlo <fpizlo@apple.com>
+
+ JSC should be a triple-tier VM
+ https://bugs.webkit.org/show_bug.cgi?id=75812
+ <rdar://problem/10079694>
+
+ Reviewed by Gavin Barraclough.
+
+ Implemented an interpreter that uses the JIT's calling convention. This
+ interpreter is called LLInt, or the Low Level Interpreter. JSC will now
+ will start by executing code in LLInt and will only tier up to the old
+ JIT after the code is proven hot.
+
+ LLInt is written in a modified form of our macro assembly. This new macro
+ assembly is compiled by an offline assembler (see offlineasm), which
+ implements many modern conveniences such as a Turing-complete CPS-based
+ macro language and direct access to relevant C++ type information
+ (basically offsets of fields and sizes of structs/classes).
+
+ Code executing in LLInt appears to the rest of the JSC world "as if" it
+ were executing in the old JIT. Hence, things like exception handling and
+ cross-execution-engine calls just work and require pretty much no
+ additional overhead.
+
+ This interpreter is 2-2.5x faster than our old interpreter on SunSpider,
+ V8, and Kraken. With triple-tiering turned on, we're neutral on SunSpider,
+ V8, and Kraken, but appear to get a double-digit improvement on real-world
+ websites due to a huge reduction in the amount of JIT'ing.
+
+ * CMakeLists.txt:
+ * GNUmakefile.am:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.pri:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops:
+ * JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Target.pri:
+ * assembler/LinkBuffer.h:
+ * assembler/MacroAssemblerCodeRef.h:
+ (MacroAssemblerCodePtr):
+ (JSC::MacroAssemblerCodePtr::createFromExecutableAddress):
+ * bytecode/BytecodeConventions.h: Added.
+ * bytecode/CallLinkStatus.cpp:
+ (JSC::CallLinkStatus::computeFromLLInt):
+ (JSC):
+ (JSC::CallLinkStatus::computeFor):
+ * bytecode/CallLinkStatus.h:
+ (JSC::CallLinkStatus::isSet):
+ (JSC::CallLinkStatus::operator!):
+ (CallLinkStatus):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::dump):
+ (JSC::CodeBlock::CodeBlock):
+ (JSC::CodeBlock::~CodeBlock):
+ (JSC::CodeBlock::finalizeUnconditionally):
+ (JSC::CodeBlock::stronglyVisitStrongReferences):
+ (JSC):
+ (JSC::CodeBlock::unlinkCalls):
+ (JSC::CodeBlock::unlinkIncomingCalls):
+ (JSC::CodeBlock::bytecodeOffset):
+ (JSC::ProgramCodeBlock::jettison):
+ (JSC::EvalCodeBlock::jettison):
+ (JSC::FunctionCodeBlock::jettison):
+ (JSC::ProgramCodeBlock::jitCompileImpl):
+ (JSC::EvalCodeBlock::jitCompileImpl):
+ (JSC::FunctionCodeBlock::jitCompileImpl):
+ * bytecode/CodeBlock.h:
+ (JSC):
+ (CodeBlock):
+ (JSC::CodeBlock::baselineVersion):
+ (JSC::CodeBlock::linkIncomingCall):
+ (JSC::CodeBlock::bytecodeOffset):
+ (JSC::CodeBlock::jitCompile):
+ (JSC::CodeBlock::hasOptimizedReplacement):
+ (JSC::CodeBlock::addPropertyAccessInstruction):
+ (JSC::CodeBlock::addGlobalResolveInstruction):
+ (JSC::CodeBlock::addLLIntCallLinkInfo):
+ (JSC::CodeBlock::addGlobalResolveInfo):
+ (JSC::CodeBlock::numberOfMethodCallLinkInfos):
+ (JSC::CodeBlock::valueProfilePredictionForBytecodeOffset):
+ (JSC::CodeBlock::likelyToTakeSlowCase):
+ (JSC::CodeBlock::couldTakeSlowCase):
+ (JSC::CodeBlock::likelyToTakeSpecialFastCase):
+ (JSC::CodeBlock::likelyToTakeDeepestSlowCase):
+ (JSC::CodeBlock::likelyToTakeAnySlowCase):
+ (JSC::CodeBlock::addFrequentExitSite):
+ (JSC::CodeBlock::dontJITAnytimeSoon):
+ (JSC::CodeBlock::jitAfterWarmUp):
+ (JSC::CodeBlock::jitSoon):
+ (JSC::CodeBlock::llintExecuteCounter):
+ (ProgramCodeBlock):
+ (EvalCodeBlock):
+ (FunctionCodeBlock):
+ * bytecode/GetByIdStatus.cpp:
+ (JSC::GetByIdStatus::computeFromLLInt):
+ (JSC):
+ (JSC::GetByIdStatus::computeFor):
+ * bytecode/GetByIdStatus.h:
+ (JSC::GetByIdStatus::GetByIdStatus):
+ (JSC::GetByIdStatus::wasSeenInJIT):
+ (GetByIdStatus):
+ * bytecode/Instruction.h:
+ (JSC):
+ (JSC::Instruction::Instruction):
+ (Instruction):
+ * bytecode/LLIntCallLinkInfo.h: Added.
+ (JSC):
+ (JSC::LLIntCallLinkInfo::LLIntCallLinkInfo):
+ (LLIntCallLinkInfo):
+ (JSC::LLIntCallLinkInfo::~LLIntCallLinkInfo):
+ (JSC::LLIntCallLinkInfo::isLinked):
+ (JSC::LLIntCallLinkInfo::unlink):
+ * bytecode/MethodCallLinkStatus.cpp:
+ (JSC::MethodCallLinkStatus::computeFor):
+ * bytecode/Opcode.cpp:
+ (JSC):
+ * bytecode/Opcode.h:
+ (JSC):
+ (JSC::padOpcodeName):
+ * bytecode/PutByIdStatus.cpp:
+ (JSC::PutByIdStatus::computeFromLLInt):
+ (JSC):
+ (JSC::PutByIdStatus::computeFor):
+ * bytecode/PutByIdStatus.h:
+ (PutByIdStatus):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::emitResolve):
+ (JSC::BytecodeGenerator::emitResolveWithBase):
+ (JSC::BytecodeGenerator::emitGetById):
+ (JSC::BytecodeGenerator::emitPutById):
+ (JSC::BytecodeGenerator::emitDirectPutById):
+ (JSC::BytecodeGenerator::emitCall):
+ (JSC::BytecodeGenerator::emitConstruct):
+ (JSC::BytecodeGenerator::emitCatch):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::getPredictionWithoutOSRExit):
+ (JSC::DFG::ByteCodeParser::handleInlining):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGCapabilities.h:
+ (JSC::DFG::canCompileOpcode):
+ * dfg/DFGOSRExitCompiler.cpp:
+ * dfg/DFGOperations.cpp:
+ * heap/Heap.h:
+ (JSC):
+ (JSC::Heap::firstAllocatorWithoutDestructors):
+ (Heap):
+ * heap/MarkStack.cpp:
+ (JSC::visitChildren):
+ * heap/MarkedAllocator.h:
+ (JSC):
+ (MarkedAllocator):
+ * heap/MarkedSpace.h:
+ (JSC):
+ (MarkedSpace):
+ (JSC::MarkedSpace::firstAllocator):
+ * interpreter/CallFrame.cpp:
+ (JSC):
+ (JSC::CallFrame::bytecodeOffsetForNonDFGCode):
+ (JSC::CallFrame::setBytecodeOffsetForNonDFGCode):
+ (JSC::CallFrame::currentVPC):
+ (JSC::CallFrame::setCurrentVPC):
+ (JSC::CallFrame::trueCallerFrame):
+ * interpreter/CallFrame.h:
+ (JSC::ExecState::hasReturnPC):
+ (JSC::ExecState::clearReturnPC):
+ (ExecState):
+ (JSC::ExecState::bytecodeOffsetForNonDFGCode):
+ (JSC::ExecState::currentVPC):
+ (JSC::ExecState::setCurrentVPC):
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::Interpreter):
+ (JSC::Interpreter::~Interpreter):
+ (JSC):
+ (JSC::Interpreter::initialize):
+ (JSC::Interpreter::isOpcode):
+ (JSC::Interpreter::unwindCallFrame):
+ (JSC::getCallerInfo):
+ (JSC::Interpreter::privateExecute):
+ (JSC::Interpreter::retrieveLastCaller):
+ * interpreter/Interpreter.h:
+ (JSC):
+ (Interpreter):
+ (JSC::Interpreter::getOpcode):
+ (JSC::Interpreter::getOpcodeID):
+ (JSC::Interpreter::classicEnabled):
+ * interpreter/RegisterFile.h:
+ (JSC):
+ (RegisterFile):
+ * jit/ExecutableAllocator.h:
+ (JSC):
+ * jit/HostCallReturnValue.cpp: Added.
+ (JSC):
+ (JSC::getHostCallReturnValueWithExecState):
+ * jit/HostCallReturnValue.h: Added.
+ (JSC):
+ (JSC::initializeHostCallReturnValue):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ (JSC::JIT::privateCompileSlowCases):
+ (JSC::JIT::privateCompile):
+ * jit/JITCode.h:
+ (JSC::JITCode::isOptimizingJIT):
+ (JITCode):
+ (JSC::JITCode::isBaselineCode):
+ (JSC::JITCode::JITCode):
+ * jit/JITDriver.h:
+ (JSC::jitCompileIfAppropriate):
+ (JSC::jitCompileFunctionIfAppropriate):
+ * jit/JITExceptions.cpp:
+ (JSC::jitThrow):
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::updateTopCallFrame):
+ * jit/JITStubs.cpp:
+ (JSC::DEFINE_STUB_FUNCTION):
+ (JSC):
+ * jit/JITStubs.h:
+ (JSC):
+ * jit/JSInterfaceJIT.h:
+ * llint: Added.
+ * llint/LLIntCommon.h: Added.
+ * llint/LLIntData.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::Data::Data):
+ (JSC::LLInt::Data::performAssertions):
+ (JSC::LLInt::Data::~Data):
+ * llint/LLIntData.h: Added.
+ (JSC):
+ (LLInt):
+ (Data):
+ (JSC::LLInt::Data::exceptionInstructions):
+ (JSC::LLInt::Data::opcodeMap):
+ (JSC::LLInt::Data::performAssertions):
+ * llint/LLIntEntrypoints.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::getFunctionEntrypoint):
+ (JSC::LLInt::getEvalEntrypoint):
+ (JSC::LLInt::getProgramEntrypoint):
+ * llint/LLIntEntrypoints.h: Added.
+ (JSC):
+ (LLInt):
+ (JSC::LLInt::getEntrypoint):
+ * llint/LLIntExceptions.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::interpreterThrowInCaller):
+ (JSC::LLInt::returnToThrowForThrownException):
+ (JSC::LLInt::returnToThrow):
+ (JSC::LLInt::callToThrow):
+ * llint/LLIntExceptions.h: Added.
+ (JSC):
+ (LLInt):
+ * llint/LLIntOfflineAsmConfig.h: Added.
+ * llint/LLIntOffsetsExtractor.cpp: Added.
+ (JSC):
+ (LLIntOffsetsExtractor):
+ (JSC::LLIntOffsetsExtractor::dummy):
+ (main):
+ * llint/LLIntSlowPaths.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::llint_trace_operand):
+ (JSC::LLInt::llint_trace_value):
+ (JSC::LLInt::LLINT_SLOW_PATH_DECL):
+ (JSC::LLInt::traceFunctionPrologue):
+ (JSC::LLInt::shouldJIT):
+ (JSC::LLInt::entryOSR):
+ (JSC::LLInt::resolveGlobal):
+ (JSC::LLInt::getByVal):
+ (JSC::LLInt::handleHostCall):
+ (JSC::LLInt::setUpCall):
+ (JSC::LLInt::genericCall):
+ * llint/LLIntSlowPaths.h: Added.
+ (JSC):
+ (LLInt):
+ * llint/LLIntThunks.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::generateThunkWithJumpTo):
+ (JSC::LLInt::functionForCallEntryThunkGenerator):
+ (JSC::LLInt::functionForConstructEntryThunkGenerator):
+ (JSC::LLInt::functionForCallArityCheckThunkGenerator):
+ (JSC::LLInt::functionForConstructArityCheckThunkGenerator):
+ (JSC::LLInt::evalEntryThunkGenerator):
+ (JSC::LLInt::programEntryThunkGenerator):
+ * llint/LLIntThunks.h: Added.
+ (JSC):
+ (LLInt):
+ * llint/LowLevelInterpreter.asm: Added.
+ * llint/LowLevelInterpreter.cpp: Added.
+ * llint/LowLevelInterpreter.h: Added.
+ * offlineasm: Added.
+ * offlineasm/armv7.rb: Added.
+ * offlineasm/asm.rb: Added.
+ * offlineasm/ast.rb: Added.
+ * offlineasm/backends.rb: Added.
+ * offlineasm/generate_offset_extractor.rb: Added.
+ * offlineasm/instructions.rb: Added.
+ * offlineasm/offset_extractor_constants.rb: Added.
+ * offlineasm/offsets.rb: Added.
+ * offlineasm/opt.rb: Added.
+ * offlineasm/parser.rb: Added.
+ * offlineasm/registers.rb: Added.
+ * offlineasm/self_hash.rb: Added.
+ * offlineasm/settings.rb: Added.
+ * offlineasm/transform.rb: Added.
+ * offlineasm/x86.rb: Added.
+ * runtime/CodeSpecializationKind.h: Added.
+ (JSC):
+ * runtime/CommonSlowPaths.h:
+ (JSC::CommonSlowPaths::arityCheckFor):
+ (CommonSlowPaths):
+ * runtime/Executable.cpp:
+ (JSC::jettisonCodeBlock):
+ (JSC):
+ (JSC::EvalExecutable::jitCompile):
+ (JSC::samplingDescription):
+ (JSC::EvalExecutable::compileInternal):
+ (JSC::ProgramExecutable::jitCompile):
+ (JSC::ProgramExecutable::compileInternal):
+ (JSC::FunctionExecutable::baselineCodeBlockFor):
+ (JSC::FunctionExecutable::jitCompileForCall):
+ (JSC::FunctionExecutable::jitCompileForConstruct):
+ (JSC::FunctionExecutable::compileForCallInternal):
+ (JSC::FunctionExecutable::compileForConstructInternal):
+ * runtime/Executable.h:
+ (JSC):
+ (EvalExecutable):
+ (ProgramExecutable):
+ (FunctionExecutable):
+ (JSC::FunctionExecutable::jitCompileFor):
+ * runtime/ExecutionHarness.h: Added.
+ (JSC):
+ (JSC::prepareForExecution):
+ (JSC::prepareFunctionForExecution):
+ * runtime/JSArray.h:
+ (JSC):
+ (JSArray):
+ * runtime/JSCell.h:
+ (JSC):
+ (JSCell):
+ * runtime/JSFunction.h:
+ (JSC):
+ (JSFunction):
+ * runtime/JSGlobalData.cpp:
+ (JSC::JSGlobalData::JSGlobalData):
+ * runtime/JSGlobalData.h:
+ (JSC):
+ (JSGlobalData):
+ * runtime/JSGlobalObject.h:
+ (JSC):
+ (JSGlobalObject):
+ * runtime/JSObject.h:
+ (JSC):
+ (JSObject):
+ (JSFinalObject):
+ * runtime/JSPropertyNameIterator.h:
+ (JSC):
+ (JSPropertyNameIterator):
+ * runtime/JSString.h:
+ (JSC):
+ (JSString):
+ * runtime/JSTypeInfo.h:
+ (JSC):
+ (TypeInfo):
+ * runtime/JSValue.cpp:
+ (JSC::JSValue::description):
+ * runtime/JSValue.h:
+ (LLInt):
+ (JSValue):
+ * runtime/JSVariableObject.h:
+ (JSC):
+ (JSVariableObject):
+ * runtime/Options.cpp:
+ (Options):
+ (JSC::Options::initializeOptions):
+ * runtime/Options.h:
+ (Options):
+ * runtime/ScopeChain.h:
+ (JSC):
+ (ScopeChainNode):
+ * runtime/Structure.cpp:
+ (JSC::Structure::addPropertyTransition):
+ * runtime/Structure.h:
+ (JSC):
+ (Structure):
+ * runtime/StructureChain.h:
+ (JSC):
+ (StructureChain):
+ * wtf/InlineASM.h:
+ * wtf/Platform.h:
+ * wtf/SentinelLinkedList.h:
+ (SentinelLinkedList):
+ (WTF::SentinelLinkedList::isEmpty):
+ * wtf/text/StringImpl.h:
+ (JSC):
+ (StringImpl):
+
+2012-02-21 Oliver Hunt <oliver@apple.com>
+
+ Unbreak double-typed arrays on ARMv7
+ https://bugs.webkit.org/show_bug.cgi?id=79177
+
+ Reviewed by Gavin Barraclough.
+
+ The existing code had completely broken address arithmetic.
+
+ * JSCTypedArrayStubs.h:
+ (JSC):
+ * assembler/MacroAssemblerARMv7.h:
+ (JSC::MacroAssemblerARMv7::storeDouble):
+ (JSC::MacroAssemblerARMv7::storeFloat):
+
+2012-02-21 Gavin Barraclough <barraclough@apple.com>
+
+ Should be able to reconfigure a non-configurable property as read-only
+ https://bugs.webkit.org/show_bug.cgi?id=79170
+
+ Reviewed by Sam Weinig.
+
+ See ES5.1 8.12.9 10.a.i - the spec prohibits making a read-only property writable,
+ but does not inhibit making a writable property read-only.
+
+ * runtime/JSGlobalData.cpp:
+ (JSC::JSGlobalData::JSGlobalData):
+ * runtime/JSGlobalData.h:
+ (JSC::JSGlobalData::setInDefineOwnProperty):
+ (JSGlobalData):
+ (JSC::JSGlobalData::isInDefineOwnProperty):
+ - Added flag, tracking whether we are in JSObject::defineOwnProperty.
+ * runtime/JSObject.cpp:
+ (JSC::JSObject::deleteProperty):
+ (DefineOwnPropertyScope):
+ - Always allow properties to be deleted by DefineOwnProperty - assume it knows what it is doing!
+ (JSC::DefineOwnPropertyScope::DefineOwnPropertyScope):
+ (JSC::DefineOwnPropertyScope::~DefineOwnPropertyScope):
+ - Added RAII helper.
+ (JSC::JSObject::defineOwnProperty):
+ - Track on the globalData when we are in this method.
+
+2012-02-21 Oliver Hunt <oliver@apple.com>
+
+ Make TypedArrays be available in commandline jsc
+ https://bugs.webkit.org/show_bug.cgi?id=79163
+
+ Reviewed by Gavin Barraclough.
+
+ Adds a compile time option to have jsc support a basic implementation
+ of the TypedArrays available in WebCore. This lets us test the typed
+ array logic in the JIT without having to build WebCore.
+
+ * JSCTypedArrayStubs.h: Added.
+ (JSC):
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * jsc.cpp:
+ (GlobalObject::finishCreation):
+ (GlobalObject):
+ (GlobalObject::addConstructableFunction):
+ * runtime/JSGlobalData.h:
+ (JSGlobalData):
+
+2012-02-21 Tom Sepez <tsepez@chromium.org>
+
+ equalIgnoringNullity() only comparing half the bytes for equality
+ https://bugs.webkit.org/show_bug.cgi?id=79135
+
+ Reviewed by Adam Barth.
+
+ * wtf/text/StringImpl.h:
+ (WTF::equalIgnoringNullity):
+
+2012-02-21 Roland Takacs <takacs.roland@stud.u-szeged.hu>
+
+ Unnecessary preprocessor macros in MainThread.h/cpp
+ https://bugs.webkit.org/show_bug.cgi?id=79083
+
+ Removed invalid/wrong PLATFORM(WINDOWS) preprocessor macro.
+
+ * wtf/MainThread.cpp:
+ (WTF):
+ * wtf/MainThread.h:
+ (WTF):
+
+2012-02-21 Sam Weinig <sam@webkit.org>
+
+ Attempt to fix the Snow Leopard build.
+
+ * Configurations/Base.xcconfig:
+
+2012-02-21 Sam Weinig <sam@webkit.org>
+
+ Use libc++ when building with Clang on Mac
+ https://bugs.webkit.org/show_bug.cgi?id=78981
+
+ Reviewed by Dan Bernstein.
+
+ * Configurations/Base.xcconfig:
+
+2012-02-21 Adam Roben <aroben@apple.com>
+
+ Roll out r108309, r108323, and r108326
+
+ They broke the 32-bit Lion build.
+
+ Original bug is <http://webkit.org/b/75812> <rdar://problem/10079694>.
+
+ * CMakeLists.txt:
+ * GNUmakefile.am:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.pri:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops:
+ * JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Target.pri:
+ * assembler/LinkBuffer.h:
+ * assembler/MacroAssemblerCodeRef.h:
+ * bytecode/BytecodeConventions.h: Removed.
+ * bytecode/CallLinkStatus.cpp:
+ * bytecode/CallLinkStatus.h:
+ * bytecode/CodeBlock.cpp:
+ * bytecode/CodeBlock.h:
+ * bytecode/GetByIdStatus.cpp:
+ * bytecode/GetByIdStatus.h:
+ * bytecode/Instruction.h:
+ * bytecode/LLIntCallLinkInfo.h: Removed.
+ * bytecode/MethodCallLinkStatus.cpp:
+ * bytecode/Opcode.cpp:
+ * bytecode/Opcode.h:
+ * bytecode/PutByIdStatus.cpp:
+ * bytecode/PutByIdStatus.h:
+ * bytecompiler/BytecodeGenerator.cpp:
+ * dfg/DFGByteCodeParser.cpp:
+ * dfg/DFGCapabilities.h:
+ * dfg/DFGOSRExitCompiler.cpp:
+ * dfg/DFGOperations.cpp:
+ * heap/Heap.h:
+ * heap/MarkStack.cpp:
+ * heap/MarkedAllocator.h:
+ * heap/MarkedSpace.h:
+ * interpreter/CallFrame.cpp:
+ * interpreter/CallFrame.h:
+ * interpreter/Interpreter.cpp:
+ * interpreter/Interpreter.h:
+ * interpreter/RegisterFile.h:
+ * jit/ExecutableAllocator.h:
+ * jit/HostCallReturnValue.cpp: Removed.
+ * jit/HostCallReturnValue.h: Removed.
+ * jit/JIT.cpp:
+ * jit/JITCode.h:
+ * jit/JITDriver.h:
+ * jit/JITExceptions.cpp:
+ * jit/JITInlineMethods.h:
+ * jit/JITStubs.cpp:
+ * jit/JITStubs.h:
+ * jit/JSInterfaceJIT.h:
+ * llint/LLIntCommon.h: Removed.
+ * llint/LLIntData.cpp: Removed.
+ * llint/LLIntData.h: Removed.
+ * llint/LLIntEntrypoints.cpp: Removed.
+ * llint/LLIntEntrypoints.h: Removed.
+ * llint/LLIntExceptions.cpp: Removed.
+ * llint/LLIntExceptions.h: Removed.
+ * llint/LLIntOfflineAsmConfig.h: Removed.
+ * llint/LLIntOffsetsExtractor.cpp: Removed.
+ * llint/LLIntSlowPaths.cpp: Removed.
+ * llint/LLIntSlowPaths.h: Removed.
+ * llint/LLIntThunks.cpp: Removed.
+ * llint/LLIntThunks.h: Removed.
+ * llint/LowLevelInterpreter.asm: Removed.
+ * llint/LowLevelInterpreter.cpp: Removed.
+ * llint/LowLevelInterpreter.h: Removed.
+ * offlineasm/armv7.rb: Removed.
+ * offlineasm/asm.rb: Removed.
+ * offlineasm/ast.rb: Removed.
+ * offlineasm/backends.rb: Removed.
+ * offlineasm/generate_offset_extractor.rb: Removed.
+ * offlineasm/instructions.rb: Removed.
+ * offlineasm/offset_extractor_constants.rb: Removed.
+ * offlineasm/offsets.rb: Removed.
+ * offlineasm/opt.rb: Removed.
+ * offlineasm/parser.rb: Removed.
+ * offlineasm/registers.rb: Removed.
+ * offlineasm/self_hash.rb: Removed.
+ * offlineasm/settings.rb: Removed.
+ * offlineasm/transform.rb: Removed.
+ * offlineasm/x86.rb: Removed.
+ * runtime/CodeSpecializationKind.h: Removed.
+ * runtime/CommonSlowPaths.h:
+ * runtime/Executable.cpp:
+ * runtime/Executable.h:
+ * runtime/ExecutionHarness.h: Removed.
+ * runtime/JSArray.h:
+ * runtime/JSCell.h:
+ * runtime/JSFunction.h:
+ * runtime/JSGlobalData.cpp:
+ * runtime/JSGlobalData.h:
+ * runtime/JSGlobalObject.h:
+ * runtime/JSObject.h:
+ * runtime/JSPropertyNameIterator.h:
+ * runtime/JSString.h:
+ * runtime/JSTypeInfo.h:
+ * runtime/JSValue.cpp:
+ * runtime/JSValue.h:
+ * runtime/JSVariableObject.h:
+ * runtime/Options.cpp:
+ * runtime/Options.h:
+ * runtime/ScopeChain.h:
+ * runtime/Structure.cpp:
+ * runtime/Structure.h:
+ * runtime/StructureChain.h:
+ * wtf/InlineASM.h:
+ * wtf/Platform.h:
+ * wtf/SentinelLinkedList.h:
+ * wtf/text/StringImpl.h:
+
+2012-02-21 Gustavo Noronha Silva <kov@debian.org> and Bob Tracy <rct@frus.com>
+
+ Does not build on IA64, SPARC and Alpha
+ https://bugs.webkit.org/show_bug.cgi?id=79047
+
+ Rubber-stamped by Kent Tamura.
+
+ * wtf/dtoa/utils.h: these architectures also have correct double
+ operations, so add them to the appropriate side of the check.
+
+2012-02-21 Filip Pizlo <fpizlo@apple.com>
+
+ Fix massive crashes in all tests introduced by previous build fix, and fix non-DFG build.
+ https://bugs.webkit.org/show_bug.cgi?id=75812
+
+ Reviewed by Csaba Osztrogonác.
+
+ * dfg/DFGOperations.cpp:
+ (JSC):
+ * jit/HostCallReturnValue.h:
+ (JSC::initializeHostCallReturnValue):
+
+2012-02-21 Filip Pizlo <fpizlo@apple.com>
+
+ Attempted build fix for ELF platforms.
+
+ * dfg/DFGOperations.cpp:
+ (JSC):
+ (JSC::getHostCallReturnValueWithExecState):
+ * jit/HostCallReturnValue.cpp:
+ (JSC):
+ * jit/HostCallReturnValue.h:
+ (JSC::initializeHostCallReturnValue):
+
+2012-02-20 Filip Pizlo <fpizlo@apple.com>
+
+ JSC should be a triple-tier VM
+ https://bugs.webkit.org/show_bug.cgi?id=75812
+ <rdar://problem/10079694>
+
+ Reviewed by Gavin Barraclough.
+
+ Implemented an interpreter that uses the JIT's calling convention. This
+ interpreter is called LLInt, or the Low Level Interpreter. JSC will now
+ start by executing code in LLInt and will only tier up to the old
+ JIT after the code is proven hot.
+
+ LLInt is written in a modified form of our macro assembly. This new macro
+ assembly is compiled by an offline assembler (see offlineasm), which
+ implements many modern conveniences such as a Turing-complete CPS-based
+ macro language and direct access to relevant C++ type information
+ (basically offsets of fields and sizes of structs/classes).
+
+ Code executing in LLInt appears to the rest of the JSC world "as if" it
+ were executing in the old JIT. Hence, things like exception handling and
+ cross-execution-engine calls just work and require pretty much no
+ additional overhead.
+
+ This interpreter is 2-2.5x faster than our old interpreter on SunSpider,
+ V8, and Kraken. With triple-tiering turned on, we're neutral on SunSpider,
+ V8, and Kraken, but appear to get a double-digit improvement on real-world
+ websites due to a huge reduction in the amount of JIT'ing.
+
+ * CMakeLists.txt:
+ * GNUmakefile.am:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.pri:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops:
+ * JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Target.pri:
+ * assembler/LinkBuffer.h:
+ * assembler/MacroAssemblerCodeRef.h:
+ (MacroAssemblerCodePtr):
+ (JSC::MacroAssemblerCodePtr::createFromExecutableAddress):
+ * bytecode/BytecodeConventions.h: Added.
+ * bytecode/CallLinkStatus.cpp:
+ (JSC::CallLinkStatus::computeFromLLInt):
+ (JSC):
+ (JSC::CallLinkStatus::computeFor):
+ * bytecode/CallLinkStatus.h:
+ (JSC::CallLinkStatus::isSet):
+ (JSC::CallLinkStatus::operator!):
+ (CallLinkStatus):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::dump):
+ (JSC::CodeBlock::CodeBlock):
+ (JSC::CodeBlock::~CodeBlock):
+ (JSC::CodeBlock::finalizeUnconditionally):
+ (JSC::CodeBlock::stronglyVisitStrongReferences):
+ (JSC):
+ (JSC::CodeBlock::unlinkCalls):
+ (JSC::CodeBlock::unlinkIncomingCalls):
+ (JSC::CodeBlock::bytecodeOffset):
+ (JSC::ProgramCodeBlock::jettison):
+ (JSC::EvalCodeBlock::jettison):
+ (JSC::FunctionCodeBlock::jettison):
+ (JSC::ProgramCodeBlock::jitCompileImpl):
+ (JSC::EvalCodeBlock::jitCompileImpl):
+ (JSC::FunctionCodeBlock::jitCompileImpl):
+ * bytecode/CodeBlock.h:
+ (JSC):
+ (CodeBlock):
+ (JSC::CodeBlock::baselineVersion):
+ (JSC::CodeBlock::linkIncomingCall):
+ (JSC::CodeBlock::bytecodeOffset):
+ (JSC::CodeBlock::jitCompile):
+ (JSC::CodeBlock::hasOptimizedReplacement):
+ (JSC::CodeBlock::addPropertyAccessInstruction):
+ (JSC::CodeBlock::addGlobalResolveInstruction):
+ (JSC::CodeBlock::addLLIntCallLinkInfo):
+ (JSC::CodeBlock::addGlobalResolveInfo):
+ (JSC::CodeBlock::numberOfMethodCallLinkInfos):
+ (JSC::CodeBlock::valueProfilePredictionForBytecodeOffset):
+ (JSC::CodeBlock::likelyToTakeSlowCase):
+ (JSC::CodeBlock::couldTakeSlowCase):
+ (JSC::CodeBlock::likelyToTakeSpecialFastCase):
+ (JSC::CodeBlock::likelyToTakeDeepestSlowCase):
+ (JSC::CodeBlock::likelyToTakeAnySlowCase):
+ (JSC::CodeBlock::addFrequentExitSite):
+ (JSC::CodeBlock::dontJITAnytimeSoon):
+ (JSC::CodeBlock::jitAfterWarmUp):
+ (JSC::CodeBlock::jitSoon):
+ (JSC::CodeBlock::llintExecuteCounter):
+ (ProgramCodeBlock):
+ (EvalCodeBlock):
+ (FunctionCodeBlock):
+ * bytecode/GetByIdStatus.cpp:
+ (JSC::GetByIdStatus::computeFromLLInt):
+ (JSC):
+ (JSC::GetByIdStatus::computeFor):
+ * bytecode/GetByIdStatus.h:
+ (JSC::GetByIdStatus::GetByIdStatus):
+ (JSC::GetByIdStatus::wasSeenInJIT):
+ (GetByIdStatus):
+ * bytecode/Instruction.h:
+ (JSC):
+ (JSC::Instruction::Instruction):
+ (Instruction):
+ * bytecode/LLIntCallLinkInfo.h: Added.
+ (JSC):
+ (JSC::LLIntCallLinkInfo::LLIntCallLinkInfo):
+ (LLIntCallLinkInfo):
+ (JSC::LLIntCallLinkInfo::~LLIntCallLinkInfo):
+ (JSC::LLIntCallLinkInfo::isLinked):
+ (JSC::LLIntCallLinkInfo::unlink):
+ * bytecode/MethodCallLinkStatus.cpp:
+ (JSC::MethodCallLinkStatus::computeFor):
+ * bytecode/Opcode.cpp:
+ (JSC):
+ * bytecode/Opcode.h:
+ (JSC):
+ (JSC::padOpcodeName):
+ * bytecode/PutByIdStatus.cpp:
+ (JSC::PutByIdStatus::computeFromLLInt):
+ (JSC):
+ (JSC::PutByIdStatus::computeFor):
+ * bytecode/PutByIdStatus.h:
+ (PutByIdStatus):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::emitResolve):
+ (JSC::BytecodeGenerator::emitResolveWithBase):
+ (JSC::BytecodeGenerator::emitGetById):
+ (JSC::BytecodeGenerator::emitPutById):
+ (JSC::BytecodeGenerator::emitDirectPutById):
+ (JSC::BytecodeGenerator::emitCall):
+ (JSC::BytecodeGenerator::emitConstruct):
+ (JSC::BytecodeGenerator::emitCatch):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::getPredictionWithoutOSRExit):
+ (JSC::DFG::ByteCodeParser::handleInlining):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGCapabilities.h:
+ (JSC::DFG::canCompileOpcode):
+ * dfg/DFGOSRExitCompiler.cpp:
+ * dfg/DFGOperations.cpp:
+ * heap/Heap.h:
+ (JSC):
+ (JSC::Heap::firstAllocatorWithoutDestructors):
+ (Heap):
+ * heap/MarkStack.cpp:
+ (JSC::visitChildren):
+ * heap/MarkedAllocator.h:
+ (JSC):
+ (MarkedAllocator):
+ * heap/MarkedSpace.h:
+ (JSC):
+ (MarkedSpace):
+ (JSC::MarkedSpace::firstAllocator):
+ * interpreter/CallFrame.cpp:
+ (JSC):
+ (JSC::CallFrame::bytecodeOffsetForNonDFGCode):
+ (JSC::CallFrame::setBytecodeOffsetForNonDFGCode):
+ (JSC::CallFrame::currentVPC):
+ (JSC::CallFrame::setCurrentVPC):
+ (JSC::CallFrame::trueCallerFrame):
+ * interpreter/CallFrame.h:
+ (JSC::ExecState::hasReturnPC):
+ (JSC::ExecState::clearReturnPC):
+ (ExecState):
+ (JSC::ExecState::bytecodeOffsetForNonDFGCode):
+ (JSC::ExecState::currentVPC):
+ (JSC::ExecState::setCurrentVPC):
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::Interpreter):
+ (JSC::Interpreter::~Interpreter):
+ (JSC):
+ (JSC::Interpreter::initialize):
+ (JSC::Interpreter::isOpcode):
+ (JSC::Interpreter::unwindCallFrame):
+ (JSC::getCallerInfo):
+ (JSC::Interpreter::privateExecute):
+ (JSC::Interpreter::retrieveLastCaller):
+ * interpreter/Interpreter.h:
+ (JSC):
+ (Interpreter):
+ (JSC::Interpreter::getOpcode):
+ (JSC::Interpreter::getOpcodeID):
+ (JSC::Interpreter::classicEnabled):
+ * interpreter/RegisterFile.h:
+ (JSC):
+ (RegisterFile):
+ * jit/ExecutableAllocator.h:
+ (JSC):
+ * jit/HostCallReturnValue.cpp: Added.
+ (JSC):
+ (JSC::getHostCallReturnValueWithExecState):
+ * jit/HostCallReturnValue.h: Added.
+ (JSC):
+ (JSC::initializeHostCallReturnValue):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ (JSC::JIT::privateCompileSlowCases):
+ (JSC::JIT::privateCompile):
+ * jit/JITCode.h:
+ (JSC::JITCode::isOptimizingJIT):
+ (JITCode):
+ (JSC::JITCode::isBaselineCode):
+ (JSC::JITCode::JITCode):
+ * jit/JITDriver.h:
+ (JSC::jitCompileIfAppropriate):
+ (JSC::jitCompileFunctionIfAppropriate):
+ * jit/JITExceptions.cpp:
+ (JSC::jitThrow):
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::updateTopCallFrame):
+ * jit/JITStubs.cpp:
+ (JSC::DEFINE_STUB_FUNCTION):
+ (JSC):
+ * jit/JITStubs.h:
+ (JSC):
+ * jit/JSInterfaceJIT.h:
+ * llint: Added.
+ * llint/LLIntCommon.h: Added.
+ * llint/LLIntData.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::Data::Data):
+ (JSC::LLInt::Data::performAssertions):
+ (JSC::LLInt::Data::~Data):
+ * llint/LLIntData.h: Added.
+ (JSC):
+ (LLInt):
+ (Data):
+ (JSC::LLInt::Data::exceptionInstructions):
+ (JSC::LLInt::Data::opcodeMap):
+ (JSC::LLInt::Data::performAssertions):
+ * llint/LLIntEntrypoints.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::getFunctionEntrypoint):
+ (JSC::LLInt::getEvalEntrypoint):
+ (JSC::LLInt::getProgramEntrypoint):
+ * llint/LLIntEntrypoints.h: Added.
+ (JSC):
+ (LLInt):
+ (JSC::LLInt::getEntrypoint):
+ * llint/LLIntExceptions.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::interpreterThrowInCaller):
+ (JSC::LLInt::returnToThrowForThrownException):
+ (JSC::LLInt::returnToThrow):
+ (JSC::LLInt::callToThrow):
+ * llint/LLIntExceptions.h: Added.
+ (JSC):
+ (LLInt):
+ * llint/LLIntOfflineAsmConfig.h: Added.
+ * llint/LLIntOffsetsExtractor.cpp: Added.
+ (JSC):
+ (LLIntOffsetsExtractor):
+ (JSC::LLIntOffsetsExtractor::dummy):
+ (main):
+ * llint/LLIntSlowPaths.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::llint_trace_operand):
+ (JSC::LLInt::llint_trace_value):
+ (JSC::LLInt::LLINT_SLOW_PATH_DECL):
+ (JSC::LLInt::traceFunctionPrologue):
+ (JSC::LLInt::shouldJIT):
+ (JSC::LLInt::entryOSR):
+ (JSC::LLInt::resolveGlobal):
+ (JSC::LLInt::getByVal):
+ (JSC::LLInt::handleHostCall):
+ (JSC::LLInt::setUpCall):
+ (JSC::LLInt::genericCall):
+ * llint/LLIntSlowPaths.h: Added.
+ (JSC):
+ (LLInt):
+ * llint/LLIntThunks.cpp: Added.
+ (LLInt):
+ (JSC::LLInt::generateThunkWithJumpTo):
+ (JSC::LLInt::functionForCallEntryThunkGenerator):
+ (JSC::LLInt::functionForConstructEntryThunkGenerator):
+ (JSC::LLInt::functionForCallArityCheckThunkGenerator):
+ (JSC::LLInt::functionForConstructArityCheckThunkGenerator):
+ (JSC::LLInt::evalEntryThunkGenerator):
+ (JSC::LLInt::programEntryThunkGenerator):
+ * llint/LLIntThunks.h: Added.
+ (JSC):
+ (LLInt):
+ * llint/LowLevelInterpreter.asm: Added.
+ * llint/LowLevelInterpreter.cpp: Added.
+ * llint/LowLevelInterpreter.h: Added.
+ * offlineasm: Added.
+ * offlineasm/armv7.rb: Added.
+ * offlineasm/asm.rb: Added.
+ * offlineasm/ast.rb: Added.
+ * offlineasm/backends.rb: Added.
+ * offlineasm/generate_offset_extractor.rb: Added.
+ * offlineasm/instructions.rb: Added.
+ * offlineasm/offset_extractor_constants.rb: Added.
+ * offlineasm/offsets.rb: Added.
+ * offlineasm/opt.rb: Added.
+ * offlineasm/parser.rb: Added.
+ * offlineasm/registers.rb: Added.
+ * offlineasm/self_hash.rb: Added.
+ * offlineasm/settings.rb: Added.
+ * offlineasm/transform.rb: Added.
+ * offlineasm/x86.rb: Added.
+ * runtime/CodeSpecializationKind.h: Added.
+ (JSC):
+ * runtime/CommonSlowPaths.h:
+ (JSC::CommonSlowPaths::arityCheckFor):
+ (CommonSlowPaths):
+ * runtime/Executable.cpp:
+ (JSC::jettisonCodeBlock):
+ (JSC):
+ (JSC::EvalExecutable::jitCompile):
+ (JSC::samplingDescription):
+ (JSC::EvalExecutable::compileInternal):
+ (JSC::ProgramExecutable::jitCompile):
+ (JSC::ProgramExecutable::compileInternal):
+ (JSC::FunctionExecutable::baselineCodeBlockFor):
+ (JSC::FunctionExecutable::jitCompileForCall):
+ (JSC::FunctionExecutable::jitCompileForConstruct):
+ (JSC::FunctionExecutable::compileForCallInternal):
+ (JSC::FunctionExecutable::compileForConstructInternal):
+ * runtime/Executable.h:
+ (JSC):
+ (EvalExecutable):
+ (ProgramExecutable):
+ (FunctionExecutable):
+ (JSC::FunctionExecutable::jitCompileFor):
+ * runtime/ExecutionHarness.h: Added.
+ (JSC):
+ (JSC::prepareForExecution):
+ (JSC::prepareFunctionForExecution):
+ * runtime/JSArray.h:
+ (JSC):
+ (JSArray):
+ * runtime/JSCell.h:
+ (JSC):
+ (JSCell):
+ * runtime/JSFunction.h:
+ (JSC):
+ (JSFunction):
+ * runtime/JSGlobalData.cpp:
+ (JSC::JSGlobalData::JSGlobalData):
+ * runtime/JSGlobalData.h:
+ (JSC):
+ (JSGlobalData):
+ * runtime/JSGlobalObject.h:
+ (JSC):
+ (JSGlobalObject):
+ * runtime/JSObject.h:
+ (JSC):
+ (JSObject):
+ (JSFinalObject):
+ * runtime/JSPropertyNameIterator.h:
+ (JSC):
+ (JSPropertyNameIterator):
+ * runtime/JSString.h:
+ (JSC):
+ (JSString):
+ * runtime/JSTypeInfo.h:
+ (JSC):
+ (TypeInfo):
+ * runtime/JSValue.cpp:
+ (JSC::JSValue::description):
+ * runtime/JSValue.h:
+ (LLInt):
+ (JSValue):
+ * runtime/JSVariableObject.h:
+ (JSC):
+ (JSVariableObject):
+ * runtime/Options.cpp:
+ (Options):
+ (JSC::Options::initializeOptions):
+ * runtime/Options.h:
+ (Options):
+ * runtime/ScopeChain.h:
+ (JSC):
+ (ScopeChainNode):
+ * runtime/Structure.cpp:
+ (JSC::Structure::addPropertyTransition):
+ * runtime/Structure.h:
+ (JSC):
+ (Structure):
+ * runtime/StructureChain.h:
+ (JSC):
+ (StructureChain):
+ * wtf/InlineASM.h:
+ * wtf/Platform.h:
+ * wtf/SentinelLinkedList.h:
+ (SentinelLinkedList):
+ (WTF::SentinelLinkedList::isEmpty):
+ * wtf/text/StringImpl.h:
+ (JSC):
+ (StringImpl):
+
+2012-02-20 Filip Pizlo <fpizlo@apple.com>
+
+ Unreviewed, rolling out http://trac.webkit.org/changeset/108291
+ It completely broke the 32-bit JIT.
+
+ * heap/CopiedAllocator.h:
+ * heap/CopiedSpace.h:
+ (CopiedSpace):
+ * heap/Heap.h:
+ (JSC::Heap::allocatorForObjectWithDestructor):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileSlowCases):
+ * jit/JIT.h:
+ (JIT):
+ * jit/JITInlineMethods.h:
+ (JSC):
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::emit_op_new_array):
+ * runtime/JSArray.cpp:
+ (JSC::storageSize):
+ (JSC):
+ * runtime/JSArray.h:
+ (ArrayStorage):
+ (JSArray):
+
+2012-02-20 Gavin Barraclough <barraclough@apple.com>
+
+ [[Put]] should throw if prototype chain contains a readonly property.
+ https://bugs.webkit.org/show_bug.cgi?id=79069
+
+ Reviewed by Oliver Hunt.
+
+ Currently we only check the base of the put, not the prototype chain.
+ Fold this check in with the test for accessors.
+
+ * runtime/JSObject.cpp:
+ (JSC::JSObject::put):
+ - Updated to test all objects in the prototype chain for readonly properties.
+ (JSC::JSObject::putDirectAccessor):
+ (JSC::putDescriptor):
+ - Record the presence of readonly properties on the structure.
+ * runtime/Structure.cpp:
+ (JSC::Structure::Structure):
+ - hasGetterSetterPropertiesExcludingProto expanded to hasReadOnlyOrGetterSetterPropertiesExcludingProto.
+ * runtime/Structure.h:
+ (JSC::Structure::hasReadOnlyOrGetterSetterPropertiesExcludingProto):
+ (JSC::Structure::setHasGetterSetterProperties):
+ - hasGetterSetterPropertiesExcludingProto expanded to hasReadOnlyOrGetterSetterPropertiesExcludingProto.
+ (JSC::Structure::setContainsReadOnlyProperties):
+ - Added.
+
+2012-02-20 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Implement fast path for op_new_array in the baseline JIT
+ https://bugs.webkit.org/show_bug.cgi?id=78612
+
+ Reviewed by Filip Pizlo.
+
+ * heap/CopiedAllocator.h:
+ (CopiedAllocator): Friended the JIT to allow access to m_currentOffset.
+ * heap/CopiedSpace.h:
+ (CopiedSpace): Friended the JIT to allow access to the allocator.
+ (JSC::CopiedSpace::allocator):
+ * heap/Heap.h:
+ (JSC::Heap::storageAllocator): Added a getter for the CopiedAllocator class so the JIT
+ can use it for simple allocation i.e. when we can just bump the offset without having to
+ do anything else.
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileSlowCases): Added new slow case for op_new_array for when
+ we have to bail out because the fast allocation path fails for whatever reason.
+ * jit/JIT.h:
+ (JIT):
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::emitAllocateBasicStorage): Added utility function that allows objects to
+ allocate generic backing stores. This function is used by emitAllocateJSArray.
+ (JSC):
+ (JSC::JIT::emitAllocateJSArray): Added utility function that allows the client to
+ more easily allocate JSArrays. This function is used by emit_op_new_array and I expect
+ it will also be used for emit_op_new_array_buffer.
+ * jit/JITOpcodes.cpp:
+ (JSC::JIT::emit_op_new_array): Changed to do inline allocation of JSArrays. Still does
+ a stub call for oversize arrays.
+ (JSC):
+ (JSC::JIT::emitSlow_op_new_array): Just bails out to a stub call if we fail in any way on
+ the fast path.
+ * runtime/JSArray.cpp:
+ (JSC):
+ * runtime/JSArray.h: Added lots of offset functions for all the fields that we need to
+ initialize in the JIT.
+ (ArrayStorage):
+ (JSC::ArrayStorage::lengthOffset):
+ (JSC::ArrayStorage::numValuesInVectorOffset):
+ (JSC::ArrayStorage::allocBaseOffset):
+ (JSC::ArrayStorage::vectorOffset):
+ (JSArray):
+ (JSC::JSArray::sparseValueMapOffset):
+ (JSC::JSArray::subclassDataOffset):
+ (JSC::JSArray::indexBiasOffset):
+ (JSC):
+ (JSC::JSArray::storageSize): Moved this function from being a static function in the cpp file
+ to being a static function in the JSArray class. This move allows the JIT to call it to
+ see what size it should allocate.
+
+2012-02-20 Gavin Barraclough <barraclough@apple.com>
+
+ DefineOwnProperty fails with numeric properties & Object.prototype
+ https://bugs.webkit.org/show_bug.cgi?id=79059
+
+ Reviewed by Oliver Hunt.
+
+ ObjectPrototype caches whether it contains any numeric properties (m_hasNoPropertiesWithUInt32Names),
+ calls to defineOwnProperty need to update this cache.
+
+ * runtime/ObjectPrototype.cpp:
+ (JSC::ObjectPrototype::put):
+ (JSC::ObjectPrototype::defineOwnProperty):
+ (JSC):
+ (JSC::ObjectPrototype::getOwnPropertySlotByIndex):
+ * runtime/ObjectPrototype.h:
+ (ObjectPrototype):
+
+2012-02-20 Pino Toscano <pino@debian.org>
+
+ Does not build on GNU Hurd
+ https://bugs.webkit.org/show_bug.cgi?id=79045
+
+ Reviewed by Gustavo Noronha Silva.
+
+ * wtf/Platform.h: define WTF_OS_HURD.
+ * wtf/ThreadIdentifierDataPthreads.cpp: adds a band-aid fix
+ for the lack of PTHREAD_KEYS_MAX definition, with a value which
+ should not cause issues.
+
+2012-02-20 Gavin Barraclough <barraclough@apple.com>
+
+ Unreviewed windows build fix.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-02-20 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Undoing accidental changes
+
+ * heap/Heap.cpp:
+ (JSC::Heap::collectAllGarbage):
+
+2012-02-20 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Factor out allocation in CopySpace into a separate CopyAllocator
+ https://bugs.webkit.org/show_bug.cgi?id=78610
+
+ Reviewed by Oliver Hunt.
+
+ Added a new CopyAllocator class, which allows us to do allocations without
+ having to load the current offset and store the current offset in the current
+ block. This change will allow us to easily do inline assembly in the JIT for
+ array allocations.
+
+ * GNUmakefile.list.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * heap/CopiedAllocator.h: Added.
+ (JSC):
+ (CopiedAllocator):
+ (JSC::CopiedAllocator::currentBlock):
+ (JSC::CopiedAllocator::CopiedAllocator):
+ (JSC::CopiedAllocator::allocate):
+ (JSC::CopiedAllocator::fitsInCurrentBlock):
+ (JSC::CopiedAllocator::wasLastAllocation):
+ (JSC::CopiedAllocator::startedCopying):
+ (JSC::CopiedAllocator::resetCurrentBlock):
+ (JSC::CopiedAllocator::currentUtilization):
+ (JSC::CopiedAllocator::resetLastAllocation):
+ * heap/CopiedBlock.h:
+ (CopiedBlock):
+ * heap/CopiedSpace.cpp: Moved some stuff from CopiedSpaceInlineMethods to here because we
+ weren't really getting any benefits from having such big functions in a header file.
+ (JSC::CopiedSpace::CopiedSpace):
+ (JSC):
+ (JSC::CopiedSpace::init):
+ (JSC::CopiedSpace::tryAllocateSlowCase):
+ (JSC::CopiedSpace::tryAllocateOversize):
+ (JSC::CopiedSpace::tryReallocate):
+ (JSC::CopiedSpace::tryReallocateOversize):
+ (JSC::CopiedSpace::doneFillingBlock):
+ (JSC::CopiedSpace::doneCopying):
+ (JSC::CopiedSpace::getFreshBlock):
+ * heap/CopiedSpace.h:
+ (CopiedSpace):
+ * heap/CopiedSpaceInlineMethods.h:
+ (JSC):
+ (JSC::CopiedSpace::startedCopying):
+ (JSC::CopiedSpace::addNewBlock):
+ (JSC::CopiedSpace::allocateNewBlock):
+ (JSC::CopiedSpace::fitsInBlock):
+ (JSC::CopiedSpace::tryAllocate):
+ (JSC::CopiedSpace::allocateFromBlock):
+ * heap/Heap.cpp:
+ (JSC::Heap::collectAllGarbage):
+ * heap/HeapBlock.h:
+ (HeapBlock):
+
+2012-02-20 Patrick Gansterer <paroga@webkit.org>
+
+ Fix Visual Studio 2010 build.
+
+ * bytecompiler/NodesCodegen.cpp:
+ (JSC::PropertyListNode::emitBytecode):
+
+2012-02-16 Gavin Barraclough <barraclough@apple.com>
+
+ Move special __proto__ property to Object.prototype
+ https://bugs.webkit.org/show_bug.cgi?id=78409
+
+ Reviewed by Oliver Hunt.
+
+ Re-implement this as a regular accessor property. This has three key benefits:
+ 1) It makes it possible for objects to be given properties named __proto__.
+ 2) Object.prototype.__proto__ can be deleted, preventing object prototypes from being changed.
+ 3) This largely removes the magic used to implement __proto__; it can just be made a regular accessor property.
+
+ * parser/Parser.cpp:
+ (JSC::::parseFunctionInfo):
+ - No need to prohibit functions named __proto__.
+ * runtime/JSGlobalObject.cpp:
+ (JSC::JSGlobalObject::reset):
+ - Add __proto__ accessor to Object.prototype.
+ * runtime/JSGlobalObjectFunctions.cpp:
+ (JSC::globalFuncProtoGetter):
+ (JSC::globalFuncProtoSetter):
+ - Definition of the __proto__ accessor functions.
+ * runtime/JSGlobalObjectFunctions.h:
+ - Declaration of the __proto__ accessor functions.
+ * runtime/JSObject.cpp:
+ (JSC::JSObject::put):
+ - Remove the special handling for __proto__, there is still a check to allow for a fast guard for accessors excluding __proto__.
+ (JSC::JSObject::putDirectAccessor):
+ - Track on the structure whether an object contains accessors other than one for __proto__.
+ (JSC::JSObject::defineOwnProperty):
+ - No need to prohibit definition of own properties named __proto__.
+ * runtime/JSObject.h:
+ (JSC::JSObject::inlineGetOwnPropertySlot):
+ - Remove the special handling for __proto__.
+ (JSC::JSValue::get):
+ - Remove the special handling for __proto__.
+ * runtime/JSString.cpp:
+ (JSC::JSString::getOwnPropertySlot):
+ - Remove the special handling for __proto__.
+ * runtime/JSValue.h:
+ (JSValue):
+ - Made synthesizePrototype public (this may be needed by the __proto__ getter).
+ * runtime/ObjectConstructor.cpp:
+ (JSC::objectConstructorGetPrototypeOf):
+ - Perform the security check & call prototype() directly.
+ * runtime/Structure.cpp:
+ (JSC::Structure::Structure):
+ - Added 'ExcludingProto' variant of the 'hasGetterSetterProperties' state.
+ * runtime/Structure.h:
+ (JSC::Structure::hasGetterSetterPropertiesExcludingProto):
+ (JSC::Structure::setHasGetterSetterProperties):
+ (Structure):
+ - Added 'ExcludingProto' variant of the 'hasGetterSetterProperties' state.
+
+2012-02-20 Michael Saboff <msaboff@apple.com>
+
+ Update toLower and toUpper tests for Unicode 6.1 changes
+ https://bugs.webkit.org/show_bug.cgi?id=78923
+
+ Reviewed by Oliver Hunt.
+
+ * tests/mozilla/ecma/String/15.5.4.11-2.js: Updated the test
+ to handle a third set of results for updated Unicode 6.1
+ changes.
+ (getTestCases):
+ (TestCaseMultiExpected):
+ (writeTestCaseResultMultiExpected):
+ (getTestCaseResultMultiExpected):
+ (test):
+ (GetUnicodeValues):
+ (DecimalToHexString):
+
+2012-02-20 Andy Wingo <wingo@igalia.com>
+
+ Remove unused features from CodeFeatures
+ https://bugs.webkit.org/show_bug.cgi?id=78804
+
+ Reviewed by Gavin Barraclough.
+
+ * parser/Nodes.h:
+ * parser/ASTBuilder.h:
+ (JSC::ClosureFeature):
+ (JSC::ASTBuilder::createFunctionBody):
+ (JSC::ASTBuilder::usesClosures):
+ Remove "ClosureFeature". Since we track captured variables more
+ precisely, this bit doesn't do us any good.
+
+ (JSC::AssignFeature):
+ (JSC::ASTBuilder::makeAssignNode):
+ (JSC::ASTBuilder::makePrefixNode):
+ (JSC::ASTBuilder::makePostfixNode):
+ (JSC::ASTBuilder::usesAssignment):
+ Similarly, remove AssignFeature. It is unused.
+
+2012-02-19 Carlos Garcia Campos <cgarcia@igalia.com>
+
+ Unreviewed. Fix make distcheck issues.
+
+ * GNUmakefile.list.am: Add missing files.
+
+2012-02-18 Sam Weinig <sam@webkit.org>
+
+ Fix style issues in DFG Phase classes
+ https://bugs.webkit.org/show_bug.cgi?id=78983
+
+ Reviewed by Ryosuke Niwa.
+
+ * dfg/DFGArithNodeFlagsInferencePhase.cpp:
+ * dfg/DFGCFAPhase.cpp:
+ * dfg/DFGCSEPhase.cpp:
+ * dfg/DFGPredictionPropagationPhase.cpp:
+ * dfg/DFGVirtualRegisterAllocationPhase.cpp:
+ Add a space before the colon in class declarations.
+
+2012-02-18 Filip Pizlo <fpizlo@apple.com>
+
+ Attempt to fix Windows build.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-02-18 Sam Weinig <sam@webkit.org>
+
+ Fix the libc++ build.
+
+ Reviewed by Anders Carlsson.
+
+ * heap/Weak.h:
+ Libc++'s nullptr emulation does not allow default construction
+ of the nullptr_t type. Work around this with the arguably clearer
+ just returning nullptr.
+
+2012-02-18 Filip Pizlo <fpizlo@apple.com>
+
+ DFGPropagator.cpp has too many things
+ https://bugs.webkit.org/show_bug.cgi?id=78956
+
+ Reviewed by Oliver Hunt.
+
+ Added the notion of a DFG::Phase. Removed DFG::Propagator, and took its
+ various things and put them into separate files. These new phases follow
+ the naming convention "DFG<name>Phase" where <name> is a noun. They are
+ called via functions of the form "perform<name>".
+
+ * CMakeLists.txt:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Target.pri:
+ * dfg/DFGArithNodeFlagsInferencePhase.cpp: Added.
+ (DFG):
+ (JSC::DFG::performArithNodeFlagsInference):
+ * dfg/DFGArithNodeFlagsInferencePhase.h: Added.
+ (DFG):
+ * dfg/DFGCFAPhase.cpp: Added.
+ (DFG):
+ (JSC::DFG::performCFA):
+ * dfg/DFGCFAPhase.h: Added.
+ (DFG):
+ * dfg/DFGCSEPhase.cpp: Added.
+ (DFG):
+ (JSC::DFG::performCSE):
+ * dfg/DFGCSEPhase.h: Added.
+ (DFG):
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compile):
+ * dfg/DFGPhase.cpp: Added.
+ (DFG):
+ (JSC::DFG::Phase::beginPhase):
+ (JSC::DFG::Phase::endPhase):
+ * dfg/DFGPhase.h: Added.
+ (DFG):
+ (Phase):
+ (JSC::DFG::Phase::Phase):
+ (JSC::DFG::Phase::~Phase):
+ (JSC::DFG::Phase::globalData):
+ (JSC::DFG::Phase::codeBlock):
+ (JSC::DFG::Phase::profiledBlock):
+ (JSC::DFG::Phase::beginPhase):
+ (JSC::DFG::Phase::endPhase):
+ (JSC::DFG::runPhase):
+ * dfg/DFGPredictionPropagationPhase.cpp: Added.
+ (DFG):
+ (JSC::DFG::performPredictionPropagation):
+ * dfg/DFGPredictionPropagationPhase.h: Added.
+ (DFG):
+ * dfg/DFGPropagator.cpp: Removed.
+ * dfg/DFGPropagator.h: Removed.
+ * dfg/DFGVirtualRegisterAllocationPhase.cpp: Added.
+ (DFG):
+ (JSC::DFG::performVirtualRegisterAllocation):
+ * dfg/DFGVirtualRegisterAllocationPhase.h: Added.
+ (DFG):
+
+2012-02-17 Filip Pizlo <fpizlo@apple.com>
+
+ DFG::Graph should have references to JSGlobalData, the CodeBlock being compiled, and
+ the CodeBlock that was used for profiling
+ https://bugs.webkit.org/show_bug.cgi?id=78954
+
+ Reviewed by Gavin Barraclough.
+
+ * bytecode/CodeBlock.h:
+ (JSC::baselineCodeBlockForOriginAndBaselineCodeBlock):
+ (JSC):
+ * dfg/DFGAbstractState.cpp:
+ (JSC::DFG::AbstractState::AbstractState):
+ (JSC::DFG::AbstractState::execute):
+ * dfg/DFGAbstractState.h:
+ * dfg/DFGAssemblyHelpers.h:
+ (AssemblyHelpers):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::ByteCodeParser):
+ (JSC::DFG::ByteCodeParser::handleCall):
+ (JSC::DFG::parse):
+ * dfg/DFGByteCodeParser.h:
+ (DFG):
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compile):
+ * dfg/DFGGraph.cpp:
+ (JSC::DFG::Graph::dump):
+ (JSC::DFG::Graph::predictArgumentTypes):
+ * dfg/DFGGraph.h:
+ (JSC::DFG::Graph::Graph):
+ (Graph):
+ (JSC::DFG::Graph::getJSConstantPrediction):
+ (JSC::DFG::Graph::addShouldSpeculateInteger):
+ (JSC::DFG::Graph::isInt32Constant):
+ (JSC::DFG::Graph::isDoubleConstant):
+ (JSC::DFG::Graph::isNumberConstant):
+ (JSC::DFG::Graph::isBooleanConstant):
+ (JSC::DFG::Graph::isFunctionConstant):
+ (JSC::DFG::Graph::valueOfJSConstant):
+ (JSC::DFG::Graph::valueOfInt32Constant):
+ (JSC::DFG::Graph::valueOfNumberConstant):
+ (JSC::DFG::Graph::valueOfBooleanConstant):
+ (JSC::DFG::Graph::valueOfFunctionConstant):
+ (JSC::DFG::Graph::baselineCodeBlockFor):
+ (JSC::DFG::Graph::valueProfileFor):
+ (JSC::DFG::Graph::addImmediateShouldSpeculateInteger):
+ * dfg/DFGJITCompiler.h:
+ (JSC::DFG::JITCompiler::JITCompiler):
+ (JITCompiler):
+ * dfg/DFGOSRExit.cpp:
+ (JSC::DFG::OSRExit::considerAddingAsFrequentExitSiteSlow):
+ * dfg/DFGPropagator.cpp:
+ (JSC::DFG::Propagator::Propagator):
+ (JSC::DFG::Propagator::isNotNegZero):
+ (JSC::DFG::Propagator::isNotZero):
+ (JSC::DFG::Propagator::propagateNodePredictions):
+ (JSC::DFG::Propagator::doRoundOfDoubleVoting):
+ (JSC::DFG::Propagator::globalCFA):
+ (JSC::DFG::propagate):
+ * dfg/DFGPropagator.h:
+ (DFG):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::computeValueRecoveryFor):
+ (JSC::DFG::SpeculativeJIT::compileAdd):
+ (JSC::DFG::SpeculativeJIT::compileArithSub):
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::isConstant):
+ (JSC::DFG::SpeculativeJIT::isJSConstant):
+ (JSC::DFG::SpeculativeJIT::isInt32Constant):
+ (JSC::DFG::SpeculativeJIT::isDoubleConstant):
+ (JSC::DFG::SpeculativeJIT::isNumberConstant):
+ (JSC::DFG::SpeculativeJIT::isBooleanConstant):
+ (JSC::DFG::SpeculativeJIT::isFunctionConstant):
+ (JSC::DFG::SpeculativeJIT::valueOfInt32Constant):
+ (JSC::DFG::SpeculativeJIT::valueOfNumberConstant):
+ (JSC::DFG::SpeculativeJIT::valueOfJSConstant):
+ (JSC::DFG::SpeculativeJIT::valueOfBooleanConstant):
+ (JSC::DFG::SpeculativeJIT::valueOfFunctionConstant):
+ (JSC::DFG::SpeculativeJIT::speculationCheck):
+ (JSC::DFG::SpeculativeJIT::SpeculativeJIT):
+
+2012-02-17 Ahmad Sharif <asharif.tools@gmail.com>
+
+ There is a warning in memset in glibc that gets triggered through a
+ warndecl when the fill-value of memset is a non-zero constant and the
+ size is zero. This warning is enabled when building with
+ -D_FORTIFY_SOURCE=2. This patch fixes the warning.
+
+ https://bugs.webkit.org/show_bug.cgi?id=78513
+
+ Reviewed by Alexey Proskuryakov.
+
+ * wtf/Vector.h:
+
+2012-02-17 Kalev Lember <kalevlember@gmail.com>
+
+ Remove unused parameters from WTF threading API
+ https://bugs.webkit.org/show_bug.cgi?id=78389
+
+ Reviewed by Adam Roben.
+
+ waitForThreadCompletion() had an out param 'void **result' to get the
+ 'void *' returned by ThreadFunction. However, the implementation in
+ ThreadingWin.cpp ignored the out param, not filling it in. This had
+ led to a situation where none of the client code made use of the param
+ and just ignored it.
+
+ To clean this up, the patch changes the signature of ThreadFunction to
+ return void instead of void* and drops the unused 'void **result'
+ parameter from waitForThreadCompletion. Also, all client code is
+ updated for the API change.
+
+ As mentioned in https://bugs.webkit.org/show_bug.cgi?id=78389 , even
+ though the change only affects internal API, Safari is using it
+ directly and we'll need to keep the old versions around for ABI
+ compatibility. For this, the patch adds compatibility wrappers with
+ the old ABI.
+
+ * JavaScriptCore.order:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+ * bytecode/SamplingTool.cpp:
+ (JSC::SamplingThread::threadStartFunc):
+ (JSC::SamplingThread::stop):
+ * bytecode/SamplingTool.h:
+ (SamplingThread):
+ * heap/Heap.cpp:
+ (JSC::Heap::~Heap):
+ (JSC::Heap::blockFreeingThreadStartFunc):
+ * heap/Heap.h:
+ * heap/MarkStack.cpp:
+ (JSC::MarkStackThreadSharedData::markingThreadStartFunc):
+ (JSC::MarkStackThreadSharedData::~MarkStackThreadSharedData):
+ * heap/MarkStack.h:
+ (MarkStackThreadSharedData):
+ * wtf/ParallelJobsGeneric.cpp:
+ (WTF::ParallelEnvironment::ThreadPrivate::workerThread):
+ * wtf/ParallelJobsGeneric.h:
+ (ThreadPrivate):
+ * wtf/ThreadFunctionInvocation.h: Update the signature of
+ ThreadFunction.
+ (WTF):
+ * wtf/Threading.cpp:
+ (WTF::threadEntryPoint): Update for ThreadFunction signature change.
+ (WTF):
+ (WTF::ThreadFunctionWithReturnValueInvocation::ThreadFunctionWithReturnValueInvocation):
+ ABI compatibility function for Safari.
+ (ThreadFunctionWithReturnValueInvocation): Ditto.
+ (WTF::compatEntryPoint): Ditto.
+ (WTF::createThread): Ditto.
+ (WTF::waitForThreadCompletion): Ditto.
+ * wtf/Threading.h: Update the signature of ThreadFunction and
+ waitForThreadCompletion.
+ (WTF):
+ * wtf/ThreadingPthreads.cpp: Implement the new API.
+ (WTF::wtfThreadEntryPoint):
+ (WTF):
+ (WTF::createThreadInternal):
+ (WTF::waitForThreadCompletion):
+ * wtf/ThreadingWin.cpp: Implement the new API.
+ (WTF::wtfThreadEntryPoint):
+ (WTF::waitForThreadCompletion):
+
+2012-02-16 Oliver Hunt <oliver@apple.com>
+
+ Implement Error.stack
+ https://bugs.webkit.org/show_bug.cgi?id=66994
+
+ Reviewed by Gavin Barraclough.
+
+ Implement support for stack traces on exception objects. This is a rewrite
+ of the core portion of the last stack walking logic, but the mechanical work
+ of adding the information to an exception comes from the original work by
+ Juan Carlos Montemayor Elosua.
+
+ * interpreter/Interpreter.cpp:
+ (JSC::getCallerInfo):
+ (JSC):
+ (JSC::getSourceURLFromCallFrame):
+ (JSC::getStackFrameCodeType):
+ (JSC::Interpreter::getStackTrace):
+ (JSC::Interpreter::throwException):
+ (JSC::Interpreter::privateExecute):
+ * interpreter/Interpreter.h:
+ (JSC):
+ (StackFrame):
+ (JSC::StackFrame::toString):
+ (Interpreter):
+ * jsc.cpp:
+ (GlobalObject::finishCreation):
+ (functionJSCStack):
+ * parser/Nodes.h:
+ (JSC::FunctionBodyNode::setInferredName):
+ * parser/Parser.h:
+ (JSC::::parse):
+ * runtime/CommonIdentifiers.h:
+ * runtime/Error.cpp:
+ (JSC::addErrorInfo):
+ * runtime/Error.h:
+ (JSC):
+
+2012-02-17 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Rename Bump* to Copy*
+ https://bugs.webkit.org/show_bug.cgi?id=78573
+
+ Reviewed by Geoffrey Garen.
+
+ Renamed anything with "Bump" in the name to have "Copied" instead.
+
+ * CMakeLists.txt:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Target.pri:
+ * heap/BumpBlock.h: Removed.
+ * heap/BumpSpace.cpp: Removed.
+ * heap/BumpSpace.h: Removed.
+ * heap/BumpSpaceInlineMethods.h: Removed.
+ * heap/ConservativeRoots.cpp:
+ (JSC::ConservativeRoots::ConservativeRoots):
+ (JSC::ConservativeRoots::genericAddPointer):
+ * heap/ConservativeRoots.h:
+ (ConservativeRoots):
+ * heap/CopiedBlock.h: Added.
+ (JSC):
+ (CopiedBlock):
+ (JSC::CopiedBlock::CopiedBlock):
+ * heap/CopiedSpace.cpp: Added.
+ (JSC):
+ (JSC::CopiedSpace::tryAllocateSlowCase):
+ * heap/CopiedSpace.h: Added.
+ (JSC):
+ (CopiedSpace):
+ (JSC::CopiedSpace::isInCopyPhase):
+ (JSC::CopiedSpace::totalMemoryAllocated):
+ (JSC::CopiedSpace::totalMemoryUtilized):
+ * heap/CopiedSpaceInlineMethods.h: Added.
+ (JSC):
+ (JSC::CopiedSpace::CopiedSpace):
+ (JSC::CopiedSpace::init):
+ (JSC::CopiedSpace::contains):
+ (JSC::CopiedSpace::pin):
+ (JSC::CopiedSpace::startedCopying):
+ (JSC::CopiedSpace::doneCopying):
+ (JSC::CopiedSpace::doneFillingBlock):
+ (JSC::CopiedSpace::recycleBlock):
+ (JSC::CopiedSpace::getFreshBlock):
+ (JSC::CopiedSpace::borrowBlock):
+ (JSC::CopiedSpace::addNewBlock):
+ (JSC::CopiedSpace::allocateNewBlock):
+ (JSC::CopiedSpace::fitsInBlock):
+ (JSC::CopiedSpace::fitsInCurrentBlock):
+ (JSC::CopiedSpace::tryAllocate):
+ (JSC::CopiedSpace::tryAllocateOversize):
+ (JSC::CopiedSpace::allocateFromBlock):
+ (JSC::CopiedSpace::tryReallocate):
+ (JSC::CopiedSpace::tryReallocateOversize):
+ (JSC::CopiedSpace::isOversize):
+ (JSC::CopiedSpace::isPinned):
+ (JSC::CopiedSpace::oversizeBlockFor):
+ (JSC::CopiedSpace::blockFor):
+ * heap/Heap.cpp:
+ * heap/Heap.h:
+ (JSC):
+ (Heap):
+ * heap/MarkStack.cpp:
+ (JSC::MarkStackThreadSharedData::MarkStackThreadSharedData):
+ (JSC::SlotVisitor::drainFromShared):
+ (JSC::SlotVisitor::startCopying):
+ (JSC::SlotVisitor::allocateNewSpace):
+ (JSC::SlotVisitor::doneCopying):
+ * heap/MarkStack.h:
+ (MarkStackThreadSharedData):
+ * heap/SlotVisitor.h:
+ (SlotVisitor):
+ * runtime/JSArray.cpp:
+ * runtime/JSObject.cpp:
+
+2012-02-16 Yuqiang Xian <yuqiang.xian@intel.com>
+
+ Add JSC code profiling support on Linux x86
+ https://bugs.webkit.org/show_bug.cgi?id=78871
+
+ Reviewed by Gavin Barraclough.
+
+ We don't unwind the stack for now as we cannot guarantee all the
+ libraries are compiled without -fomit-frame-pointer.
+
+ * tools/CodeProfile.cpp:
+ (JSC::CodeProfile::sample):
+ * tools/CodeProfiling.cpp:
+ (JSC):
+ (JSC::profilingTimer):
+ (JSC::CodeProfiling::begin):
+ (JSC::CodeProfiling::end):
+
+2012-02-16 Csaba Osztrogonác <ossy@webkit.org>
+
+ Unreviewed. Rolling out r107980, because it broke 32 bit platforms.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::throwException):
+ (JSC::Interpreter::privateExecute):
+ * interpreter/Interpreter.h:
+ (JSC):
+ (Interpreter):
+ * jsc.cpp:
+ (GlobalObject::finishCreation):
+ * parser/Nodes.h:
+ (JSC::FunctionBodyNode::setInferredName):
+ * parser/Parser.h:
+ (JSC::::parse):
+ * runtime/CommonIdentifiers.h:
+ * runtime/Error.cpp:
+ (JSC::addErrorInfo):
+ * runtime/Error.h:
+ (JSC):
+
+2012-02-16 Filip Pizlo <fpizlo@apple.com>
+
+ ENABLE_INTERPRETER should be ENABLE_CLASSIC_INTERPRETER
+ https://bugs.webkit.org/show_bug.cgi?id=78791
+
+ Rubber stamped by Oliver Hunt.
+
+ Just a renaming, nothing more. Also renamed COMPUTED_GOTO_INTERPRETER to
+ COMPUTED_GOTO_CLASSIC_INTERPRETER.
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::dump):
+ (JSC::CodeBlock::stronglyVisitStrongReferences):
+ (JSC):
+ (JSC::CodeBlock::shrinkToFit):
+ * bytecode/CodeBlock.h:
+ (CodeBlock):
+ * bytecode/Instruction.h:
+ (JSC::Instruction::Instruction):
+ * bytecode/Opcode.h:
+ (JSC::padOpcodeName):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::emitResolve):
+ (JSC::BytecodeGenerator::emitResolveWithBase):
+ (JSC::BytecodeGenerator::emitGetById):
+ (JSC::BytecodeGenerator::emitPutById):
+ (JSC::BytecodeGenerator::emitDirectPutById):
+ * interpreter/AbstractPC.cpp:
+ (JSC::AbstractPC::AbstractPC):
+ * interpreter/AbstractPC.h:
+ (AbstractPC):
+ * interpreter/CallFrame.h:
+ (ExecState):
+ * interpreter/Interpreter.cpp:
+ (JSC):
+ (JSC::Interpreter::initialize):
+ (JSC::Interpreter::isOpcode):
+ (JSC::Interpreter::unwindCallFrame):
+ (JSC::Interpreter::execute):
+ (JSC::Interpreter::privateExecute):
+ (JSC::Interpreter::retrieveLastCaller):
+ * interpreter/Interpreter.h:
+ (JSC::Interpreter::getOpcode):
+ (JSC::Interpreter::getOpcodeID):
+ (Interpreter):
+ * jit/ExecutableAllocatorFixedVMPool.cpp:
+ (JSC::FixedVMPoolExecutableAllocator::FixedVMPoolExecutableAllocator):
+ * runtime/Executable.cpp:
+ (JSC::EvalExecutable::compileInternal):
+ (JSC::ProgramExecutable::compileInternal):
+ (JSC::FunctionExecutable::compileForCallInternal):
+ (JSC::FunctionExecutable::compileForConstructInternal):
+ * runtime/Executable.h:
+ (NativeExecutable):
+ * runtime/JSGlobalData.cpp:
+ (JSC::JSGlobalData::JSGlobalData):
+ (JSC::JSGlobalData::getHostFunction):
+ * runtime/JSGlobalData.h:
+ (JSGlobalData):
+ * wtf/OSAllocatorPosix.cpp:
+ (WTF::OSAllocator::reserveAndCommit):
+ * wtf/Platform.h:
+
+2012-02-15 Geoffrey Garen <ggaren@apple.com>
+
+ Made Weak<T> single-owner, adding PassWeak<T>
+ https://bugs.webkit.org/show_bug.cgi?id=78740
+
+ Reviewed by Sam Weinig.
+
+ This works basically the same way as OwnPtr<T> and PassOwnPtr<T>.
+
+ This clarifies the semantics of finalizers: It's ambiguous and probably
+ a bug to copy a finalizer (i.e., it's a bug to run a C++ destructor
+ twice), so I've made Weak<T> non-copyable. Anywhere we used to copy a
+ Weak<T>, we now use PassWeak<T>.
+
+ This also makes Weak<T> HashMaps more efficient.
+
+ * API/JSClassRef.cpp:
+ (OpaqueJSClass::prototype): Use PassWeak<T> instead of set(), since
+ set() is gone now.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj: Export!
+
+ * heap/PassWeak.h: Added.
+ (JSC):
+ (PassWeak):
+ (JSC::PassWeak::PassWeak):
+ (JSC::PassWeak::~PassWeak):
+ (JSC::PassWeak::get):
+ (JSC::::leakHandle):
+ (JSC::adoptWeak):
+ (JSC::operator==):
+ (JSC::operator!=): This is the Weak<T> version of PassOwnPtr<T>.
+
+ * heap/Weak.h:
+ (Weak):
+ (JSC::Weak::Weak):
+ (JSC::Weak::release):
+ (JSC::Weak::hashTableDeletedValue):
+ (JSC::=):
+ (JSC): Changed to be non-copyable, removing a lot of copying-related
+ APIs. Added hash traits so hash maps still work.
+
+ * jit/JITStubs.cpp:
+ (JSC::JITThunks::hostFunctionStub):
+ * runtime/RegExpCache.cpp:
+ (JSC::RegExpCache::lookupOrCreate): Use PassWeak<T>, as required by
+ our new hash map API.
+
+2012-02-16 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Fix the broken viewport tests
+ https://bugs.webkit.org/show_bug.cgi?id=78774
+
+ Reviewed by Kenneth Rohde Christiansen.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+ * wtf/text/WTFString.cpp:
+ (WTF):
+ (WTF::toDoubleType): Template-ized to allow other functions to specify whether they
+ want to allow trailing junk or not when calling strtod.
+ (WTF::charactersToDouble):
+ (WTF::charactersToFloat):
+ (WTF::charactersToFloatIgnoringJunk): Created new version of charactersToFloat that allows
+ trailing junk.
+ * wtf/text/WTFString.h:
+ (WTF):
+
+2012-02-16 Oliver Hunt <oliver@apple.com>
+
+ Implement Error.stack
+ https://bugs.webkit.org/show_bug.cgi?id=66994
+
+ Reviewed by Gavin Barraclough.
+
+ Implement support for stack traces on exception objects. This is a rewrite
+ of the core portion of the last stack walking logic, but the mechanical work
+ of adding the information to an exception comes from the original work by
+ Juan Carlos Montemayor Elosua.
+
+ * interpreter/Interpreter.cpp:
+ (JSC::getCallerInfo):
+ (JSC):
+ (JSC::getSourceURLFromCallFrame):
+ (JSC::getStackFrameCodeType):
+ (JSC::Interpreter::getStackTrace):
+ (JSC::Interpreter::throwException):
+ (JSC::Interpreter::privateExecute):
+ * interpreter/Interpreter.h:
+ (JSC):
+ (StackFrame):
+ (JSC::StackFrame::toString):
+ (Interpreter):
+ * jsc.cpp:
+ (GlobalObject::finishCreation):
+ (functionJSCStack):
+ * parser/Nodes.h:
+ (JSC::FunctionBodyNode::setInferredName):
+ * parser/Parser.h:
+ (JSC::::parse):
+ * runtime/CommonIdentifiers.h:
+ * runtime/Error.cpp:
+ (JSC::addErrorInfo):
+ * runtime/Error.h:
+ (JSC):
+
+2012-02-15 Gavin Barraclough <barraclough@apple.com>
+
+ Numerous trivial bugs in Object.defineProperty
+ https://bugs.webkit.org/show_bug.cgi?id=78777
+
+ Reviewed by Sam Weinig.
+
+ There are a handful of really trivial bugs, related to Object.defineProperty:
+ * Redefining an accessor with different attributes changes the attributes, but not the get/set functions!
+ * Calling an undefined setter should only throw in strict mode.
+        * When redefining an accessor to a data descriptor, if writable is not specified we should default to false.
+ * Any attempt to redefine a non-configurable property of an array as configurable should be rejected.
+ * Object.defineProperties should call toObject on 'Properties' argument, rather than throwing if it is not an object.
+ * If preventExtensions has been called on an array, subsequent assignment beyond array bounds should fail.
+ * 'isFrozen' shouldn't be checking the ReadOnly bit for accessor descriptors (we presently always keep this bit as 'false').
+        * Should be able to redefine a non-writable, non-configurable property, with the same value and attributes.
+        * Should be able to define a non-configurable accessor.
+ These are mostly all one-line changes, e.g. inverted boolean checks, masking against wrong attribute.
+
+ * runtime/JSArray.cpp:
+ (JSC::SparseArrayValueMap::put):
+ - Added ASSERT.
+ - Calling an undefined setter should only throw in strict mode.
+ (JSC::JSArray::putDescriptor):
+        - Should be able to define a non-configurable accessor.
+ (JSC::JSArray::defineOwnNumericProperty):
+ - Any attempt to redefine a non-configurable property of an array as configurable should be rejected.
+ (JSC::JSArray::putByIndexBeyondVectorLength):
+ - If preventExtensions has been called on an array, subsequent assignment beyond array bounds should fail.
+ * runtime/JSArray.h:
+ (JSArray):
+ - made enterDictionaryMode public, called from JSObject.
+ * runtime/JSObject.cpp:
+ (JSC::JSObject::put):
+ - Calling an undefined setter should only throw in strict mode.
+ (JSC::JSObject::preventExtensions):
+ - Put array objects into dictionary mode to handle this!
+ (JSC::JSObject::defineOwnProperty):
+        - Should be able to redefine a non-writable, non-configurable property, with the same value and attributes.
+ - Redefining an accessor with different attributes changes the attributes, but not the get/set functions!
+ * runtime/ObjectConstructor.cpp:
+ (JSC::objectConstructorDefineProperties):
+ - Object.defineProperties should call toObject on 'Properties' argument, rather than throwing if it is not an object.
+ * runtime/PropertyDescriptor.cpp:
+ (JSC::PropertyDescriptor::attributesWithOverride):
+ - When redefining an accessor to a data decriptor, if writable is not specified we should default to false.
+ (JSC::PropertyDescriptor::attributesOverridingCurrent):
+        - When redefining an accessor to a data descriptor, if writable is not specified we should default to false.
+ * runtime/Structure.cpp:
+ (JSC::Structure::freezeTransition):
+ - 'freezeTransition' shouldn't be setting the ReadOnly bit for accessor descriptors (we presently always keep this bit as 'false').
+ (JSC::Structure::isFrozen):
+ - 'isFrozen' shouldn't be checking the ReadOnly bit for accessor descriptors (we presently always keep this bit as 'false').
+
+2012-02-13 Filip Pizlo <fpizlo@apple.com>
+
+ DFG should not check the types of arguments that are dead
+ https://bugs.webkit.org/show_bug.cgi?id=78518
+
+ Reviewed by Geoff Garen.
+
+ The argument checks are now elided if the corresponding SetArgument is dead,
+ and the abstract value of the argument is set to bottom (None, []). This is
+ performance neutral on the benchmarks we currently track.
+
+ * dfg/DFGAbstractState.cpp:
+ (JSC::DFG::AbstractState::initialize):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::checkArgumentTypes):
+
+2012-02-15 Oliver Hunt <oliver@apple.com>
+
+ Ensure that the DFG JIT always plants a CodeOrigin when making calls
+ https://bugs.webkit.org/show_bug.cgi?id=78763
+
+ Reviewed by Gavin Barraclough.
+
+ Make all calls plant a CodeOrigin prior to the actual
+ call. Also clobbers the Interpreter with logic to ensure
+ that the interpreter always plants a bytecode offset.
+
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ (JSC::DFG::JITCompiler::compileFunction):
+ * dfg/DFGJITCompiler.h:
+ (CallBeginToken):
+ (JSC::DFG::JITCompiler::beginJSCall):
+ (JSC::DFG::JITCompiler::beginCall):
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::tryBuildGetByIDList):
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::appendCallWithExceptionCheck):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::emitCall):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::emitCall):
+ * interpreter/AbstractPC.cpp:
+ (JSC::AbstractPC::AbstractPC):
+ * interpreter/CallFrame.cpp:
+ (JSC::CallFrame::trueCallFrame):
+ * interpreter/CallFrame.h:
+ (JSC::ExecState::bytecodeOffsetForNonDFGCode):
+ (ExecState):
+ (JSC::ExecState::setBytecodeOffsetForNonDFGCode):
+ (JSC::ExecState::codeOriginIndexForDFG):
+
+2012-02-14 Oliver Hunt <oliver@apple.com>
+
+ Fix Interpreter.
+
+ * runtime/Executable.cpp:
+ (JSC):
+ * runtime/Executable.h:
+ (ExecutableBase):
+
+2012-02-14 Matt Lilek <mrl@apple.com>
+
+ Don't ENABLE_DASHBOARD_SUPPORT unconditionally on all Mac platforms
+ https://bugs.webkit.org/show_bug.cgi?id=78629
+
+ Reviewed by David Kilzer.
+
+ * Configurations/FeatureDefines.xcconfig:
+
+2012-02-14 Filip Pizlo <fpizlo@apple.com>
+
+ Unreviewed, build fix for non-DFG platforms.
+
+ * assembler/MacroAssembler.h:
+ (MacroAssembler):
+
+2012-02-14 Filip Pizlo <fpizlo@apple.com>
+
+ Unreviewed, fix build and configuration goof.
+
+ * assembler/MacroAssembler.h:
+ (JSC::MacroAssembler::invert):
+ * dfg/DFGCommon.h:
+
+2012-02-13 Filip Pizlo <fpizlo@apple.com>
+
+ DFG should be able to emit code on control flow edges
+ https://bugs.webkit.org/show_bug.cgi?id=78515
+
+ Reviewed by Gavin Barraclough.
+
+ This gets us a few steps closer to being able to perform global register allocation,
+ by allowing us to have landing pads on control flow edges. This will let us reshuffle
+ registers if it happens to be necessary due to different reg alloc decisions in
+        different blocks.
+
+ This also introduces the notion of a landing pad for OSR entry, which will allow us
+ to emit code that places data into registers when we're entering into the DFG from
+ the old JIT.
+
+ Finally, this patch introduces a verification mode that checks that the landing pads
+ are actually emitted and do actually work as advertised. When verification is disabled,
+ this has no effect on behavior.
+
+ * assembler/MacroAssembler.h:
+ (MacroAssembler):
+ (JSC::MacroAssembler::invert):
+ (JSC::MacroAssembler::isInvertible):
+ * dfg/DFGCommon.h:
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::compile):
+ (JSC::DFG::JITCompiler::compileFunction):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compilePeepHoleDoubleBranch):
+ (JSC::DFG::SpeculativeJIT::compilePeepHoleObjectEquality):
+ (JSC::DFG::SpeculativeJIT::compilePeepHoleIntegerBranch):
+ (JSC::DFG::SpeculativeJIT::compile):
+ (JSC::DFG::SpeculativeJIT::createOSREntries):
+ (DFG):
+ (JSC::DFG::SpeculativeJIT::linkOSREntries):
+ (JSC::DFG::SpeculativeJIT::compileStrictEqForConstant):
+ * dfg/DFGSpeculativeJIT.h:
+ (SpeculativeJIT):
+ (JSC::DFG::SpeculativeJIT::branchDouble):
+ (JSC::DFG::SpeculativeJIT::branchDoubleNonZero):
+ (JSC::DFG::SpeculativeJIT::branch32):
+ (JSC::DFG::SpeculativeJIT::branchTest32):
+ (JSC::DFG::SpeculativeJIT::branchPtr):
+ (JSC::DFG::SpeculativeJIT::branchTestPtr):
+ (JSC::DFG::SpeculativeJIT::branchTest8):
+ (JSC::DFG::SpeculativeJIT::jump):
+ (JSC::DFG::SpeculativeJIT::haveEdgeCodeToEmit):
+ (JSC::DFG::SpeculativeJIT::emitEdgeCode):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNull):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeStrictEq):
+ (JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
+ (JSC::DFG::SpeculativeJIT::emitBranch):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNull):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
+ (JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeStrictEq):
+ (JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
+ (JSC::DFG::SpeculativeJIT::emitBranch):
+ (JSC::DFG::SpeculativeJIT::compile):
+
+2012-02-14 Filip Pizlo <fpizlo@apple.com>
+
+ Assertion failure under JSC::DFG::AbstractState::execute loading economist.com
+ https://bugs.webkit.org/show_bug.cgi?id=78153
+ <rdar://problem/10861712> <rdar://problem/10861947>
+
+ Reviewed by Oliver Hunt.
+
+ * dfg/DFGAbstractState.cpp:
+ (JSC::DFG::AbstractState::execute):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compileAdd):
+
+2012-02-14 Eric Seidel <eric@webkit.org>
+
+ Upstream Android's additions to Platform.h
+ https://bugs.webkit.org/show_bug.cgi?id=78536
+
+ Reviewed by Adam Barth.
+
+ * wtf/Platform.h:
+
+2012-02-12 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Replace old strtod with new strtod
+ https://bugs.webkit.org/show_bug.cgi?id=68044
+
+ Reviewed by Geoffrey Garen.
+
+ * parser/Lexer.cpp: Added template argument. This version allows junk after numbers.
+ (JSC::::lex):
+ * runtime/JSGlobalObjectFunctions.cpp: Ditto.
+ (JSC::parseInt):
+ (JSC::jsStrDecimalLiteral):
+ * runtime/LiteralParser.cpp: Ditto.
+ (JSC::::Lexer::lexNumber):
+ * wtf/dtoa.cpp: Replaced old strtod with a new version that uses the new StringToDoubleConverter.
+ It takes a template argument to allow clients to determine statically whether it should allow
+ junk after the numbers or not.
+ (WTF):
+ (WTF::strtod):
+ * wtf/dtoa.h:
+ (WTF):
+ * wtf/text/WTFString.cpp: Added template argument. This version does not allow junk after numbers.
+ (WTF::toDoubleType):
+
+2012-02-13 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ More windows build fixing
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-02-13 Oliver Hunt <oliver@apple.com>
+
+ Executing out of bounds in JSC::Yarr::YarrCodeBlock::execute / JSC::RegExp::match
+ https://bugs.webkit.org/show_bug.cgi?id=76315
+
+ Reviewed by Gavin Barraclough.
+
+ Perform a 3 byte compare using two comparisons, rather than trying to perform the
+ operation with a four byte load.
+
+ * yarr/YarrJIT.cpp:
+ (JSC::Yarr::YarrGenerator::generatePatternCharacterOnce):
+
+2012-02-13 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Windows build fix
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-02-12 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Replace old strtod with new strtod
+ https://bugs.webkit.org/show_bug.cgi?id=68044
+
+ Reviewed by Geoffrey Garen.
+
+ * parser/Lexer.cpp: Added template argument. This version allows junk after numbers.
+ (JSC::::lex):
+ * runtime/JSGlobalObjectFunctions.cpp: Ditto.
+ (JSC::parseInt):
+ (JSC::jsStrDecimalLiteral):
+ * runtime/LiteralParser.cpp: Ditto.
+ (JSC::::Lexer::lexNumber):
+ * wtf/dtoa.cpp: Replaced old strtod with a new version that uses the new StringToDoubleConverter.
+ It takes a template argument to allow clients to determine statically whether it should allow
+ junk after the numbers or not.
+ (WTF):
+ (WTF::strtod):
+ * wtf/dtoa.h:
+ (WTF):
+ * wtf/text/WTFString.cpp: Added template argument. This version does not allow junk after numbers.
+ (WTF::toDoubleType):
+
+2012-02-13 Sam Weinig <sam@webkit.org>
+
+ Move JSC related assertions out of Assertions.h and into their own header
+ https://bugs.webkit.org/show_bug.cgi?id=78508
+
+ Reviewed by Gavin Barraclough.
+
+ * GNUmakefile.list.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ Add GCAssertions.h
+
+ * heap/GCAssertions.h: Added.
+ Move assertions here.
+
+ * runtime/WriteBarrier.h:
+ Add #include of GCAssertions.h
+
+ * wtf/Assertions.h:
+ Remove JSC related assertions.
+
+ * wtf/Compiler.h:
+ Add compiler check for __has_trivial_destructor.
+
+2012-02-13 Chao-ying Fu <fu@mips.com>
+
+ Update MIPS patchOffsetGetByIdSlowCaseCall
+ https://bugs.webkit.org/show_bug.cgi?id=78392
+
+ Reviewed by Gavin Barraclough.
+
+ * jit/JIT.h:
+ (JIT):
+
+2012-02-13 Patrick Gansterer <paroga@webkit.org>
+
+ Remove obsolete #if from ThreadSpecific.h
+ https://bugs.webkit.org/show_bug.cgi?id=78485
+
+ Reviewed by Adam Roben.
+
+        Since all platforms use either pthread or Win32 for threading,
+ we can remove all PLATFORM() preprocessor statements.
+
+ * wtf/ThreadSpecific.h:
+ (ThreadSpecific):
+
+2012-02-13 Jessie Berlin <jberlin@apple.com>
+
+ Fix the Windows build.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-02-13 Sam Weinig <sam@webkit.org>
+
+ Use C11's _Static_assert for COMPILE_ASSERT if it is available
+ https://bugs.webkit.org/show_bug.cgi?id=78506
+
+ Rubber-stamped by Antti Koivisto.
+
+ Use C11's _Static_assert for COMPILE_ASSERT if it is available to give slightly
+ better error messages.
+
+ * wtf/Assertions.h:
+ Use _Static_assert if it is available.
+
+ * wtf/Compiler.h:
+ Add COMPILER_SUPPORTS support for _Static_assert when using the LLVM Compiler.
+
+2012-02-13 Mario Sanchez Prada <msanchez@igalia.com>
+
+ [GTK] Add GSList to the list of GObject types in GOwnPtr
+ https://bugs.webkit.org/show_bug.cgi?id=78487
+
+ Reviewed by Philippe Normand.
+
+ Handle the GSList type in GOwnPtr, by calling g_slist_free in the
+ implementation of the freeOwnedGPtr template function.
+
+ * wtf/gobject/GOwnPtr.cpp:
+ (WTF::GSList):
+ (WTF):
+ * wtf/gobject/GOwnPtr.h:
+ (WTF):
+ * wtf/gobject/GTypedefs.h:
+
+2012-02-06 Raphael Kubo da Costa <kubo@profusion.mobi>
+
+ [EFL] Drop support for the Curl network backend.
+ https://bugs.webkit.org/show_bug.cgi?id=77874
+
+ Reviewed by Eric Seidel.
+
+ Nobody seems to be maintaining the Curl backend in WebCore, the
+ EFL port developers all seem to be using the Soup backend and the
+ port itself has many features which are only implemented for the
+ latter.
+
+ * wtf/PlatformEfl.cmake: Always build the gobject-dependent source
+ files.
+
+2012-02-13 Patrick Gansterer <paroga@webkit.org>
+
+ Unreviewed. Build fix for !ENABLE(JIT) after r107485.
+
+ * bytecode/PolymorphicPutByIdList.cpp:
+
+2012-02-13 Gavin Barraclough <barraclough@apple.com>
+
+ https://bugs.webkit.org/show_bug.cgi?id=78434
+        Unreviewed - temporarily reverting r107498 while I fix a couple of testcases.
+
+ * parser/Parser.cpp:
+ (JSC::::parseFunctionInfo):
+ * runtime/ClassInfo.h:
+ (MethodTable):
+ (JSC):
+ * runtime/JSCell.cpp:
+ (JSC):
+ * runtime/JSCell.h:
+ (JSCell):
+ * runtime/JSGlobalObject.cpp:
+ (JSC::JSGlobalObject::reset):
+ * runtime/JSGlobalObjectFunctions.cpp:
+ (JSC):
+ * runtime/JSGlobalObjectFunctions.h:
+ (JSC):
+ * runtime/JSObject.cpp:
+ (JSC::JSObject::put):
+ (JSC):
+ (JSC::JSObject::putDirectAccessor):
+ (JSC::JSObject::defineOwnProperty):
+ * runtime/JSObject.h:
+ (JSC::JSObject::inlineGetOwnPropertySlot):
+ (JSC::JSValue::get):
+ * runtime/JSString.cpp:
+ (JSC::JSString::getOwnPropertySlot):
+ * runtime/JSValue.h:
+ (JSValue):
+ * runtime/ObjectConstructor.cpp:
+ (JSC::objectConstructorGetPrototypeOf):
+ * runtime/Structure.cpp:
+ (JSC::Structure::Structure):
+ * runtime/Structure.h:
+ (JSC::Structure::setHasGetterSetterProperties):
+ (Structure):
+
+2012-02-12 Ashod Nakashian <ashodnakashian@yahoo.com>
+
+ KeywordLookupGenerator.py script fails in some cases
+ https://bugs.webkit.org/show_bug.cgi?id=77886
+
+ Reviewed by Benjamin Poulain.
+
+ * parser/Keywords.table: Converted to LF-only.
+
+2012-02-12 Shinya Kawanaka <shinyak@google.com>
+
+ Introduce ShadowRootList.
+ https://bugs.webkit.org/show_bug.cgi?id=78069
+
+ Reviewed by Hajime Morita.
+
+ DoublyLinkedList should have tail() method to take the last element.
+
+ * wtf/DoublyLinkedList.h:
+ (DoublyLinkedList):
+ (WTF::::tail):
+ (WTF):
+
+2012-02-12 Raphael Kubo da Costa <kubo@profusion.mobi>
+
+ [CMake] Move source files in WTF_HEADERS to WTF_SOURCES.
+ https://bugs.webkit.org/show_bug.cgi?id=78436
+
+ Reviewed by Daniel Bates.
+
+ * wtf/CMakeLists.txt: Move .cpp files from WTF_HEADERS to WTF_SOURCES,
+ and correctly sort the files which start with 'M'.
+
+2012-02-12 Sam Weinig <sam@webkit.org>
+
+ Move the NumberOfCores.h/cpp files into the WTF group of JavaScriptCore.xcodeproj.
+
+ Rubber-stamped by Anders Carlsson.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+
+2012-02-12 Raphael Kubo da Costa <kubo@profusion.mobi>
+
+ [CMake] Remove unused or empty variable definitions.
+ https://bugs.webkit.org/show_bug.cgi?id=78437
+
+ Reviewed by Daniel Bates.
+
+ * CMakeLists.txt: Remove unused JavaScriptCore_HEADERS definition.
+ * shell/CMakeLists.txt: Remove unused JSC_HEADERS definition.
+ * wtf/CMakeLists.txt: Remove empty WTF_LIBRARIES definition, it will
+ be defined later by Platform*.cmake via LIST(APPEND WTF_LIBRARIES).
+
+2012-02-12 Filip Pizlo <fpizlo@apple.com>
+
+ DFG::SpeculativeJIT calls fprintf() instead of dataLog in terminateSpeculativeExecution()
+ https://bugs.webkit.org/show_bug.cgi?id=78431
+
+ Reviewed by Gavin Barraclough.
+
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::terminateSpeculativeExecution):
+
+2012-02-11 Benjamin Poulain <benjamin@webkit.org>
+
+ Add back WTFURL to WebKit
+ https://bugs.webkit.org/show_bug.cgi?id=77291
+
+ Reviewed by Adam Barth.
+
+ WTFURL was removed from WebKit in r86787.
+
+ This patch adds the code back to WTF with the following changes:
+ -Guard the feature with USE(WTFURL).
+ -Change the typename CHAR to CharacterType to follow recent WebKit conventions.
+ -Fix some coding style to make check-webkit-style happy.
+
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * wtf/Platform.h:
+ * wtf/url/api/ParsedURL.cpp: Added.
+ (WTF):
+ (WTF::ParsedURL::ParsedURL):
+ (WTF::ParsedURL::scheme):
+ (WTF::ParsedURL::username):
+ (WTF::ParsedURL::password):
+ (WTF::ParsedURL::host):
+ (WTF::ParsedURL::port):
+ (WTF::ParsedURL::path):
+ (WTF::ParsedURL::query):
+ (WTF::ParsedURL::fragment):
+ (WTF::ParsedURL::segment):
+ * wtf/url/api/ParsedURL.h: Added.
+ (WTF):
+ (ParsedURL):
+ (WTF::ParsedURL::spec):
+ * wtf/url/api/URLString.h: Added.
+ (WTF):
+ (URLString):
+ (WTF::URLString::URLString):
+ (WTF::URLString::string):
+ * wtf/url/src/RawURLBuffer.h: Added.
+ (WTF):
+ (RawURLBuffer):
+ (WTF::RawURLBuffer::RawURLBuffer):
+ (WTF::RawURLBuffer::~RawURLBuffer):
+ (WTF::RawURLBuffer::resize):
+ * wtf/url/src/URLBuffer.h: Added.
+ (WTF):
+ (URLBuffer):
+ (WTF::URLBuffer::URLBuffer):
+ (WTF::URLBuffer::~URLBuffer):
+ (WTF::URLBuffer::at):
+ (WTF::URLBuffer::set):
+ (WTF::URLBuffer::capacity):
+ (WTF::URLBuffer::length):
+ (WTF::URLBuffer::data):
+ (WTF::URLBuffer::setLength):
+ (WTF::URLBuffer::append):
+ (WTF::URLBuffer::grow):
+ * wtf/url/src/URLCharacterTypes.cpp: Added.
+ (WTF):
+ ():
+ * wtf/url/src/URLCharacterTypes.h: Added.
+ (WTF):
+ (URLCharacterTypes):
+ (WTF::URLCharacterTypes::isQueryChar):
+ (WTF::URLCharacterTypes::isIPv4Char):
+ (WTF::URLCharacterTypes::isHexChar):
+ ():
+ (WTF::URLCharacterTypes::isCharOfType):
+ * wtf/url/src/URLComponent.h: Added.
+ (WTF):
+ (URLComponent):
+ (WTF::URLComponent::URLComponent):
+ (WTF::URLComponent::fromRange):
+ (WTF::URLComponent::isValid):
+ (WTF::URLComponent::isNonEmpty):
+ (WTF::URLComponent::isEmptyOrInvalid):
+ (WTF::URLComponent::reset):
+ (WTF::URLComponent::operator==):
+ (WTF::URLComponent::begin):
+ (WTF::URLComponent::setBegin):
+ (WTF::URLComponent::length):
+ (WTF::URLComponent::setLength):
+ (WTF::URLComponent::end):
+ * wtf/url/src/URLEscape.cpp: Added.
+ (WTF):
+ ():
+ * wtf/url/src/URLEscape.h: Added.
+ (WTF):
+ (WTF::appendURLEscapedCharacter):
+ * wtf/url/src/URLParser.h: Added.
+ (WTF):
+ (URLParser):
+ ():
+ (WTF::URLParser::isPossibleAuthorityTerminator):
+ (WTF::URLParser::parseAuthority):
+ (WTF::URLParser::extractScheme):
+ (WTF::URLParser::parseAfterScheme):
+ (WTF::URLParser::parseStandardURL):
+ (WTF::URLParser::parsePath):
+ (WTF::URLParser::parsePathURL):
+ (WTF::URLParser::parseMailtoURL):
+ (WTF::URLParser::parsePort):
+ (WTF::URLParser::extractFileName):
+ (WTF::URLParser::extractQueryKeyValue):
+ (WTF::URLParser::isURLSlash):
+ (WTF::URLParser::shouldTrimFromURL):
+ (WTF::URLParser::trimURL):
+ (WTF::URLParser::consecutiveSlashes):
+ (WTF::URLParser::isPortDigit):
+ (WTF::URLParser::nextAuthorityTerminator):
+ (WTF::URLParser::parseUserInfo):
+ (WTF::URLParser::parseServerInfo):
+ * wtf/url/src/URLQueryCanonicalizer.h: Added.
+ (WTF):
+ (URLQueryCanonicalizer):
+ (WTF::URLQueryCanonicalizer::canonicalize):
+ (WTF::URLQueryCanonicalizer::isAllASCII):
+ (WTF::URLQueryCanonicalizer::isRaw8Bit):
+ (WTF::URLQueryCanonicalizer::appendRaw8BitQueryString):
+ (WTF::URLQueryCanonicalizer::convertToQueryEncoding):
+ * wtf/url/src/URLSegments.cpp: Added.
+ (WTF):
+ (WTF::URLSegments::length):
+ (WTF::URLSegments::charactersBefore):
+ * wtf/url/src/URLSegments.h: Added.
+ (WTF):
+ (URLSegments):
+ ():
+ (WTF::URLSegments::URLSegments):
+
+2012-02-11 Filip Pizlo <fpizlo@apple.com>
+
+ Old JIT put_by_id profiling counts every put_by_id_transition as taking slow path
+ https://bugs.webkit.org/show_bug.cgi?id=78430
+ <rdar://problem/10849469> <rdar://problem/10849684>
+
+ Reviewed by Gavin Barraclough.
+
+ The old JIT's put_by_id transition caching involves repatching the slow call to
+ a generated stub. That means that the call is counted as "slow case". So, this
+ patch inserts code to decrement the slow case count if the stub succeeds.
+
+ Looks like a ~1% speed-up on V8.
+
+ * jit/JITPropertyAccess.cpp:
+ (JSC::JIT::privateCompilePutByIdTransition):
+ * jit/JITPropertyAccess32_64.cpp:
+ (JSC::JIT::privateCompilePutByIdTransition):
+
+2012-02-11 Filip Pizlo <fpizlo@apple.com>
+
+ Build fix for Qt.
+
+ * wtf/DataLog.h:
+
+2012-02-11 Filip Pizlo <fpizlo@apple.com>
+
+ It should be possible to send all JSC debug logging to a file
+ https://bugs.webkit.org/show_bug.cgi?id=78418
+
+ Reviewed by Sam Weinig.
+
+ Introduced wtf/DataLog, which defines WTF::dataFile, WTF::dataLog,
+ and WTF::dataLogV. Changed all debugging- and profiling-related printfs
+ to use WTF::dataLog() or one of its friends. By default, debug logging
+ goes to stderr, unless you change the setting in wtf/DataLog.cpp.
+
+ * GNUmakefile.list.am:
+ * JavaScriptCore.gypi:
+ * JavaScriptCore.vcproj/WTF/WTF.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/LinkBuffer.h:
+ (JSC::LinkBuffer::dumpLinkStatistics):
+ (JSC::LinkBuffer::dumpCode):
+ * assembler/SH4Assembler.h:
+ (JSC::SH4Assembler::vprintfStdoutInstr):
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::printUnaryOp):
+ (JSC::CodeBlock::printBinaryOp):
+ (JSC::CodeBlock::printConditionalJump):
+ (JSC::CodeBlock::printGetByIdOp):
+ (JSC::CodeBlock::printCallOp):
+ (JSC::CodeBlock::printPutByIdOp):
+ (JSC::printGlobalResolveInfo):
+ (JSC::printStructureStubInfo):
+ (JSC::CodeBlock::printStructure):
+ (JSC::CodeBlock::printStructures):
+ (JSC::CodeBlock::dump):
+ (JSC::CodeBlock::dumpStatistics):
+ (JSC::CodeBlock::finalizeUnconditionally):
+ (JSC::CodeBlock::shouldOptimizeNow):
+ (JSC::CodeBlock::tallyFrequentExitSites):
+ (JSC::CodeBlock::dumpValueProfiles):
+ * bytecode/Opcode.cpp:
+ (JSC::OpcodeStats::~OpcodeStats):
+ * bytecode/SamplingTool.cpp:
+ (JSC::SamplingFlags::stop):
+ (JSC::SamplingRegion::dumpInternal):
+ (JSC::SamplingTool::dump):
+ * dfg/DFGAbstractState.cpp:
+ (JSC::DFG::AbstractState::endBasicBlock):
+ (JSC::DFG::AbstractState::mergeStateAtTail):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::getPredictionWithoutOSRExit):
+ (JSC::DFG::ByteCodeParser::makeSafe):
+ (JSC::DFG::ByteCodeParser::makeDivSafe):
+ (JSC::DFG::ByteCodeParser::handleCall):
+ (JSC::DFG::ByteCodeParser::handleInlining):
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ (JSC::DFG::ByteCodeParser::processPhiStack):
+ (JSC::DFG::ByteCodeParser::linkBlock):
+ (JSC::DFG::ByteCodeParser::parseCodeBlock):
+ (JSC::DFG::ByteCodeParser::parse):
+ * dfg/DFGCommon.h:
+ * dfg/DFGDriver.cpp:
+ (JSC::DFG::compile):
+ * dfg/DFGGraph.cpp:
+ (JSC::DFG::printWhiteSpace):
+ (JSC::DFG::Graph::dumpCodeOrigin):
+ (JSC::DFG::Graph::dump):
+ (JSC::DFG::Graph::predictArgumentTypes):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ * dfg/DFGOSREntry.cpp:
+ (JSC::DFG::prepareOSREntry):
+ * dfg/DFGOSRExitCompiler.cpp:
+ * dfg/DFGOSRExitCompiler32_64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+ * dfg/DFGOSRExitCompiler64.cpp:
+ (JSC::DFG::OSRExitCompiler::compileExit):
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGPropagator.cpp:
+ (JSC::DFG::Propagator::fixpoint):
+ (JSC::DFG::Propagator::propagateArithNodeFlags):
+ (JSC::DFG::Propagator::propagateArithNodeFlagsForward):
+ (JSC::DFG::Propagator::propagateArithNodeFlagsBackward):
+ (JSC::DFG::Propagator::propagateNodePredictions):
+ (JSC::DFG::Propagator::propagatePredictionsForward):
+ (JSC::DFG::Propagator::propagatePredictionsBackward):
+ (JSC::DFG::Propagator::doRoundOfDoubleVoting):
+ (JSC::DFG::Propagator::fixupNode):
+ (JSC::DFG::Propagator::fixup):
+ (JSC::DFG::Propagator::startIndexForChildren):
+ (JSC::DFG::Propagator::endIndexForPureCSE):
+ (JSC::DFG::Propagator::setReplacement):
+ (JSC::DFG::Propagator::eliminate):
+ (JSC::DFG::Propagator::performNodeCSE):
+ (JSC::DFG::Propagator::localCSE):
+ (JSC::DFG::Propagator::allocateVirtualRegisters):
+ (JSC::DFG::Propagator::performBlockCFA):
+ (JSC::DFG::Propagator::performForwardCFA):
+ * dfg/DFGRegisterBank.h:
+ (JSC::DFG::RegisterBank::dump):
+ * dfg/DFGScoreBoard.h:
+ (JSC::DFG::ScoreBoard::dump):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::dump):
+ (JSC::DFG::SpeculativeJIT::checkConsistency):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateCell):
+ (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
+ * heap/Heap.cpp:
+ (JSC::Heap::destroy):
+ * heap/MarkedBlock.h:
+ * interpreter/CallFrame.cpp:
+ (JSC::CallFrame::dumpCaller):
+ * interpreter/Interpreter.cpp:
+ (JSC::Interpreter::dumpRegisters):
+ * jit/JIT.cpp:
+ (JSC::JIT::privateCompileMainPass):
+ (JSC::JIT::privateCompileSlowCases):
+ (JSC::JIT::privateCompile):
+ * jit/JITStubs.cpp:
+ (JSC::DEFINE_STUB_FUNCTION):
+ * profiler/Profile.cpp:
+ (JSC::Profile::debugPrintData):
+ (JSC::Profile::debugPrintDataSampleStyle):
+ * profiler/ProfileNode.cpp:
+ (JSC::ProfileNode::debugPrintData):
+ (JSC::ProfileNode::debugPrintDataSampleStyle):
+ * runtime/JSGlobalData.cpp:
+ (JSC::JSGlobalData::dumpRegExpTrace):
+ * runtime/RegExp.cpp:
+ (JSC::RegExp::matchCompareWithInterpreter):
+ * runtime/SamplingCounter.cpp:
+ (JSC::AbstractSamplingCounter::dump):
+ * runtime/SamplingCounter.h:
+ (JSC::DeletableSamplingCounter::~DeletableSamplingCounter):
+ * runtime/ScopeChain.cpp:
+ (JSC::ScopeChainNode::print):
+ * runtime/Structure.cpp:
+ (JSC::Structure::dumpStatistics):
+ (JSC::PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger):
+ * tools/CodeProfile.cpp:
+ (JSC::CodeProfile::report):
+ * tools/ProfileTreeNode.h:
+ (JSC::ProfileTreeNode::dumpInternal):
+ * wtf/CMakeLists.txt:
+ * wtf/DataLog.cpp: Added.
+ (WTF):
+ (WTF::initializeLogFileOnce):
+ (WTF::initializeLogFile):
+ (WTF::dataFile):
+ (WTF::dataLogV):
+ (WTF::dataLog):
+ * wtf/DataLog.h: Added.
+ (WTF):
+ * wtf/HashTable.cpp:
+ (WTF::HashTableStats::~HashTableStats):
+ * wtf/MetaAllocator.cpp:
+ (WTF::MetaAllocator::dumpProfile):
+ * wtf/text/WTFString.cpp:
+ (String::show):
+ * yarr/YarrInterpreter.cpp:
+ (JSC::Yarr::ByteCompiler::dumpDisjunction):
+
+2012-02-11 Gavin Barraclough <barraclough@apple.com>
+
+ Move special __proto__ property to Object.prototype
+ https://bugs.webkit.org/show_bug.cgi?id=78409
+
+ Reviewed by Oliver Hunt.
+
+ Re-implement this as a regular accessor property. This has three key benefits:
+ 1) It makes it possible for objects to be given properties named __proto__.
+ 2) Object.prototype.__proto__ can be deleted, preventing object prototypes from being changed.
+    3) This largely removes the magic used to implement __proto__, it can just be made a regular accessor property.
+
+ * parser/Parser.cpp:
+ (JSC::::parseFunctionInfo):
+ - No need to prohibit functions named __proto__.
+ * runtime/JSGlobalObject.cpp:
+ (JSC::JSGlobalObject::reset):
+ - Add __proto__ accessor to Object.prototype.
+ * runtime/JSGlobalObjectFunctions.cpp:
+ (JSC::globalFuncProtoGetter):
+ (JSC::globalFuncProtoSetter):
+ - Definition of the __proto__ accessor functions.
+ * runtime/JSGlobalObjectFunctions.h:
+ - Declaration of the __proto__ accessor functions.
+ * runtime/JSObject.cpp:
+ (JSC::JSObject::put):
+ - Remove the special handling for __proto__, there is still a check to allow for a fast guard for accessors excluding __proto__.
+ (JSC::JSObject::putDirectAccessor):
+ - Track on the structure whether an object contains accessors other than one for __proto__.
+ (JSC::JSObject::defineOwnProperty):
+ - No need to prohibit definition of own properties named __proto__.
+ * runtime/JSObject.h:
+ (JSC::JSObject::inlineGetOwnPropertySlot):
+ - Remove the special handling for __proto__.
+ (JSC::JSValue::get):
+ - Remove the special handling for __proto__.
+ * runtime/JSString.cpp:
+ (JSC::JSString::getOwnPropertySlot):
+ - Remove the special handling for __proto__.
+ * runtime/JSValue.h:
+ (JSValue):
+ - Made synthesizePrototype public (this may be needed by the __proto__ getter).
+ * runtime/ObjectConstructor.cpp:
+ (JSC::objectConstructorGetPrototypeOf):
+ - Perform the security check & call prototype() directly.
+ * runtime/Structure.cpp:
+ (JSC::Structure::Structure):
+ - Added 'ExcludingProto' variant of the 'hasGetterSetterProperties' state.
+ * runtime/Structure.h:
+ (JSC::Structure::hasGetterSetterPropertiesExcludingProto):
+ (JSC::Structure::setHasGetterSetterProperties):
+ (Structure):
+ - Added 'ExcludingProto' variant of the 'hasGetterSetterProperties' state.
+
+2012-02-11 Filip Pizlo <fpizlo@apple.com>
+
+ DFG CFA assumes that a WeakJSConstant's structure is known
+ https://bugs.webkit.org/show_bug.cgi?id=78428
+ <rdar://problem/10849492> <rdar://problem/10849621>
+
+ Reviewed by Gavin Barraclough.
+
+ * dfg/DFGAbstractState.cpp:
+ (JSC::DFG::AbstractState::execute):
+
+2012-02-11 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Qt debug build fix
+
+ * heap/MarkedBlock.cpp:
+ (JSC::MarkedBlock::callDestructor): Platforms that don't use clang will allocate
+        JSFinalObjects in the destructor subspace, so we should remove this assert so it
+ doesn't cause crashes.
+
+2012-02-11 Filip Pizlo <fpizlo@apple.com>
+
+ Old 32_64 JIT should assert that its use of map() is consistent with the DFG
+ OSR exit's expectations
+ https://bugs.webkit.org/show_bug.cgi?id=78419
+ <rdar://problem/10817121>
+
+ Reviewed by Oliver Hunt.
+
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::map):
+
+2012-02-11 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Reduce the reentrancy limit of the interpreter for the iOS simulator
+ https://bugs.webkit.org/show_bug.cgi?id=78400
+
+ Reviewed by Gavin Barraclough.
+
+ * interpreter/Interpreter.h: Lowered the maximum reentrancy limit for large thread stacks.
+ (JSC):
+
+2012-02-11 Filip Pizlo <fpizlo@apple.com>
+
+ [DFG] Misuse of WeakJSConstants in silentFillGPR code.
+ https://bugs.webkit.org/show_bug.cgi?id=78423
+ <rdar://problem/10849353> <rdar://problem/10804043>
+
+ Reviewed by Sam Weinig.
+
+ The code was using Node::isConstant(), when it was supposed to use Node::hasConstant().
+ This patch is a surgical fix; the bigger problem is: why do we have isConstant() and
+ hasConstant() when hasConstant() is correct and isConstant() is almost always wrong?
+
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::silentFillGPR):
+
+2012-02-11 Sam Weinig <sam@webkit.org>
+
+ Prepare JavaScriptCore to build with libc++
+ <rdar://problem/10426673>
+ https://bugs.webkit.org/show_bug.cgi?id=78424
+
+ Reviewed by Anders Carlsson.
+
+ * wtf/NullPtr.cpp:
+ * wtf/NullPtr.h:
+ libc++ provides std::nullptr emulation, so we don't have to.
+
+2012-02-07 Filip Pizlo <fpizlo@apple.com>
+
+ DFG should have polymorphic put_by_id caching
+ https://bugs.webkit.org/show_bug.cgi?id=78062
+ <rdar://problem/10326439> <rdar://problem/10824839>
+
+ Reviewed by Oliver Hunt.
+
+ Implemented polymorphic put_by_id caching in the DFG, and added much of the
+ machinery that would be needed to implement it in the old JIT as well.
+
+ I decided against using the old PolymorphicAccessStructureList mechanism as
+ this didn't quite fit with put_by_id. In particular, I wanted the ability to
+ have one list that captured all relevant cases (including proto put_by_id
+ if we ever decided to do it). And I wanted the code to have better
+ encapsulation. And I didn't want to get confused by the fact that the
+ original (non-list) put_by_id cache may itself consist of a stub routine.
+
+ This code is still sub-optimal (for example adding a replace to a list whose
+ previous elements are all transitions should just repatch the original code,
+ but here it will generate a stub) but it already generates a >20% speed-up
+ on V8-splay, leading to a 2% win overall in splay. Neutral elsewhere.
+
+ * CMakeLists.txt:
+ * GNUmakefile.list.am:
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * Target.pri:
+ * bytecode/PolymorphicPutByIdList.cpp: Added.
+ (JSC):
+ (JSC::PutByIdAccess::fromStructureStubInfo):
+ (JSC::PutByIdAccess::visitWeak):
+ (JSC::PolymorphicPutByIdList::PolymorphicPutByIdList):
+ (JSC::PolymorphicPutByIdList::from):
+ (JSC::PolymorphicPutByIdList::~PolymorphicPutByIdList):
+ (JSC::PolymorphicPutByIdList::isFull):
+ (JSC::PolymorphicPutByIdList::isAlmostFull):
+ (JSC::PolymorphicPutByIdList::addAccess):
+ (JSC::PolymorphicPutByIdList::visitWeak):
+ * bytecode/PolymorphicPutByIdList.h: Added.
+ (JSC):
+ (PutByIdAccess):
+ (JSC::PutByIdAccess::PutByIdAccess):
+ (JSC::PutByIdAccess::transition):
+ (JSC::PutByIdAccess::replace):
+ (JSC::PutByIdAccess::isSet):
+ (JSC::PutByIdAccess::operator!):
+ (JSC::PutByIdAccess::type):
+ (JSC::PutByIdAccess::isTransition):
+ (JSC::PutByIdAccess::isReplace):
+ (JSC::PutByIdAccess::oldStructure):
+ (JSC::PutByIdAccess::structure):
+ (JSC::PutByIdAccess::newStructure):
+ (JSC::PutByIdAccess::chain):
+ (JSC::PutByIdAccess::stubRoutine):
+ (PolymorphicPutByIdList):
+ (JSC::PolymorphicPutByIdList::currentSlowPathTarget):
+ (JSC::PolymorphicPutByIdList::isEmpty):
+ (JSC::PolymorphicPutByIdList::size):
+ (JSC::PolymorphicPutByIdList::at):
+ (JSC::PolymorphicPutByIdList::operator[]):
+ (JSC::PolymorphicPutByIdList::kind):
+ * bytecode/PutKind.h: Added.
+ (JSC):
+ * bytecode/StructureStubInfo.cpp:
+ (JSC::StructureStubInfo::deref):
+ (JSC::StructureStubInfo::visitWeakReferences):
+ * bytecode/StructureStubInfo.h:
+ (JSC):
+ (JSC::isPutByIdAccess):
+ (JSC::StructureStubInfo::initPutByIdList):
+ (StructureStubInfo):
+ (JSC::StructureStubInfo::reset):
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGOperations.h:
+ (DFG):
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::appropriateGenericPutByIdFunction):
+ (JSC::DFG::appropriateListBuildingPutByIdFunction):
+ (DFG):
+ (JSC::DFG::emitPutReplaceStub):
+ (JSC::DFG::emitPutTransitionStub):
+ (JSC::DFG::tryCachePutByID):
+ (JSC::DFG::dfgRepatchPutByID):
+ (JSC::DFG::tryBuildPutByIdList):
+ (JSC::DFG::dfgBuildPutByIdList):
+ (JSC::DFG::dfgResetPutByID):
+ * dfg/DFGRepatch.h:
+ (DFG):
+ * runtime/WriteBarrier.h:
+ (WriteBarrierBase):
+ (JSC::WriteBarrierBase::copyFrom):
+
+2012-02-10 Vineet Chaudhary <rgf748@motorola.com>
+
+ https://bugs.webkit.org/show_bug.cgi?id=72756
+ DOMHTMLElement’s accessKey property is declared as available in WebKit version that didn’t have it
+
+ Reviewed by Timothy Hatcher.
+
+ * API/WebKitAvailability.h: Added AVAILABLE_AFTER_WEBKIT_VERSION_5_1 and
+ AVAILABLE_WEBKIT_VERSION_1_3_AND_LATER_BUT_DEPRECATED_AFTER_WEBKIT_VERSION_5_1 for the new versions.
+
+2012-02-10 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Fixing windows build
+
+ Unreviewed build fix
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+
+2012-02-10 Adam Klein <adamk@chromium.org>
+
+ Enable MUTATION_OBSERVERS by default on all platforms
+ https://bugs.webkit.org/show_bug.cgi?id=78196
+
+ Reviewed by Ojan Vafai.
+
+ * Configurations/FeatureDefines.xcconfig:
+
+2012-02-10 Yong Li <yoli@rim.com>
+
+ ENABLE(ASSEMBLER_WX_EXCLUSIVE): LinkBuffer can leave pages not marked as executable.
+ https://bugs.webkit.org/show_bug.cgi?id=76724
+
+ Reviewed by Rob Buis.
+
+ This issue only exists when both ENABLE(ASSEMBLER_WX_EXCLUSIVE) and ENABLE(BRANCH_COMPACTION) are on.
+ The size used to call makeExecutable can be smaller than the one that was used for makeWritable.
+ So it can leave pages behind that are not set back to default flags. When an assembly on one of those
+ pages is executed or JIT returns to those pages in the case it was already executing from there, the
+ software will crash.
+
+ * assembler/LinkBuffer.h: Add m_initialSize and use it in performFinalization().
+ (JSC::LinkBuffer::LinkBuffer):
+ (JSC::LinkBuffer::linkCode):
+ (JSC::LinkBuffer::performFinalization):
+ (LinkBuffer):
+
+2012-02-10 Mark Hahnenberg <mhahnenberg@apple.com>
+
+ Split MarkedSpace into destructor and destructor-free subspaces
+ https://bugs.webkit.org/show_bug.cgi?id=77761
+
+ Reviewed by Geoffrey Garen.
+
+ * dfg/DFGSpeculativeJIT.h:
+ (JSC::DFG::SpeculativeJIT::emitAllocateJSFinalObject): Switched over to use destructor-free space.
+ * heap/Heap.h:
+ (JSC::Heap::allocatorForObjectWithoutDestructor): Added to give clients (e.g. the JIT) the ability to
+ pick which subspace they want to allocate out of.
+ (JSC::Heap::allocatorForObjectWithDestructor): Ditto.
+ (Heap):
+ (JSC::Heap::allocateWithDestructor): Added private function for CellAllocator to use.
+ (JSC):
+ (JSC::Heap::allocateWithoutDestructor): Ditto.
+ * heap/MarkedAllocator.cpp: Added the cellsNeedDestruction flag to allocators so that they can allocate
+ their MarkedBlocks correctly.
+ (JSC::MarkedAllocator::allocateBlock):
+ * heap/MarkedAllocator.h:
+ (JSC::MarkedAllocator::cellsNeedDestruction):
+ (MarkedAllocator):
+ (JSC::MarkedAllocator::MarkedAllocator):
+ (JSC):
+ (JSC::MarkedAllocator::init): Replaced custom set functions, which were only used upon initialization, with
+ an init function that does all of that stuff in fewer lines.
+ * heap/MarkedBlock.cpp:
+ (JSC::MarkedBlock::create):
+ (JSC::MarkedBlock::recycle):
+ (JSC::MarkedBlock::MarkedBlock):
+ (JSC::MarkedBlock::callDestructor): Templatized, along with specializedSweep and sweepHelper, to make
+        checking the m_cellsNeedDestruction flag faster and cleaner looking.
+ (JSC):
+ (JSC::MarkedBlock::specializedSweep):
+ (JSC::MarkedBlock::sweep):
+ (JSC::MarkedBlock::sweepHelper):
+ * heap/MarkedBlock.h:
+ (MarkedBlock):
+ (JSC::MarkedBlock::cellsNeedDestruction):
+ (JSC):
+ * heap/MarkedSpace.cpp:
+ (JSC::MarkedSpace::MarkedSpace):
+ (JSC::MarkedSpace::resetAllocators):
+ (JSC::MarkedSpace::canonicalizeCellLivenessData):
+ (JSC::TakeIfUnmarked::operator()):
+ * heap/MarkedSpace.h:
+ (MarkedSpace):
+ (Subspace):
+ (JSC::MarkedSpace::allocatorFor): Needed function to differentiate between the two broad subspaces of
+ allocators.
+ (JSC):
+ (JSC::MarkedSpace::destructorAllocatorFor): Ditto.
+ (JSC::MarkedSpace::allocateWithoutDestructor): Ditto.
+ (JSC::MarkedSpace::allocateWithDestructor): Ditto.
+ (JSC::MarkedSpace::forEachBlock):
+ * jit/JIT.h:
+ * jit/JITInlineMethods.h: Modified to use the proper allocator for JSFinalObjects and others.
+ (JSC::JIT::emitAllocateBasicJSObject):
+ (JSC::JIT::emitAllocateJSFinalObject):
+ (JSC::JIT::emitAllocateJSFunction):
+ * runtime/JSArray.cpp:
+ (JSC):
+ * runtime/JSArray.h:
+ (JSArray):
+ (JSC::JSArray::create):
+ (JSC):
+ (JSC::JSArray::tryCreateUninitialized):
+ * runtime/JSCell.h:
+ (JSCell):
+ (JSC):
+ (NeedsDestructor): Template struct that calculates at compile time whether the class in question requires
+ destruction or not using the compiler type trait __has_trivial_destructor. allocateCell then checks this
+ constant to decide whether to allocate in the destructor or destructor-free parts of the heap.
+ (JSC::allocateCell):
+ * runtime/JSFunction.cpp:
+ (JSC):
+ * runtime/JSFunction.h:
+ (JSFunction):
+ * runtime/JSObject.cpp:
+ (JSC):
+ * runtime/JSObject.h:
+ (JSNonFinalObject):
+ (JSC):
+ (JSFinalObject):
+ (JSC::JSFinalObject::create):
+
+2012-02-10 Adrienne Walker <enne@google.com>
+
+ Remove implicit copy constructor usage in HashMaps with OwnPtr
+ https://bugs.webkit.org/show_bug.cgi?id=78071
+
+ Reviewed by Darin Adler.
+
+ Change the return type of emptyValue() in PairHashTraits to be the
+ actual type returned rather than the trait type to avoid an implicit
+ generation of the OwnPtr copy constructor. This happens for hash
+ traits involving OwnPtr where the empty value is not zero and each
+ hash bucket needs to be initialized with emptyValue().
+
+ Also, update StructureTransitionTable to use default hash traits
+ rather than rolling its own, in order to update it to handle
+ EmptyValueType.
+
+ Test: patch from bug 74154 compiles on Clang with this patch
+
+ * runtime/StructureTransitionTable.h:
+ (StructureTransitionTable):
+ * wtf/HashTraits.h:
+ (GenericHashTraits):
+ (PairHashTraits):
+ (WTF::PairHashTraits::emptyValue):
+
+2012-02-10 Aron Rosenberg <arosenberg@logitech.com>
+
+ [Qt] Fix compiler warning in Visual Studio 2010 about TR1
+ https://bugs.webkit.org/show_bug.cgi?id=63642
+
+ Reviewed by Simon Hausmann.
+
+ * JavaScriptCore.pri:
+
+2012-02-10 Michael Saboff <msaboff@apple.com>
+
+ Yarr assert with regexp where alternative in *-quantified group matches empty
+ https://bugs.webkit.org/show_bug.cgi?id=67752
+
+ Reviewed by Gavin Barraclough.
+
+ Added backtracking for the prior alternative if it matched
+ but didn't consume any input characters.
+
+ * yarr/YarrJIT.cpp:
+ (YarrOp): New jump.
+ (JSC::Yarr::YarrGenerator::generate): Emit conditional jump
+ when an alternative matches and no input was consumed. Moved the
+ zero length match check for a set of alternatives to the alternative
+ code from the parentheses cases to the alternative end cases.
+ Converted the existing zero length checks in the parentheses cases
+ to runtime assertion checks.
+ (JSC::Yarr::YarrGenerator::backtrack): Link new jump to backtrack
+ to prior term.
+
+2012-02-10 Roland Takacs <takacs.roland@stud.u-szeged.hu>
+
+ [Qt] GC should be parallel on Qt platform
+ https://bugs.webkit.org/show_bug.cgi?id=73309
+
+ Reviewed by Zoltan Herczeg.
+
+ These changes made the parallel gc feature available for Qt port.
+ The implementation of "registerGCThread" and "isMainThreadOrGCThread",
+ and a local static function [initializeGCThreads] is moved from
+ MainThreadMac.mm to the common MainThread.cpp to make them available
+ for other platforms.
+
+ Measurement results:
+ V8 speed-up: 1.025x as fast [From: 663.4ms To: 647.0ms ]
+ V8 Splay speed-up: 1.185x as fast [From: 138.4ms To: 116.8ms ]
+
+ Tested on Intel(R) Core(TM) i5-2320 CPU @ 3.00GHz with 4-core.
+
+ * JavaScriptCore.order:
+ * wtf/MainThread.cpp:
+ (WTF::initializeMainThread):
+ (WTF):
+ (WTF::initializeGCThreads):
+ (WTF::registerGCThread):
+ (WTF::isMainThreadOrGCThread):
+ * wtf/MainThread.h:
+ (WTF):
+ * wtf/Platform.h:
+ * wtf/mac/MainThreadMac.mm:
+ (WTF):
+
+2012-02-09 Andy Wingo <wingo@igalia.com>
+
+ Eliminate dead code in BytecodeGenerator::resolve()
+ https://bugs.webkit.org/show_bug.cgi?id=78242
+
+ Reviewed by Gavin Barraclough.
+
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::resolve):
+ BytecodeGenerator::shouldOptimizeLocals() is only true for
+ FunctionCode, and thus cannot be true for GlobalCode.
+
+2012-02-09 Andy Wingo <wingo@igalia.com>
+
+ Remove BytecodeGenerator::isLocal
+ https://bugs.webkit.org/show_bug.cgi?id=78241
+
+ Minor refactor to BytecodeGenerator.
+
+ Reviewed by Gavin Barraclough.
+
+ * bytecompiler/BytecodeGenerator.h:
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::isLocal):
+ (JSC::BytecodeGenerator::isLocalConstant): Remove now-unused
+ methods.
+ * bytecompiler/NodesCodegen.cpp:
+ (JSC::ResolveNode::isPure): Use the ResolveResult mechanism
+ instead of isLocal. This will recognize more resolve nodes as
+ being pure.
+ (JSC::PrefixResolveNode::emitBytecode): Use isReadOnly on the
+ location instead of isLocalConstant.
+
+2012-02-09 Oliver Hunt <oliver@apple.com>
+
+ The JS Parser scope object needs a VectorTrait specialization
+ https://bugs.webkit.org/show_bug.cgi?id=78308
+
+ Reviewed by Gavin Barraclough.
+
+ This showed up as a periodic crash in various bits of generated code
+ originally, but I've added an assertion in the bytecode generator
+        that makes the affected code much more crash-happy should it go
+ wrong again.
+
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::BytecodeGenerator):
+ (JSC::BytecodeGenerator::resolve):
+ * parser/Parser.cpp:
+ * parser/Parser.h:
+ (JSC):
+ * runtime/JSActivation.h:
+ (JSC::JSActivation::isValidScopedLookup):
+ (JSActivation):
+
2012-02-08 Oliver Hunt <oliver@apple.com>
Whoops, fix the build.
diff --git a/Source/JavaScriptCore/Configurations/Base.xcconfig b/Source/JavaScriptCore/Configurations/Base.xcconfig
index e304bd1fb..40c11b647 100644
--- a/Source/JavaScriptCore/Configurations/Base.xcconfig
+++ b/Source/JavaScriptCore/Configurations/Base.xcconfig
@@ -73,6 +73,11 @@ WARNING_CFLAGS_macosx_ppc64 = $(WARNING_CFLAGS_BASE);
WARNING_CFLAGS_macosx_x86_64 = $(WARNING_CFLAGS_BASE);
HEADER_SEARCH_PATHS = . icu $(HEADER_SEARCH_PATHS);
+CLANG_CXX_LIBRARY = $(CLANG_CXX_LIBRARY_$(TARGET_MAC_OS_X_VERSION_MAJOR));
+CLANG_CXX_LIBRARY_1060 = libstdc++;
+CLANG_CXX_LIBRARY_1070 = libc++;
+CLANG_CXX_LIBRARY_1080 = libc++;
+CLANG_CXX_LIBRARY_1090 = libc++;
REAL_PLATFORM_NAME = $(REAL_PLATFORM_NAME_$(PLATFORM_NAME));
REAL_PLATFORM_NAME_ = $(REAL_PLATFORM_NAME_macosx);
diff --git a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
index 6447eb7ee..576e746ad 100644
--- a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
+++ b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
@@ -37,7 +37,8 @@ ENABLE_BLOB_macosx = ENABLE_BLOB;
ENABLE_CLIENT_BASED_GEOLOCATION = $(ENABLE_CLIENT_BASED_GEOLOCATION_$(REAL_PLATFORM_NAME));
ENABLE_CLIENT_BASED_GEOLOCATION_macosx = ENABLE_CLIENT_BASED_GEOLOCATION;
-ENABLE_DASHBOARD_SUPPORT = ENABLE_DASHBOARD_SUPPORT;
+ENABLE_DASHBOARD_SUPPORT = $(ENABLE_DASHBOARD_SUPPORT_$(REAL_PLATFORM_NAME));
+ENABLE_DASHBOARD_SUPPORT_macosx = ENABLE_DASHBOARD_SUPPORT;
ENABLE_DATALIST = $(ENABLE_DATALIST_$(REAL_PLATFORM_NAME));
ENABLE_DATALIST_macosx = ENABLE_DATALIST;
@@ -95,7 +96,7 @@ ENABLE_MEDIA_SOURCE = ;
ENABLE_MEDIA_STATISTICS = ;
ENABLE_METER_TAG = ENABLE_METER_TAG;
ENABLE_MHTML = ;
-ENABLE_MUTATION_OBSERVERS = ;
+ENABLE_MUTATION_OBSERVERS = ENABLE_MUTATION_OBSERVERS;
ENABLE_NOTIFICATIONS = $(ENABLE_NOTIFICATIONS_$(REAL_PLATFORM_NAME));
ENABLE_NOTIFICATIONS_macosx = $(ENABLE_NOTIFICATIONS_macosx_$(TARGET_MAC_OS_X_VERSION_MAJOR));
diff --git a/Source/JavaScriptCore/Configurations/Version.xcconfig b/Source/JavaScriptCore/Configurations/Version.xcconfig
index 73fc62d56..7e3f57ea8 100644
--- a/Source/JavaScriptCore/Configurations/Version.xcconfig
+++ b/Source/JavaScriptCore/Configurations/Version.xcconfig
@@ -22,7 +22,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR_VERSION = 535;
-MINOR_VERSION = 20;
+MINOR_VERSION = 23;
TINY_VERSION = 0;
FULL_VERSION = $(MAJOR_VERSION).$(MINOR_VERSION);
diff --git a/Source/JavaScriptCore/GNUmakefile.am b/Source/JavaScriptCore/GNUmakefile.am
index 654cd108e..8d6d252d4 100644
--- a/Source/JavaScriptCore/GNUmakefile.am
+++ b/Source/JavaScriptCore/GNUmakefile.am
@@ -57,6 +57,7 @@ javascriptcore_cppflags += \
-I$(srcdir)/Source/JavaScriptCore/interpreter \
-I$(srcdir)/Source/JavaScriptCore/jit \
-I$(srcdir)/Source/JavaScriptCore/jit \
+ -I$(srcdir)/Source/JavaScriptCore/llint \
-I$(srcdir)/Source/JavaScriptCore/parser \
-I$(srcdir)/Source/JavaScriptCore/profiler \
-I$(srcdir)/Source/JavaScriptCore/runtime \
diff --git a/Source/JavaScriptCore/GNUmakefile.list.am b/Source/JavaScriptCore/GNUmakefile.list.am
index 79c68326d..36b64a375 100644
--- a/Source/JavaScriptCore/GNUmakefile.list.am
+++ b/Source/JavaScriptCore/GNUmakefile.list.am
@@ -81,6 +81,7 @@ javascriptcore_sources += \
Source/JavaScriptCore/assembler/RepatchBuffer.h \
Source/JavaScriptCore/assembler/SH4Assembler.h \
Source/JavaScriptCore/assembler/X86Assembler.h \
+ Source/JavaScriptCore/bytecode/BytecodeConventions.h \
Source/JavaScriptCore/bytecode/CallLinkInfo.cpp \
Source/JavaScriptCore/bytecode/CallLinkInfo.h \
Source/JavaScriptCore/bytecode/CallLinkStatus.cpp \
@@ -102,24 +103,31 @@ javascriptcore_sources += \
Source/JavaScriptCore/bytecode/Instruction.h \
Source/JavaScriptCore/bytecode/JumpTable.cpp \
Source/JavaScriptCore/bytecode/JumpTable.h \
+ Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h \
+ Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp \
+ Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h \
Source/JavaScriptCore/bytecode/LineInfo.h \
Source/JavaScriptCore/bytecode/MethodCallLinkInfo.cpp \
Source/JavaScriptCore/bytecode/MethodCallLinkInfo.h \
Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp \
Source/JavaScriptCore/bytecode/MethodCallLinkStatus.h \
+ Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp \
+ Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h \
Source/JavaScriptCore/bytecode/Opcode.cpp \
Source/JavaScriptCore/bytecode/Opcode.h \
+ Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp \
+ Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h \
Source/JavaScriptCore/bytecode/PredictedType.cpp \
Source/JavaScriptCore/bytecode/PredictedType.h \
Source/JavaScriptCore/bytecode/PredictionTracker.h \
Source/JavaScriptCore/bytecode/PutByIdStatus.cpp \
Source/JavaScriptCore/bytecode/PutByIdStatus.h \
+ Source/JavaScriptCore/bytecode/PutKind.h \
Source/JavaScriptCore/bytecode/SamplingTool.cpp \
Source/JavaScriptCore/bytecode/SamplingTool.h \
Source/JavaScriptCore/bytecode/StructureSet.h \
Source/JavaScriptCore/bytecode/StructureStubInfo.cpp \
Source/JavaScriptCore/bytecode/StructureStubInfo.h \
- Source/JavaScriptCore/bytecode/ValueProfile.cpp \
Source/JavaScriptCore/bytecode/ValueProfile.h \
Source/JavaScriptCore/bytecode/ValueRecovery.h \
Source/JavaScriptCore/bytecode/VirtualRegister.h \
@@ -132,6 +140,8 @@ javascriptcore_sources += \
Source/JavaScriptCore/dfg/DFGAbstractState.cpp \
Source/JavaScriptCore/dfg/DFGAbstractState.h \
Source/JavaScriptCore/dfg/DFGAbstractValue.h \
+ Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp \
+ Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h \
Source/JavaScriptCore/dfg/DFGAssemblyHelpers.cpp \
Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h \
Source/JavaScriptCore/dfg/DFGBasicBlock.h \
@@ -141,9 +151,13 @@ javascriptcore_sources += \
Source/JavaScriptCore/dfg/DFGCCallHelpers.h \
Source/JavaScriptCore/dfg/DFGCapabilities.cpp \
Source/JavaScriptCore/dfg/DFGCapabilities.h \
+ Source/JavaScriptCore/dfg/DFGCFAPhase.cpp \
+ Source/JavaScriptCore/dfg/DFGCFAPhase.h \
Source/JavaScriptCore/dfg/DFGCommon.h \
Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.cpp \
Source/JavaScriptCore/dfg/DFGCorrectableJumpPoint.h \
+ Source/JavaScriptCore/dfg/DFGCSEPhase.cpp \
+ Source/JavaScriptCore/dfg/DFGCSEPhase.h \
Source/JavaScriptCore/dfg/DFGDriver.cpp \
Source/JavaScriptCore/dfg/DFGDriver.h \
Source/JavaScriptCore/dfg/DFGFPRInfo.h \
@@ -167,8 +181,10 @@ javascriptcore_sources += \
Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h \
Source/JavaScriptCore/dfg/DFGOSRExit.h \
Source/JavaScriptCore/dfg/DFGOSRExit.cpp \
- Source/JavaScriptCore/dfg/DFGPropagator.cpp \
- Source/JavaScriptCore/dfg/DFGPropagator.h \
+ Source/JavaScriptCore/dfg/DFGPhase.cpp \
+ Source/JavaScriptCore/dfg/DFGPhase.h \
+ Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp \
+ Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h \
Source/JavaScriptCore/dfg/DFGRegisterBank.h \
Source/JavaScriptCore/dfg/DFGRepatch.cpp \
Source/JavaScriptCore/dfg/DFGRepatch.h \
@@ -180,15 +196,19 @@ javascriptcore_sources += \
Source/JavaScriptCore/dfg/DFGThunks.cpp \
Source/JavaScriptCore/dfg/DFGThunks.h \
Source/JavaScriptCore/dfg/DFGVariableAccessData.h \
- Source/JavaScriptCore/heap/BumpBlock.h \
- Source/JavaScriptCore/heap/BumpSpace.cpp \
- Source/JavaScriptCore/heap/BumpSpace.h \
- Source/JavaScriptCore/heap/BumpSpaceInlineMethods.h \
+ Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp \
+ Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h \
+ Source/JavaScriptCore/heap/CopiedAllocator.h \
+ Source/JavaScriptCore/heap/CopiedBlock.h \
+ Source/JavaScriptCore/heap/CopiedSpace.cpp \
+ Source/JavaScriptCore/heap/CopiedSpace.h \
+ Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h \
Source/JavaScriptCore/heap/CardSet.h \
Source/JavaScriptCore/heap/ConservativeRoots.cpp \
Source/JavaScriptCore/heap/ConservativeRoots.h \
Source/JavaScriptCore/heap/DFGCodeBlocks.cpp \
Source/JavaScriptCore/heap/DFGCodeBlocks.h \
+ Source/JavaScriptCore/heap/GCAssertions.h \
Source/JavaScriptCore/heap/Handle.h \
Source/JavaScriptCore/heap/HandleHeap.cpp \
Source/JavaScriptCore/heap/HandleHeap.h \
@@ -207,14 +227,15 @@ javascriptcore_sources += \
Source/JavaScriptCore/heap/MarkStack.cpp \
Source/JavaScriptCore/heap/MarkStack.h \
Source/JavaScriptCore/heap/HeapRootVisitor.h \
- Source/JavaScriptCore/heap/MarkedAllocator.cpp \
- Source/JavaScriptCore/heap/MarkedAllocator.h \
+ Source/JavaScriptCore/heap/MarkedAllocator.cpp \
+ Source/JavaScriptCore/heap/MarkedAllocator.h \
Source/JavaScriptCore/heap/MarkedBlock.cpp \
Source/JavaScriptCore/heap/MarkedBlock.h \
Source/JavaScriptCore/heap/MarkedBlockSet.h \
Source/JavaScriptCore/heap/TinyBloomFilter.h \
Source/JavaScriptCore/heap/MarkedSpace.cpp \
Source/JavaScriptCore/heap/MarkedSpace.h \
+ Source/JavaScriptCore/heap/PassWeak.h \
Source/JavaScriptCore/heap/Strong.h \
Source/JavaScriptCore/heap/StrongInlines.h \
Source/JavaScriptCore/heap/UnconditionalFinalizer.h \
@@ -281,6 +302,8 @@ javascriptcore_sources += \
Source/JavaScriptCore/jit/CompactJITCodeMap.h \
Source/JavaScriptCore/jit/ExecutableAllocator.cpp \
Source/JavaScriptCore/jit/ExecutableAllocator.h \
+ Source/JavaScriptCore/jit/HostCallReturnValue.cpp \
+ Source/JavaScriptCore/jit/HostCallReturnValue.h \
Source/JavaScriptCore/jit/JITArithmetic32_64.cpp \
Source/JavaScriptCore/jit/JITArithmetic.cpp \
Source/JavaScriptCore/jit/JITCall32_64.cpp \
@@ -304,6 +327,7 @@ javascriptcore_sources += \
Source/JavaScriptCore/jit/SpecializedThunkJIT.h \
Source/JavaScriptCore/jit/ThunkGenerators.cpp \
Source/JavaScriptCore/jit/ThunkGenerators.h \
+ Source/JavaScriptCore/llint/LLIntData.h \
Source/JavaScriptCore/os-win32/stdbool.h \
Source/JavaScriptCore/os-win32/stdint.h \
Source/JavaScriptCore/parser/ASTBuilder.h \
@@ -354,6 +378,7 @@ javascriptcore_sources += \
Source/JavaScriptCore/runtime/CallData.cpp \
Source/JavaScriptCore/runtime/CallData.h \
Source/JavaScriptCore/runtime/ClassInfo.h \
+ Source/JavaScriptCore/runtime/CodeSpecializationKind.h \
Source/JavaScriptCore/runtime/CommonIdentifiers.cpp \
Source/JavaScriptCore/runtime/CommonIdentifiers.h \
Source/JavaScriptCore/runtime/CommonSlowPaths.h \
@@ -382,6 +407,7 @@ javascriptcore_sources += \
Source/JavaScriptCore/runtime/ExceptionHelpers.h \
Source/JavaScriptCore/runtime/Executable.cpp \
Source/JavaScriptCore/runtime/Executable.h \
+ Source/JavaScriptCore/runtime/ExecutionHarness.h \
Source/JavaScriptCore/runtime/FunctionConstructor.cpp \
Source/JavaScriptCore/runtime/FunctionConstructor.h \
Source/JavaScriptCore/runtime/FunctionPrototype.cpp \
@@ -564,6 +590,8 @@ javascriptcore_sources += \
Source/JavaScriptCore/wtf/CurrentTime.h \
Source/JavaScriptCore/wtf/DateMath.cpp \
Source/JavaScriptCore/wtf/DateMath.h \
+ Source/JavaScriptCore/wtf/DataLog.cpp \
+ Source/JavaScriptCore/wtf/DataLog.h \
Source/JavaScriptCore/wtf/DecimalNumber.cpp \
Source/JavaScriptCore/wtf/DecimalNumber.h \
Source/JavaScriptCore/wtf/Decoder.h \
diff --git a/Source/JavaScriptCore/JSCTypedArrayStubs.h b/Source/JavaScriptCore/JSCTypedArrayStubs.h
new file mode 100644
index 000000000..cda55fc9b
--- /dev/null
+++ b/Source/JavaScriptCore/JSCTypedArrayStubs.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSCTypedArrayStubs_h
+#define JSCTypedArrayStubs_h
+
+#include "Float32Array.h"
+#include "Float64Array.h"
+#include "Int16Array.h"
+#include "Int32Array.h"
+#include "Int8Array.h"
+#include "JSObject.h"
+#include "ObjectPrototype.h"
+#include "Uint16Array.h"
+#include "Uint32Array.h"
+#include "Uint8Array.h"
+
+namespace JSC {
+
+#define TYPED_ARRAY(name, type) \
+class JS##name##Array : public JSNonFinalObject { \
+public: \
+ typedef JSNonFinalObject Base; \
+ static JS##name##Array* create(JSC::Structure* structure, JSGlobalObject* globalObject, PassRefPtr<name##Array> impl) \
+ { \
+ JS##name##Array* ptr = new (NotNull, JSC::allocateCell<JS##name##Array>(globalObject->globalData().heap)) JS##name##Array(structure, globalObject, impl); \
+ ptr->finishCreation(globalObject->globalData()); \
+ return ptr; \
+ }\
+\
+ static bool getOwnPropertySlot(JSC::JSCell*, JSC::ExecState*, const JSC::Identifier& propertyName, JSC::PropertySlot&);\
+ static bool getOwnPropertyDescriptor(JSC::JSObject*, JSC::ExecState*, const JSC::Identifier& propertyName, JSC::PropertyDescriptor&);\
+ static bool getOwnPropertySlotByIndex(JSC::JSCell*, JSC::ExecState*, unsigned propertyName, JSC::PropertySlot&);\
+ static void put(JSC::JSCell*, JSC::ExecState*, const JSC::Identifier& propertyName, JSC::JSValue, JSC::PutPropertySlot&);\
+ static void putByIndex(JSC::JSCell*, JSC::ExecState*, unsigned propertyName, JSC::JSValue);\
+ static const JSC::ClassInfo s_info;\
+\
+ static JSC::Structure* createStructure(JSC::JSGlobalData& globalData, JSC::JSGlobalObject* globalObject, JSC::JSValue prototype)\
+ {\
+ return JSC::Structure::create(globalData, globalObject, prototype, JSC::TypeInfo(JSC::ObjectType, StructureFlags), &s_info);\
+ }\
+\
+ static void getOwnPropertyNames(JSC::JSObject*, JSC::ExecState*, JSC::PropertyNameArray&, JSC::EnumerationMode mode = JSC::ExcludeDontEnumProperties);\
+ static JSC::JSValue getConstructor(JSC::ExecState*, JSC::JSGlobalObject*);\
+\
+ static const JSC::TypedArrayType TypedArrayStorageType = JSC::TypedArray##name;\
+ uint32_t m_storageLength;\
+ type* m_storage;\
+ RefPtr<name##Array> m_impl;\
+protected:\
+ JS##name##Array(JSC::Structure*, JSGlobalObject*, PassRefPtr<name##Array>);\
+ void finishCreation(JSC::JSGlobalData&);\
+ static const unsigned StructureFlags = JSC::OverridesGetPropertyNames | JSC::OverridesGetOwnPropertySlot | Base::StructureFlags;\
+ JSC::JSValue getByIndex(JSC::ExecState*, unsigned index);\
+ void indexSetter(JSC::ExecState*, unsigned index, JSC::JSValue);\
+};\
+\
+const ClassInfo JS##name##Array::s_info = { #name "Array" , &Base::s_info, 0, 0, CREATE_METHOD_TABLE(JS##name##Array) };\
+\
+JS##name##Array::JS##name##Array(Structure* structure, JSGlobalObject* globalObject, PassRefPtr<name##Array> impl)\
+ : Base(globalObject->globalData(), structure)\
+ , m_impl(impl)\
+{\
+}\
+\
+void JS##name##Array::finishCreation(JSGlobalData& globalData)\
+{\
+ Base::finishCreation(globalData);\
+ TypedArrayDescriptor descriptor(&JS##name##Array::s_info, OBJECT_OFFSETOF(JS##name##Array, m_storage), OBJECT_OFFSETOF(JS##name##Array, m_storageLength));\
+ globalData.registerTypedArrayDescriptor(m_impl.get(), descriptor);\
+ m_storage = m_impl->data();\
+ m_storageLength = m_impl->length();\
+ putDirect(globalData, globalData.propertyNames->length, jsNumber(m_storageLength), DontDelete | ReadOnly | DontEnum); \
+ ASSERT(inherits(&s_info));\
+}\
+\
+bool JS##name##Array::getOwnPropertySlot(JSCell* cell, ExecState* exec, const Identifier& propertyName, PropertySlot& slot)\
+{\
+ JS##name##Array* thisObject = jsCast<JS##name##Array*>(cell);\
+ ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info);\
+ bool ok;\
+ unsigned index = propertyName.toUInt32(ok);\
+ if (ok && index < thisObject->m_storageLength) {\
+ slot.setValue(thisObject->getByIndex(exec, index));\
+ return true;\
+ }\
+ return Base::getOwnPropertySlot(cell, exec, propertyName, slot);\
+}\
+\
+bool JS##name##Array::getOwnPropertyDescriptor(JSObject* object, ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor)\
+{\
+ JS##name##Array* thisObject = jsCast<JS##name##Array*>(object);\
+ ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info);\
+ bool ok;\
+ unsigned index = propertyName.toUInt32(ok);\
+ if (ok && index < thisObject->m_storageLength) {\
+ descriptor.setDescriptor(thisObject->getByIndex(exec, index), DontDelete);\
+ return true;\
+ }\
+ return Base::getOwnPropertyDescriptor(object, exec, propertyName, descriptor);\
+}\
+\
+bool JS##name##Array::getOwnPropertySlotByIndex(JSCell* cell, ExecState* exec, unsigned propertyName, PropertySlot& slot)\
+{\
+ JS##name##Array* thisObject = jsCast<JS##name##Array*>(cell);\
+ ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info);\
+ if (propertyName < thisObject->m_storageLength) {\
+ slot.setValue(thisObject->getByIndex(exec, propertyName));\
+ return true;\
+ }\
+ return thisObject->methodTable()->getOwnPropertySlot(thisObject, exec, Identifier::from(exec, propertyName), slot);\
+}\
+\
+void JS##name##Array::put(JSCell* cell, ExecState* exec, const Identifier& propertyName, JSValue value, PutPropertySlot& slot)\
+{\
+ JS##name##Array* thisObject = jsCast<JS##name##Array*>(cell);\
+ ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info);\
+ bool ok;\
+ unsigned index = propertyName.toUInt32(ok);\
+ if (ok) {\
+ thisObject->indexSetter(exec, index, value);\
+ return;\
+ }\
+ Base::put(thisObject, exec, propertyName, value, slot);\
+}\
+\
+void JS##name##Array::indexSetter(JSC::ExecState* exec, unsigned index, JSC::JSValue value) \
+{\
+ m_impl->set(index, value.toNumber(exec));\
+}\
+\
+void JS##name##Array::putByIndex(JSCell* cell, ExecState* exec, unsigned propertyName, JSValue value)\
+{\
+ JS##name##Array* thisObject = jsCast<JS##name##Array*>(cell);\
+ ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info);\
+ thisObject->indexSetter(exec, propertyName, value);\
+ return;\
+}\
+\
+void JS##name##Array::getOwnPropertyNames(JSObject* object, ExecState* exec, PropertyNameArray& propertyNames, EnumerationMode mode)\
+{\
+ JS##name##Array* thisObject = jsCast<JS##name##Array*>(object);\
+ ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info);\
+ for (unsigned i = 0; i < thisObject->m_storageLength; ++i)\
+ propertyNames.add(Identifier::from(exec, i));\
+ Base::getOwnPropertyNames(thisObject, exec, propertyNames, mode);\
+}\
+\
+JSValue JS##name##Array::getByIndex(ExecState*, unsigned index)\
+{\
+ ASSERT_GC_OBJECT_INHERITS(this, &s_info);\
+ type result = m_impl->item(index);\
+ if (isnan((double)result))\
+ return jsNaN();\
+ return JSValue(result);\
+}\
+static EncodedJSValue JSC_HOST_CALL constructJS##name##Array(ExecState* callFrame) { \
+ if (callFrame->argumentCount() < 1) \
+ return JSValue::encode(jsUndefined()); \
+ int32_t length = callFrame->argument(0).toInt32(callFrame); \
+ if (length < 0) \
+ return JSValue::encode(jsUndefined()); \
+ Structure* structure = JS##name##Array::createStructure(callFrame->globalData(), callFrame->lexicalGlobalObject(), callFrame->lexicalGlobalObject()->objectPrototype()); \
+ return JSValue::encode(JS##name##Array::create(structure, callFrame->lexicalGlobalObject(), name##Array::create(length)));\
+}
+
+#if ENABLE(COMMANDLINE_TYPEDARRAYS)
+TYPED_ARRAY(Uint8, uint8_t);
+TYPED_ARRAY(Uint16, uint16_t);
+TYPED_ARRAY(Uint32, uint32_t);
+TYPED_ARRAY(Int8, int8_t);
+TYPED_ARRAY(Int16, int16_t);
+TYPED_ARRAY(Int32, int32_t);
+TYPED_ARRAY(Float32, float);
+TYPED_ARRAY(Float64, double);
+#endif
+
+}
+
+#endif
diff --git a/Source/JavaScriptCore/JavaScriptCore.gypi b/Source/JavaScriptCore/JavaScriptCore.gypi
index b59be38d9..e19dfb67a 100644
--- a/Source/JavaScriptCore/JavaScriptCore.gypi
+++ b/Source/JavaScriptCore/JavaScriptCore.gypi
@@ -27,10 +27,12 @@
'API/OpaqueJSString.h',
'assembler/MacroAssemblerCodeRef.h',
'bytecode/Opcode.h',
- 'heap/BumpBlock.h',
- 'heap/BumpSpace.h',
- 'heap/BumpSpaceInlineMethods.h',
+ 'heap/CopiedAllocator.h',
+ 'heap/CopiedBlock.h',
+ 'heap/CopiedSpace.h',
+ 'heap/CopiedSpaceInlineMethods.h',
'heap/ConservativeRoots.h',
+ 'heap/GCAssertions.h',
'heap/Handle.h',
'heap/HandleHeap.h',
'heap/HeapBlock.h',
@@ -155,6 +157,7 @@
'wtf/DateMath.h',
'wtf/DecimalNumber.h',
'wtf/Decoder.h',
+ 'wtf/DataLog.h',
'wtf/Deque.h',
'wtf/DisallowCType.h',
'wtf/DoublyLinkedList.h',
@@ -560,6 +563,7 @@
'wtf/CurrentTime.cpp',
'wtf/DateMath.cpp',
'wtf/DecimalNumber.cpp',
+ 'wtf/DataLog.cpp',
'wtf/DynamicAnnotations.cpp',
'wtf/DynamicAnnotations.h',
'wtf/FastMalloc.cpp',
diff --git a/Source/JavaScriptCore/JavaScriptCore.order b/Source/JavaScriptCore/JavaScriptCore.order
index d8513d099..e1e1f231a 100644
--- a/Source/JavaScriptCore/JavaScriptCore.order
+++ b/Source/JavaScriptCore/JavaScriptCore.order
@@ -58,6 +58,7 @@ __ZN3WTF5Mutex6unlockEv
__ZN3WTF10StringImpl12sharedBufferEv
__ZN3WTF10StringImpl8endsWithEPS0_b
__ZN3WTF12createThreadEPFPvS0_ES0_PKc
+__ZN3WTF12createThreadEPFvPvES0_PKc
__ZN3WTF20createThreadInternalEPFPvS0_ES0_PKc
__ZN3WTFL35establishIdentifierForPthreadHandleERKP17_opaque_pthread_t
__ZN3WTF9HashTableIjSt4pairIjP17_opaque_pthread_tENS_18PairFirstExtractorIS4_EENS_7IntHashIjEENS_14PairHashTraitsINS_10HashTraitsIjEENSA_IS3_EEEESB_E6rehashEi
@@ -1144,6 +1145,7 @@ __ZNK3JSC8JSString8toNumberEPNS_9ExecStateE
__ZN3JSC10jsToNumberERKNS_7UStringE
__ZN3JSCL19jsStrDecimalLiteralERPKtS1_
__ZN3WTF22cancelCallOnMainThreadEPFvPvES0_
+__ZN3WTF22isMainThreadOrGCThreadEv
__ZNK3JSC8JSString9toBooleanEPNS_9ExecStateE
__ZN3WTF10StringImpl4findEPFbtEj
__ZN3WTF10StringImpl4findEPKcj
@@ -1152,6 +1154,7 @@ __ZN3WTF10fastStrDupEPKc
__ZN3JSC10Identifier11addSlowCaseEPNS_12JSGlobalDataEPN3WTF10StringImplE
_JSStringRetain
___initializeScavenger_block_invoke_1
+__ZN3WTF23waitForThreadCompletionEj
__ZN3WTF23waitForThreadCompletionEjPPv
_JSObjectCopyPropertyNames
__ZN3JSC8JSObject16getPropertyNamesEPNS_9ExecStateERNS_17PropertyNameArrayENS_15EnumerationModeE
diff --git a/Source/JavaScriptCore/JavaScriptCore.pri b/Source/JavaScriptCore/JavaScriptCore.pri
index 4e2426f8f..eeace1764 100644
--- a/Source/JavaScriptCore/JavaScriptCore.pri
+++ b/Source/JavaScriptCore/JavaScriptCore.pri
@@ -20,6 +20,7 @@ INCLUDEPATH += \
$$SOURCE_DIR/debugger \
$$SOURCE_DIR/interpreter \
$$SOURCE_DIR/jit \
+ $$SOURCE_DIR/llint \
$$SOURCE_DIR/parser \
$$SOURCE_DIR/profiler \
$$SOURCE_DIR/runtime \
@@ -30,7 +31,6 @@ INCLUDEPATH += \
$$JAVASCRIPTCORE_GENERATED_SOURCES_DIR
win32-* {
- DEFINES += _HAS_TR1=0
LIBS += -lwinmm
win32-g++* {
diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
index ad4c02634..acdf47b7c 100644
--- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
+++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def
@@ -1,5 +1,7 @@
EXPORTS
+ ??$strtod@$00@WTF@@YANPBDPAPAD@Z
+ ??$strtod@$0A@@WTF@@YANPBDPAPAD@Z
??0ArrayBufferView@WTF@@IAE@V?$PassRefPtr@VArrayBuffer@WTF@@@1@I@Z
??0CString@WTF@@QAE@PBD@Z
??0CString@WTF@@QAE@PBDI@Z
@@ -122,6 +124,7 @@ EXPORTS
?createTable@HashTable@JSC@@ABEXPAVJSGlobalData@2@@Z
?createThread@WTF@@YAIP6APAXPAX@Z0@Z
?createThread@WTF@@YAIP6APAXPAX@Z0PBD@Z
+ ?createThread@WTF@@YAIP6AXPAX@Z0PBD@Z
?createTypeError@JSC@@YAPAVJSObject@1@PAVExecState@1@ABVUString@1@@Z
?cryptographicallyRandomNumber@WTF@@YAIXZ
?cryptographicallyRandomValues@WTF@@YAXPAXI@Z
@@ -129,6 +132,7 @@ EXPORTS
?currentThreadIsHoldingLock@JSLock@JSC@@SA_NXZ
?currentTime@WTF@@YANXZ
?data@CString@WTF@@QBEPBDXZ
+ ?dataLog@WTF@@YAXPBDZZ
?dateToDaysFrom1970@WTF@@YANHHH@Z
?dayInMonthFromDayInYear@WTF@@YAHH_N@Z
?dayInYear@WTF@@YAHNH@Z
@@ -150,8 +154,8 @@ EXPORTS
?despecifyFunctionTransition@Structure@JSC@@SAPAV12@AAVJSGlobalData@2@PAV12@ABVIdentifier@2@@Z
?destroy@Heap@JSC@@QAEXXZ
?destroy@JSByteArray@JSC@@SAXPAVJSCell@2@@Z
+ ?destroy@JSCell@JSC@@KAXPAV12@@Z
?destroy@JSGlobalObject@JSC@@SAXPAVJSCell@2@@Z
- ?destroy@JSNonFinalObject@JSC@@SAXPAVJSCell@2@@Z
?detach@Debugger@JSC@@UAEXPAVJSGlobalObject@2@@Z
?detachThread@WTF@@YAXI@Z
?didTimeOut@TimeoutChecker@JSC@@QAE_NPAVExecState@2@@Z
@@ -189,6 +193,7 @@ EXPORTS
?functionName@DebuggerCallFrame@JSC@@QBEPBVUString@2@XZ
?get@Structure@JSC@@QAEIAAVJSGlobalData@2@ABVIdentifier@2@AAIAAPAVJSCell@2@@Z
?get@Structure@JSC@@QAEIAAVJSGlobalData@2@PAVStringImpl@WTF@@AAIAAPAVJSCell@2@@Z
+ ?getCalculatedDisplayName@JSC@@YA?AVUString@1@PAVExecState@1@PAVJSObject@1@@Z
?getCallData@JSCell@JSC@@SA?AW4CallType@2@PAV12@AATCallData@2@@Z
?getConstructData@JSCell@JSC@@SA?AW4ConstructType@2@PAV12@AATConstructData@2@@Z
?getObject@JSCell@JSC@@QAEPAVJSObject@2@XZ
@@ -206,6 +211,7 @@ EXPORTS
?getPropertyDescriptor@JSObject@JSC@@QAE_NPAVExecState@2@ABVIdentifier@2@AAVPropertyDescriptor@2@@Z
?getPropertyNames@JSObject@JSC@@SAXPAV12@PAVExecState@2@AAVPropertyNameArray@2@W4EnumerationMode@2@@Z
?getSlice@ArgList@JSC@@QBEXHAAV12@@Z
+ ?getStackTrace@Interpreter@JSC@@SAXPAVJSGlobalData@2@HAAV?$Vector@UStackFrame@JSC@@$0A@@WTF@@@Z
?getString@JSCell@JSC@@QBE?AVUString@2@PAVExecState@2@@Z
?getString@JSCell@JSC@@QBE_NPAVExecState@2@AAVUString@2@@Z
?getter@PropertyDescriptor@JSC@@QBE?AVJSValue@2@XZ
@@ -319,10 +325,9 @@ EXPORTS
?startSampling@JSGlobalData@JSC@@QAEXXZ
?stopProfiling@Profiler@JSC@@QAE?AV?$PassRefPtr@VProfile@JSC@@@WTF@@PAVExecState@2@ABVUString@2@@Z
?stopSampling@JSGlobalData@JSC@@QAEXXZ
- ?strtod@WTF@@YANPBDPAPAD@Z
?substringSharingImpl@UString@JSC@@QBE?AV12@II@Z
?symbolTableGet@JSVariableObject@JSC@@IAE_NABVIdentifier@2@AAVPropertyDescriptor@2@@Z
- ?synthesizePrototype@JSValue@JSC@@ABEPAVJSObject@2@PAVExecState@2@@Z
+ ?synthesizePrototype@JSValue@JSC@@QBEPAVJSObject@2@PAVExecState@2@@Z
?thisObject@DebuggerCallFrame@JSC@@QBEPAVJSObject@2@XZ
?throwError@JSC@@YA?AVJSValue@1@PAVExecState@1@V21@@Z
?throwError@JSC@@YAPAVJSObject@1@PAVExecState@1@PAV21@@Z
@@ -361,6 +366,7 @@ EXPORTS
?visitChildren@JSGlobalThis@JSC@@KAXPAVJSCell@2@AAVSlotVisitor@2@@Z
?visitChildren@JSObject@JSC@@SAXPAVJSCell@2@AAVSlotVisitor@2@@Z
?wait@ThreadCondition@WTF@@QAEXAAVMutex@2@@Z
+ ?waitForThreadCompletion@WTF@@YAHI@Z
?waitForThreadCompletion@WTF@@YAHIPAPAX@Z
?writable@PropertyDescriptor@JSC@@QBE_NXZ
?writeBarrier@HandleHeap@JSC@@QAEXPAVJSValue@2@ABV32@@Z
diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
index bee218833..b9e0f3152 100644
--- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
+++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj
@@ -1478,6 +1478,22 @@
Name="bytecode"
>
<File
+ RelativePath="..\..\bytecode\MethodOfGettingAValueProfile.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\bytecode\MethodOfGettingAValueProfile.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\bytecode\LazyOperandValueProfile.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\bytecode\LazyOperandValueProfile.h"
+ >
+ </File>
+ <File
RelativePath="..\..\bytecode\GetByIdStatus.h"
>
</File>
@@ -1586,6 +1602,10 @@
>
</File>
<File
+ RelativePath="..\..\bytecode\PolymorphicPutByIdList.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\bytecode\PredictedType.cpp"
>
</File>
@@ -1610,10 +1630,6 @@
>
</File>
<File
- RelativePath="..\..\bytecode\ValueProfile.cpp"
- >
- </File>
- <File
RelativePath="..\..\bytecode\ValueProfile.h"
>
</File>
@@ -1758,6 +1774,10 @@
>
</File>
<File
+ RelativePath="..\..\jit\HostCallReturnValue.cpp"
+ >
+ </File>
+ <File
RelativePath="..\..\jit\JIT.cpp"
>
</File>
@@ -1843,6 +1863,14 @@
</File>
</Filter>
<Filter
+ Name="llint"
+ >
+ <File
+ RelativePath="..\..\llint\LLIntData.h"
+ >
+ </File>
+ </Filter>
+ <Filter
Name="interpreter"
>
<File
@@ -2034,19 +2062,23 @@
Name="heap"
>
<File
- RelativePath="..\..\heap\BumpBlock.h"
+ RelativePath="..\..\heap\CopiedAllocator.h"
+ >
+ </File>
+ <File
+ RelativePath="..\..\heap\CopiedBlock.h"
>
</File>
<File
- RelativePath="..\..\heap\BumpSpace.cpp"
+ RelativePath="..\..\heap\CopiedSpace.cpp"
>
</File>
<File
- RelativePath="..\..\heap\BumpSpace.h"
+ RelativePath="..\..\heap\CopiedSpace.h"
>
</File>
<File
- RelativePath="..\..\heap\BumpSpaceInlineMethods.h"
+ RelativePath="..\..\heap\CopiedSpaceInlineMethods.h"
>
</File>
<File
@@ -2058,6 +2090,10 @@
>
</File>
<File
+ RelativePath="..\..\heap\GCAssertions.h"
+ >
+ </File>
+ <File
RelativePath="..\..\heap\MachineStackMarker.cpp"
>
</File>
diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops
index 33b53442a..b0b45d38d 100644
--- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops
+++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops
@@ -6,7 +6,7 @@
>
<Tool
Name="VCCLCompilerTool"
- AdditionalIncludeDirectories="&quot;$(ConfigurationBuildDir)\obj\JavaScriptCore\DerivedSources\&quot;;../../;../../API/;../../parser/;../../bytecompiler/;../../dfg/;../../jit/;../../runtime/;../../tools/;../../bytecode/;../../interpreter/;../../wtf/;../../profiler;../../assembler/;../../debugger/;../../heap/;&quot;$(WebKitLibrariesDir)\include&quot;;&quot;$(WebKitLibrariesDir)\include\private&quot;;&quot;$(ConfigurationBuildDir)\include&quot;;&quot;$(ConfigurationBuildDir)\include\JavaScriptCore&quot;;&quot;$(ConfigurationBuildDir)\include\private&quot;;&quot;$(WebKitLibrariesDir)\include\pthreads&quot;"
+ AdditionalIncludeDirectories="&quot;$(ConfigurationBuildDir)\obj\JavaScriptCore\DerivedSources\&quot;;../../;../../API/;../../parser/;../../bytecompiler/;../../dfg/;../../jit/;../../llint/;../../runtime/;../../tools/;../../bytecode/;../../interpreter/;../../wtf/;../../profiler;../../assembler/;../../debugger/;../../heap/;&quot;$(WebKitLibrariesDir)\include&quot;;&quot;$(WebKitLibrariesDir)\include\private&quot;;&quot;$(ConfigurationBuildDir)\include&quot;;&quot;$(ConfigurationBuildDir)\include\JavaScriptCore&quot;;&quot;$(ConfigurationBuildDir)\include\private&quot;;&quot;$(WebKitLibrariesDir)\include\pthreads&quot;"
PreprocessorDefinitions="__STD_C"
ForcedIncludeFiles="ICUVersion.h"
/>
diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd
index 0ed8c63f5..2c0cfad02 100755
--- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd
+++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/copy-files.cmd
@@ -43,6 +43,7 @@ for %%d in (
debugger
interpreter
jit
+ llint
parser
profiler
runtime
diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj b/Source/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
index c914b22c1..5689b2608 100644
--- a/Source/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
+++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/WTF/WTF.vcproj
@@ -733,6 +733,14 @@
>
</File>
<File
+ RelativePath="..\..\wtf\DataLog.cpp"
+ >
+ </File>
+ <File
+ RelativePath="..\..\wtf\DataLog.h"
+ >
+ </File>
+ <File
RelativePath="..\..\wtf\Deque.h"
>
</File>
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index 997b0bbf4..d9d8e1631 100644
--- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
+++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
@@ -7,6 +7,15 @@
objects = {
/* Begin PBXAggregateTarget section */
+ 0F4680A914BA7FD900BFE272 /* LLInt Offsets */ = {
+ isa = PBXAggregateTarget;
+ buildConfigurationList = 0F4680AC14BA7FD900BFE272 /* Build configuration list for PBXAggregateTarget "LLInt Offsets" */;
+ buildPhases = (
+ 0F4680AA14BA7FD900BFE272 /* Generate Derived Sources */,
+ );
+ name = "LLInt Offsets";
+ productName = "Derived Sources";
+ };
65FB3F6609D11E9100F49DEB /* Derived Sources */ = {
isa = PBXAggregateTarget;
buildConfigurationList = 65FB3F7709D11EBD00F49DEB /* Build configuration list for PBXAggregateTarget "Derived Sources" */;
@@ -14,6 +23,9 @@
65FB3F6509D11E9100F49DEB /* Generate Derived Sources */,
5D35DEE10C7C140B008648B2 /* Generate DTrace header */,
);
+ dependencies = (
+ 0FF922D614F46B600041A24E /* PBXTargetDependency */,
+ );
name = "Derived Sources";
productName = "Derived Sources";
};
@@ -48,6 +60,10 @@
0BAC94A01338728400CF135B /* ThreadRestrictionVerifier.h in Headers */ = {isa = PBXBuildFile; fileRef = 0BAC949E1338728400CF135B /* ThreadRestrictionVerifier.h */; settings = {ATTRIBUTES = (Private, ); }; };
0BCD83571485845200EA2003 /* TemporaryChange.h in Headers */ = {isa = PBXBuildFile; fileRef = 0BCD83541485841200EA2003 /* TemporaryChange.h */; settings = {ATTRIBUTES = (Private, ); }; };
0BF28A2911A33DC300638F84 /* SizeLimits.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0BF28A2811A33DC300638F84 /* SizeLimits.cpp */; };
+ 0F0B839A14BCF45D00885B4F /* LLIntEntrypoints.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F0B839514BCF45A00885B4F /* LLIntEntrypoints.cpp */; };
+ 0F0B839B14BCF46000885B4F /* LLIntEntrypoints.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B839614BCF45A00885B4F /* LLIntEntrypoints.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F0B839C14BCF46300885B4F /* LLIntThunks.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F0B839714BCF45A00885B4F /* LLIntThunks.cpp */; };
+ 0F0B839D14BCF46600885B4F /* LLIntThunks.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B839814BCF45A00885B4F /* LLIntThunks.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0B83A714BCF50700885B4F /* CodeType.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83A514BCF50400885B4F /* CodeType.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0B83A914BCF56200885B4F /* HandlerInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83A814BCF55E00885B4F /* HandlerInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0B83AB14BCF5BB00885B4F /* ExpressionRangeInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -58,9 +74,13 @@
0F0B83B514BCF86200885B4F /* MethodCallLinkInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0B83B714BCF8E100885B4F /* GlobalResolveInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B614BCF8DF00885B4F /* GlobalResolveInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F0B83B914BCF95F00885B4F /* CallReturnOffsetToBytecodeOffset.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F0FC45A14BD15F500B81154 /* LLIntCallLinkInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F0FC45814BD15F100B81154 /* LLIntCallLinkInfo.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F15F15F14B7A73E005DE37D /* CommonSlowPaths.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F15F15D14B7A73A005DE37D /* CommonSlowPaths.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F16D726142C39C000CF784A /* BitVector.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F16D724142C39A200CF784A /* BitVector.cpp */; };
0F21C26814BE5F6800ADC64B /* JITDriver.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F21C26614BE5F5E00ADC64B /* JITDriver.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F21C27C14BE727600ADC64B /* ExecutionHarness.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F21C27A14BE727300ADC64B /* ExecutionHarness.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F21C27D14BE727A00ADC64B /* CodeSpecializationKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F21C27914BE727300ADC64B /* CodeSpecializationKind.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F21C27F14BEAA8200ADC64B /* BytecodeConventions.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F21C27E14BEAA8000ADC64B /* BytecodeConventions.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F242DA713F3B1E8007ADD4C /* WeakReferenceHarvester.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F242DA513F3B1BB007ADD4C /* WeakReferenceHarvester.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F2C556F14738F3100121E4F /* DFGCodeBlocks.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2C556E14738F2E00121E4F /* DFGCodeBlocks.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F2C557014738F3500121E4F /* DFGCodeBlocks.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F2C556D14738F2E00121E4F /* DFGCodeBlocks.cpp */; };
@@ -71,6 +91,18 @@
0F431738146BAC69007E3890 /* ListableHandler.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F431736146BAC65007E3890 /* ListableHandler.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F46808214BA572D00BFE272 /* JITExceptions.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F46808014BA572700BFE272 /* JITExceptions.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F46808314BA573100BFE272 /* JITExceptions.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F46807F14BA572700BFE272 /* JITExceptions.cpp */; };
+ 0F4680A314BA7F8D00BFE272 /* LLIntExceptions.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F46809E14BA7F8200BFE272 /* LLIntExceptions.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F4680A414BA7F8D00BFE272 /* LLIntSlowPaths.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F46809F14BA7F8200BFE272 /* LLIntSlowPaths.cpp */; settings = {COMPILER_FLAGS = "-Wno-unused-parameter"; }; };
+ 0F4680A514BA7F8D00BFE272 /* LLIntSlowPaths.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F4680A014BA7F8200BFE272 /* LLIntSlowPaths.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F4680A814BA7FAB00BFE272 /* LLIntExceptions.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F46809D14BA7F8200BFE272 /* LLIntExceptions.cpp */; };
+ 0F4680CA14BBB16C00BFE272 /* LLIntCommon.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F4680C514BBB16900BFE272 /* LLIntCommon.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F4680CB14BBB17200BFE272 /* LLIntOfflineAsmConfig.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F4680C614BBB16900BFE272 /* LLIntOfflineAsmConfig.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F4680CC14BBB17A00BFE272 /* LowLevelInterpreter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F4680C714BBB16900BFE272 /* LowLevelInterpreter.cpp */; };
+ 0F4680CD14BBB17D00BFE272 /* LowLevelInterpreter.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F4680C814BBB16900BFE272 /* LowLevelInterpreter.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F4680D214BBD16500BFE272 /* LLIntData.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F4680CE14BBB3D100BFE272 /* LLIntData.cpp */; };
+ 0F4680D314BBD16700BFE272 /* LLIntData.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F4680CF14BBB3D100BFE272 /* LLIntData.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F4680D414BBD24900BFE272 /* HostCallReturnValue.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F4680D014BBC5F800BFE272 /* HostCallReturnValue.cpp */; };
+ 0F4680D514BBD24B00BFE272 /* HostCallReturnValue.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F4680D114BBC5F800BFE272 /* HostCallReturnValue.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F55F0F414D1063900AC7649 /* AbstractPC.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F55F0F114D1063600AC7649 /* AbstractPC.cpp */; };
0F55F0F514D1063C00AC7649 /* AbstractPC.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F55F0F214D1063600AC7649 /* AbstractPC.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F5F08CF146C7633000472A9 /* UnconditionalFinalizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F5F08CE146C762F000472A9 /* UnconditionalFinalizer.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -102,6 +134,15 @@
0F963B2D13F854020002D9B2 /* MetaAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F963B2A13F853BD0002D9B2 /* MetaAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F963B2F13FC66BB0002D9B2 /* MetaAllocatorHandle.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F963B2E13FC66AE0002D9B2 /* MetaAllocatorHandle.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F963B3813FC6FE90002D9B2 /* ValueProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F963B3613FC6FDE0002D9B2 /* ValueProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F9FC8C314E1B5FE00D52AE0 /* PolymorphicPutByIdList.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F9FC8BF14E1B5FB00D52AE0 /* PolymorphicPutByIdList.cpp */; };
+ 0F9FC8C414E1B60000D52AE0 /* PolymorphicPutByIdList.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F9FC8C014E1B5FB00D52AE0 /* PolymorphicPutByIdList.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F9FC8C514E1B60400D52AE0 /* PutKind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F9FC8C114E1B5FB00D52AE0 /* PutKind.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F9FC8D014E612D800D52AE0 /* DataLog.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F9FC8CD14E612D500D52AE0 /* DataLog.cpp */; };
+ 0F9FC8D114E612DA00D52AE0 /* DataLog.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F9FC8CE14E612D500D52AE0 /* DataLog.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FB5467714F59B5C002C2989 /* LazyOperandValueProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB5467614F59AD1002C2989 /* LazyOperandValueProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FB5467914F5C46B002C2989 /* LazyOperandValueProfile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FB5467814F5C468002C2989 /* LazyOperandValueProfile.cpp */; };
+ 0FB5467B14F5C7E1002C2989 /* MethodOfGettingAValueProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FB5467A14F5C7D4002C2989 /* MethodOfGettingAValueProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FB5467D14F5CFD6002C2989 /* MethodOfGettingAValueProfile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FB5467C14F5CFD3002C2989 /* MethodOfGettingAValueProfile.cpp */; };
0FBC0AE71496C7C400D4FBDD /* DFGExitProfile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FBC0AE41496C7C100D4FBDD /* DFGExitProfile.cpp */; };
0FBC0AE81496C7C700D4FBDD /* DFGExitProfile.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FBC0AE51496C7C100D4FBDD /* DFGExitProfile.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FBD7E691447999600481315 /* CodeOrigin.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FBD7E671447998F00481315 /* CodeOrigin.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -121,9 +162,7 @@
0FC8150B14043C0E00CFA603 /* WriteBarrierSupport.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC8150814043BCA00CFA603 /* WriteBarrierSupport.cpp */; };
0FC815151405119B00CFA603 /* VTableSpectrum.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FC815141405118D00CFA603 /* VTableSpectrum.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FC81516140511B500CFA603 /* VTableSpectrum.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FC815121405118600CFA603 /* VTableSpectrum.cpp */; };
- 0FD3C82514115D4000FD81CB /* DFGPropagator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD3C82314115D1A00FD81CB /* DFGPropagator.cpp */; };
0FD3C82614115D4000FD81CB /* DFGDriver.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD3C82014115CF800FD81CB /* DFGDriver.cpp */; };
- 0FD3C82714115D4F00FD81CB /* DFGPropagator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD3C82414115D2200FD81CB /* DFGPropagator.h */; };
0FD3C82814115D4F00FD81CB /* DFGDriver.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD3C82214115D0E00FD81CB /* DFGDriver.h */; };
0FD52AAE143035A00026DC9F /* UnionFind.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD52AAC1430359D0026DC9F /* UnionFind.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FD82E2114172CE300179C94 /* DFGCapabilities.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD82E1E14172C2F00179C94 /* DFGCapabilities.cpp */; };
@@ -134,10 +173,22 @@
0FD82E57141DAF1000179C94 /* DFGOSREntry.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82E53141DAEDE00179C94 /* DFGOSREntry.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FD82E85141F3FE300179C94 /* BoundsCheckedPointer.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82E82141F3FC900179C94 /* BoundsCheckedPointer.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FD82E86141F3FF100179C94 /* PredictedType.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD82E84141F3FDA00179C94 /* PredictedType.cpp */; };
- 0FD82E9014207A5F00179C94 /* ValueProfile.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FD82E8E14207A5100179C94 /* ValueProfile.cpp */; };
0FD82F4B142806A100179C94 /* BitVector.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82F491428069200179C94 /* BitVector.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FE228ED1436AB2700196C48 /* Options.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FE228EB1436AB2300196C48 /* Options.h */; settings = {ATTRIBUTES = (Private, ); }; };
0FE228EE1436AB2C00196C48 /* Options.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FE228EA1436AB2300196C48 /* Options.cpp */; };
+ 0FF922D414F46B410041A24E /* LLIntOffsetsExtractor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F4680A114BA7F8200BFE272 /* LLIntOffsetsExtractor.cpp */; };
+ 0FFFC95514EF909A00C72532 /* DFGArithNodeFlagsInferencePhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FFFC94914EF909500C72532 /* DFGArithNodeFlagsInferencePhase.cpp */; };
+ 0FFFC95614EF909C00C72532 /* DFGArithNodeFlagsInferencePhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FFFC94A14EF909500C72532 /* DFGArithNodeFlagsInferencePhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FFFC95714EF90A000C72532 /* DFGCFAPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FFFC94B14EF909500C72532 /* DFGCFAPhase.cpp */; };
+ 0FFFC95814EF90A200C72532 /* DFGCFAPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FFFC94C14EF909500C72532 /* DFGCFAPhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FFFC95914EF90A600C72532 /* DFGCSEPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FFFC94D14EF909500C72532 /* DFGCSEPhase.cpp */; };
+ 0FFFC95A14EF90A900C72532 /* DFGCSEPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FFFC94E14EF909500C72532 /* DFGCSEPhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FFFC95B14EF90AD00C72532 /* DFGPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FFFC94F14EF909500C72532 /* DFGPhase.cpp */; };
+ 0FFFC95C14EF90AF00C72532 /* DFGPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FFFC95014EF909500C72532 /* DFGPhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FFFC95D14EF90B300C72532 /* DFGPredictionPropagationPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FFFC95114EF909500C72532 /* DFGPredictionPropagationPhase.cpp */; };
+ 0FFFC95E14EF90B700C72532 /* DFGPredictionPropagationPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FFFC95214EF909500C72532 /* DFGPredictionPropagationPhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0FFFC95F14EF90BB00C72532 /* DFGVirtualRegisterAllocationPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FFFC95314EF909500C72532 /* DFGVirtualRegisterAllocationPhase.cpp */; };
+ 0FFFC96014EF90BD00C72532 /* DFGVirtualRegisterAllocationPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FFFC95414EF909500C72532 /* DFGVirtualRegisterAllocationPhase.h */; settings = {ATTRIBUTES = (Private, ); }; };
1400067712A6F7830064D123 /* OSAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 1400067612A6F7830064D123 /* OSAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
1400069312A6F9E10064D123 /* OSAllocatorPosix.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1400069212A6F9E10064D123 /* OSAllocatorPosix.cpp */; };
140566C4107EC255005DBC8D /* JSAPIValueWrapper.cpp in Sources */ = {isa = PBXBuildFile; fileRef = BC0894D50FAFBA2D00001865 /* JSAPIValueWrapper.cpp */; };
@@ -261,6 +312,7 @@
148F21B7107EC5470042EC2C /* Nodes.cpp in Sources */ = {isa = PBXBuildFile; fileRef = F692A86D0255597D01FF60F7 /* Nodes.cpp */; };
148F21BC107EC54D0042EC2C /* Parser.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 93F0B3A909BB4DC00068FCE3 /* Parser.cpp */; };
149559EE0DDCDDF700648087 /* DebuggerCallFrame.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 149559ED0DDCDDF700648087 /* DebuggerCallFrame.cpp */; };
+ 1497209114EB831500FEB1B7 /* PassWeak.h in Headers */ = {isa = PBXBuildFile; fileRef = 1497209014EB831500FEB1B7 /* PassWeak.h */; settings = {ATTRIBUTES = (Private, ); }; };
14A1563210966365006FA260 /* DateInstanceCache.h in Headers */ = {isa = PBXBuildFile; fileRef = 14A1563010966365006FA260 /* DateInstanceCache.h */; settings = {ATTRIBUTES = (Private, ); }; };
14A23D750F4E1ABB0023CDAD /* JITStubs.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14A23D6C0F4E19CE0023CDAD /* JITStubs.cpp */; };
14A42E3F0F4F60EE00599099 /* TimeoutChecker.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 14A42E3D0F4F60EE00599099 /* TimeoutChecker.cpp */; };
@@ -294,6 +346,20 @@
1A082779142168D70090CCAC /* BinarySemaphore.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 1A082777142168D70090CCAC /* BinarySemaphore.cpp */; };
1A08277A142168D70090CCAC /* BinarySemaphore.h in Headers */ = {isa = PBXBuildFile; fileRef = 1A082778142168D70090CCAC /* BinarySemaphore.h */; };
1AA9E5511498093500001A8A /* Functional.h in Headers */ = {isa = PBXBuildFile; fileRef = 1AA9E5501498093500001A8A /* Functional.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 2684B2D314D4A9B20072C0B6 /* ParsedURL.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2684B2C414D4A9B20072C0B6 /* ParsedURL.cpp */; };
+ 2684B2D414D4A9B20072C0B6 /* ParsedURL.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2C514D4A9B20072C0B6 /* ParsedURL.h */; };
+ 2684B2D514D4A9B20072C0B6 /* URLString.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2C614D4A9B20072C0B6 /* URLString.h */; };
+ 2684B2D614D4A9B20072C0B6 /* RawURLBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2C814D4A9B20072C0B6 /* RawURLBuffer.h */; };
+ 2684B2D714D4A9B20072C0B6 /* URLBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2C914D4A9B20072C0B6 /* URLBuffer.h */; };
+ 2684B2D814D4A9B20072C0B6 /* URLCharacterTypes.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2684B2CA14D4A9B20072C0B6 /* URLCharacterTypes.cpp */; };
+ 2684B2D914D4A9B20072C0B6 /* URLCharacterTypes.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2CB14D4A9B20072C0B6 /* URLCharacterTypes.h */; };
+ 2684B2DA14D4A9B20072C0B6 /* URLComponent.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2CC14D4A9B20072C0B6 /* URLComponent.h */; };
+ 2684B2DB14D4A9B20072C0B6 /* URLEscape.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2684B2CD14D4A9B20072C0B6 /* URLEscape.cpp */; };
+ 2684B2DC14D4A9B20072C0B6 /* URLEscape.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2CE14D4A9B20072C0B6 /* URLEscape.h */; };
+ 2684B2DD14D4A9B20072C0B6 /* URLParser.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2CF14D4A9B20072C0B6 /* URLParser.h */; };
+ 2684B2DE14D4A9B20072C0B6 /* URLQueryCanonicalizer.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2D014D4A9B20072C0B6 /* URLQueryCanonicalizer.h */; };
+ 2684B2DF14D4A9B20072C0B6 /* URLSegments.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 2684B2D114D4A9B20072C0B6 /* URLSegments.cpp */; };
+ 2684B2E014D4A9B20072C0B6 /* URLSegments.h in Headers */ = {isa = PBXBuildFile; fileRef = 2684B2D214D4A9B20072C0B6 /* URLSegments.h */; };
2CFC5D1E12F45B48004914E2 /* CharacterNames.h in Headers */ = {isa = PBXBuildFile; fileRef = 2CFC5B7A12F44714004914E2 /* CharacterNames.h */; settings = {ATTRIBUTES = (Private, ); }; };
41359CF30FDD89AD00206180 /* DateConversion.h in Headers */ = {isa = PBXBuildFile; fileRef = D21202290AD4310C00ED79B6 /* DateConversion.h */; };
41359CF60FDD89CB00206180 /* DateMath.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 41359CF40FDD89CB00206180 /* DateMath.cpp */; };
@@ -389,7 +455,7 @@
86B99AE3117E578100DF5A90 /* StringBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = 86B99AE1117E578100DF5A90 /* StringBuffer.h */; settings = {ATTRIBUTES = (Private, ); }; };
86BB09C0138E381B0056702F /* DFGRepatch.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86BB09BE138E381B0056702F /* DFGRepatch.cpp */; };
86BB09C1138E381B0056702F /* DFGRepatch.h in Headers */ = {isa = PBXBuildFile; fileRef = 86BB09BF138E381B0056702F /* DFGRepatch.h */; };
- 86C36EEA0EE1289D00B3DF59 /* MacroAssembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C36EE90EE1289D00B3DF59 /* MacroAssembler.h */; };
+ 86C36EEA0EE1289D00B3DF59 /* MacroAssembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C36EE90EE1289D00B3DF59 /* MacroAssembler.h */; settings = {ATTRIBUTES = (Private, ); }; };
86C568E011A213EE0007F7F0 /* MacroAssemblerARM.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 86C568DD11A213EE0007F7F0 /* MacroAssemblerARM.cpp */; };
86C568E111A213EE0007F7F0 /* MacroAssemblerMIPS.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C568DE11A213EE0007F7F0 /* MacroAssemblerMIPS.h */; };
86C568E211A213EE0007F7F0 /* MIPSAssembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 86C568DF11A213EE0007F7F0 /* MIPSAssembler.h */; };
@@ -687,6 +753,7 @@
BC87CDB910712AD4000614CF /* JSONObject.lut.h in Headers */ = {isa = PBXBuildFile; fileRef = BC87CDB810712ACA000614CF /* JSONObject.lut.h */; };
BC9041480EB9250900FE26FA /* StructureTransitionTable.h in Headers */ = {isa = PBXBuildFile; fileRef = BC9041470EB9250900FE26FA /* StructureTransitionTable.h */; settings = {ATTRIBUTES = (Private, ); }; };
BC95437D0EBA70FD0072B6D3 /* PropertyMapHashTable.h in Headers */ = {isa = PBXBuildFile; fileRef = BC95437C0EBA70FD0072B6D3 /* PropertyMapHashTable.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ BCBE2CAE14E985AA000593AD /* GCAssertions.h in Headers */ = {isa = PBXBuildFile; fileRef = BCBE2CAD14E985AA000593AD /* GCAssertions.h */; settings = {ATTRIBUTES = (Private, ); }; };
BCCF0D080EF0AAB900413C8F /* StructureStubInfo.h in Headers */ = {isa = PBXBuildFile; fileRef = BCCF0D070EF0AAB900413C8F /* StructureStubInfo.h */; };
BCCF0D0C0EF0B8A500413C8F /* StructureStubInfo.cpp in Sources */ = {isa = PBXBuildFile; fileRef = BCCF0D0B0EF0B8A500413C8F /* StructureStubInfo.cpp */; };
BCD202C20E1706A7002C7E82 /* RegExpConstructor.h in Headers */ = {isa = PBXBuildFile; fileRef = BCD202BE0E1706A7002C7E82 /* RegExpConstructor.h */; };
@@ -722,14 +789,15 @@
C22C531313FAF6EF00B7DC0D /* strtod.cc in Sources */ = {isa = PBXBuildFile; fileRef = C22C52B913FAF6EF00B7DC0D /* strtod.cc */; };
C22C531413FAF6EF00B7DC0D /* strtod.h in Headers */ = {isa = PBXBuildFile; fileRef = C22C52BA13FAF6EF00B7DC0D /* strtod.h */; settings = {ATTRIBUTES = (Private, ); }; };
C22C531513FAF6EF00B7DC0D /* utils.h in Headers */ = {isa = PBXBuildFile; fileRef = C22C52BB13FAF6EF00B7DC0D /* utils.h */; settings = {ATTRIBUTES = (Private, ); }; };
- C240305514B404E60079EB64 /* BumpSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C240305314B404C90079EB64 /* BumpSpace.cpp */; };
+ C240305514B404E60079EB64 /* CopiedSpace.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C240305314B404C90079EB64 /* CopiedSpace.cpp */; };
C2B916C214DA014E00CBAC86 /* MarkedAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = C2B916C114DA014E00CBAC86 /* MarkedAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
C2B916C514DA040C00CBAC86 /* MarkedAllocator.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C2B916C414DA040C00CBAC86 /* MarkedAllocator.cpp */; };
- C2C8D02D14A3C6E000578E65 /* BumpSpaceInlineMethods.h in Headers */ = {isa = PBXBuildFile; fileRef = C2C8D02B14A3C6B200578E65 /* BumpSpaceInlineMethods.h */; settings = {ATTRIBUTES = (Private, ); }; };
- C2C8D03014A3CEFC00578E65 /* BumpBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = C2C8D02E14A3CEFC00578E65 /* BumpBlock.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ C2C8D02D14A3C6E000578E65 /* CopiedSpaceInlineMethods.h in Headers */ = {isa = PBXBuildFile; fileRef = C2C8D02B14A3C6B200578E65 /* CopiedSpaceInlineMethods.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ C2C8D03014A3CEFC00578E65 /* CopiedBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = C2C8D02E14A3CEFC00578E65 /* CopiedBlock.h */; settings = {ATTRIBUTES = (Private, ); }; };
C2C8D03114A3CEFC00578E65 /* HeapBlock.h in Headers */ = {isa = PBXBuildFile; fileRef = C2C8D02F14A3CEFC00578E65 /* HeapBlock.h */; settings = {ATTRIBUTES = (Private, ); }; };
C2D9CA1314BCC04600304B46 /* CheckedBoolean.h in Headers */ = {isa = PBXBuildFile; fileRef = C2D9CA1214BCC04600304B46 /* CheckedBoolean.h */; settings = {ATTRIBUTES = (Private, ); }; };
- C2EAA3FA149A835E00FCE112 /* BumpSpace.h in Headers */ = {isa = PBXBuildFile; fileRef = C2EAA3F8149A830800FCE112 /* BumpSpace.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ C2EAA3FA149A835E00FCE112 /* CopiedSpace.h in Headers */ = {isa = PBXBuildFile; fileRef = C2EAA3F8149A830800FCE112 /* CopiedSpace.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ C2EAD2FC14F0249800A4B159 /* CopiedAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = C2EAD2FB14F0249800A4B159 /* CopiedAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
C2EE59A013FC973F009CEAFE /* DecimalNumber.h in Headers */ = {isa = PBXBuildFile; fileRef = C2EE599E13FC972A009CEAFE /* DecimalNumber.h */; settings = {ATTRIBUTES = (Private, ); }; };
C2EE59A113FC9768009CEAFE /* DecimalNumber.cpp in Sources */ = {isa = PBXBuildFile; fileRef = C2EE599D13FC972A009CEAFE /* DecimalNumber.cpp */; };
D7A46A4F1338FFEA00ED695C /* DynamicAnnotations.h in Headers */ = {isa = PBXBuildFile; fileRef = D75AF59612F8CB9500FC0ADF /* DynamicAnnotations.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -758,6 +826,20 @@
/* End PBXBuildFile section */
/* Begin PBXContainerItemProxy section */
+ 0FF922D214F46B2F0041A24E /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+ proxyType = 1;
+ remoteGlobalIDString = 0F4680A914BA7FD900BFE272;
+ remoteInfo = "LLInt Offsets";
+ };
+ 0FF922D514F46B600041A24E /* PBXContainerItemProxy */ = {
+ isa = PBXContainerItemProxy;
+ containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
+ proxyType = 1;
+ remoteGlobalIDString = 0FF922C314F46B130041A24E;
+ remoteInfo = JSCLLIntOffsetsExtractor;
+ };
141214BE0A49190E00480255 /* PBXContainerItemProxy */ = {
isa = PBXContainerItemProxy;
containerPortal = 0867D690FE84028FC02AAC07 /* Project object */;
@@ -843,6 +925,10 @@
0BAC949E1338728400CF135B /* ThreadRestrictionVerifier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadRestrictionVerifier.h; sourceTree = "<group>"; };
0BCD83541485841200EA2003 /* TemporaryChange.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TemporaryChange.h; sourceTree = "<group>"; };
0BF28A2811A33DC300638F84 /* SizeLimits.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SizeLimits.cpp; sourceTree = "<group>"; };
+ 0F0B839514BCF45A00885B4F /* LLIntEntrypoints.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntEntrypoints.cpp; path = llint/LLIntEntrypoints.cpp; sourceTree = "<group>"; };
+ 0F0B839614BCF45A00885B4F /* LLIntEntrypoints.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntEntrypoints.h; path = llint/LLIntEntrypoints.h; sourceTree = "<group>"; };
+ 0F0B839714BCF45A00885B4F /* LLIntThunks.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntThunks.cpp; path = llint/LLIntThunks.cpp; sourceTree = "<group>"; };
+ 0F0B839814BCF45A00885B4F /* LLIntThunks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntThunks.h; path = llint/LLIntThunks.h; sourceTree = "<group>"; };
0F0B83A514BCF50400885B4F /* CodeType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeType.h; sourceTree = "<group>"; };
0F0B83A814BCF55E00885B4F /* HandlerInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HandlerInfo.h; sourceTree = "<group>"; };
0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExpressionRangeInfo.h; sourceTree = "<group>"; };
@@ -853,9 +939,13 @@
0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MethodCallLinkInfo.h; sourceTree = "<group>"; };
0F0B83B614BCF8DF00885B4F /* GlobalResolveInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GlobalResolveInfo.h; sourceTree = "<group>"; };
0F0B83B814BCF95B00885B4F /* CallReturnOffsetToBytecodeOffset.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CallReturnOffsetToBytecodeOffset.h; sourceTree = "<group>"; };
+ 0F0FC45814BD15F100B81154 /* LLIntCallLinkInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LLIntCallLinkInfo.h; sourceTree = "<group>"; };
0F15F15D14B7A73A005DE37D /* CommonSlowPaths.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CommonSlowPaths.h; sourceTree = "<group>"; };
0F16D724142C39A200CF784A /* BitVector.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BitVector.cpp; sourceTree = "<group>"; };
0F21C26614BE5F5E00ADC64B /* JITDriver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITDriver.h; sourceTree = "<group>"; };
+ 0F21C27914BE727300ADC64B /* CodeSpecializationKind.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeSpecializationKind.h; sourceTree = "<group>"; };
+ 0F21C27A14BE727300ADC64B /* ExecutionHarness.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExecutionHarness.h; sourceTree = "<group>"; };
+ 0F21C27E14BEAA8000ADC64B /* BytecodeConventions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BytecodeConventions.h; sourceTree = "<group>"; };
0F242DA513F3B1BB007ADD4C /* WeakReferenceHarvester.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WeakReferenceHarvester.h; sourceTree = "<group>"; };
0F2C556D14738F2E00121E4F /* DFGCodeBlocks.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DFGCodeBlocks.cpp; sourceTree = "<group>"; };
0F2C556E14738F2E00121E4F /* DFGCodeBlocks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DFGCodeBlocks.h; sourceTree = "<group>"; };
@@ -866,6 +956,19 @@
0F431736146BAC65007E3890 /* ListableHandler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ListableHandler.h; sourceTree = "<group>"; };
0F46807F14BA572700BFE272 /* JITExceptions.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITExceptions.cpp; sourceTree = "<group>"; };
0F46808014BA572700BFE272 /* JITExceptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITExceptions.h; sourceTree = "<group>"; };
+ 0F46809D14BA7F8200BFE272 /* LLIntExceptions.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntExceptions.cpp; path = llint/LLIntExceptions.cpp; sourceTree = "<group>"; };
+ 0F46809E14BA7F8200BFE272 /* LLIntExceptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntExceptions.h; path = llint/LLIntExceptions.h; sourceTree = "<group>"; };
+ 0F46809F14BA7F8200BFE272 /* LLIntSlowPaths.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntSlowPaths.cpp; path = llint/LLIntSlowPaths.cpp; sourceTree = "<group>"; };
+ 0F4680A014BA7F8200BFE272 /* LLIntSlowPaths.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntSlowPaths.h; path = llint/LLIntSlowPaths.h; sourceTree = "<group>"; };
+ 0F4680A114BA7F8200BFE272 /* LLIntOffsetsExtractor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntOffsetsExtractor.cpp; path = llint/LLIntOffsetsExtractor.cpp; sourceTree = "<group>"; };
+ 0F4680C514BBB16900BFE272 /* LLIntCommon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntCommon.h; path = llint/LLIntCommon.h; sourceTree = "<group>"; };
+ 0F4680C614BBB16900BFE272 /* LLIntOfflineAsmConfig.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntOfflineAsmConfig.h; path = llint/LLIntOfflineAsmConfig.h; sourceTree = "<group>"; };
+ 0F4680C714BBB16900BFE272 /* LowLevelInterpreter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LowLevelInterpreter.cpp; path = llint/LowLevelInterpreter.cpp; sourceTree = "<group>"; };
+ 0F4680C814BBB16900BFE272 /* LowLevelInterpreter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LowLevelInterpreter.h; path = llint/LowLevelInterpreter.h; sourceTree = "<group>"; };
+ 0F4680CE14BBB3D100BFE272 /* LLIntData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = LLIntData.cpp; path = llint/LLIntData.cpp; sourceTree = "<group>"; };
+ 0F4680CF14BBB3D100BFE272 /* LLIntData.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = LLIntData.h; path = llint/LLIntData.h; sourceTree = "<group>"; };
+ 0F4680D014BBC5F800BFE272 /* HostCallReturnValue.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = HostCallReturnValue.cpp; sourceTree = "<group>"; };
+ 0F4680D114BBC5F800BFE272 /* HostCallReturnValue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HostCallReturnValue.h; sourceTree = "<group>"; };
0F55F0F114D1063600AC7649 /* AbstractPC.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = AbstractPC.cpp; sourceTree = "<group>"; };
0F55F0F214D1063600AC7649 /* AbstractPC.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AbstractPC.h; sourceTree = "<group>"; };
0F5F08CC146BE602000472A9 /* DFGByteCodeCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGByteCodeCache.h; path = dfg/DFGByteCodeCache.h; sourceTree = "<group>"; };
@@ -896,6 +999,15 @@
0F963B2B13F853C70002D9B2 /* MetaAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MetaAllocator.cpp; sourceTree = "<group>"; };
0F963B2E13FC66AE0002D9B2 /* MetaAllocatorHandle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MetaAllocatorHandle.h; sourceTree = "<group>"; };
0F963B3613FC6FDE0002D9B2 /* ValueProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ValueProfile.h; sourceTree = "<group>"; };
+ 0F9FC8BF14E1B5FB00D52AE0 /* PolymorphicPutByIdList.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PolymorphicPutByIdList.cpp; sourceTree = "<group>"; };
+ 0F9FC8C014E1B5FB00D52AE0 /* PolymorphicPutByIdList.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PolymorphicPutByIdList.h; sourceTree = "<group>"; };
+ 0F9FC8C114E1B5FB00D52AE0 /* PutKind.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PutKind.h; sourceTree = "<group>"; };
+ 0F9FC8CD14E612D500D52AE0 /* DataLog.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DataLog.cpp; sourceTree = "<group>"; };
+ 0F9FC8CE14E612D500D52AE0 /* DataLog.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DataLog.h; sourceTree = "<group>"; };
+ 0FB5467614F59AD1002C2989 /* LazyOperandValueProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LazyOperandValueProfile.h; sourceTree = "<group>"; };
+ 0FB5467814F5C468002C2989 /* LazyOperandValueProfile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = LazyOperandValueProfile.cpp; sourceTree = "<group>"; };
+ 0FB5467A14F5C7D4002C2989 /* MethodOfGettingAValueProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MethodOfGettingAValueProfile.h; sourceTree = "<group>"; };
+ 0FB5467C14F5CFD3002C2989 /* MethodOfGettingAValueProfile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MethodOfGettingAValueProfile.cpp; sourceTree = "<group>"; };
0FBC0AE41496C7C100D4FBDD /* DFGExitProfile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DFGExitProfile.cpp; sourceTree = "<group>"; };
0FBC0AE51496C7C100D4FBDD /* DFGExitProfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DFGExitProfile.h; sourceTree = "<group>"; };
0FBD7E671447998F00481315 /* CodeOrigin.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CodeOrigin.h; sourceTree = "<group>"; };
@@ -918,8 +1030,6 @@
0FC815141405118D00CFA603 /* VTableSpectrum.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VTableSpectrum.h; sourceTree = "<group>"; };
0FD3C82014115CF800FD81CB /* DFGDriver.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGDriver.cpp; path = dfg/DFGDriver.cpp; sourceTree = "<group>"; };
0FD3C82214115D0E00FD81CB /* DFGDriver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGDriver.h; path = dfg/DFGDriver.h; sourceTree = "<group>"; };
- 0FD3C82314115D1A00FD81CB /* DFGPropagator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGPropagator.cpp; path = dfg/DFGPropagator.cpp; sourceTree = "<group>"; };
- 0FD3C82414115D2200FD81CB /* DFGPropagator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGPropagator.h; path = dfg/DFGPropagator.h; sourceTree = "<group>"; };
0FD52AAC1430359D0026DC9F /* UnionFind.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnionFind.h; sourceTree = "<group>"; };
0FD82E1E14172C2F00179C94 /* DFGCapabilities.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCapabilities.cpp; path = dfg/DFGCapabilities.cpp; sourceTree = "<group>"; };
0FD82E1F14172C2F00179C94 /* DFGCapabilities.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCapabilities.h; path = dfg/DFGCapabilities.h; sourceTree = "<group>"; };
@@ -930,10 +1040,22 @@
0FD82E53141DAEDE00179C94 /* DFGOSREntry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGOSREntry.h; path = dfg/DFGOSREntry.h; sourceTree = "<group>"; };
0FD82E82141F3FC900179C94 /* BoundsCheckedPointer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BoundsCheckedPointer.h; sourceTree = "<group>"; };
0FD82E84141F3FDA00179C94 /* PredictedType.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = PredictedType.cpp; sourceTree = "<group>"; };
- 0FD82E8E14207A5100179C94 /* ValueProfile.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ValueProfile.cpp; sourceTree = "<group>"; };
0FD82F491428069200179C94 /* BitVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BitVector.h; sourceTree = "<group>"; };
0FE228EA1436AB2300196C48 /* Options.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = Options.cpp; sourceTree = "<group>"; };
0FE228EB1436AB2300196C48 /* Options.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Options.h; sourceTree = "<group>"; };
+ 0FF922CF14F46B130041A24E /* JSCLLIntOffsetsExtractor */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = JSCLLIntOffsetsExtractor; sourceTree = BUILT_PRODUCTS_DIR; };
+ 0FFFC94914EF909500C72532 /* DFGArithNodeFlagsInferencePhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGArithNodeFlagsInferencePhase.cpp; path = dfg/DFGArithNodeFlagsInferencePhase.cpp; sourceTree = "<group>"; };
+ 0FFFC94A14EF909500C72532 /* DFGArithNodeFlagsInferencePhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGArithNodeFlagsInferencePhase.h; path = dfg/DFGArithNodeFlagsInferencePhase.h; sourceTree = "<group>"; };
+ 0FFFC94B14EF909500C72532 /* DFGCFAPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCFAPhase.cpp; path = dfg/DFGCFAPhase.cpp; sourceTree = "<group>"; };
+ 0FFFC94C14EF909500C72532 /* DFGCFAPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCFAPhase.h; path = dfg/DFGCFAPhase.h; sourceTree = "<group>"; };
+ 0FFFC94D14EF909500C72532 /* DFGCSEPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCSEPhase.cpp; path = dfg/DFGCSEPhase.cpp; sourceTree = "<group>"; };
+ 0FFFC94E14EF909500C72532 /* DFGCSEPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCSEPhase.h; path = dfg/DFGCSEPhase.h; sourceTree = "<group>"; };
+ 0FFFC94F14EF909500C72532 /* DFGPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGPhase.cpp; path = dfg/DFGPhase.cpp; sourceTree = "<group>"; };
+ 0FFFC95014EF909500C72532 /* DFGPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGPhase.h; path = dfg/DFGPhase.h; sourceTree = "<group>"; };
+ 0FFFC95114EF909500C72532 /* DFGPredictionPropagationPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGPredictionPropagationPhase.cpp; path = dfg/DFGPredictionPropagationPhase.cpp; sourceTree = "<group>"; };
+ 0FFFC95214EF909500C72532 /* DFGPredictionPropagationPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGPredictionPropagationPhase.h; path = dfg/DFGPredictionPropagationPhase.h; sourceTree = "<group>"; };
+ 0FFFC95314EF909500C72532 /* DFGVirtualRegisterAllocationPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGVirtualRegisterAllocationPhase.cpp; path = dfg/DFGVirtualRegisterAllocationPhase.cpp; sourceTree = "<group>"; };
+ 0FFFC95414EF909500C72532 /* DFGVirtualRegisterAllocationPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGVirtualRegisterAllocationPhase.h; path = dfg/DFGVirtualRegisterAllocationPhase.h; sourceTree = "<group>"; };
1400067612A6F7830064D123 /* OSAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OSAllocator.h; sourceTree = "<group>"; };
1400069212A6F9E10064D123 /* OSAllocatorPosix.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = OSAllocatorPosix.cpp; sourceTree = "<group>"; };
140D17D60E8AD4A9000CD17D /* JSBasePrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSBasePrivate.h; sourceTree = "<group>"; };
@@ -1005,6 +1127,7 @@
148A1ECD0D10C23B0069A47C /* RefPtrHashMap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RefPtrHashMap.h; sourceTree = "<group>"; };
148CD1D7108CF902008163C6 /* JSContextRefPrivate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSContextRefPrivate.h; sourceTree = "<group>"; };
149559ED0DDCDDF700648087 /* DebuggerCallFrame.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DebuggerCallFrame.cpp; sourceTree = "<group>"; };
+ 1497209014EB831500FEB1B7 /* PassWeak.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PassWeak.h; sourceTree = "<group>"; };
149B24FF0D8AF6D1009CB8C7 /* Register.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Register.h; sourceTree = "<group>"; };
149DAAF212EB559D0083B12B /* ConservativeRoots.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConservativeRoots.h; sourceTree = "<group>"; };
14A1563010966365006FA260 /* DateInstanceCache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DateInstanceCache.h; sourceTree = "<group>"; };
@@ -1054,6 +1177,20 @@
1C9051450BA9E8A70081E9D0 /* Base.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = Base.xcconfig; sourceTree = "<group>"; };
1CAA8B4A0D32C39A0041BCFF /* JavaScript.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JavaScript.h; sourceTree = "<group>"; };
1CAA8B4B0D32C39A0041BCFF /* JavaScriptCore.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JavaScriptCore.h; sourceTree = "<group>"; };
+ 2684B2C414D4A9B20072C0B6 /* ParsedURL.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ParsedURL.cpp; sourceTree = "<group>"; };
+ 2684B2C514D4A9B20072C0B6 /* ParsedURL.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParsedURL.h; sourceTree = "<group>"; };
+ 2684B2C614D4A9B20072C0B6 /* URLString.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLString.h; sourceTree = "<group>"; };
+ 2684B2C814D4A9B20072C0B6 /* RawURLBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RawURLBuffer.h; sourceTree = "<group>"; };
+ 2684B2C914D4A9B20072C0B6 /* URLBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLBuffer.h; sourceTree = "<group>"; };
+ 2684B2CA14D4A9B20072C0B6 /* URLCharacterTypes.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = URLCharacterTypes.cpp; sourceTree = "<group>"; };
+ 2684B2CB14D4A9B20072C0B6 /* URLCharacterTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLCharacterTypes.h; sourceTree = "<group>"; };
+ 2684B2CC14D4A9B20072C0B6 /* URLComponent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLComponent.h; sourceTree = "<group>"; };
+ 2684B2CD14D4A9B20072C0B6 /* URLEscape.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = URLEscape.cpp; sourceTree = "<group>"; };
+ 2684B2CE14D4A9B20072C0B6 /* URLEscape.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLEscape.h; sourceTree = "<group>"; };
+ 2684B2CF14D4A9B20072C0B6 /* URLParser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLParser.h; sourceTree = "<group>"; };
+ 2684B2D014D4A9B20072C0B6 /* URLQueryCanonicalizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLQueryCanonicalizer.h; sourceTree = "<group>"; };
+ 2684B2D114D4A9B20072C0B6 /* URLSegments.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = URLSegments.cpp; sourceTree = "<group>"; };
+ 2684B2D214D4A9B20072C0B6 /* URLSegments.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = URLSegments.h; sourceTree = "<group>"; };
2CFC5B7A12F44714004914E2 /* CharacterNames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CharacterNames.h; sourceTree = "<group>"; };
41359CF40FDD89CB00206180 /* DateMath.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DateMath.cpp; sourceTree = "<group>"; };
41359CF50FDD89CB00206180 /* DateMath.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DateMath.h; sourceTree = "<group>"; };
@@ -1351,6 +1488,7 @@
A74DE1CB120B86D600D40D5B /* ARMv7Assembler.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ARMv7Assembler.cpp; sourceTree = "<group>"; };
A7521E121429169A003C8D0C /* CardSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CardSet.h; sourceTree = "<group>"; };
A75706DD118A2BCF0057F88F /* JITArithmetic32_64.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITArithmetic32_64.cpp; sourceTree = "<group>"; };
+ A767FF9F14F4502900789059 /* JSCTypedArrayStubs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSCTypedArrayStubs.h; sourceTree = "<group>"; };
A76C51741182748D00715B05 /* JSInterfaceJIT.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JSInterfaceJIT.h; sourceTree = "<group>"; };
A76F54A213B28AAB00EF2BCE /* JITWriteBarrier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITWriteBarrier.h; sourceTree = "<group>"; };
A781E358141970C700094D90 /* StorageBarrier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StorageBarrier.h; sourceTree = "<group>"; };
@@ -1462,6 +1600,7 @@
BC9BB95B0E19680600DF8855 /* InternalFunction.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = InternalFunction.cpp; sourceTree = "<group>"; };
BCA62DFE0E2826230004F30D /* CallData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CallData.cpp; sourceTree = "<group>"; };
BCA62DFF0E2826310004F30D /* ConstructData.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ConstructData.cpp; sourceTree = "<group>"; };
+ BCBE2CAD14E985AA000593AD /* GCAssertions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GCAssertions.h; sourceTree = "<group>"; };
BCCF0D070EF0AAB900413C8F /* StructureStubInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StructureStubInfo.h; sourceTree = "<group>"; };
BCCF0D0B0EF0B8A500413C8F /* StructureStubInfo.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StructureStubInfo.cpp; sourceTree = "<group>"; };
BCD202BD0E1706A7002C7E82 /* RegExpConstructor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = RegExpConstructor.cpp; sourceTree = "<group>"; };
@@ -1508,14 +1647,15 @@
C22C52B913FAF6EF00B7DC0D /* strtod.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = strtod.cc; sourceTree = "<group>"; };
C22C52BA13FAF6EF00B7DC0D /* strtod.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = strtod.h; sourceTree = "<group>"; };
C22C52BB13FAF6EF00B7DC0D /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; };
- C240305314B404C90079EB64 /* BumpSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = BumpSpace.cpp; sourceTree = "<group>"; };
+ C240305314B404C90079EB64 /* CopiedSpace.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = CopiedSpace.cpp; sourceTree = "<group>"; };
C2B916C114DA014E00CBAC86 /* MarkedAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MarkedAllocator.h; sourceTree = "<group>"; };
C2B916C414DA040C00CBAC86 /* MarkedAllocator.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = MarkedAllocator.cpp; sourceTree = "<group>"; };
- C2C8D02B14A3C6B200578E65 /* BumpSpaceInlineMethods.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BumpSpaceInlineMethods.h; sourceTree = "<group>"; };
- C2C8D02E14A3CEFC00578E65 /* BumpBlock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BumpBlock.h; sourceTree = "<group>"; };
+ C2C8D02B14A3C6B200578E65 /* CopiedSpaceInlineMethods.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CopiedSpaceInlineMethods.h; sourceTree = "<group>"; };
+ C2C8D02E14A3CEFC00578E65 /* CopiedBlock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CopiedBlock.h; sourceTree = "<group>"; };
C2C8D02F14A3CEFC00578E65 /* HeapBlock.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HeapBlock.h; sourceTree = "<group>"; };
C2D9CA1214BCC04600304B46 /* CheckedBoolean.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CheckedBoolean.h; sourceTree = "<group>"; };
- C2EAA3F8149A830800FCE112 /* BumpSpace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BumpSpace.h; sourceTree = "<group>"; };
+ C2EAA3F8149A830800FCE112 /* CopiedSpace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CopiedSpace.h; sourceTree = "<group>"; };
+ C2EAD2FB14F0249800A4B159 /* CopiedAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CopiedAllocator.h; sourceTree = "<group>"; };
C2EE599D13FC972A009CEAFE /* DecimalNumber.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DecimalNumber.cpp; sourceTree = "<group>"; };
C2EE599E13FC972A009CEAFE /* DecimalNumber.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DecimalNumber.h; sourceTree = "<group>"; };
D21202280AD4310C00ED79B6 /* DateConversion.cpp */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = DateConversion.cpp; sourceTree = "<group>"; };
@@ -1585,13 +1725,20 @@
F692A8850255597D01FF60F7 /* UString.cpp */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UString.cpp; sourceTree = "<group>"; tabWidth = 8; };
F692A8860255597D01FF60F7 /* UString.h */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.c.h; path = UString.h; sourceTree = "<group>"; tabWidth = 8; };
F692A8870255597D01FF60F7 /* JSValue.cpp */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSValue.cpp; sourceTree = "<group>"; tabWidth = 8; };
- F69E86C114C6E551002C2C62 /* NumberOfCores.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = NumberOfCores.cpp; path = wtf/NumberOfCores.cpp; sourceTree = "<group>"; };
- F69E86C214C6E551002C2C62 /* NumberOfCores.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = NumberOfCores.h; path = wtf/NumberOfCores.h; sourceTree = "<group>"; };
+ F69E86C114C6E551002C2C62 /* NumberOfCores.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = NumberOfCores.cpp; sourceTree = "<group>"; };
+ F69E86C214C6E551002C2C62 /* NumberOfCores.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NumberOfCores.h; sourceTree = "<group>"; };
FDA15C1612B03028003A583A /* Complex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Complex.h; sourceTree = "<group>"; };
FE1B44790ECCD73B004F4DD1 /* StdLibExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StdLibExtras.h; sourceTree = "<group>"; };
/* End PBXFileReference section */
/* Begin PBXFrameworksBuildPhase section */
+ 0FF922C614F46B130041A24E /* Frameworks */ = {
+ isa = PBXFrameworksBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
1412111E0A48793C00480255 /* Frameworks */ = {
isa = PBXFrameworksBuildPhase;
buildActionMask = 2147483647;
@@ -1650,6 +1797,7 @@
141211200A48793C00480255 /* minidom */,
14BD59BF0A3E8F9000BAF59C /* testapi */,
6511230514046A4C002B101D /* testRegExp */,
+ 0FF922CF14F46B130041A24E /* JSCLLIntOffsetsExtractor */,
);
name = Products;
sourceTree = "<group>";
@@ -1667,8 +1815,6 @@
0867D691FE84028FC02AAC07 /* JavaScriptCore */ = {
isa = PBXGroup;
children = (
- F69E86C114C6E551002C2C62 /* NumberOfCores.cpp */,
- F69E86C214C6E551002C2C62 /* NumberOfCores.h */,
8604F4F2143A6C4400B295F5 /* ChangeLog */,
A718F8211178EB4B002465A7 /* create_regex_tables */,
937B63CC09E766D200A671DD /* DerivedSources.make */,
@@ -1676,7 +1822,9 @@
F692A8540255597D01FF60F7 /* create_hash_table */,
F5C290E60284F98E018635CA /* JavaScriptCorePrefix.h */,
45E12D8806A49B0F00E9DF84 /* jsc.cpp */,
+ A767FF9F14F4502900789059 /* JSCTypedArrayStubs.h */,
F68EBB8C0255D4C601FF60F7 /* config.h */,
+ 0F46809C14BA7F4D00BFE272 /* llint */,
1432EBD70A34CAD400717B9F /* API */,
9688CB120ED12B4E001D649F /* assembler */,
969A078F0ED1D3AE00F1F681 /* bytecode */,
@@ -1716,6 +1864,28 @@
tabWidth = 4;
usesTabs = 0;
};
+ 0F46809C14BA7F4D00BFE272 /* llint */ = {
+ isa = PBXGroup;
+ children = (
+ 0F0B839514BCF45A00885B4F /* LLIntEntrypoints.cpp */,
+ 0F0B839614BCF45A00885B4F /* LLIntEntrypoints.h */,
+ 0F0B839714BCF45A00885B4F /* LLIntThunks.cpp */,
+ 0F0B839814BCF45A00885B4F /* LLIntThunks.h */,
+ 0F4680CE14BBB3D100BFE272 /* LLIntData.cpp */,
+ 0F4680CF14BBB3D100BFE272 /* LLIntData.h */,
+ 0F4680C514BBB16900BFE272 /* LLIntCommon.h */,
+ 0F4680C614BBB16900BFE272 /* LLIntOfflineAsmConfig.h */,
+ 0F4680C714BBB16900BFE272 /* LowLevelInterpreter.cpp */,
+ 0F4680C814BBB16900BFE272 /* LowLevelInterpreter.h */,
+ 0F46809D14BA7F8200BFE272 /* LLIntExceptions.cpp */,
+ 0F46809E14BA7F8200BFE272 /* LLIntExceptions.h */,
+ 0F46809F14BA7F8200BFE272 /* LLIntSlowPaths.cpp */,
+ 0F4680A014BA7F8200BFE272 /* LLIntSlowPaths.h */,
+ 0F4680A114BA7F8200BFE272 /* LLIntOffsetsExtractor.cpp */,
+ );
+ name = llint;
+ sourceTree = "<group>";
+ };
141211000A48772600480255 /* tests */ = {
isa = PBXGroup;
children = (
@@ -1750,6 +1920,8 @@
1429D92C0ED22D7000B89619 /* jit */ = {
isa = PBXGroup;
children = (
+ 0F4680D014BBC5F800BFE272 /* HostCallReturnValue.cpp */,
+ 0F4680D114BBC5F800BFE272 /* HostCallReturnValue.h */,
0F46807F14BA572700BFE272 /* JITExceptions.cpp */,
0F46808014BA572700BFE272 /* JITExceptions.h */,
0FD82E37141AB14200179C94 /* CompactJITCodeMap.h */,
@@ -1784,15 +1956,17 @@
142E312A134FF0A600AFADB5 /* heap */ = {
isa = PBXGroup;
children = (
- C2C8D02E14A3CEFC00578E65 /* BumpBlock.h */,
- C240305314B404C90079EB64 /* BumpSpace.cpp */,
- C2EAA3F8149A830800FCE112 /* BumpSpace.h */,
- C2C8D02B14A3C6B200578E65 /* BumpSpaceInlineMethods.h */,
+ C2EAD2FB14F0249800A4B159 /* CopiedAllocator.h */,
+ C2C8D02E14A3CEFC00578E65 /* CopiedBlock.h */,
+ C240305314B404C90079EB64 /* CopiedSpace.cpp */,
+ C2EAA3F8149A830800FCE112 /* CopiedSpace.h */,
+ C2C8D02B14A3C6B200578E65 /* CopiedSpaceInlineMethods.h */,
A7521E121429169A003C8D0C /* CardSet.h */,
146B14DB12EB5B12001BEC1B /* ConservativeRoots.cpp */,
149DAAF212EB559D0083B12B /* ConservativeRoots.h */,
0F2C556D14738F2E00121E4F /* DFGCodeBlocks.cpp */,
0F2C556E14738F2E00121E4F /* DFGCodeBlocks.h */,
+ BCBE2CAD14E985AA000593AD /* GCAssertions.h */,
142E312B134FF0A600AFADB5 /* Handle.h */,
142E312C134FF0A600AFADB5 /* HandleHeap.cpp */,
142E312D134FF0A600AFADB5 /* HandleHeap.h */,
@@ -1817,6 +1991,7 @@
14D2F3D9139F4BE200491031 /* MarkedSpace.h */,
142D6F0E13539A4100B02E86 /* MarkStack.cpp */,
142D6F0F13539A4100B02E86 /* MarkStack.h */,
+ 1497209014EB831500FEB1B7 /* PassWeak.h */,
14BA78F013AAB88F005B7C2C /* SlotVisitor.h */,
142E3132134FF0A600AFADB5 /* Strong.h */,
145722851437E140005FDE26 /* StrongInlines.h */,
@@ -1938,6 +2113,43 @@
tabWidth = 4;
usesTabs = 0;
};
+ 2684B2C214D4A9B20072C0B6 /* url */ = {
+ isa = PBXGroup;
+ children = (
+ 2684B2C314D4A9B20072C0B6 /* api */,
+ 2684B2C714D4A9B20072C0B6 /* src */,
+ );
+ path = url;
+ sourceTree = "<group>";
+ };
+ 2684B2C314D4A9B20072C0B6 /* api */ = {
+ isa = PBXGroup;
+ children = (
+ 2684B2C414D4A9B20072C0B6 /* ParsedURL.cpp */,
+ 2684B2C514D4A9B20072C0B6 /* ParsedURL.h */,
+ 2684B2C614D4A9B20072C0B6 /* URLString.h */,
+ );
+ path = api;
+ sourceTree = "<group>";
+ };
+ 2684B2C714D4A9B20072C0B6 /* src */ = {
+ isa = PBXGroup;
+ children = (
+ 2684B2C814D4A9B20072C0B6 /* RawURLBuffer.h */,
+ 2684B2C914D4A9B20072C0B6 /* URLBuffer.h */,
+ 2684B2CA14D4A9B20072C0B6 /* URLCharacterTypes.cpp */,
+ 2684B2CB14D4A9B20072C0B6 /* URLCharacterTypes.h */,
+ 2684B2CC14D4A9B20072C0B6 /* URLComponent.h */,
+ 2684B2CD14D4A9B20072C0B6 /* URLEscape.cpp */,
+ 2684B2CE14D4A9B20072C0B6 /* URLEscape.h */,
+ 2684B2CF14D4A9B20072C0B6 /* URLParser.h */,
+ 2684B2D014D4A9B20072C0B6 /* URLQueryCanonicalizer.h */,
+ 2684B2D114D4A9B20072C0B6 /* URLSegments.cpp */,
+ 2684B2D214D4A9B20072C0B6 /* URLSegments.h */,
+ );
+ path = src;
+ sourceTree = "<group>";
+ };
650FDF8D09D0FCA700769E54 /* Derived Sources */ = {
isa = PBXGroup;
children = (
@@ -1964,6 +2176,12 @@
65162EF108E6A21C007556CD /* wtf */ = {
isa = PBXGroup;
children = (
+ C22C524813FAF6EF00B7DC0D /* dtoa */,
+ 06D358A00DAAD9C4003B174E /* mac */,
+ 8656573E115BE35200291F40 /* text */,
+ 1A082776142168D60090CCAC /* threads */,
+ E195678D09E7CF1200B89D13 /* unicode */,
+ 2684B2C214D4A9B20072C0B6 /* url */,
A1D764511354448B00C5C7C0 /* Alignment.h */,
93AA4F770957251F0084B3A7 /* AlwaysInline.h */,
A73BE154148420520091204B /* ArrayBuffer.cpp */,
@@ -1985,12 +2203,15 @@
A7A1F7AA0F252B3C00E184E2 /* ByteArray.cpp */,
A7A1F7AB0F252B3C00E184E2 /* ByteArray.h */,
A7BC0C81140608B000B1BB71 /* CheckedArithmetic.h */,
+ C2D9CA1214BCC04600304B46 /* CheckedBoolean.h */,
BC66BAE213F4928F00C23FAE /* Compiler.h */,
FDA15C1612B03028003A583A /* Complex.h */,
97941A7C1302A098004A3447 /* CryptographicallyRandomNumber.cpp */,
97941A7D1302A098004A3447 /* CryptographicallyRandomNumber.h */,
180B9AEF0F16C569009BDBC5 /* CurrentTime.cpp */,
180B9AF00F16C569009BDBC5 /* CurrentTime.h */,
+ 0F9FC8CD14E612D500D52AE0 /* DataLog.cpp */,
+ 0F9FC8CE14E612D500D52AE0 /* DataLog.h */,
41359CF40FDD89CB00206180 /* DateMath.cpp */,
41359CF50FDD89CB00206180 /* DateMath.h */,
C2EE599D13FC972A009CEAFE /* DecimalNumber.cpp */,
@@ -1999,7 +2220,6 @@
5186111D0CC824830081412B /* Deque.h */,
938C4F6B0CA06BCE00D9310A /* DisallowCType.h */,
14456A311314657800212CA3 /* DoublyLinkedList.h */,
- C22C524813FAF6EF00B7DC0D /* dtoa */,
651F6412039D5B5F0078395C /* dtoa.cpp */,
651F6413039D5B5F0078395C /* dtoa.h */,
D75AF59512F8CB9500FC0ADF /* DynamicAnnotations.cpp */,
@@ -2032,7 +2252,6 @@
657EB7450B708F540063461B /* ListHashSet.h */,
148A1626095D16BB00666D0D /* ListRefPtr.h */,
E1EE79270D6C964500FEA3BA /* Locker.h */,
- 06D358A00DAAD9C4003B174E /* mac */,
06D358A20DAAD9C4003B174E /* MainThread.cpp */,
06D358A30DAAD9C4003B174E /* MainThread.h */,
5DBD18AF0C5401A700C15EAE /* MallocZoneSupport.h */,
@@ -2048,6 +2267,8 @@
C0A2723F0E509F1E00E96E15 /* NotFound.h */,
93854A9912C93D3B00DAAF77 /* NullPtr.cpp */,
933F5CDB126922690049191E /* NullPtr.h */,
+ F69E86C114C6E551002C2C62 /* NumberOfCores.cpp */,
+ F69E86C214C6E551002C2C62 /* NumberOfCores.h */,
1400067612A6F7830064D123 /* OSAllocator.h */,
1400069212A6F9E10064D123 /* OSAllocatorPosix.cpp */,
97941A3F130299DB004A3447 /* OSRandomSource.cpp */,
@@ -2102,7 +2323,6 @@
6541BD7008E80A17002CBEE7 /* TCSystemAlloc.cpp */,
6541BD7108E80A17002CBEE7 /* TCSystemAlloc.h */,
0BCD83541485841200EA2003 /* TemporaryChange.h */,
- 8656573E115BE35200291F40 /* text */,
18BAB52710DADFCD000D945B /* ThreadIdentifierDataPthreads.cpp */,
18BAB52810DADFCD000D945B /* ThreadIdentifierDataPthreads.h */,
5D6A566A0F05995500266145 /* Threading.cpp */,
@@ -2110,7 +2330,6 @@
BC5F7BBC11823B590052C02C /* ThreadingPrimitives.h */,
E1EE793C0D6C9B9200FEA3BA /* ThreadingPthreads.cpp */,
0BAC949E1338728400CF135B /* ThreadRestrictionVerifier.h */,
- 1A082776142168D60090CCAC /* threads */,
BC5F7BBD11823B590052C02C /* ThreadSafeRefCounted.h */,
E1B7C8BD0DA3A3360074B0DC /* ThreadSpecific.h */,
A73BE17D148420840091204B /* TypedArrayBase.h */,
@@ -2120,7 +2339,6 @@
A73BE167148420520091204B /* Uint32Array.h */,
A73BE163148420520091204B /* Uint8Array.h */,
91A3905514C0F47200F67901 /* Uint8ClampedArray.h */,
- E195678D09E7CF1200B89D13 /* unicode */,
0FD52AAC1430359D0026DC9F /* UnionFind.h */,
935AF46B09E9D9DB00ACD1D8 /* UnusedParam.h */,
E17FF770112131D200076A19 /* ValueCheck.h */,
@@ -2129,7 +2347,6 @@
96DD73780F9DA3100027FBCC /* VMTags.h */,
86D08D5111793613006E5ED0 /* WTFThreadData.cpp */,
86D08D5211793613006E5ED0 /* WTFThreadData.h */,
- C2D9CA1214BCC04600304B46 /* CheckedBoolean.h */,
);
path = wtf;
sourceTree = "<group>";
@@ -2179,6 +2396,8 @@
7EF6E0BB0EB7A1EC0079AFAF /* runtime */ = {
isa = PBXGroup;
children = (
+ 0F21C27914BE727300ADC64B /* CodeSpecializationKind.h */,
+ 0F21C27A14BE727300ADC64B /* ExecutionHarness.h */,
0F15F15D14B7A73A005DE37D /* CommonSlowPaths.h */,
BCF605110E203EF800B9A64D /* ArgList.cpp */,
BCF605120E203EF800B9A64D /* ArgList.h */,
@@ -2437,6 +2656,8 @@
86EC9DB31328DF44002B2AD7 /* dfg */ = {
isa = PBXGroup;
children = (
+ 0FFFC94914EF909500C72532 /* DFGArithNodeFlagsInferencePhase.cpp */,
+ 0FFFC94A14EF909500C72532 /* DFGArithNodeFlagsInferencePhase.h */,
0F62016D143FCD2F0068B77C /* DFGAbstractState.cpp */,
0F62016E143FCD2F0068B77C /* DFGAbstractState.h */,
0F62016F143FCD2F0068B77C /* DFGAbstractValue.h */,
@@ -2447,6 +2668,10 @@
86EC9DB41328DF82002B2AD7 /* DFGByteCodeParser.cpp */,
86EC9DB51328DF82002B2AD7 /* DFGByteCodeParser.h */,
0F7B294814C3CD23007C3DB1 /* DFGCCallHelpers.h */,
+ 0FFFC94B14EF909500C72532 /* DFGCFAPhase.cpp */,
+ 0FFFC94C14EF909500C72532 /* DFGCFAPhase.h */,
+ 0FFFC94D14EF909500C72532 /* DFGCSEPhase.cpp */,
+ 0FFFC94E14EF909500C72532 /* DFGCSEPhase.h */,
0FD82E1E14172C2F00179C94 /* DFGCapabilities.cpp */,
0FD82E1F14172C2F00179C94 /* DFGCapabilities.h */,
0FC0977E1469EBC400CF2442 /* DFGCommon.h */,
@@ -2475,8 +2700,10 @@
0FC0976F14693AEF00CF2442 /* DFGOSRExitCompiler.h */,
0FC09775146943AD00CF2442 /* DFGOSRExitCompiler32_64.cpp */,
0FC0977014693AEF00CF2442 /* DFGOSRExitCompiler64.cpp */,
- 0FD3C82314115D1A00FD81CB /* DFGPropagator.cpp */,
- 0FD3C82414115D2200FD81CB /* DFGPropagator.h */,
+ 0FFFC94F14EF909500C72532 /* DFGPhase.cpp */,
+ 0FFFC95014EF909500C72532 /* DFGPhase.h */,
+ 0FFFC95114EF909500C72532 /* DFGPredictionPropagationPhase.cpp */,
+ 0FFFC95214EF909500C72532 /* DFGPredictionPropagationPhase.h */,
86EC9DC11328DF82002B2AD7 /* DFGRegisterBank.h */,
86BB09BE138E381B0056702F /* DFGRepatch.cpp */,
86BB09BF138E381B0056702F /* DFGRepatch.h */,
@@ -2488,6 +2715,8 @@
0FC0979F146B28C700CF2442 /* DFGThunks.cpp */,
0FC097A0146B28C700CF2442 /* DFGThunks.h */,
0F620172143FCD2F0068B77C /* DFGVariableAccessData.h */,
+ 0FFFC95314EF909500C72532 /* DFGVirtualRegisterAllocationPhase.cpp */,
+ 0FFFC95414EF909500C72532 /* DFGVirtualRegisterAllocationPhase.h */,
);
name = dfg;
sourceTree = "<group>";
@@ -2553,6 +2782,15 @@
969A078F0ED1D3AE00F1F681 /* bytecode */ = {
isa = PBXGroup;
children = (
+ 0FB5467C14F5CFD3002C2989 /* MethodOfGettingAValueProfile.cpp */,
+ 0FB5467A14F5C7D4002C2989 /* MethodOfGettingAValueProfile.h */,
+ 0FB5467814F5C468002C2989 /* LazyOperandValueProfile.cpp */,
+ 0FB5467614F59AD1002C2989 /* LazyOperandValueProfile.h */,
+ 0F21C27E14BEAA8000ADC64B /* BytecodeConventions.h */,
+ 0F0FC45814BD15F100B81154 /* LLIntCallLinkInfo.h */,
+ 0F9FC8BF14E1B5FB00D52AE0 /* PolymorphicPutByIdList.cpp */,
+ 0F9FC8C014E1B5FB00D52AE0 /* PolymorphicPutByIdList.h */,
+ 0F9FC8C114E1B5FB00D52AE0 /* PutKind.h */,
0F93329314CA7DC10085F3C6 /* CallLinkStatus.cpp */,
0F93329414CA7DC10085F3C6 /* CallLinkStatus.h */,
0F93329514CA7DC10085F3C6 /* GetByIdStatus.cpp */,
@@ -2591,7 +2829,6 @@
1429D8840ED21C3D00B89619 /* SamplingTool.h */,
BCCF0D0B0EF0B8A500413C8F /* StructureStubInfo.cpp */,
BCCF0D070EF0AAB900413C8F /* StructureStubInfo.h */,
- 0FD82E8E14207A5100179C94 /* ValueProfile.cpp */,
0F963B3613FC6FDE0002D9B2 /* ValueProfile.h */,
0F426A451460CBAB00131F8F /* ValueRecovery.h */,
0F426A461460CBAB00131F8F /* VirtualRegister.h */,
@@ -2684,6 +2921,7 @@
A73BE169148420520091204B /* ArrayBuffer.h in Headers */,
C2D9CA1314BCC04600304B46 /* CheckedBoolean.h in Headers */,
A73BE16B148420520091204B /* ArrayBufferView.h in Headers */,
+ C2EAD2FC14F0249800A4B159 /* CopiedAllocator.h in Headers */,
C2B916C214DA014E00CBAC86 /* MarkedAllocator.h in Headers */,
BC18C3E60E16F5CD00B34460 /* ArrayConstructor.h in Headers */,
BC18C3E70E16F5CD00B34460 /* ArrayPrototype.h in Headers */,
@@ -2710,10 +2948,10 @@
E4D8CEFB12FC439600BC9F5A /* BloomFilter.h in Headers */,
BC18C3EC0E16F5CD00B34460 /* BooleanObject.h in Headers */,
0FD82E85141F3FE300179C94 /* BoundsCheckedPointer.h in Headers */,
- C2C8D03014A3CEFC00578E65 /* BumpBlock.h in Headers */,
+ C2C8D03014A3CEFC00578E65 /* CopiedBlock.h in Headers */,
86676D5211FED9BC004B6863 /* BumpPointerAllocator.h in Headers */,
- C2EAA3FA149A835E00FCE112 /* BumpSpace.h in Headers */,
- C2C8D02D14A3C6E000578E65 /* BumpSpaceInlineMethods.h in Headers */,
+ C2EAA3FA149A835E00FCE112 /* CopiedSpace.h in Headers */,
+ C2C8D02D14A3C6E000578E65 /* CopiedSpaceInlineMethods.h in Headers */,
A7A1F7AD0F252B3C00E184E2 /* ByteArray.h in Headers */,
969A07230ED1CE3300F1F681 /* BytecodeGenerator.h in Headers */,
C22C52F613FAF6EF00B7DC0D /* cached-powers.h in Headers */,
@@ -2775,7 +3013,6 @@
0FD82E57141DAF1000179C94 /* DFGOSREntry.h in Headers */,
0FC0976A1468A6F700CF2442 /* DFGOSRExit.h in Headers */,
0FC0977114693AF500CF2442 /* DFGOSRExitCompiler.h in Headers */,
- 0FD3C82714115D4F00FD81CB /* DFGPropagator.h in Headers */,
86EC9DD11328DF82002B2AD7 /* DFGRegisterBank.h in Headers */,
86BB09C1138E381B0056702F /* DFGRepatch.h in Headers */,
86ECA3FA132DF25A002B2AD7 /* DFGScoreBoard.h in Headers */,
@@ -2963,6 +3200,7 @@
7934BB7D1361979400CB99A1 /* ParallelJobsGeneric.h in Headers */,
7934BB7E1361979400CB99A1 /* ParallelJobsLibdispatch.h in Headers */,
7934BB7F1361979400CB99A1 /* ParallelJobsOpenMP.h in Headers */,
+ 2684B2D414D4A9B20072C0B6 /* ParsedURL.h in Headers */,
BC18C44B0E16F5CD00B34460 /* Parser.h in Headers */,
93052C350FB792190048FDC3 /* ParserArena.h in Headers */,
65303D641447B9E100D3F904 /* ParserTokens.h in Headers */,
@@ -2986,6 +3224,7 @@
147B84630E6DE6B1004775A4 /* PutPropertySlot.h in Headers */,
088FA5BC0EF76D4300578E6F /* RandomNumber.h in Headers */,
08E279E90EF83B10007DB523 /* RandomNumberSeed.h in Headers */,
+ 2684B2D614D4A9B20072C0B6 /* RawURLBuffer.h in Headers */,
0F963B2713F753BB0002D9B2 /* RedBlackTree.h in Headers */,
BC18C4570E16F5CD00B34460 /* RefCounted.h in Headers */,
90D3469C0E285280009492EE /* RefCountedLeakCounter.h in Headers */,
@@ -3075,6 +3314,14 @@
BC18C4740E16F5CD00B34460 /* UnicodeIcu.h in Headers */,
0FD52AAE143035A00026DC9F /* UnionFind.h in Headers */,
BC18C4750E16F5CD00B34460 /* UnusedParam.h in Headers */,
+ 2684B2D514D4A9B20072C0B6 /* URLString.h in Headers */,
+ 2684B2D714D4A9B20072C0B6 /* URLBuffer.h in Headers */,
+ 2684B2D914D4A9B20072C0B6 /* URLCharacterTypes.h in Headers */,
+ 2684B2DA14D4A9B20072C0B6 /* URLComponent.h in Headers */,
+ 2684B2DC14D4A9B20072C0B6 /* URLEscape.h in Headers */,
+ 2684B2DD14D4A9B20072C0B6 /* URLParser.h in Headers */,
+ 2684B2DE14D4A9B20072C0B6 /* URLQueryCanonicalizer.h in Headers */,
+ 2684B2E014D4A9B20072C0B6 /* URLSegments.h in Headers */,
BC18C4760E16F5CD00B34460 /* UString.h in Headers */,
08DDA5C11264631700751732 /* UStringBuilder.h in Headers */,
BC18C4770E16F5CD00B34460 /* UTF8.h in Headers */,
@@ -3104,7 +3351,16 @@
86704B8A12DBA33700A9FE7B /* YarrPattern.h in Headers */,
86704B4312DB8A8100A9FE7B /* YarrSyntaxChecker.h in Headers */,
0F15F15F14B7A73E005DE37D /* CommonSlowPaths.h in Headers */,
+ 0F4680A314BA7F8D00BFE272 /* LLIntExceptions.h in Headers */,
+ 0F4680A514BA7F8D00BFE272 /* LLIntSlowPaths.h in Headers */,
0F46808214BA572D00BFE272 /* JITExceptions.h in Headers */,
+ 0F4680CA14BBB16C00BFE272 /* LLIntCommon.h in Headers */,
+ 0F4680CB14BBB17200BFE272 /* LLIntOfflineAsmConfig.h in Headers */,
+ 0F4680CD14BBB17D00BFE272 /* LowLevelInterpreter.h in Headers */,
+ 0F4680D314BBD16700BFE272 /* LLIntData.h in Headers */,
+ 0F4680D514BBD24B00BFE272 /* HostCallReturnValue.h in Headers */,
+ 0F0B839B14BCF46000885B4F /* LLIntEntrypoints.h in Headers */,
+ 0F0B839D14BCF46600885B4F /* LLIntThunks.h in Headers */,
0F0B83A714BCF50700885B4F /* CodeType.h in Headers */,
0F0B83A914BCF56200885B4F /* HandlerInfo.h in Headers */,
0F0B83AB14BCF5BB00885B4F /* ExpressionRangeInfo.h in Headers */,
@@ -3113,7 +3369,11 @@
0F0B83B514BCF86200885B4F /* MethodCallLinkInfo.h in Headers */,
0F0B83B714BCF8E100885B4F /* GlobalResolveInfo.h in Headers */,
0F0B83B914BCF95F00885B4F /* CallReturnOffsetToBytecodeOffset.h in Headers */,
+ 0F0FC45A14BD15F500B81154 /* LLIntCallLinkInfo.h in Headers */,
0F21C26814BE5F6800ADC64B /* JITDriver.h in Headers */,
+ 0F21C27C14BE727600ADC64B /* ExecutionHarness.h in Headers */,
+ 0F21C27D14BE727A00ADC64B /* CodeSpecializationKind.h in Headers */,
+ 0F21C27F14BEAA8200ADC64B /* BytecodeConventions.h in Headers */,
0F7B294A14C3CD29007C3DB1 /* DFGCCallHelpers.h in Headers */,
0F7B294B14C3CD2F007C3DB1 /* DFGCapabilities.h in Headers */,
0F7B294C14C3CD43007C3DB1 /* DFGByteCodeCache.h in Headers */,
@@ -3128,12 +3388,43 @@
0F55F0F514D1063C00AC7649 /* AbstractPC.h in Headers */,
0F66E16B14DF3F1600B7B2E4 /* DFGNodeReferenceBlob.h in Headers */,
0F66E16C14DF3F1600B7B2E4 /* DFGNodeUse.h in Headers */,
+ 0F9FC8D114E612DA00D52AE0 /* DataLog.h in Headers */,
+ 0F9FC8C414E1B60000D52AE0 /* PolymorphicPutByIdList.h in Headers */,
+ 0F9FC8C514E1B60400D52AE0 /* PutKind.h in Headers */,
+ BCBE2CAE14E985AA000593AD /* GCAssertions.h in Headers */,
+ 0FFFC95614EF909C00C72532 /* DFGArithNodeFlagsInferencePhase.h in Headers */,
+ 0FFFC95814EF90A200C72532 /* DFGCFAPhase.h in Headers */,
+ 0FFFC95A14EF90A900C72532 /* DFGCSEPhase.h in Headers */,
+ 0FFFC95C14EF90AF00C72532 /* DFGPhase.h in Headers */,
+ 0FFFC95E14EF90B700C72532 /* DFGPredictionPropagationPhase.h in Headers */,
+ 0FFFC96014EF90BD00C72532 /* DFGVirtualRegisterAllocationPhase.h in Headers */,
+ 1497209114EB831500FEB1B7 /* PassWeak.h in Headers */,
+ 0FB5467714F59B5C002C2989 /* LazyOperandValueProfile.h in Headers */,
+ 0FB5467B14F5C7E1002C2989 /* MethodOfGettingAValueProfile.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
/* End PBXHeadersBuildPhase section */
/* Begin PBXNativeTarget section */
+ 0FF922C314F46B130041A24E /* JSCLLIntOffsetsExtractor */ = {
+ isa = PBXNativeTarget;
+ buildConfigurationList = 0FF922CA14F46B130041A24E /* Build configuration list for PBXNativeTarget "JSCLLIntOffsetsExtractor" */;
+ buildPhases = (
+ 0FF922C414F46B130041A24E /* Sources */,
+ 0FF922C614F46B130041A24E /* Frameworks */,
+ );
+ buildRules = (
+ );
+ dependencies = (
+ 0FF922D314F46B2F0041A24E /* PBXTargetDependency */,
+ );
+ name = JSCLLIntOffsetsExtractor;
+ productInstallPath = /usr/local/bin;
+ productName = jsc;
+ productReference = 0FF922CF14F46B130041A24E /* JSCLLIntOffsetsExtractor */;
+ productType = "com.apple.product-type.tool";
+ };
1412111F0A48793C00480255 /* minidom */ = {
isa = PBXNativeTarget;
buildConfigurationList = 141211390A48798400480255 /* Build configuration list for PBXNativeTarget "minidom" */;
@@ -3260,11 +3551,29 @@
14BD59BE0A3E8F9000BAF59C /* testapi */,
932F5BDA0822A1C700736975 /* jsc */,
651122F714046A4C002B101D /* testRegExp */,
+ 0F4680A914BA7FD900BFE272 /* LLInt Offsets */,
+ 0FF922C314F46B130041A24E /* JSCLLIntOffsetsExtractor */,
);
};
/* End PBXProject section */
/* Begin PBXShellScriptBuildPhase section */
+ 0F4680AA14BA7FD900BFE272 /* Generate Derived Sources */ = {
+ isa = PBXShellScriptBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ );
+ inputPaths = (
+ "$(SRCROOT)/llint/LowLevelAssembler.asm",
+ );
+ name = "Generate Derived Sources";
+ outputPaths = (
+ "$(BUILT_PRODUCTS_DIR)/LLIntOffsets/LLIntDesiredOffsets.h",
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ shellPath = /bin/sh;
+ shellScript = "mkdir -p \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/\"\n\n/usr/bin/env ruby \"${SRCROOT}/offlineasm/generate_offset_extractor.rb\" \"${SRCROOT}/llint/LowLevelInterpreter.asm\" \"${BUILT_PRODUCTS_DIR}/LLIntOffsets/LLIntDesiredOffsets.h\"\n";
+ };
3713F014142905240036387F /* Check For Inappropriate Objective-C Class Names */ = {
isa = PBXShellScriptBuildPhase;
buildActionMask = 2147483647;
@@ -3370,7 +3679,7 @@
);
runOnlyForDeploymentPostprocessing = 0;
shellPath = /bin/sh;
- shellScript = "mkdir -p \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore/docs\"\ncd \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"\n\n/bin/ln -sfh \"${SRCROOT}\" JavaScriptCore\nexport JavaScriptCore=\"JavaScriptCore\"\nexport BUILT_PRODUCTS_DIR=\"../..\"\n\nmake --no-builtin-rules -f \"JavaScriptCore/DerivedSources.make\" -j `/usr/sbin/sysctl -n hw.ncpu`\n";
+ shellScript = "mkdir -p \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore/docs\"\ncd \"${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore\"\n\n/bin/ln -sfh \"${SRCROOT}\" JavaScriptCore\nexport JavaScriptCore=\"JavaScriptCore\"\nexport BUILT_PRODUCTS_DIR=\"../..\"\n\nmake --no-builtin-rules -f \"JavaScriptCore/DerivedSources.make\" -j `/usr/sbin/sysctl -n hw.ncpu`\n\n/usr/bin/env ruby JavaScriptCore/offlineasm/asm.rb JavaScriptCore/llint/LowLevelInterpreter.asm ${BUILT_PRODUCTS_DIR}/JSCLLIntOffsetsExtractor LLIntAssembly.h\n";
};
9319586B09D9F91A00A56FD4 /* Check For Global Initializers */ = {
isa = PBXShellScriptBuildPhase;
@@ -3405,6 +3714,14 @@
/* End PBXShellScriptBuildPhase section */
/* Begin PBXSourcesBuildPhase section */
+ 0FF922C414F46B130041A24E /* Sources */ = {
+ isa = PBXSourcesBuildPhase;
+ buildActionMask = 2147483647;
+ files = (
+ 0FF922D414F46B410041A24E /* LLIntOffsetsExtractor.cpp in Sources */,
+ );
+ runOnlyForDeploymentPostprocessing = 0;
+ };
1412111D0A48793C00480255 /* Sources */ = {
isa = PBXSourcesBuildPhase;
buildActionMask = 2147483647;
@@ -3454,7 +3771,7 @@
14280863107EC11A0013E7B2 /* BooleanConstructor.cpp in Sources */,
14280864107EC11A0013E7B2 /* BooleanObject.cpp in Sources */,
14280865107EC11A0013E7B2 /* BooleanPrototype.cpp in Sources */,
- C240305514B404E60079EB64 /* BumpSpace.cpp in Sources */,
+ C240305514B404E60079EB64 /* CopiedSpace.cpp in Sources */,
A7A1F7AC0F252B3C00E184E2 /* ByteArray.cpp in Sources */,
148F21AA107EC53A0042EC2C /* BytecodeGenerator.cpp in Sources */,
C22C52F513FAF6EF00B7DC0D /* cached-powers.cc in Sources */,
@@ -3495,7 +3812,6 @@
0FC09792146A6F7300CF2442 /* DFGOSRExitCompiler.cpp in Sources */,
0FC09776146943B000CF2442 /* DFGOSRExitCompiler32_64.cpp in Sources */,
0FC0977214693AF900CF2442 /* DFGOSRExitCompiler64.cpp in Sources */,
- 0FD3C82514115D4000FD81CB /* DFGPropagator.cpp in Sources */,
86BB09C0138E381B0056702F /* DFGRepatch.cpp in Sources */,
86EC9DD21328DF82002B2AD7 /* DFGSpeculativeJIT.cpp in Sources */,
86880F1F14328BB900B08D42 /* DFGSpeculativeJIT32_64.cpp in Sources */,
@@ -3606,6 +3922,7 @@
14FFF98C12BFFF7500795BB8 /* PageAllocationAligned.cpp in Sources */,
14B3EF0612BC24DD00D29EFF /* PageBlock.cpp in Sources */,
7934BB7C1361979400CB99A1 /* ParallelJobsGeneric.cpp in Sources */,
+ 2684B2D314D4A9B20072C0B6 /* ParsedURL.cpp in Sources */,
148F21BC107EC54D0042EC2C /* Parser.cpp in Sources */,
93052C340FB792190048FDC3 /* ParserArena.cpp in Sources */,
0FD82E86141F3FF100179C94 /* PredictedType.cpp in Sources */,
@@ -3651,9 +3968,11 @@
A7386555118697B400540279 /* ThunkGenerators.cpp in Sources */,
14A42E3F0F4F60EE00599099 /* TimeoutChecker.cpp in Sources */,
0B330C270F38C62300692DE3 /* TypeTraits.cpp in Sources */,
+ 2684B2D814D4A9B20072C0B6 /* URLCharacterTypes.cpp in Sources */,
+ 2684B2DB14D4A9B20072C0B6 /* URLEscape.cpp in Sources */,
+ 2684B2DF14D4A9B20072C0B6 /* URLSegments.cpp in Sources */,
14469DEE107EC7E700650446 /* UString.cpp in Sources */,
E1EF79AA0CE97BA60088D500 /* UTF8.cpp in Sources */,
- 0FD82E9014207A5F00179C94 /* ValueProfile.cpp in Sources */,
0FC81516140511B500CFA603 /* VTableSpectrum.cpp in Sources */,
0FC8150B14043C0E00CFA603 /* WriteBarrierSupport.cpp in Sources */,
868BFA17117CF19900B908B1 /* WTFString.cpp in Sources */,
@@ -3662,7 +3981,14 @@
86704B8612DBA33700A9FE7B /* YarrJIT.cpp in Sources */,
86704B8912DBA33700A9FE7B /* YarrPattern.cpp in Sources */,
86704B4212DB8A8100A9FE7B /* YarrSyntaxChecker.cpp in Sources */,
+ 0F4680A414BA7F8D00BFE272 /* LLIntSlowPaths.cpp in Sources */,
+ 0F4680A814BA7FAB00BFE272 /* LLIntExceptions.cpp in Sources */,
0F46808314BA573100BFE272 /* JITExceptions.cpp in Sources */,
+ 0F4680CC14BBB17A00BFE272 /* LowLevelInterpreter.cpp in Sources */,
+ 0F4680D214BBD16500BFE272 /* LLIntData.cpp in Sources */,
+ 0F4680D414BBD24900BFE272 /* HostCallReturnValue.cpp in Sources */,
+ 0F0B839A14BCF45D00885B4F /* LLIntEntrypoints.cpp in Sources */,
+ 0F0B839C14BCF46300885B4F /* LLIntThunks.cpp in Sources */,
0F0B83B014BCF71600885B4F /* CallLinkInfo.cpp in Sources */,
0F0B83B414BCF86000885B4F /* MethodCallLinkInfo.cpp in Sources */,
F69E86C314C6E551002C2C62 /* NumberOfCores.cpp in Sources */,
@@ -3674,6 +4000,16 @@
86B5826714D2796C00A9C306 /* CodeProfile.cpp in Sources */,
86B5826914D2797000A9C306 /* CodeProfiling.cpp in Sources */,
C2B916C514DA040C00CBAC86 /* MarkedAllocator.cpp in Sources */,
+ 0F9FC8D014E612D800D52AE0 /* DataLog.cpp in Sources */,
+ 0F9FC8C314E1B5FE00D52AE0 /* PolymorphicPutByIdList.cpp in Sources */,
+ 0FFFC95514EF909A00C72532 /* DFGArithNodeFlagsInferencePhase.cpp in Sources */,
+ 0FFFC95714EF90A000C72532 /* DFGCFAPhase.cpp in Sources */,
+ 0FFFC95914EF90A600C72532 /* DFGCSEPhase.cpp in Sources */,
+ 0FFFC95B14EF90AD00C72532 /* DFGPhase.cpp in Sources */,
+ 0FFFC95D14EF90B300C72532 /* DFGPredictionPropagationPhase.cpp in Sources */,
+ 0FFFC95F14EF90BB00C72532 /* DFGVirtualRegisterAllocationPhase.cpp in Sources */,
+ 0FB5467914F5C46B002C2989 /* LazyOperandValueProfile.cpp in Sources */,
+ 0FB5467D14F5CFD6002C2989 /* MethodOfGettingAValueProfile.cpp in Sources */,
);
runOnlyForDeploymentPostprocessing = 0;
};
@@ -3688,6 +4024,16 @@
/* End PBXSourcesBuildPhase section */
/* Begin PBXTargetDependency section */
+ 0FF922D314F46B2F0041A24E /* PBXTargetDependency */ = {
+ isa = PBXTargetDependency;
+ target = 0F4680A914BA7FD900BFE272 /* LLInt Offsets */;
+ targetProxy = 0FF922D214F46B2F0041A24E /* PBXContainerItemProxy */;
+ };
+ 0FF922D614F46B600041A24E /* PBXTargetDependency */ = {
+ isa = PBXTargetDependency;
+ target = 0FF922C314F46B130041A24E /* JSCLLIntOffsetsExtractor */;
+ targetProxy = 0FF922D514F46B600041A24E /* PBXContainerItemProxy */;
+ };
141214BF0A49190E00480255 /* PBXTargetDependency */ = {
isa = PBXTargetDependency;
target = 1412111F0A48793C00480255 /* minidom */;
@@ -3721,6 +4067,70 @@
/* End PBXTargetDependency section */
/* Begin XCBuildConfiguration section */
+ 0F4680AD14BA7FD900BFE272 /* Debug */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = "Derived Sources copy";
+ };
+ name = Debug;
+ };
+ 0F4680AE14BA7FD900BFE272 /* Release */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = "Derived Sources copy";
+ };
+ name = Release;
+ };
+ 0F4680AF14BA7FD900BFE272 /* Profiling */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = "Derived Sources copy";
+ };
+ name = Profiling;
+ };
+ 0F4680B014BA7FD900BFE272 /* Production */ = {
+ isa = XCBuildConfiguration;
+ buildSettings = {
+ PRODUCT_NAME = "Derived Sources copy";
+ };
+ name = Production;
+ };
+ 0FF922CB14F46B130041A24E /* Debug */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */;
+ buildSettings = {
+ PRODUCT_NAME = JSCLLIntOffsetsExtractor;
+ USER_HEADER_SEARCH_PATHS = ". icu $(HEADER_SEARCH_PATHS) $(BUILT_PRODUCTS_DIR)/LLIntOffsets";
+ };
+ name = Debug;
+ };
+ 0FF922CC14F46B130041A24E /* Release */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */;
+ buildSettings = {
+ PRODUCT_NAME = JSCLLIntOffsetsExtractor;
+ USER_HEADER_SEARCH_PATHS = ". icu $(HEADER_SEARCH_PATHS) $(BUILT_PRODUCTS_DIR)/LLIntOffsets";
+ };
+ name = Release;
+ };
+ 0FF922CD14F46B130041A24E /* Profiling */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */;
+ buildSettings = {
+ PRODUCT_NAME = JSCLLIntOffsetsExtractor;
+ USER_HEADER_SEARCH_PATHS = ". icu $(HEADER_SEARCH_PATHS) $(BUILT_PRODUCTS_DIR)/LLIntOffsets";
+ };
+ name = Profiling;
+ };
+ 0FF922CE14F46B130041A24E /* Production */ = {
+ isa = XCBuildConfiguration;
+ baseConfigurationReference = 5DAFD6CB146B686300FBEFB4 /* JSC.xcconfig */;
+ buildSettings = {
+ PRODUCT_NAME = JSCLLIntOffsetsExtractor;
+ USER_HEADER_SEARCH_PATHS = ". icu $(HEADER_SEARCH_PATHS) $(BUILT_PRODUCTS_DIR)/LLIntOffsets";
+ };
+ name = Production;
+ };
1412113A0A48798400480255 /* Debug */ = {
isa = XCBuildConfiguration;
buildSettings = {
@@ -3962,6 +4372,28 @@
/* End XCBuildConfiguration section */
/* Begin XCConfigurationList section */
+ 0F4680AC14BA7FD900BFE272 /* Build configuration list for PBXAggregateTarget "LLInt Offsets" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 0F4680AD14BA7FD900BFE272 /* Debug */,
+ 0F4680AE14BA7FD900BFE272 /* Release */,
+ 0F4680AF14BA7FD900BFE272 /* Profiling */,
+ 0F4680B014BA7FD900BFE272 /* Production */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Production;
+ };
+ 0FF922CA14F46B130041A24E /* Build configuration list for PBXNativeTarget "JSCLLIntOffsetsExtractor" */ = {
+ isa = XCConfigurationList;
+ buildConfigurations = (
+ 0FF922CB14F46B130041A24E /* Debug */,
+ 0FF922CC14F46B130041A24E /* Release */,
+ 0FF922CD14F46B130041A24E /* Profiling */,
+ 0FF922CE14F46B130041A24E /* Production */,
+ );
+ defaultConfigurationIsVisible = 0;
+ defaultConfigurationName = Production;
+ };
141211390A48798400480255 /* Build configuration list for PBXNativeTarget "minidom" */ = {
isa = XCConfigurationList;
buildConfigurations = (
diff --git a/Source/JavaScriptCore/Target.pri b/Source/JavaScriptCore/Target.pri
index 155ee124d..8fa498c08 100644
--- a/Source/JavaScriptCore/Target.pri
+++ b/Source/JavaScriptCore/Target.pri
@@ -55,17 +55,19 @@ SOURCES += \
bytecode/DFGExitProfile.cpp \
bytecode/GetByIdStatus.cpp \
bytecode/JumpTable.cpp \
+ bytecode/LazyOperandValueProfile.cpp \
bytecode/MethodCallLinkInfo.cpp \
bytecode/MethodCallLinkStatus.cpp \
+ bytecode/MethodOfGettingAValueProfile.cpp \
bytecode/Opcode.cpp \
+ bytecode/PolymorphicPutByIdList.cpp \
bytecode/PredictedType.cpp \
bytecode/PutByIdStatus.cpp \
bytecode/SamplingTool.cpp \
bytecode/StructureStubInfo.cpp \
- bytecode/ValueProfile.cpp \
bytecompiler/BytecodeGenerator.cpp \
bytecompiler/NodesCodegen.cpp \
- heap/BumpSpace.cpp \
+ heap/CopiedSpace.cpp \
heap/ConservativeRoots.cpp \
heap/DFGCodeBlocks.cpp \
heap/HandleHeap.cpp \
@@ -83,9 +85,12 @@ SOURCES += \
debugger/Debugger.cpp \
dfg/DFGAbstractState.cpp \
dfg/DFGAssemblyHelpers.cpp \
+ dfg/DFGArithNodeFlagsInferencePhase.cpp \
dfg/DFGByteCodeParser.cpp \
dfg/DFGCapabilities.cpp \
+ dfg/DFGCFAPhase.cpp \
dfg/DFGCorrectableJumpPoint.cpp \
+ dfg/DFGCSEPhase.cpp \
dfg/DFGDriver.cpp \
dfg/DFGGraph.cpp \
dfg/DFGJITCompiler.cpp \
@@ -95,18 +100,21 @@ SOURCES += \
dfg/DFGOSRExitCompiler.cpp \
dfg/DFGOSRExitCompiler64.cpp \
dfg/DFGOSRExitCompiler32_64.cpp \
- dfg/DFGPropagator.cpp \
+ dfg/DFGPhase.cpp \
+ dfg/DFGPredictionPropagationPhase.cpp \
dfg/DFGRepatch.cpp \
dfg/DFGSpeculativeJIT.cpp \
dfg/DFGSpeculativeJIT32_64.cpp \
dfg/DFGSpeculativeJIT64.cpp \
dfg/DFGThunks.cpp \
+ dfg/DFGVirtualRegisterAllocationPhase.cpp \
interpreter/AbstractPC.cpp \
interpreter/CallFrame.cpp \
interpreter/Interpreter.cpp \
interpreter/RegisterFile.cpp \
jit/ExecutableAllocatorFixedVMPool.cpp \
jit/ExecutableAllocator.cpp \
+ jit/HostCallReturnValue.cpp \
jit/JITArithmetic.cpp \
jit/JITArithmetic32_64.cpp \
jit/JITCall.cpp \
diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h
index e078024b1..2c07d13fc 100644
--- a/Source/JavaScriptCore/assembler/LinkBuffer.h
+++ b/Source/JavaScriptCore/assembler/LinkBuffer.h
@@ -34,7 +34,8 @@
#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
-#include <MacroAssembler.h>
+#include "MacroAssembler.h"
+#include <wtf/DataLog.h>
#include <wtf/Noncopyable.h>
namespace JSC {
@@ -74,6 +75,9 @@ class LinkBuffer {
public:
LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID)
: m_size(0)
+#if ENABLE(BRANCH_COMPACTION)
+ , m_initialSize(0)
+#endif
, m_code(0)
, m_assembler(masm)
, m_globalData(&globalData)
@@ -225,13 +229,13 @@ private:
m_size = m_assembler->m_assembler.codeSize();
ASSERT(m_code);
#else
- size_t initialSize = m_assembler->m_assembler.codeSize();
- m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, initialSize, ownerUID);
+ m_initialSize = m_assembler->m_assembler.codeSize();
+ m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, m_initialSize, ownerUID);
if (!m_executableMemory)
return;
m_code = (uint8_t*)m_executableMemory->start();
ASSERT(m_code);
- ExecutableAllocator::makeWritable(m_code, initialSize);
+ ExecutableAllocator::makeWritable(m_code, m_initialSize);
uint8_t* inData = (uint8_t*)m_assembler->unlinkedCode();
uint8_t* outData = reinterpret_cast<uint8_t*>(m_code);
int readPtr = 0;
@@ -277,8 +281,8 @@ private:
jumpsToLink[i].setFrom(writePtr);
}
// Copy everything after the last jump
- memcpy(outData + writePtr, inData + readPtr, initialSize - readPtr);
- m_assembler->recordLinkOffsets(readPtr, initialSize, readPtr - writePtr);
+ memcpy(outData + writePtr, inData + readPtr, m_initialSize - readPtr);
+ m_assembler->recordLinkOffsets(readPtr, m_initialSize, readPtr - writePtr);
for (unsigned i = 0; i < jumpCount; ++i) {
uint8_t* location = outData + jumpsToLink[i].from();
@@ -287,11 +291,11 @@ private:
}
jumpsToLink.clear();
- m_size = writePtr + initialSize - readPtr;
+ m_size = writePtr + m_initialSize - readPtr;
m_executableMemory->shrink(m_size);
#if DUMP_LINK_STATISTICS
- dumpLinkStatistics(m_code, initialSize, m_size);
+ dumpLinkStatistics(m_code, m_initialSize, m_size);
#endif
#if DUMP_CODE
dumpCode(m_code, m_size);
@@ -306,7 +310,11 @@ private:
m_completed = true;
#endif
+#if ENABLE(BRANCH_COMPACTION)
+ ExecutableAllocator::makeExecutable(code(), m_initialSize);
+#else
ExecutableAllocator::makeExecutable(code(), m_size);
+#endif
ExecutableAllocator::cacheFlush(code(), m_size);
}
@@ -319,13 +327,13 @@ private:
linkCount++;
totalInitialSize += initialSize;
totalFinalSize += finalSize;
- printf("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
- code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
- static_cast<unsigned>(initialSize - finalSize),
- 100.0 * (initialSize - finalSize) / initialSize);
- printf("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
- linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
- 100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
+ dataLog("link %p: orig %u, compact %u (delta %u, %.2f%%)\n",
+ code, static_cast<unsigned>(initialSize), static_cast<unsigned>(finalSize),
+ static_cast<unsigned>(initialSize - finalSize),
+ 100.0 * (initialSize - finalSize) / initialSize);
+ dataLog("\ttotal %u: orig %u, compact %u (delta %u, %.2f%%)\n",
+ linkCount, totalInitialSize, totalFinalSize, totalInitialSize - totalFinalSize,
+ 100.0 * (totalInitialSize - totalFinalSize) / totalInitialSize);
}
#endif
@@ -342,23 +350,26 @@ private:
size_t tsize = size / sizeof(short);
char nameBuf[128];
snprintf(nameBuf, sizeof(nameBuf), "_jsc_jit%u", codeCount++);
- printf("\t.syntax unified\n"
- "\t.section\t__TEXT,__text,regular,pure_instructions\n"
- "\t.globl\t%s\n"
- "\t.align 2\n"
- "\t.code 16\n"
- "\t.thumb_func\t%s\n"
- "# %p\n"
- "%s:\n", nameBuf, nameBuf, code, nameBuf);
+ dataLog("\t.syntax unified\n"
+ "\t.section\t__TEXT,__text,regular,pure_instructions\n"
+ "\t.globl\t%s\n"
+ "\t.align 2\n"
+ "\t.code 16\n"
+ "\t.thumb_func\t%s\n"
+ "# %p\n"
+ "%s:\n", nameBuf, nameBuf, code, nameBuf);
for (unsigned i = 0; i < tsize; i++)
- printf("\t.short\t0x%x\n", tcode[i]);
+ dataLog("\t.short\t0x%x\n", tcode[i]);
#endif
}
#endif
RefPtr<ExecutableMemoryHandle> m_executableMemory;
size_t m_size;
+#if ENABLE(BRANCH_COMPACTION)
+ size_t m_initialSize;
+#endif
void* m_code;
MacroAssembler* m_assembler;
JSGlobalData* m_globalData;
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index cc11b5925..347cd0ea0 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -74,6 +74,67 @@ public:
using MacroAssemblerBase::branchTestPtr;
#endif
+ // Utilities used by the DFG JIT.
+#if ENABLE(DFG_JIT)
+ using MacroAssemblerBase::invert;
+
+ static DoubleCondition invert(DoubleCondition cond)
+ {
+ switch (cond) {
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ ASSERT_NOT_REACHED();
+ return DoubleEqual; // make compiler happy
+ }
+ }
+
+ static bool isInvertible(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ case NonZero:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ static ResultCondition invert(ResultCondition cond)
+ {
+ switch (cond) {
+ case Zero:
+ return NonZero;
+ case NonZero:
+ return Zero;
+ default:
+ ASSERT_NOT_REACHED();
+ return Zero; // Make compiler happy for release builds.
+ }
+ }
+#endif
// Platform agnostic onvenience functions,
// described in terms of other macro assembly methods.
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index 6cf2d081b..d883abf4f 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -796,14 +796,16 @@ public:
void storeDouble(FPRegisterID src, BaseIndex address)
{
move(address.index, addressTempRegister);
- mul32(TrustedImm32(address.scale), addressTempRegister, addressTempRegister);
+ mul32(TrustedImm32(1 << address.scale), addressTempRegister, addressTempRegister);
+ add32(address.base, addressTempRegister);
storeDouble(src, Address(addressTempRegister, address.offset));
}
void storeFloat(FPRegisterID src, BaseIndex address)
{
move(address.index, addressTempRegister);
- mul32(TrustedImm32(address.scale), addressTempRegister, addressTempRegister);
+ mul32(TrustedImm32(1 << address.scale), addressTempRegister, addressTempRegister);
+ add32(address.base, addressTempRegister);
storeDouble(src, Address(addressTempRegister, address.offset));
}
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index c59d1514a..3d7d84534 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -31,8 +31,6 @@
#include "RefPtr.h"
#include "UnusedParam.h"
-#if ENABLE(ASSEMBLER)
-
// ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid
// instruction address on the platform (for example, check any alignment requirements).
#if CPU(ARM_THUMB2)
@@ -273,6 +271,14 @@ public:
{
ASSERT_VALID_CODE_POINTER(m_value);
}
+
+ static MacroAssemblerCodePtr createFromExecutableAddress(void* value)
+ {
+ ASSERT_VALID_CODE_POINTER(value);
+ MacroAssemblerCodePtr result;
+ result.m_value = value;
+ return result;
+ }
explicit MacroAssemblerCodePtr(ReturnAddressPtr ra)
: m_value(ra.value())
@@ -360,6 +366,4 @@ private:
} // namespace JSC
-#endif // ENABLE(ASSEMBLER)
-
#endif // MacroAssemblerCodeRef_h
diff --git a/Source/JavaScriptCore/assembler/SH4Assembler.h b/Source/JavaScriptCore/assembler/SH4Assembler.h
index 0709588a5..280a5de85 100644
--- a/Source/JavaScriptCore/assembler/SH4Assembler.h
+++ b/Source/JavaScriptCore/assembler/SH4Assembler.h
@@ -35,6 +35,7 @@
#include <stdint.h>
#include <stdio.h>
#include <wtf/Assertions.h>
+#include <wtf/DataLog.h>
#include <wtf/Vector.h>
#ifndef NDEBUG
@@ -2025,7 +2026,7 @@ public:
static void vprintfStdoutInstr(const char* format, va_list args)
{
if (getenv("JavaScriptCoreDumpJIT"))
- vfprintf(stdout, format, args);
+ WTF::dataLogV(format, args);
}
static void printBlockInstr(uint16_t* first, unsigned int offset, int nbInstr)
diff --git a/Source/JavaScriptCore/bytecode/BytecodeConventions.h b/Source/JavaScriptCore/bytecode/BytecodeConventions.h
new file mode 100644
index 000000000..f33b060f8
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/BytecodeConventions.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BytecodeConventions_h
+#define BytecodeConventions_h
+
+// Register numbers used in bytecode operations have different meaning according to their ranges:
+// 0x80000000-0xFFFFFFFF Negative indices from the CallFrame pointer are entries in the call frame, see RegisterFile.h.
+// 0x00000000-0x3FFFFFFF Forwards indices from the CallFrame pointer are local vars and temporaries with the function's callframe.
+// 0x40000000-0x7FFFFFFF Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
+static const int FirstConstantRegisterIndex = 0x40000000;
+
+#endif // BytecodeConventions_h
+
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
index f3fd5bb27..7f9e9ee8a 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.cpp
@@ -27,17 +27,40 @@
#include "CallLinkStatus.h"
#include "CodeBlock.h"
+#include "LLIntCallLinkInfo.h"
namespace JSC {
+CallLinkStatus CallLinkStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex)
+{
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+#if ENABLE(LLINT)
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+ LLIntCallLinkInfo* callLinkInfo = instruction[4].u.callLinkInfo;
+
+ return CallLinkStatus(callLinkInfo->lastSeenCallee.get(), false);
+#else
+ return CallLinkStatus(0, false);
+#endif
+}
+
CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
- return CallLinkStatus(
- profiledBlock->getCallLinkInfo(bytecodeIndex).lastSeenCallee.get(),
- profiledBlock->couldTakeSlowCase(bytecodeIndex));
+ if (!profiledBlock->numberOfCallLinkInfos())
+ return computeFromLLInt(profiledBlock, bytecodeIndex);
+
+ if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
+ return CallLinkStatus(0, true);
+
+ JSFunction* target = profiledBlock->getCallLinkInfo(bytecodeIndex).lastSeenCallee.get();
+ if (!target)
+ return computeFromLLInt(profiledBlock, bytecodeIndex);
+
+ return CallLinkStatus(target, false);
#else
return CallLinkStatus(0, false);
#endif
diff --git a/Source/JavaScriptCore/bytecode/CallLinkStatus.h b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
index e1c741016..5f7201905 100644
--- a/Source/JavaScriptCore/bytecode/CallLinkStatus.h
+++ b/Source/JavaScriptCore/bytecode/CallLinkStatus.h
@@ -47,15 +47,17 @@ public:
static CallLinkStatus computeFor(CodeBlock*, unsigned bytecodeIndex);
- bool isSet() const { return !!m_callTarget; }
+ bool isSet() const { return !!m_callTarget || m_couldTakeSlowPath; }
- bool operator!() const { return !m_callTarget; }
+ bool operator!() const { return !isSet(); }
bool couldTakeSlowPath() const { return m_couldTakeSlowPath; }
JSFunction* callTarget() const { return m_callTarget; }
private:
+ static CallLinkStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex);
+
JSFunction* m_callTarget;
bool m_couldTakeSlowPath;
};
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
index 191fafd62..ab89ad965 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp
@@ -42,6 +42,7 @@
#include "JSFunction.h"
#include "JSStaticScopeObject.h"
#include "JSValue.h"
+#include "LowLevelInterpreter.h"
#include "RepatchBuffer.h"
#include "UStringConcatenate.h"
#include <stdio.h>
@@ -154,7 +155,7 @@ void CodeBlock::printUnaryOp(ExecState* exec, int location, Vector<Instruction>:
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] %s\t\t %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data());
+ dataLog("[%4d] %s\t\t %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data());
}
void CodeBlock::printBinaryOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it, const char* op) const
@@ -162,14 +163,14 @@ void CodeBlock::printBinaryOp(ExecState* exec, int location, Vector<Instruction>
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printf("[%4d] %s\t\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dataLog("[%4d] %s\t\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
}
void CodeBlock::printConditionalJump(ExecState* exec, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator& it, int location, const char* op) const
{
int r0 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] %s\t\t %s, %d(->%d)\n", location, op, registerName(exec, r0).data(), offset, location + offset);
+ dataLog("[%4d] %s\t\t %s, %d(->%d)\n", location, op, registerName(exec, r0).data(), offset, location + offset);
}
void CodeBlock::printGetByIdOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it, const char* op) const
@@ -177,7 +178,7 @@ void CodeBlock::printGetByIdOp(ExecState* exec, int location, Vector<Instruction
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- printf("[%4d] %s\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] %s\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
it += 5;
}
@@ -186,7 +187,7 @@ void CodeBlock::printCallOp(ExecState* exec, int location, Vector<Instruction>::
int func = (++it)->u.operand;
int argCount = (++it)->u.operand;
int registerOffset = (++it)->u.operand;
- printf("[%4d] %s\t %s, %d, %d\n", location, op, registerName(exec, func).data(), argCount, registerOffset);
+ dataLog("[%4d] %s\t %s, %d, %d\n", location, op, registerName(exec, func).data(), argCount, registerOffset);
it += 2;
}
@@ -195,7 +196,7 @@ void CodeBlock::printPutByIdOp(ExecState* exec, int location, Vector<Instruction
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] %s\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
+ dataLog("[%4d] %s\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
it += 5;
}
@@ -243,48 +244,48 @@ static unsigned instructionOffsetForNth(ExecState* exec, const Vector<Instructio
static void printGlobalResolveInfo(const GlobalResolveInfo& resolveInfo, unsigned instructionOffset)
{
- printf(" [%4d] %s: %s\n", instructionOffset, "resolve_global", pointerToSourceString(resolveInfo.structure).utf8().data());
+ dataLog(" [%4d] %s: %s\n", instructionOffset, "resolve_global", pointerToSourceString(resolveInfo.structure).utf8().data());
}
static void printStructureStubInfo(const StructureStubInfo& stubInfo, unsigned instructionOffset)
{
switch (stubInfo.accessType) {
case access_get_by_id_self:
- printf(" [%4d] %s: %s\n", instructionOffset, "get_by_id_self", pointerToSourceString(stubInfo.u.getByIdSelf.baseObjectStructure).utf8().data());
+ dataLog(" [%4d] %s: %s\n", instructionOffset, "get_by_id_self", pointerToSourceString(stubInfo.u.getByIdSelf.baseObjectStructure).utf8().data());
return;
case access_get_by_id_proto:
- printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(stubInfo.u.getByIdProto.baseObjectStructure).utf8().data(), pointerToSourceString(stubInfo.u.getByIdProto.prototypeStructure).utf8().data());
+ dataLog(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(stubInfo.u.getByIdProto.baseObjectStructure).utf8().data(), pointerToSourceString(stubInfo.u.getByIdProto.prototypeStructure).utf8().data());
return;
case access_get_by_id_chain:
- printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(stubInfo.u.getByIdChain.baseObjectStructure).utf8().data(), pointerToSourceString(stubInfo.u.getByIdChain.chain).utf8().data());
+ dataLog(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(stubInfo.u.getByIdChain.baseObjectStructure).utf8().data(), pointerToSourceString(stubInfo.u.getByIdChain.chain).utf8().data());
return;
case access_get_by_id_self_list:
- printf(" [%4d] %s: %s (%d)\n", instructionOffset, "op_get_by_id_self_list", pointerToSourceString(stubInfo.u.getByIdSelfList.structureList).utf8().data(), stubInfo.u.getByIdSelfList.listSize);
+ dataLog(" [%4d] %s: %s (%d)\n", instructionOffset, "op_get_by_id_self_list", pointerToSourceString(stubInfo.u.getByIdSelfList.structureList).utf8().data(), stubInfo.u.getByIdSelfList.listSize);
return;
case access_get_by_id_proto_list:
- printf(" [%4d] %s: %s (%d)\n", instructionOffset, "op_get_by_id_proto_list", pointerToSourceString(stubInfo.u.getByIdProtoList.structureList).utf8().data(), stubInfo.u.getByIdProtoList.listSize);
+ dataLog(" [%4d] %s: %s (%d)\n", instructionOffset, "op_get_by_id_proto_list", pointerToSourceString(stubInfo.u.getByIdProtoList.structureList).utf8().data(), stubInfo.u.getByIdProtoList.listSize);
return;
case access_put_by_id_transition_normal:
case access_put_by_id_transition_direct:
- printf(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(stubInfo.u.putByIdTransition.previousStructure).utf8().data(), pointerToSourceString(stubInfo.u.putByIdTransition.structure).utf8().data(), pointerToSourceString(stubInfo.u.putByIdTransition.chain).utf8().data());
+ dataLog(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(stubInfo.u.putByIdTransition.previousStructure).utf8().data(), pointerToSourceString(stubInfo.u.putByIdTransition.structure).utf8().data(), pointerToSourceString(stubInfo.u.putByIdTransition.chain).utf8().data());
return;
case access_put_by_id_replace:
- printf(" [%4d] %s: %s\n", instructionOffset, "put_by_id_replace", pointerToSourceString(stubInfo.u.putByIdReplace.baseObjectStructure).utf8().data());
+ dataLog(" [%4d] %s: %s\n", instructionOffset, "put_by_id_replace", pointerToSourceString(stubInfo.u.putByIdReplace.baseObjectStructure).utf8().data());
return;
case access_unset:
- printf(" [%4d] %s\n", instructionOffset, "unset");
+ dataLog(" [%4d] %s\n", instructionOffset, "unset");
return;
case access_get_by_id_generic:
- printf(" [%4d] %s\n", instructionOffset, "op_get_by_id_generic");
+ dataLog(" [%4d] %s\n", instructionOffset, "op_get_by_id_generic");
return;
case access_put_by_id_generic:
- printf(" [%4d] %s\n", instructionOffset, "op_put_by_id_generic");
+ dataLog(" [%4d] %s\n", instructionOffset, "op_put_by_id_generic");
return;
case access_get_array_length:
- printf(" [%4d] %s\n", instructionOffset, "op_get_array_length");
+ dataLog(" [%4d] %s\n", instructionOffset, "op_get_array_length");
return;
case access_get_string_length:
- printf(" [%4d] %s\n", instructionOffset, "op_get_string_length");
+ dataLog(" [%4d] %s\n", instructionOffset, "op_get_string_length");
return;
default:
ASSERT_NOT_REACHED();
@@ -295,7 +296,7 @@ static void printStructureStubInfo(const StructureStubInfo& stubInfo, unsigned i
void CodeBlock::printStructure(const char* name, const Instruction* vPC, int operand) const
{
unsigned instructionOffset = vPC - instructions().begin();
- printf(" [%4d] %s: %s\n", instructionOffset, name, pointerToSourceString(vPC[operand].u.structure).utf8().data());
+ dataLog(" [%4d] %s: %s\n", instructionOffset, name, pointerToSourceString(vPC[operand].u.structure).utf8().data());
}
void CodeBlock::printStructures(const Instruction* vPC) const
@@ -312,15 +313,15 @@ void CodeBlock::printStructures(const Instruction* vPC) const
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_proto)) {
- printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data());
+ dataLog(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_proto", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data());
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id_transition)) {
- printf(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data(), pointerToSourceString(vPC[6].u.structureChain).utf8().data());
+ dataLog(" [%4d] %s: %s, %s, %s\n", instructionOffset, "put_by_id_transition", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structure).utf8().data(), pointerToSourceString(vPC[6].u.structureChain).utf8().data());
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_get_by_id_chain)) {
- printf(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structureChain).utf8().data());
+ dataLog(" [%4d] %s: %s, %s\n", instructionOffset, "get_by_id_chain", pointerToSourceString(vPC[4].u.structure).utf8().data(), pointerToSourceString(vPC[5].u.structureChain).utf8().data());
return;
}
if (vPC[0].u.opcode == interpreter->getOpcode(op_put_by_id)) {
@@ -347,7 +348,7 @@ void CodeBlock::printStructures(const Instruction* vPC) const
void CodeBlock::dump(ExecState* exec) const
{
if (!m_instructions) {
- printf("No instructions available.\n");
+ dataLog("No instructions available.\n");
return;
}
@@ -356,10 +357,10 @@ void CodeBlock::dump(ExecState* exec) const
for (size_t i = 0; i < instructions().size(); i += opcodeLengths[exec->interpreter()->getOpcodeID(instructions()[i].u.opcode)])
++instructionCount;
- printf("%lu m_instructions; %lu bytes at %p; %d parameter(s); %d callee register(s)\n\n",
+ dataLog("%lu m_instructions; %lu bytes at %p; %d parameter(s); %d callee register(s); %d variable(s)\n\n",
static_cast<unsigned long>(instructionCount),
static_cast<unsigned long>(instructions().size() * sizeof(Instruction)),
- this, m_numParameters, m_numCalleeRegisters);
+ this, m_numParameters, m_numCalleeRegisters, m_numVars);
Vector<Instruction>::const_iterator begin = instructions().begin();
Vector<Instruction>::const_iterator end = instructions().end();
@@ -367,35 +368,35 @@ void CodeBlock::dump(ExecState* exec) const
dump(exec, begin, it);
if (!m_identifiers.isEmpty()) {
- printf("\nIdentifiers:\n");
+ dataLog("\nIdentifiers:\n");
size_t i = 0;
do {
- printf(" id%u = %s\n", static_cast<unsigned>(i), m_identifiers[i].ustring().utf8().data());
+ dataLog(" id%u = %s\n", static_cast<unsigned>(i), m_identifiers[i].ustring().utf8().data());
++i;
} while (i != m_identifiers.size());
}
if (!m_constantRegisters.isEmpty()) {
- printf("\nConstants:\n");
+ dataLog("\nConstants:\n");
size_t i = 0;
do {
- printf(" k%u = %s\n", static_cast<unsigned>(i), valueToSourceString(exec, m_constantRegisters[i].get()).utf8().data());
+ dataLog(" k%u = %s\n", static_cast<unsigned>(i), valueToSourceString(exec, m_constantRegisters[i].get()).utf8().data());
++i;
} while (i < m_constantRegisters.size());
}
if (m_rareData && !m_rareData->m_regexps.isEmpty()) {
- printf("\nm_regexps:\n");
+ dataLog("\nm_regexps:\n");
size_t i = 0;
do {
- printf(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_rareData->m_regexps[i].get()).utf8().data());
+ dataLog(" re%u = %s\n", static_cast<unsigned>(i), regexpToSourceString(m_rareData->m_regexps[i].get()).utf8().data());
++i;
} while (i < m_rareData->m_regexps.size());
}
#if ENABLE(JIT)
if (!m_globalResolveInfos.isEmpty() || !m_structureStubInfos.isEmpty())
- printf("\nStructures:\n");
+ dataLog("\nStructures:\n");
if (!m_globalResolveInfos.isEmpty()) {
size_t i = 0;
@@ -412,9 +413,9 @@ void CodeBlock::dump(ExecState* exec) const
} while (i < m_structureStubInfos.size());
}
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (!m_globalResolveInstructions.isEmpty() || !m_propertyAccessInstructions.isEmpty())
- printf("\nStructures:\n");
+ dataLog("\nStructures:\n");
if (!m_globalResolveInstructions.isEmpty()) {
size_t i = 0;
@@ -433,36 +434,36 @@ void CodeBlock::dump(ExecState* exec) const
#endif
if (m_rareData && !m_rareData->m_exceptionHandlers.isEmpty()) {
- printf("\nException Handlers:\n");
+ dataLog("\nException Handlers:\n");
unsigned i = 0;
do {
- printf("\t %d: { start: [%4d] end: [%4d] target: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target);
+ dataLog("\t %d: { start: [%4d] end: [%4d] target: [%4d] }\n", i + 1, m_rareData->m_exceptionHandlers[i].start, m_rareData->m_exceptionHandlers[i].end, m_rareData->m_exceptionHandlers[i].target);
++i;
} while (i < m_rareData->m_exceptionHandlers.size());
}
if (m_rareData && !m_rareData->m_immediateSwitchJumpTables.isEmpty()) {
- printf("Immediate Switch Jump Tables:\n");
+ dataLog("Immediate Switch Jump Tables:\n");
unsigned i = 0;
do {
- printf(" %1d = {\n", i);
+ dataLog(" %1d = {\n", i);
int entry = 0;
Vector<int32_t>::const_iterator end = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.end();
for (Vector<int32_t>::const_iterator iter = m_rareData->m_immediateSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
if (!*iter)
continue;
- printf("\t\t%4d => %04d\n", entry + m_rareData->m_immediateSwitchJumpTables[i].min, *iter);
+ dataLog("\t\t%4d => %04d\n", entry + m_rareData->m_immediateSwitchJumpTables[i].min, *iter);
}
- printf(" }\n");
+ dataLog(" }\n");
++i;
} while (i < m_rareData->m_immediateSwitchJumpTables.size());
}
if (m_rareData && !m_rareData->m_characterSwitchJumpTables.isEmpty()) {
- printf("\nCharacter Switch Jump Tables:\n");
+ dataLog("\nCharacter Switch Jump Tables:\n");
unsigned i = 0;
do {
- printf(" %1d = {\n", i);
+ dataLog(" %1d = {\n", i);
int entry = 0;
Vector<int32_t>::const_iterator end = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.end();
for (Vector<int32_t>::const_iterator iter = m_rareData->m_characterSwitchJumpTables[i].branchOffsets.begin(); iter != end; ++iter, ++entry) {
@@ -470,27 +471,27 @@ void CodeBlock::dump(ExecState* exec) const
continue;
ASSERT(!((i + m_rareData->m_characterSwitchJumpTables[i].min) & ~0xFFFF));
UChar ch = static_cast<UChar>(entry + m_rareData->m_characterSwitchJumpTables[i].min);
- printf("\t\t\"%s\" => %04d\n", UString(&ch, 1).utf8().data(), *iter);
+ dataLog("\t\t\"%s\" => %04d\n", UString(&ch, 1).utf8().data(), *iter);
}
- printf(" }\n");
+ dataLog(" }\n");
++i;
} while (i < m_rareData->m_characterSwitchJumpTables.size());
}
if (m_rareData && !m_rareData->m_stringSwitchJumpTables.isEmpty()) {
- printf("\nString Switch Jump Tables:\n");
+ dataLog("\nString Switch Jump Tables:\n");
unsigned i = 0;
do {
- printf(" %1d = {\n", i);
+ dataLog(" %1d = {\n", i);
StringJumpTable::StringOffsetTable::const_iterator end = m_rareData->m_stringSwitchJumpTables[i].offsetTable.end();
for (StringJumpTable::StringOffsetTable::const_iterator iter = m_rareData->m_stringSwitchJumpTables[i].offsetTable.begin(); iter != end; ++iter)
- printf("\t\t\"%s\" => %04d\n", UString(iter->first).utf8().data(), iter->second.branchOffset);
- printf(" }\n");
+ dataLog("\t\t\"%s\" => %04d\n", UString(iter->first).utf8().data(), iter->second.branchOffset);
+ dataLog(" }\n");
++i;
} while (i < m_rareData->m_stringSwitchJumpTables.size());
}
- printf("\n");
+ dataLog("\n");
}
void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator& it) const
@@ -498,73 +499,73 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int location = it - begin;
switch (exec->interpreter()->getOpcodeID(it->u.opcode)) {
case op_enter: {
- printf("[%4d] enter\n", location);
+ dataLog("[%4d] enter\n", location);
break;
}
case op_create_activation: {
int r0 = (++it)->u.operand;
- printf("[%4d] create_activation %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] create_activation %s\n", location, registerName(exec, r0).data());
break;
}
case op_create_arguments: {
int r0 = (++it)->u.operand;
- printf("[%4d] create_arguments\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] create_arguments\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_init_lazy_reg: {
int r0 = (++it)->u.operand;
- printf("[%4d] init_lazy_reg\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] init_lazy_reg\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_get_callee: {
int r0 = (++it)->u.operand;
- printf("[%4d] op_get_callee %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] op_get_callee %s\n", location, registerName(exec, r0).data());
break;
}
case op_create_this: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] create_this %s %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ dataLog("[%4d] create_this %s %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_convert_this: {
int r0 = (++it)->u.operand;
- printf("[%4d] convert_this\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] convert_this\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_new_object: {
int r0 = (++it)->u.operand;
- printf("[%4d] new_object\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] new_object\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_new_array: {
int dst = (++it)->u.operand;
int argv = (++it)->u.operand;
int argc = (++it)->u.operand;
- printf("[%4d] new_array\t %s, %s, %d\n", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc);
+ dataLog("[%4d] new_array\t %s, %s, %d\n", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc);
break;
}
case op_new_array_buffer: {
int dst = (++it)->u.operand;
int argv = (++it)->u.operand;
int argc = (++it)->u.operand;
- printf("[%4d] new_array_buffer %s, %d, %d\n", location, registerName(exec, dst).data(), argv, argc);
+ dataLog("[%4d] new_array_buffer %s, %d, %d\n", location, registerName(exec, dst).data(), argv, argc);
break;
}
case op_new_regexp: {
int r0 = (++it)->u.operand;
int re0 = (++it)->u.operand;
- printf("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data());
+ dataLog("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data());
if (r0 >=0 && r0 < (int)numberOfRegExps())
- printf("%s\n", regexpName(re0, regexp(re0)).data());
+ dataLog("%s\n", regexpName(re0, regexp(re0)).data());
else
- printf("bad_regexp(%d)\n", re0);
+ dataLog("bad_regexp(%d)\n", re0);
break;
}
case op_mov: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] mov\t\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ dataLog("[%4d] mov\t\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_not: {
@@ -613,12 +614,12 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
}
case op_pre_inc: {
int r0 = (++it)->u.operand;
- printf("[%4d] pre_inc\t\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] pre_inc\t\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_pre_dec: {
int r0 = (++it)->u.operand;
- printf("[%4d] pre_dec\t\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] pre_dec\t\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_post_inc: {
@@ -694,7 +695,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
}
case op_check_has_instance: {
int base = (++it)->u.operand;
- printf("[%4d] check_has_instance\t\t %s\n", location, registerName(exec, base).data());
+ dataLog("[%4d] check_has_instance\t\t %s\n", location, registerName(exec, base).data());
break;
}
case op_instanceof: {
@@ -702,7 +703,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
int r3 = (++it)->u.operand;
- printf("[%4d] instanceof\t\t %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data());
+ dataLog("[%4d] instanceof\t\t %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data());
break;
}
case op_typeof: {
@@ -740,7 +741,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
case op_resolve: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- printf("[%4d] resolve\t\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] resolve\t\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
it++;
break;
}
@@ -748,14 +749,14 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int skipLevels = (++it)->u.operand;
- printf("[%4d] resolve_skip\t %s, %s, %d\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), skipLevels);
+ dataLog("[%4d] resolve_skip\t %s, %s, %d\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), skipLevels);
it++;
break;
}
case op_resolve_global: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- printf("[%4d] resolve_global\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] resolve_global\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
it += 3;
break;
}
@@ -765,7 +766,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
JSValue scope = JSValue((++it)->u.jsCell.get());
++it;
int depth = (++it)->u.operand;
- printf("[%4d] resolve_global_dynamic\t %s, %s, %s, %d\n", location, registerName(exec, r0).data(), valueToSourceString(exec, scope).utf8().data(), idName(id0, m_identifiers[id0]).data(), depth);
+ dataLog("[%4d] resolve_global_dynamic\t %s, %s, %s, %d\n", location, registerName(exec, r0).data(), valueToSourceString(exec, scope).utf8().data(), idName(id0, m_identifiers[id0]).data(), depth);
++it;
break;
}
@@ -773,7 +774,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int index = (++it)->u.operand;
int skipLevels = (++it)->u.operand;
- printf("[%4d] get_scoped_var\t %s, %d, %d\n", location, registerName(exec, r0).data(), index, skipLevels);
+ dataLog("[%4d] get_scoped_var\t %s, %d, %d\n", location, registerName(exec, r0).data(), index, skipLevels);
it++;
break;
}
@@ -781,41 +782,41 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int index = (++it)->u.operand;
int skipLevels = (++it)->u.operand;
int r0 = (++it)->u.operand;
- printf("[%4d] put_scoped_var\t %d, %d, %s\n", location, index, skipLevels, registerName(exec, r0).data());
+ dataLog("[%4d] put_scoped_var\t %d, %d, %s\n", location, index, skipLevels, registerName(exec, r0).data());
break;
}
case op_get_global_var: {
int r0 = (++it)->u.operand;
int index = (++it)->u.operand;
- printf("[%4d] get_global_var\t %s, %d\n", location, registerName(exec, r0).data(), index);
+ dataLog("[%4d] get_global_var\t %s, %d\n", location, registerName(exec, r0).data(), index);
it++;
break;
}
case op_put_global_var: {
int index = (++it)->u.operand;
int r0 = (++it)->u.operand;
- printf("[%4d] put_global_var\t %d, %s\n", location, index, registerName(exec, r0).data());
+ dataLog("[%4d] put_global_var\t %d, %s\n", location, index, registerName(exec, r0).data());
break;
}
case op_resolve_base: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int isStrict = (++it)->u.operand;
- printf("[%4d] resolve_base%s\t %s, %s\n", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] resolve_base%s\t %s, %s\n", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
it++;
break;
}
case op_ensure_property_exists: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- printf("[%4d] ensure_property_exists\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] ensure_property_exists\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data());
break;
}
case op_resolve_with_base: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- printf("[%4d] resolve_with_base %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] resolve_with_base %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
it++;
break;
}
@@ -823,7 +824,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- printf("[%4d] resolve_with_this %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] resolve_with_this %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
it++;
break;
}
@@ -896,6 +897,14 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
printPutByIdOp(exec, location, it, "put_by_id_transition");
break;
}
+ case op_put_by_id_transition_direct: {
+ printPutByIdOp(exec, location, it, "put_by_id_transition_direct");
+ break;
+ }
+ case op_put_by_id_transition_normal: {
+ printPutByIdOp(exec, location, it, "put_by_id_transition_normal");
+ break;
+ }
case op_put_by_id_generic: {
printPutByIdOp(exec, location, it, "put_by_id_generic");
break;
@@ -905,25 +914,25 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printf("[%4d] put_getter_setter\t %s, %s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dataLog("[%4d] put_getter_setter\t %s, %s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
break;
}
case op_method_check: {
- printf("[%4d] method_check\n", location);
+ dataLog("[%4d] method_check\n", location);
break;
}
case op_del_by_id: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int id0 = (++it)->u.operand;
- printf("[%4d] del_by_id\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
+ dataLog("[%4d] del_by_id\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data());
break;
}
case op_get_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printf("[%4d] get_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dataLog("[%4d] get_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
it++;
break;
}
@@ -931,7 +940,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printf("[%4d] get_argument_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dataLog("[%4d] get_argument_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
++it;
break;
}
@@ -942,38 +951,38 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r3 = (++it)->u.operand;
int r4 = (++it)->u.operand;
int r5 = (++it)->u.operand;
- printf("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data());
+ dataLog("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data());
break;
}
case op_put_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printf("[%4d] put_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dataLog("[%4d] put_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
break;
}
case op_del_by_val: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int r2 = (++it)->u.operand;
- printf("[%4d] del_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
+ dataLog("[%4d] del_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data());
break;
}
case op_put_by_index: {
int r0 = (++it)->u.operand;
unsigned n0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] put_by_index\t %s, %u, %s\n", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data());
+ dataLog("[%4d] put_by_index\t %s, %u, %s\n", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data());
break;
}
case op_jmp: {
int offset = (++it)->u.operand;
- printf("[%4d] jmp\t\t %d(->%d)\n", location, offset, location + offset);
+ dataLog("[%4d] jmp\t\t %d(->%d)\n", location, offset, location + offset);
break;
}
case op_loop: {
int offset = (++it)->u.operand;
- printf("[%4d] loop\t\t %d(->%d)\n", location, offset, location + offset);
+ dataLog("[%4d] loop\t\t %d(->%d)\n", location, offset, location + offset);
break;
}
case op_jtrue: {
@@ -1004,129 +1013,129 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jneq_ptr\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jneq_ptr\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jless: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jlesseq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jgreater: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jgreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jgreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jgreatereq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jgreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jgreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jnless: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jnless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jnless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jnlesseq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jnlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jnlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jngreater: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jngreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jngreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_jngreatereq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jngreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] jngreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_loop_if_less: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] loop_if_less\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] loop_if_less\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_loop_if_lesseq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] loop_if_lesseq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] loop_if_lesseq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_loop_if_greater: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] loop_if_greater\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] loop_if_greater\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_loop_if_greatereq: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] loop_if_greatereq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
+ dataLog("[%4d] loop_if_greatereq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset);
break;
}
case op_loop_hint: {
- printf("[%4d] loop_hint\n", location);
+ dataLog("[%4d] loop_hint\n", location);
break;
}
case op_switch_imm: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
int scrutineeRegister = (++it)->u.operand;
- printf("[%4d] switch_imm\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
+ dataLog("[%4d] switch_imm\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
break;
}
case op_switch_char: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
int scrutineeRegister = (++it)->u.operand;
- printf("[%4d] switch_char\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
+ dataLog("[%4d] switch_char\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
break;
}
case op_switch_string: {
int tableIndex = (++it)->u.operand;
int defaultTarget = (++it)->u.operand;
int scrutineeRegister = (++it)->u.operand;
- printf("[%4d] switch_string\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
+ dataLog("[%4d] switch_string\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data());
break;
}
case op_new_func: {
int r0 = (++it)->u.operand;
int f0 = (++it)->u.operand;
int shouldCheck = (++it)->u.operand;
- printf("[%4d] new_func\t\t %s, f%d, %s\n", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
+ dataLog("[%4d] new_func\t\t %s, f%d, %s\n", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>");
break;
}
case op_new_func_exp: {
int r0 = (++it)->u.operand;
int f0 = (++it)->u.operand;
- printf("[%4d] new_func_exp\t %s, f%d\n", location, registerName(exec, r0).data(), f0);
+ dataLog("[%4d] new_func_exp\t %s, f%d\n", location, registerName(exec, r0).data(), f0);
break;
}
case op_call: {
@@ -1142,35 +1151,35 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int thisValue = (++it)->u.operand;
int arguments = (++it)->u.operand;
int firstFreeRegister = (++it)->u.operand;
- printf("[%4d] call_varargs\t %s, %s, %s, %d\n", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister);
+ dataLog("[%4d] call_varargs\t %s, %s, %s, %d\n", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister);
break;
}
case op_tear_off_activation: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] tear_off_activation\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ dataLog("[%4d] tear_off_activation\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_tear_off_arguments: {
int r0 = (++it)->u.operand;
- printf("[%4d] tear_off_arguments %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] tear_off_arguments %s\n", location, registerName(exec, r0).data());
break;
}
case op_ret: {
int r0 = (++it)->u.operand;
- printf("[%4d] ret\t\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] ret\t\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_call_put_result: {
int r0 = (++it)->u.operand;
- printf("[%4d] op_call_put_result\t\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] op_call_put_result\t\t %s\n", location, registerName(exec, r0).data());
it++;
break;
}
case op_ret_object_or_this: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] constructor_ret\t\t %s %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ dataLog("[%4d] constructor_ret\t\t %s %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_construct: {
@@ -1181,13 +1190,13 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
int count = (++it)->u.operand;
- printf("[%4d] strcat\t\t %s, %s, %d\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count);
+ dataLog("[%4d] strcat\t\t %s, %s, %d\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count);
break;
}
case op_to_primitive: {
int r0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] to_primitive\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
+ dataLog("[%4d] to_primitive\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data());
break;
}
case op_get_pnames: {
@@ -1196,7 +1205,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int r2 = it[3].u.operand;
int r3 = it[4].u.operand;
int offset = it[5].u.operand;
- printf("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset);
+ dataLog("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset);
it += OPCODE_LENGTH(op_get_pnames) - 1;
break;
}
@@ -1207,78 +1216,78 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator&
int size = it[4].u.operand;
int iter = it[5].u.operand;
int offset = it[6].u.operand;
- printf("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset);
+ dataLog("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset);
it += OPCODE_LENGTH(op_next_pname) - 1;
break;
}
case op_push_scope: {
int r0 = (++it)->u.operand;
- printf("[%4d] push_scope\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] push_scope\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_pop_scope: {
- printf("[%4d] pop_scope\n", location);
+ dataLog("[%4d] pop_scope\n", location);
break;
}
case op_push_new_scope: {
int r0 = (++it)->u.operand;
int id0 = (++it)->u.operand;
int r1 = (++it)->u.operand;
- printf("[%4d] push_new_scope \t%s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
+ dataLog("[%4d] push_new_scope \t%s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data());
break;
}
case op_jmp_scopes: {
int scopeDelta = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jmp_scopes\t^%d, %d(->%d)\n", location, scopeDelta, offset, location + offset);
+ dataLog("[%4d] jmp_scopes\t^%d, %d(->%d)\n", location, scopeDelta, offset, location + offset);
break;
}
case op_catch: {
int r0 = (++it)->u.operand;
- printf("[%4d] catch\t\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] catch\t\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_throw: {
int r0 = (++it)->u.operand;
- printf("[%4d] throw\t\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] throw\t\t %s\n", location, registerName(exec, r0).data());
break;
}
case op_throw_reference_error: {
int k0 = (++it)->u.operand;
- printf("[%4d] throw_reference_error\t %s\n", location, constantName(exec, k0, getConstant(k0)).data());
+ dataLog("[%4d] throw_reference_error\t %s\n", location, constantName(exec, k0, getConstant(k0)).data());
break;
}
case op_jsr: {
int retAddrDst = (++it)->u.operand;
int offset = (++it)->u.operand;
- printf("[%4d] jsr\t\t %s, %d(->%d)\n", location, registerName(exec, retAddrDst).data(), offset, location + offset);
+ dataLog("[%4d] jsr\t\t %s, %d(->%d)\n", location, registerName(exec, retAddrDst).data(), offset, location + offset);
break;
}
case op_sret: {
int retAddrSrc = (++it)->u.operand;
- printf("[%4d] sret\t\t %s\n", location, registerName(exec, retAddrSrc).data());
+ dataLog("[%4d] sret\t\t %s\n", location, registerName(exec, retAddrSrc).data());
break;
}
case op_debug: {
int debugHookID = (++it)->u.operand;
int firstLine = (++it)->u.operand;
int lastLine = (++it)->u.operand;
- printf("[%4d] debug\t\t %s, %d, %d\n", location, debugHookName(debugHookID), firstLine, lastLine);
+ dataLog("[%4d] debug\t\t %s, %d, %d\n", location, debugHookName(debugHookID), firstLine, lastLine);
break;
}
case op_profile_will_call: {
int function = (++it)->u.operand;
- printf("[%4d] profile_will_call %s\n", location, registerName(exec, function).data());
+ dataLog("[%4d] profile_will_call %s\n", location, registerName(exec, function).data());
break;
}
case op_profile_did_call: {
int function = (++it)->u.operand;
- printf("[%4d] profile_did_call\t %s\n", location, registerName(exec, function).data());
+ dataLog("[%4d] profile_did_call\t %s\n", location, registerName(exec, function).data());
break;
}
case op_end: {
int r0 = (++it)->u.operand;
- printf("[%4d] end\t\t %s\n", location, registerName(exec, r0).data());
+ dataLog("[%4d] end\t\t %s\n", location, registerName(exec, r0).data());
break;
}
}
@@ -1382,29 +1391,29 @@ void CodeBlock::dumpStatistics()
totalSize += symbolTableTotalSize;
totalSize += (liveCodeBlockSet.size() * sizeof(CodeBlock));
- printf("Number of live CodeBlocks: %d\n", liveCodeBlockSet.size());
- printf("Size of a single CodeBlock [sizeof(CodeBlock)]: %zu\n", sizeof(CodeBlock));
- printf("Size of all CodeBlocks: %zu\n", totalSize);
- printf("Average size of a CodeBlock: %zu\n", totalSize / liveCodeBlockSet.size());
+ dataLog("Number of live CodeBlocks: %d\n", liveCodeBlockSet.size());
+ dataLog("Size of a single CodeBlock [sizeof(CodeBlock)]: %zu\n", sizeof(CodeBlock));
+ dataLog("Size of all CodeBlocks: %zu\n", totalSize);
+ dataLog("Average size of a CodeBlock: %zu\n", totalSize / liveCodeBlockSet.size());
- printf("Number of FunctionCode CodeBlocks: %zu (%.3f%%)\n", isFunctionCode, static_cast<double>(isFunctionCode) * 100.0 / liveCodeBlockSet.size());
- printf("Number of GlobalCode CodeBlocks: %zu (%.3f%%)\n", isGlobalCode, static_cast<double>(isGlobalCode) * 100.0 / liveCodeBlockSet.size());
- printf("Number of EvalCode CodeBlocks: %zu (%.3f%%)\n", isEvalCode, static_cast<double>(isEvalCode) * 100.0 / liveCodeBlockSet.size());
+ dataLog("Number of FunctionCode CodeBlocks: %zu (%.3f%%)\n", isFunctionCode, static_cast<double>(isFunctionCode) * 100.0 / liveCodeBlockSet.size());
+ dataLog("Number of GlobalCode CodeBlocks: %zu (%.3f%%)\n", isGlobalCode, static_cast<double>(isGlobalCode) * 100.0 / liveCodeBlockSet.size());
+ dataLog("Number of EvalCode CodeBlocks: %zu (%.3f%%)\n", isEvalCode, static_cast<double>(isEvalCode) * 100.0 / liveCodeBlockSet.size());
- printf("Number of CodeBlocks with rare data: %zu (%.3f%%)\n", hasRareData, static_cast<double>(hasRareData) * 100.0 / liveCodeBlockSet.size());
+ dataLog("Number of CodeBlocks with rare data: %zu (%.3f%%)\n", hasRareData, static_cast<double>(hasRareData) * 100.0 / liveCodeBlockSet.size());
- #define PRINT_STATS(name) printf("Number of CodeBlocks with " #name ": %zu\n", name##IsNotEmpty); printf("Size of all " #name ": %zu\n", name##TotalSize);
+ #define PRINT_STATS(name) dataLog("Number of CodeBlocks with " #name ": %zu\n", name##IsNotEmpty); dataLog("Size of all " #name ": %zu\n", name##TotalSize);
FOR_EACH_MEMBER_VECTOR(PRINT_STATS)
FOR_EACH_MEMBER_VECTOR_RARE_DATA(PRINT_STATS)
#undef PRINT_STATS
- printf("Number of CodeBlocks with evalCodeCache: %zu\n", evalCodeCacheIsNotEmpty);
- printf("Number of CodeBlocks with symbolTable: %zu\n", symbolTableIsNotEmpty);
+ dataLog("Number of CodeBlocks with evalCodeCache: %zu\n", evalCodeCacheIsNotEmpty);
+ dataLog("Number of CodeBlocks with symbolTable: %zu\n", symbolTableIsNotEmpty);
- printf("Size of all symbolTables: %zu\n", symbolTableTotalSize);
+ dataLog("Size of all symbolTables: %zu\n", symbolTableTotalSize);
#else
- printf("Dumping CodeBlock statistics is not enabled.\n");
+ dataLog("Dumping CodeBlock statistics is not enabled.\n");
#endif
}
@@ -1453,6 +1462,7 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable* symTab)
{
setNumParameters(other.numParameters());
optimizeAfterWarmUp();
+ jitAfterWarmUp();
if (other.m_rareData) {
createRareDataIfNecessary();
@@ -1501,6 +1511,7 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlo
ASSERT(m_source);
optimizeAfterWarmUp();
+ jitAfterWarmUp();
#if DUMP_CODE_BLOCK_STATISTICS
liveCodeBlockSet.add(this);
@@ -1518,7 +1529,11 @@ CodeBlock::~CodeBlock()
#if ENABLE(VERBOSE_VALUE_PROFILE)
dumpValueProfiles();
#endif
-
+
+#if ENABLE(LLINT)
+ while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
+ m_incomingLLIntCalls.begin()->remove();
+#endif // ENABLE(LLINT)
#if ENABLE(JIT)
// We may be destroyed before any CodeBlocks that refer to us are destroyed.
// Consider that two CodeBlocks become unreachable at the same time. There
@@ -1730,13 +1745,74 @@ void CodeBlock::finalizeUnconditionally()
#else
static const bool verboseUnlinking = false;
#endif
-#endif
+#endif // ENABLE(JIT)
+#if ENABLE(LLINT)
+ Interpreter* interpreter = m_globalData->interpreter;
+ // interpreter->classicEnabled() returns true if the old C++ interpreter is enabled. If that's enabled
+ // then we're not using LLInt.
+ if (!interpreter->classicEnabled()) {
+ for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[m_propertyAccessInstructions[i]];
+ switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) {
+ case op_get_by_id:
+ case op_put_by_id:
+ if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get()))
+ break;
+ if (verboseUnlinking)
+ dataLog("Clearing LLInt property access with structure %p.\n", curInstruction[4].u.structure.get());
+ curInstruction[4].u.structure.clear();
+ curInstruction[5].u.operand = 0;
+ break;
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ if (Heap::isMarked(curInstruction[4].u.structure.get())
+ && Heap::isMarked(curInstruction[6].u.structure.get())
+ && Heap::isMarked(curInstruction[7].u.structureChain.get()))
+ break;
+ if (verboseUnlinking) {
+ dataLog("Clearing LLInt put transition with structures %p -> %p, chain %p.\n",
+ curInstruction[4].u.structure.get(),
+ curInstruction[6].u.structure.get(),
+ curInstruction[7].u.structureChain.get());
+ }
+ curInstruction[4].u.structure.clear();
+ curInstruction[6].u.structure.clear();
+ curInstruction[7].u.structureChain.clear();
+ curInstruction[0].u.opcode = interpreter->getOpcode(op_put_by_id);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+ for (size_t size = m_globalResolveInstructions.size(), i = 0; i < size; ++i) {
+ Instruction* curInstruction = &instructions()[m_globalResolveInstructions[i]];
+ ASSERT(interpreter->getOpcodeID(curInstruction[0].u.opcode) == op_resolve_global
+ || interpreter->getOpcodeID(curInstruction[0].u.opcode) == op_resolve_global_dynamic);
+ if (!curInstruction[3].u.structure || Heap::isMarked(curInstruction[3].u.structure.get()))
+ continue;
+ if (verboseUnlinking)
+ dataLog("Clearing LLInt global resolve cache with structure %p.\n", curInstruction[3].u.structure.get());
+ curInstruction[3].u.structure.clear();
+ curInstruction[4].u.operand = 0;
+ }
+ for (unsigned i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked() && !Heap::isMarked(m_llintCallLinkInfos[i].callee.get())) {
+ if (verboseUnlinking)
+ dataLog("Clearing LLInt call from %p.\n", this);
+ m_llintCallLinkInfos[i].unlink();
+ }
+ if (!!m_llintCallLinkInfos[i].lastSeenCallee && !Heap::isMarked(m_llintCallLinkInfos[i].lastSeenCallee.get()))
+ m_llintCallLinkInfos[i].lastSeenCallee.clear();
+ }
+ }
+#endif // ENABLE(LLINT)
+
#if ENABLE(DFG_JIT)
// Check if we're not live. If we are, then jettison.
if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_dfgData->livenessHasBeenProved)) {
if (verboseUnlinking)
- printf("Code block %p has dead weak references, jettisoning during GC.\n", this);
+ dataLog("Code block %p has dead weak references, jettisoning during GC.\n", this);
// Make sure that the baseline JIT knows that it should re-warm-up before
// optimizing.
@@ -1754,7 +1830,7 @@ void CodeBlock::finalizeUnconditionally()
for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
if (callLinkInfo(i).isLinked() && !Heap::isMarked(callLinkInfo(i).callee.get())) {
if (verboseUnlinking)
- printf("Clearing call from %p.\n", this);
+ dataLog("Clearing call from %p to %p.\n", this, callLinkInfo(i).callee.get());
callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
}
if (!!callLinkInfo(i).lastSeenCallee
@@ -1764,7 +1840,7 @@ void CodeBlock::finalizeUnconditionally()
for (size_t size = m_globalResolveInfos.size(), i = 0; i < size; ++i) {
if (m_globalResolveInfos[i].structure && !Heap::isMarked(m_globalResolveInfos[i].structure.get())) {
if (verboseUnlinking)
- printf("Clearing resolve info in %p.\n", this);
+ dataLog("Clearing resolve info in %p.\n", this);
m_globalResolveInfos[i].structure.clear();
}
}
@@ -1778,7 +1854,7 @@ void CodeBlock::finalizeUnconditionally()
continue;
if (verboseUnlinking)
- printf("Clearing structure cache (kind %d) in %p.\n", stubInfo.accessType, this);
+ dataLog("Clearing structure cache (kind %d) in %p.\n", stubInfo.accessType, this);
if (isGetByIdAccess(accessType)) {
if (getJITCode().jitType() == JITCode::DFGJIT)
@@ -1808,7 +1884,7 @@ void CodeBlock::finalizeUnconditionally()
|| !Heap::isMarked(m_methodCallLinkInfos[i].cachedFunction.get())
|| !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototype.get())) {
if (verboseUnlinking)
- printf("Clearing method call in %p.\n", this);
+ dataLog("Clearing method call in %p.\n", this);
m_methodCallLinkInfos[i].reset(repatchBuffer, getJITType());
StructureStubInfo& stubInfo = getStubInfo(m_methodCallLinkInfos[i].bytecodeIndex);
@@ -1851,11 +1927,13 @@ void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
visitor.append(&m_functionExprs[i]);
for (size_t i = 0; i < m_functionDecls.size(); ++i)
visitor.append(&m_functionDecls[i]);
-#if ENABLE(INTERPRETER)
- for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i)
- visitStructures(visitor, &instructions()[m_propertyAccessInstructions[i]]);
- for (size_t size = m_globalResolveInstructions.size(), i = 0; i < size; ++i)
- visitStructures(visitor, &instructions()[m_globalResolveInstructions[i]]);
+#if ENABLE(CLASSIC_INTERPRETER)
+ if (m_globalData->interpreter->classicEnabled()) {
+ for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i)
+ visitStructures(visitor, &instructions()[m_propertyAccessInstructions[i]]);
+ for (size_t size = m_globalResolveInstructions.size(), i = 0; i < size; ++i)
+ visitStructures(visitor, &instructions()[m_globalResolveInstructions[i]]);
+ }
#endif
#if ENABLE(DFG_JIT)
@@ -1863,10 +1941,13 @@ void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
// Make sure that executables that we have inlined don't die.
// FIXME: If they would have otherwise died, we should probably trigger recompilation.
for (size_t i = 0; i < inlineCallFrames().size(); ++i) {
- visitor.append(&inlineCallFrames()[i].executable);
- visitor.append(&inlineCallFrames()[i].callee);
+ InlineCallFrame& inlineCallFrame = inlineCallFrames()[i];
+ visitor.append(&inlineCallFrame.executable);
+ visitor.append(&inlineCallFrame.callee);
}
}
+
+ m_lazyOperandValueProfiles.computeUpdatedPredictions();
#endif
#if ENABLE(VALUE_PROFILER)
@@ -1976,7 +2057,7 @@ void CodeBlock::expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& d
return;
}
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
bool CodeBlock::hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset)
{
if (m_globalResolveInstructions.isEmpty())
@@ -2023,7 +2104,7 @@ void CodeBlock::shrinkToFit()
{
instructions().shrinkToFit();
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
m_propertyAccessInstructions.shrinkToFit();
m_globalResolveInstructions.shrinkToFit();
#endif
@@ -2068,12 +2149,18 @@ unsigned CodeBlock::addOrFindConstant(JSValue v)
}
return addConstant(v);
}
-
+
#if ENABLE(JIT)
void CodeBlock::unlinkCalls()
{
if (!!m_alternative)
m_alternative->unlinkCalls();
+#if ENABLE(LLINT)
+ for (size_t i = 0; i < m_llintCallLinkInfos.size(); ++i) {
+ if (m_llintCallLinkInfos[i].isLinked())
+ m_llintCallLinkInfos[i].unlink();
+ }
+#endif
if (!(m_callLinkInfos.size() || m_methodCallLinkInfos.size()))
return;
if (!m_globalData->canUseJIT())
@@ -2088,10 +2175,62 @@ void CodeBlock::unlinkCalls()
void CodeBlock::unlinkIncomingCalls()
{
+#if ENABLE(LLINT)
+ while (m_incomingLLIntCalls.begin() != m_incomingLLIntCalls.end())
+ m_incomingLLIntCalls.begin()->unlink();
+#endif
+ if (m_incomingCalls.isEmpty())
+ return;
RepatchBuffer repatchBuffer(this);
while (m_incomingCalls.begin() != m_incomingCalls.end())
m_incomingCalls.begin()->unlink(*m_globalData, repatchBuffer);
}
+
+unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
+{
+#if ENABLE(LLINT)
+ if (returnAddress.value() >= bitwise_cast<void*>(&llint_begin)
+ && returnAddress.value() <= bitwise_cast<void*>(&llint_end)) {
+ ASSERT(exec->codeBlock());
+ ASSERT(exec->codeBlock() == this);
+ ASSERT(JITCode::isBaselineCode(getJITType()));
+ Instruction* instruction = exec->currentVPC();
+ ASSERT(instruction);
+
+ // The LLInt stores the PC after the call instruction rather than the PC of
+ // the call instruction. This requires some correcting. We rely on the fact
+ // that the preceding instruction must be one of the call instructions, so
+ // either it's a call_varargs or it's a call, construct, or eval.
+ ASSERT(OPCODE_LENGTH(op_call_varargs) <= OPCODE_LENGTH(op_call));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+ if (instruction[-OPCODE_LENGTH(op_call_varargs)].u.pointer == bitwise_cast<void*>(llint_op_call_varargs)) {
+ // We know that the preceding instruction must be op_call_varargs because there is no way that
+ // the pointer to the call_varargs could be an operand to the call.
+ instruction -= OPCODE_LENGTH(op_call_varargs);
+ ASSERT(instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_call)
+ && instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_construct)
+ && instruction[-OPCODE_LENGTH(op_call)].u.pointer != bitwise_cast<void*>(llint_op_call_eval));
+ } else {
+ // Must be that the last instruction was some op_call.
+ ASSERT(instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_call)
+ || instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_construct)
+ || instruction[-OPCODE_LENGTH(op_call)].u.pointer == bitwise_cast<void*>(llint_op_call_eval));
+ instruction -= OPCODE_LENGTH(op_call);
+ }
+
+ return bytecodeOffset(instruction);
+ }
+#else
+ UNUSED_PARAM(exec);
+#endif
+ if (!m_rareData)
+ return 1;
+ Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
+ if (!callIndices.size())
+ return 1;
+ return binarySearch<CallReturnOffsetToBytecodeOffset, unsigned, getCallReturnOffset>(callIndices.begin(), callIndices.size(), getJITCode().offsetOf(returnAddress.value()))->bytecodeOffset;
+}
#endif
void CodeBlock::clearEvalCache()
@@ -2187,31 +2326,52 @@ bool FunctionCodeBlock::canCompileWithDFGInternal()
void ProgramCodeBlock::jettison()
{
- ASSERT(getJITType() != JITCode::BaselineJIT);
+ ASSERT(JITCode::isOptimizingJIT(getJITType()));
ASSERT(this == replacement());
static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
}
void EvalCodeBlock::jettison()
{
- ASSERT(getJITType() != JITCode::BaselineJIT);
+ ASSERT(JITCode::isOptimizingJIT(getJITType()));
ASSERT(this == replacement());
static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
}
void FunctionCodeBlock::jettison()
{
- ASSERT(getJITType() != JITCode::BaselineJIT);
+ ASSERT(JITCode::isOptimizingJIT(getJITType()));
ASSERT(this == replacement());
static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*globalData(), m_isConstructor ? CodeForConstruct : CodeForCall);
}
+
+void ProgramCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+{
+ ASSERT(getJITType() == JITCode::InterpreterThunk);
+ ASSERT(this == replacement());
+ return static_cast<ProgramExecutable*>(ownerExecutable())->jitCompile(globalData);
+}
+
+void EvalCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+{
+ ASSERT(getJITType() == JITCode::InterpreterThunk);
+ ASSERT(this == replacement());
+ return static_cast<EvalExecutable*>(ownerExecutable())->jitCompile(globalData);
+}
+
+void FunctionCodeBlock::jitCompileImpl(JSGlobalData& globalData)
+{
+ ASSERT(getJITType() == JITCode::InterpreterThunk);
+ ASSERT(this == replacement());
+ return static_cast<FunctionExecutable*>(ownerExecutable())->jitCompileFor(globalData, m_isConstructor ? CodeForConstruct : CodeForCall);
+}
#endif
#if ENABLE(VALUE_PROFILER)
bool CodeBlock::shouldOptimizeNow()
{
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Considering optimizing %p...\n", this);
+ dataLog("Considering optimizing %p...\n", this);
#endif
#if ENABLE(VERBOSE_VALUE_PROFILE)
@@ -2239,7 +2399,7 @@ bool CodeBlock::shouldOptimizeNow()
}
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Profile hotness: %lf, %lf\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles());
+ dataLog("Profile hotness: %lf, %lf\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles());
#endif
if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate)
@@ -2270,7 +2430,7 @@ void CodeBlock::tallyFrequentExitSites()
continue;
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "OSR exit #%u (bc#%u, @%u, %s) for code block %p occurred frequently; counting as frequent exit site.\n", i, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, DFG::exitKindToString(exit.m_kind), this);
+ dataLog("OSR exit #%u (bc#%u, @%u, %s) for code block %p occurred frequently; counting as frequent exit site.\n", i, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, DFG::exitKindToString(exit.m_kind), this);
#endif
}
}
@@ -2279,30 +2439,30 @@ void CodeBlock::tallyFrequentExitSites()
#if ENABLE(VERBOSE_VALUE_PROFILE)
void CodeBlock::dumpValueProfiles()
{
- fprintf(stderr, "ValueProfile for %p:\n", this);
+ dataLog("ValueProfile for %p:\n", this);
for (unsigned i = 0; i < totalNumberOfValueProfiles(); ++i) {
ValueProfile* profile = getFromAllValueProfiles(i);
if (profile->m_bytecodeOffset < 0) {
ASSERT(profile->m_bytecodeOffset == -1);
- fprintf(stderr, " arg = %u: ", i);
+ dataLog(" arg = %u: ", i);
} else
- fprintf(stderr, " bc = %d: ", profile->m_bytecodeOffset);
+ dataLog(" bc = %d: ", profile->m_bytecodeOffset);
if (!profile->numberOfSamples() && profile->m_prediction == PredictNone) {
- fprintf(stderr, "<empty>\n");
+ dataLog("<empty>\n");
continue;
}
- profile->dump(stderr);
- fprintf(stderr, "\n");
+ profile->dump(WTF::dataFile());
+ dataLog("\n");
}
- fprintf(stderr, "RareCaseProfile for %p:\n", this);
+ dataLog("RareCaseProfile for %p:\n", this);
for (unsigned i = 0; i < numberOfRareCaseProfiles(); ++i) {
RareCaseProfile* profile = rareCaseProfile(i);
- fprintf(stderr, " bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ dataLog(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
}
- fprintf(stderr, "SpecialFastCaseProfile for %p:\n", this);
+ dataLog("SpecialFastCaseProfile for %p:\n", this);
for (unsigned i = 0; i < numberOfSpecialFastCaseProfiles(); ++i) {
RareCaseProfile* profile = specialFastCaseProfile(i);
- fprintf(stderr, " bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
+ dataLog(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter);
}
}
#endif
diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h
index bc2feeb2a..195aa62ca 100644
--- a/Source/JavaScriptCore/bytecode/CodeBlock.h
+++ b/Source/JavaScriptCore/bytecode/CodeBlock.h
@@ -30,6 +30,7 @@
#ifndef CodeBlock_h
#define CodeBlock_h
+#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeOrigin.h"
@@ -50,6 +51,8 @@
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
+#include "LLIntCallLinkInfo.h"
+#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "Nodes.h"
#include "PredictionTracker.h"
@@ -65,16 +68,11 @@
#include <wtf/Vector.h>
#include "StructureStubInfo.h"
-// Register numbers used in bytecode operations have different meaning according to their ranges:
-// 0x80000000-0xFFFFFFFF Negative indices from the CallFrame pointer are entries in the call frame, see RegisterFile.h.
-// 0x00000000-0x3FFFFFFF Forwards indices from the CallFrame pointer are local vars and temporaries with the function's callframe.
-// 0x40000000-0x7FFFFFFF Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
-static const int FirstConstantRegisterIndex = 0x40000000;
-
namespace JSC {
- class ExecState;
class DFGCodeBlocks;
+ class ExecState;
+ class LLIntOffsetsExtractor;
inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
@@ -83,6 +81,7 @@ namespace JSC {
class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
WTF_MAKE_FAST_ALLOCATED;
friend class JIT;
+ friend class LLIntOffsetsExtractor;
public:
enum CopyParsedBlockTag { CopyParsedBlock };
protected:
@@ -123,7 +122,7 @@ namespace JSC {
while (result->alternative())
result = result->alternative();
ASSERT(result);
- ASSERT(result->getJITType() == JITCode::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(result->getJITType()));
return result;
}
#endif
@@ -192,15 +191,7 @@ namespace JSC {
return *(binarySearch<MethodCallLinkInfo, unsigned, getMethodCallLinkInfoBytecodeIndex>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), bytecodeIndex));
}
- unsigned bytecodeOffset(ReturnAddressPtr returnAddress)
- {
- if (!m_rareData)
- return 1;
- Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
- if (!callIndices.size())
- return 1;
- return binarySearch<CallReturnOffsetToBytecodeOffset, unsigned, getCallReturnOffset>(callIndices.begin(), callIndices.size(), getJITCode().offsetOf(returnAddress.value()))->bytecodeOffset;
- }
+ unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
unsigned bytecodeOffsetForCallAtIndex(unsigned index)
{
@@ -221,11 +212,17 @@ namespace JSC {
{
m_incomingCalls.push(incoming);
}
+#if ENABLE(LLINT)
+ void linkIncomingCall(LLIntCallLinkInfo* incoming)
+ {
+ m_incomingLLIntCalls.push(incoming);
+ }
+#endif // ENABLE(LLINT)
void unlinkIncomingCalls();
-#endif
+#endif // ENABLE(JIT)
-#if ENABLE(DFG_JIT)
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
{
m_jitCodeMap = jitCodeMap;
@@ -234,7 +231,9 @@ namespace JSC {
{
return m_jitCodeMap.get();
}
+#endif
+#if ENABLE(DFG_JIT)
void createDFGDataIfNecessary()
{
if (!!m_dfgData)
@@ -333,12 +332,11 @@ namespace JSC {
}
#endif
-#if ENABLE(INTERPRETER)
unsigned bytecodeOffset(Instruction* returnAddress)
{
+ ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
return static_cast<Instruction*>(returnAddress) - instructions().begin();
}
-#endif
void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
bool isNumericCompareFunction() { return m_isNumericCompareFunction; }
@@ -376,6 +374,20 @@ namespace JSC {
ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
virtual void jettison() = 0;
+ bool jitCompile(JSGlobalData& globalData)
+ {
+ if (getJITType() != JITCode::InterpreterThunk) {
+ ASSERT(getJITType() == JITCode::BaselineJIT);
+ return false;
+ }
+#if ENABLE(JIT)
+ jitCompileImpl(globalData);
+ return true;
+#else
+ UNUSED_PARAM(globalData);
+ return false;
+#endif
+ }
virtual CodeBlock* replacement() = 0;
enum CompileWithDFGState {
@@ -395,13 +407,13 @@ namespace JSC {
bool hasOptimizedReplacement()
{
- ASSERT(getJITType() == JITCode::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(getJITType()));
bool result = replacement()->getJITType() > getJITType();
#if !ASSERT_DISABLED
if (result)
ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
else {
- ASSERT(replacement()->getJITType() == JITCode::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
ASSERT(replacement() == this);
}
#endif
@@ -460,18 +472,21 @@ namespace JSC {
void clearEvalCache();
-#if ENABLE(INTERPRETER)
void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
{
- if (!m_globalData->canUseJIT())
- m_propertyAccessInstructions.append(propertyAccessInstruction);
+ m_propertyAccessInstructions.append(propertyAccessInstruction);
}
void addGlobalResolveInstruction(unsigned globalResolveInstruction)
{
- if (!m_globalData->canUseJIT())
- m_globalResolveInstructions.append(globalResolveInstruction);
+ m_globalResolveInstructions.append(globalResolveInstruction);
}
bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
+#if ENABLE(LLINT)
+ LLIntCallLinkInfo* addLLIntCallLinkInfo()
+ {
+ m_llintCallLinkInfos.append(LLIntCallLinkInfo());
+ return &m_llintCallLinkInfos.last();
+ }
#endif
#if ENABLE(JIT)
void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
@@ -480,8 +495,7 @@ namespace JSC {
void addGlobalResolveInfo(unsigned globalResolveInstruction)
{
- if (m_globalData->canUseJIT())
- m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
+ m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
}
GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);
@@ -492,6 +506,7 @@ namespace JSC {
void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
+ size_t numberOfMethodCallLinkInfos() { return m_methodCallLinkInfos.size(); }
#endif
#if ENABLE(VALUE_PROFILER)
@@ -533,6 +548,10 @@ namespace JSC {
bytecodeOffset].u.opcode)) - 1].u.profile == result);
return result;
}
+ PredictedType valueProfilePredictionForBytecodeOffset(int bytecodeOffset)
+ {
+ return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction();
+ }
unsigned totalNumberOfValueProfiles()
{
@@ -559,12 +578,16 @@ namespace JSC {
bool likelyToTakeSlowCase(int bytecodeOffset)
{
+ if (!numberOfRareCaseProfiles())
+ return false;
unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
}
bool couldTakeSlowCase(int bytecodeOffset)
{
+ if (!numberOfRareCaseProfiles())
+ return false;
unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold;
}
@@ -583,12 +606,16 @@ namespace JSC {
bool likelyToTakeSpecialFastCase(int bytecodeOffset)
{
+ if (!numberOfRareCaseProfiles())
+ return false;
unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold;
}
bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
{
+ if (!numberOfRareCaseProfiles())
+ return false;
unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned value = slowCaseCount - specialFastCaseCount;
@@ -597,6 +624,8 @@ namespace JSC {
bool likelyToTakeAnySlowCase(int bytecodeOffset)
{
+ if (!numberOfRareCaseProfiles())
+ return false;
unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
unsigned value = slowCaseCount + specialFastCaseCount;
@@ -694,11 +723,16 @@ namespace JSC {
bool addFrequentExitSite(const DFG::FrequentExitSite& site)
{
- ASSERT(getJITType() == JITCode::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(getJITType()));
return m_exitProfile.add(site);
}
DFG::ExitProfile& exitProfile() { return m_exitProfile; }
+
+ CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
+ {
+ return m_lazyOperandValueProfiles;
+ }
#endif
// Constant Pool
@@ -802,6 +836,29 @@ namespace JSC {
void copyPostParseDataFrom(CodeBlock* alternative);
void copyPostParseDataFromAlternative();
+ // Functions for controlling when JITting kicks in, in a mixed mode
+ // execution world.
+
+ void dontJITAnytimeSoon()
+ {
+ m_llintExecuteCounter = Options::executionCounterValueForDontJITAnytimeSoon;
+ }
+
+ void jitAfterWarmUp()
+ {
+ m_llintExecuteCounter = Options::executionCounterValueForJITAfterWarmUp;
+ }
+
+ void jitSoon()
+ {
+ m_llintExecuteCounter = Options::executionCounterValueForJITSoon;
+ }
+
+ int32_t llintExecuteCounter() const
+ {
+ return m_llintExecuteCounter;
+ }
+
// Functions for controlling when tiered compilation kicks in. This
// controls both when the optimizing compiler is invoked and when OSR
// entry happens. Two triggers exist: the loop trigger and the return
@@ -994,6 +1051,9 @@ namespace JSC {
bool m_shouldDiscardBytecode;
protected:
+#if ENABLE(JIT)
+ virtual void jitCompileImpl(JSGlobalData&) = 0;
+#endif
virtual void visitWeakReferences(SlotVisitor&);
virtual void finalizeUnconditionally();
@@ -1075,9 +1135,11 @@ namespace JSC {
RefPtr<SourceProvider> m_source;
unsigned m_sourceOffset;
-#if ENABLE(INTERPRETER)
Vector<unsigned> m_propertyAccessInstructions;
Vector<unsigned> m_globalResolveInstructions;
+#if ENABLE(LLINT)
+ SegmentedVector<LLIntCallLinkInfo, 8> m_llintCallLinkInfos;
+ SentinelLinkedList<LLIntCallLinkInfo, BasicRawSentinelNode<LLIntCallLinkInfo> > m_incomingLLIntCalls;
#endif
#if ENABLE(JIT)
Vector<StructureStubInfo> m_structureStubInfos;
@@ -1088,9 +1150,10 @@ namespace JSC {
MacroAssemblerCodePtr m_jitCodeWithArityCheck;
SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
#endif
-#if ENABLE(DFG_JIT)
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
OwnPtr<CompactJITCodeMap> m_jitCodeMap;
-
+#endif
+#if ENABLE(DFG_JIT)
struct WeakReferenceTransition {
WeakReferenceTransition() { }
@@ -1130,6 +1193,7 @@ namespace JSC {
// This is relevant to non-DFG code blocks that serve as the profiled code block
// for DFG code blocks.
DFG::ExitProfile m_exitProfile;
+ CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
#if ENABLE(VALUE_PROFILER)
Vector<ValueProfile> m_argumentValueProfiles;
@@ -1153,12 +1217,14 @@ namespace JSC {
OwnPtr<CodeBlock> m_alternative;
+ int32_t m_llintExecuteCounter;
+
int32_t m_jitExecuteCounter;
uint32_t m_speculativeSuccessCounter;
uint32_t m_speculativeFailCounter;
uint8_t m_optimizationDelayCounter;
uint8_t m_reoptimizationRetryCounter;
-
+
struct RareData {
WTF_MAKE_FAST_ALLOCATED;
public:
@@ -1234,6 +1300,7 @@ namespace JSC {
protected:
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
virtual void jettison();
+ virtual void jitCompileImpl(JSGlobalData&);
virtual CodeBlock* replacement();
virtual bool canCompileWithDFGInternal();
#endif
@@ -1268,6 +1335,7 @@ namespace JSC {
protected:
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
virtual void jettison();
+ virtual void jitCompileImpl(JSGlobalData&);
virtual CodeBlock* replacement();
virtual bool canCompileWithDFGInternal();
#endif
@@ -1305,6 +1373,7 @@ namespace JSC {
protected:
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
virtual void jettison();
+ virtual void jitCompileImpl(JSGlobalData&);
virtual CodeBlock* replacement();
virtual bool canCompileWithDFGInternal();
#endif
@@ -1331,6 +1400,17 @@ namespace JSC {
bool m_oldValueOfShouldDiscardBytecode;
};
+ inline CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
+ {
+ if (codeOrigin.inlineCallFrame) {
+ ExecutableBase* executable = codeOrigin.inlineCallFrame->executable.get();
+ ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
+ return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
+ }
+ return baselineCodeBlock;
+ }
+
+
inline Register& ExecState::r(int index)
{
CodeBlock* codeBlock = this->codeBlock();
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
index 5eff1d4a0..11aead3df 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp
@@ -27,20 +27,49 @@
#include "GetByIdStatus.h"
#include "CodeBlock.h"
+#include "LowLevelInterpreter.h"
namespace JSC {
+GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+{
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+ UNUSED_PARAM(ident);
+#if ENABLE(LLINT)
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+ if (instruction[0].u.opcode == llint_op_method_check)
+ instruction++;
+
+ Structure* structure = instruction[4].u.structure.get();
+ if (!structure)
+ return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
+
+ size_t offset = structure->get(*profiledBlock->globalData(), ident);
+ if (offset == notFound)
+ return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
+
+ return GetByIdStatus(SimpleDirect, StructureSet(structure), offset, false);
+#else
+ return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
+#endif
+}
+
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
+ if (!profiledBlock->numberOfStructureStubInfos())
+ return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+
// First check if it makes either calls, in which case we want to be super careful, or
// if it's not set at all, in which case we punt.
StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
if (!stubInfo.seen)
- return GetByIdStatus(NoInformation, StructureSet(), notFound);
+ return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
PolymorphicAccessStructureList* list;
int listSize;
@@ -60,18 +89,19 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
}
for (int i = 0; i < listSize; ++i) {
if (!list->list[i].isDirect)
- return GetByIdStatus(MakesCalls, StructureSet(), notFound);
+ return GetByIdStatus(MakesCalls, StructureSet(), notFound, true);
}
// Next check if it takes slow case, in which case we want to be kind of careful.
if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
- return GetByIdStatus(TakesSlowPath, StructureSet(), notFound);
+ return GetByIdStatus(TakesSlowPath, StructureSet(), notFound, true);
// Finally figure out if we can derive an access strategy.
GetByIdStatus result;
+ result.m_wasSeenInJIT = true;
switch (stubInfo.accessType) {
case access_unset:
- return GetByIdStatus(NoInformation, StructureSet(), notFound);
+ return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
case access_get_by_id_self: {
Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
@@ -130,7 +160,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec
return result;
#else // ENABLE(JIT)
- return GetByIdStatus(NoInformation, StructureSet(), notFound);
+ return GetByIdStatus(NoInformation, StructureSet(), notFound, false);
#endif // ENABLE(JIT)
}
diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
index 00e50e76d..39476c009 100644
--- a/Source/JavaScriptCore/bytecode/GetByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h
@@ -49,10 +49,11 @@ public:
{
}
- GetByIdStatus(State state, const StructureSet& structureSet, size_t offset)
+ GetByIdStatus(State state, const StructureSet& structureSet, size_t offset, bool wasSeenInJIT)
: m_state(state)
, m_structureSet(structureSet)
, m_offset(offset)
+ , m_wasSeenInJIT(wasSeenInJIT)
{
ASSERT((state == SimpleDirect) == (offset != notFound));
}
@@ -70,10 +71,15 @@ public:
const StructureSet& structureSet() const { return m_structureSet; }
size_t offset() const { return m_offset; }
+ bool wasSeenInJIT() const { return m_wasSeenInJIT; }
+
private:
+ static GetByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, Identifier&);
+
State m_state;
StructureSet m_structureSet;
size_t m_offset;
+ bool m_wasSeenInJIT;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h
index 92118eeb2..c4989d2db 100644
--- a/Source/JavaScriptCore/bytecode/Instruction.h
+++ b/Source/JavaScriptCore/bytecode/Instruction.h
@@ -48,6 +48,7 @@ namespace JSC {
class JSCell;
class Structure;
class StructureChain;
+ struct LLIntCallLinkInfo;
struct ValueProfile;
#if ENABLE(JIT)
@@ -146,9 +147,14 @@ namespace JSC {
#endif
struct Instruction {
+ Instruction()
+ {
+ u.jsCell.clear();
+ }
+
Instruction(Opcode opcode)
{
-#if !ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if !ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
// We have to initialize one of the pointer members to ensure that
// the entire struct is initialized, when opcode is not a pointer.
u.jsCell.clear();
@@ -182,6 +188,8 @@ namespace JSC {
Instruction(PropertySlot::GetValueFunc getterFunc) { u.getterFunc = getterFunc; }
+ Instruction(LLIntCallLinkInfo* callLinkInfo) { u.callLinkInfo = callLinkInfo; }
+
Instruction(ValueProfile* profile) { u.profile = profile; }
union {
@@ -191,7 +199,9 @@ namespace JSC {
WriteBarrierBase<StructureChain> structureChain;
WriteBarrierBase<JSCell> jsCell;
PropertySlot::GetValueFunc getterFunc;
+ LLIntCallLinkInfo* callLinkInfo;
ValueProfile* profile;
+ void* pointer;
} u;
private:
diff --git a/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
new file mode 100644
index 000000000..bfb951018
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/LLIntCallLinkInfo.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntCallLinkInfo_h
+#define LLIntCallLinkInfo_h
+
+#include "JSFunction.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/SentinelLinkedList.h>
+
+namespace JSC {
+
+struct Instruction;
+
+struct LLIntCallLinkInfo : public BasicRawSentinelNode<LLIntCallLinkInfo> {
+ LLIntCallLinkInfo()
+ {
+ }
+
+ ~LLIntCallLinkInfo()
+ {
+ if (isOnList())
+ remove();
+ }
+
+ bool isLinked() { return callee; }
+
+ void unlink()
+ {
+ callee.clear();
+ machineCodeTarget = MacroAssemblerCodePtr();
+ if (isOnList())
+ remove();
+ }
+
+ WriteBarrier<JSFunction> callee;
+ WriteBarrier<JSFunction> lastSeenCallee;
+ MacroAssemblerCodePtr machineCodeTarget;
+};
+
+} // namespace JSC
+
+#endif // LLIntCallLinkInfo_h
+
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
new file mode 100644
index 000000000..f199b6923
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LazyOperandValueProfile.h"
+
+#if ENABLE(VALUE_PROFILER)
+
+namespace JSC {
+
+CompressedLazyOperandValueProfileHolder::CompressedLazyOperandValueProfileHolder() { }
+CompressedLazyOperandValueProfileHolder::~CompressedLazyOperandValueProfileHolder() { }
+
+void CompressedLazyOperandValueProfileHolder::computeUpdatedPredictions()
+{
+ if (!m_data)
+ return;
+
+ for (unsigned i = 0; i < m_data->size(); ++i)
+ m_data->at(i).computeUpdatedPrediction();
+}
+
+LazyOperandValueProfile* CompressedLazyOperandValueProfileHolder::add(
+ const LazyOperandValueProfileKey& key)
+{
+ if (!m_data)
+ m_data = adoptPtr(new LazyOperandValueProfile::List());
+ else {
+ for (unsigned i = 0; i < m_data->size(); ++i) {
+ if (m_data->at(i).key() == key)
+ return &m_data->at(i);
+ }
+ }
+
+ m_data->append(LazyOperandValueProfile(key));
+ return &m_data->last();
+}
+
+LazyOperandValueProfileParser::LazyOperandValueProfileParser(
+ CompressedLazyOperandValueProfileHolder& holder)
+ : m_holder(holder)
+{
+ if (!m_holder.m_data)
+ return;
+
+ LazyOperandValueProfile::List& data = *m_holder.m_data;
+ for (unsigned i = 0; i < data.size(); ++i)
+ m_map.add(data[i].key(), &data[i]);
+}
+
+LazyOperandValueProfileParser::~LazyOperandValueProfileParser() { }
+
+LazyOperandValueProfile* LazyOperandValueProfileParser::getIfPresent(
+ const LazyOperandValueProfileKey& key) const
+{
+ HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*>::const_iterator iter =
+ m_map.find(key);
+
+ if (iter == m_map.end())
+ return 0;
+
+ return iter->second;
+}
+
+PredictedType LazyOperandValueProfileParser::prediction(
+ const LazyOperandValueProfileKey& key) const
+{
+ LazyOperandValueProfile* profile = getIfPresent(key);
+ if (!profile)
+ return PredictNone;
+
+ return profile->computeUpdatedPrediction();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(VALUE_PROFILER)
+
diff --git a/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
new file mode 100644
index 000000000..d0260f991
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/LazyOperandValueProfile.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LazyOperandValueProfile_h
+#define LazyOperandValueProfile_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(VALUE_PROFILER)
+
+#include "ValueProfile.h"
+#include <wtf/HashMap.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/SegmentedVector.h>
+
+namespace JSC {
+
+class ScriptExecutable;
+
+class LazyOperandValueProfileKey {
+public:
+ LazyOperandValueProfileKey()
+ : m_bytecodeOffset(0) // 0 = empty value
+ , m_operand(-1) // not a valid operand index in our current scheme
+ {
+ }
+
+ LazyOperandValueProfileKey(WTF::HashTableDeletedValueType)
+ : m_bytecodeOffset(1) // 1 = deleted value
+ , m_operand(-1) // not a valid operand index in our current scheme
+ {
+ }
+
+ LazyOperandValueProfileKey(unsigned bytecodeOffset, int operand)
+ : m_bytecodeOffset(bytecodeOffset)
+ , m_operand(operand)
+ {
+ ASSERT(operand != -1);
+ }
+
+ bool operator!() const
+ {
+ return m_operand == -1;
+ }
+
+ bool operator==(const LazyOperandValueProfileKey& other) const
+ {
+ return m_bytecodeOffset == other.m_bytecodeOffset
+ && m_operand == other.m_operand;
+ }
+
+ unsigned hash() const
+ {
+ return WTF::intHash(m_bytecodeOffset) + m_operand;
+ }
+
+ unsigned bytecodeOffset() const
+ {
+ ASSERT(!!*this);
+ return m_bytecodeOffset;
+ }
+ int operand() const
+ {
+ ASSERT(!!*this);
+ return m_operand;
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return m_operand == -1 && m_bytecodeOffset;
+ }
+private:
+ unsigned m_bytecodeOffset;
+ int m_operand;
+};
+
+struct LazyOperandValueProfileKeyHash {
+ static unsigned hash(const LazyOperandValueProfileKey& key) { return key.hash(); }
+ static bool equal(
+ const LazyOperandValueProfileKey& a,
+ const LazyOperandValueProfileKey& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::LazyOperandValueProfileKey> {
+ typedef JSC::LazyOperandValueProfileKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::LazyOperandValueProfileKey> : public GenericHashTraits<JSC::LazyOperandValueProfileKey> {
+ static void constructDeletedValue(JSC::LazyOperandValueProfileKey& slot) { new (NotNull, &slot) JSC::LazyOperandValueProfileKey(HashTableDeletedValue); }
+ static bool isDeletedValue(const JSC::LazyOperandValueProfileKey& value) { return value.isHashTableDeletedValue(); }
+};
+
+} // namespace WTF
+
+namespace JSC {
+
+struct LazyOperandValueProfile : public MinimalValueProfile {
+ LazyOperandValueProfile()
+ : MinimalValueProfile()
+ , m_operand(-1)
+ {
+ }
+
+ explicit LazyOperandValueProfile(const LazyOperandValueProfileKey& key)
+ : MinimalValueProfile(key.bytecodeOffset())
+ , m_operand(key.operand())
+ {
+ }
+
+ LazyOperandValueProfileKey key() const
+ {
+ return LazyOperandValueProfileKey(m_bytecodeOffset, m_operand);
+ }
+
+ int m_operand;
+
+ typedef SegmentedVector<LazyOperandValueProfile, 8> List;
+};
+
+class LazyOperandValueProfileParser;
+
+class CompressedLazyOperandValueProfileHolder {
+ WTF_MAKE_NONCOPYABLE(CompressedLazyOperandValueProfileHolder);
+public:
+ CompressedLazyOperandValueProfileHolder();
+ ~CompressedLazyOperandValueProfileHolder();
+
+ void computeUpdatedPredictions();
+
+ LazyOperandValueProfile* add(const LazyOperandValueProfileKey& key);
+
+private:
+ friend class LazyOperandValueProfileParser;
+ OwnPtr<LazyOperandValueProfile::List> m_data;
+};
+
+class LazyOperandValueProfileParser {
+ WTF_MAKE_NONCOPYABLE(LazyOperandValueProfileParser);
+public:
+ explicit LazyOperandValueProfileParser(
+ CompressedLazyOperandValueProfileHolder& holder);
+ ~LazyOperandValueProfileParser();
+
+ LazyOperandValueProfile* getIfPresent(
+ const LazyOperandValueProfileKey& key) const;
+
+ PredictedType prediction(const LazyOperandValueProfileKey& key) const;
+private:
+ CompressedLazyOperandValueProfileHolder& m_holder;
+ HashMap<LazyOperandValueProfileKey, LazyOperandValueProfile*> m_map;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(VALUE_PROFILER)
+
+#endif // LazyOperandValueProfile_h
+
+
diff --git a/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp b/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp
index e7d721c29..795b41b69 100644
--- a/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/MethodCallLinkStatus.cpp
@@ -35,6 +35,11 @@ MethodCallLinkStatus MethodCallLinkStatus::computeFor(CodeBlock* profiledBlock,
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
+ // NOTE: This does not have an LLInt fall-back because LLInt does not do any method
+ // call link caching.
+ if (!profiledBlock->numberOfMethodCallLinkInfos())
+ return MethodCallLinkStatus();
+
MethodCallLinkInfo& methodCall = profiledBlock->getMethodCallLinkInfo(bytecodeIndex);
if (!methodCall.seen || !methodCall.cachedStructure)
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
new file mode 100644
index 000000000..857ed9c87
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "MethodOfGettingAValueProfile.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+
+namespace JSC {
+
+MethodOfGettingAValueProfile MethodOfGettingAValueProfile::fromLazyOperand(
+ CodeBlock* codeBlock, const LazyOperandValueProfileKey& key)
+{
+ MethodOfGettingAValueProfile result;
+ result.m_kind = LazyOperand;
+ result.u.lazyOperand.codeBlock = codeBlock;
+ result.u.lazyOperand.bytecodeOffset = key.bytecodeOffset();
+ result.u.lazyOperand.operand = key.operand();
+ return result;
+}
+
+EncodedJSValue* MethodOfGettingAValueProfile::getSpecFailBucket(unsigned index) const
+{
+ switch (m_kind) {
+ case None:
+ return 0;
+
+ case Ready:
+ return u.profile->specFailBucket(index);
+
+ case LazyOperand:
+ return u.lazyOperand.codeBlock->lazyOperandValueProfiles().add(
+ LazyOperandValueProfileKey(
+ u.lazyOperand.bytecodeOffset, u.lazyOperand.operand))->specFailBucket(index);
+
+ default:
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
new file mode 100644
index 000000000..0f5c2be7b
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/MethodOfGettingAValueProfile.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef MethodOfGettingAValueProfile_h
+#define MethodOfGettingAValueProfile_h
+
+#include <wtf/Platform.h>
+
+// This is guarded by ENABLE_DFG_JIT only because it uses some value profiles
+// that are currently only used if the DFG is enabled (i.e. they are not
+// available in the profile-only configuration). Hopefully someday all of
+// these #if's will disappear...
+#if ENABLE(DFG_JIT)
+
+#include "JSValue.h"
+
+namespace JSC {
+
+class CodeBlock;
+class LazyOperandValueProfileKey;
+struct ValueProfile;
+
+class MethodOfGettingAValueProfile {
+public:
+ MethodOfGettingAValueProfile()
+ : m_kind(None)
+ {
+ }
+
+ explicit MethodOfGettingAValueProfile(ValueProfile* profile)
+ {
+ if (profile) {
+ m_kind = Ready;
+ u.profile = profile;
+ } else
+ m_kind = None;
+ }
+
+ static MethodOfGettingAValueProfile fromLazyOperand(
+ CodeBlock*, const LazyOperandValueProfileKey&);
+
+ bool operator!() const { return m_kind == None; }
+
+ // This logically has a pointer to a "There exists X such that
+ // ValueProfileBase<X>". But since C++ does not have existential
+ // templates, I cannot return it. So instead, for any methods that
+ // users of this class would like to call, we'll just have to provide
+ // a method here that does it through an indirection. Or we could
+ // possibly just make ValueProfile less template-based. But last I
+ // tried that, it felt more yucky than this class.
+
+ EncodedJSValue* getSpecFailBucket(unsigned index) const;
+
+private:
+ enum Kind {
+ None,
+ Ready,
+ LazyOperand
+ };
+
+ Kind m_kind;
+ union {
+ ValueProfile* profile;
+ struct {
+ CodeBlock* codeBlock;
+ unsigned bytecodeOffset;
+ int operand;
+ } lazyOperand;
+ } u;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // MethodOfGettingAValueProfile_h
+
diff --git a/Source/JavaScriptCore/bytecode/Opcode.cpp b/Source/JavaScriptCore/bytecode/Opcode.cpp
index 2490804cd..a27714026 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.cpp
+++ b/Source/JavaScriptCore/bytecode/Opcode.cpp
@@ -39,16 +39,12 @@ using namespace std;
namespace JSC {
-#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING) || ENABLE(CODEBLOCK_SAMPLING) || ENABLE(OPCODE_STATS)
-
const char* const opcodeNames[] = {
#define OPCODE_NAME_ENTRY(opcode, size) #opcode,
FOR_EACH_OPCODE_ID(OPCODE_NAME_ENTRY)
#undef OPCODE_NAME_ENTRY
};
-#endif
-
#if ENABLE(OPCODE_STATS)
long long OpcodeStats::opcodeCounts[numOpcodeIDs];
@@ -118,19 +114,19 @@ OpcodeStats::~OpcodeStats()
*(currentPairIndex++) = make_pair(i, j);
qsort(sortedPairIndices, numOpcodeIDs * numOpcodeIDs, sizeof(pair<int, int>), compareOpcodePairIndices);
- printf("\nExecuted opcode statistics\n");
+ dataLog("\nExecuted opcode statistics\n");
- printf("Total instructions executed: %lld\n\n", totalInstructions);
+ dataLog("Total instructions executed: %lld\n\n", totalInstructions);
- printf("All opcodes by frequency:\n\n");
+ dataLog("All opcodes by frequency:\n\n");
for (int i = 0; i < numOpcodeIDs; ++i) {
int index = sortedIndices[i];
- printf("%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCounts[index], ((double) opcodeCounts[index]) / ((double) totalInstructions) * 100.0);
+ dataLog("%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCounts[index], ((double) opcodeCounts[index]) / ((double) totalInstructions) * 100.0);
}
- printf("\n");
- printf("2-opcode sequences by frequency: %lld\n\n", totalInstructions);
+ dataLog("\n");
+ dataLog("2-opcode sequences by frequency: %lld\n\n", totalInstructions);
for (int i = 0; i < numOpcodeIDs * numOpcodeIDs; ++i) {
pair<int, int> indexPair = sortedPairIndices[i];
@@ -139,11 +135,11 @@ OpcodeStats::~OpcodeStats()
if (!count)
break;
- printf("%s%s %s:%s %lld %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), count, ((double) count) / ((double) totalInstructionPairs) * 100.0);
+ dataLog("%s%s %s:%s %lld %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), count, ((double) count) / ((double) totalInstructionPairs) * 100.0);
}
- printf("\n");
- printf("Most common opcodes and sequences:\n");
+ dataLog("\n");
+ dataLog("Most common opcodes and sequences:\n");
for (int i = 0; i < numOpcodeIDs; ++i) {
int index = sortedIndices[i];
@@ -151,7 +147,7 @@ OpcodeStats::~OpcodeStats()
double opcodeProportion = ((double) opcodeCount) / ((double) totalInstructions);
if (opcodeProportion < 0.0001)
break;
- printf("\n%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCount, opcodeProportion * 100.0);
+ dataLog("\n%s:%s %lld - %.2f%%\n", opcodeNames[index], padOpcodeName((OpcodeID)index, 28), opcodeCount, opcodeProportion * 100.0);
for (int j = 0; j < numOpcodeIDs * numOpcodeIDs; ++j) {
pair<int, int> indexPair = sortedPairIndices[j];
@@ -164,11 +160,11 @@ OpcodeStats::~OpcodeStats()
if (indexPair.first != index && indexPair.second != index)
continue;
- printf(" %s%s %s:%s %lld - %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), pairCount, pairProportion * 100.0);
+ dataLog(" %s%s %s:%s %lld - %.2f%%\n", opcodeNames[indexPair.first], padOpcodeName((OpcodeID)indexPair.first, 28), opcodeNames[indexPair.second], padOpcodeName((OpcodeID)indexPair.second, 28), pairCount, pairProportion * 100.0);
}
}
- printf("\n");
+ dataLog("\n");
}
void OpcodeStats::recordInstruction(int opcode)
diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h
index 57633a338..a47fa5e9b 100644
--- a/Source/JavaScriptCore/bytecode/Opcode.h
+++ b/Source/JavaScriptCore/bytecode/Opcode.h
@@ -123,6 +123,8 @@ namespace JSC {
macro(op_get_arguments_length, 4) \
macro(op_put_by_id, 9) \
macro(op_put_by_id_transition, 9) \
+ macro(op_put_by_id_transition_direct, 9) \
+ macro(op_put_by_id_transition_normal, 9) \
macro(op_put_by_id_replace, 9) \
macro(op_put_by_id_generic, 9) \
macro(op_del_by_id, 4) \
@@ -201,6 +203,7 @@ namespace JSC {
typedef enum { FOR_EACH_OPCODE_ID(OPCODE_ID_ENUM) } OpcodeID;
#undef OPCODE_ID_ENUM
+ const int maxOpcodeLength = 9;
const int numOpcodeIDs = op_end + 1;
#define OPCODE_ID_LENGTHS(id, length) const int id##_length = length;
@@ -217,7 +220,7 @@ namespace JSC {
FOR_EACH_OPCODE_ID(VERIFY_OPCODE_ID);
#undef VERIFY_OPCODE_ID
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER) || ENABLE(LLINT)
#if COMPILER(RVCT) || COMPILER(INTEL)
typedef void* Opcode;
#else
@@ -227,8 +230,6 @@ namespace JSC {
typedef OpcodeID Opcode;
#endif
-#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING) || ENABLE(CODEBLOCK_SAMPLING) || ENABLE(OPCODE_STATS)
-
#define PADDING_STRING " "
#define PADDING_STRING_LENGTH static_cast<unsigned>(strlen(PADDING_STRING))
@@ -244,8 +245,6 @@ namespace JSC {
#undef PADDING_STRING_LENGTH
#undef PADDING_STRING
-#endif
-
#if ENABLE(OPCODE_STATS)
struct OpcodeStats {
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
new file mode 100644
index 000000000..170615b73
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PolymorphicPutByIdList.h"
+
+#if ENABLE(JIT)
+
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+PutByIdAccess PutByIdAccess::fromStructureStubInfo(
+ StructureStubInfo& stubInfo,
+ MacroAssemblerCodePtr initialSlowPath)
+{
+ PutByIdAccess result;
+
+ switch (stubInfo.accessType) {
+ case access_put_by_id_replace:
+ result.m_type = Replace;
+ result.m_oldStructure.copyFrom(stubInfo.u.putByIdReplace.baseObjectStructure);
+ result.m_stubRoutine = MacroAssemblerCodeRef::createSelfManagedCodeRef(initialSlowPath);
+ break;
+
+ case access_put_by_id_transition_direct:
+ case access_put_by_id_transition_normal:
+ result.m_type = Transition;
+ result.m_oldStructure.copyFrom(stubInfo.u.putByIdTransition.previousStructure);
+ result.m_newStructure.copyFrom(stubInfo.u.putByIdTransition.structure);
+ result.m_chain.copyFrom(stubInfo.u.putByIdTransition.chain);
+ result.m_stubRoutine = stubInfo.stubRoutine;
+ break;
+
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ return result;
+}
+
+bool PutByIdAccess::visitWeak() const
+{
+ switch (m_type) {
+ case Replace:
+ if (!Heap::isMarked(m_oldStructure.get()))
+ return false;
+ break;
+ case Transition:
+ if (!Heap::isMarked(m_oldStructure.get()))
+ return false;
+ if (!Heap::isMarked(m_newStructure.get()))
+ return false;
+ if (!Heap::isMarked(m_chain.get()))
+ return false;
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+ return true;
+}
+
+PolymorphicPutByIdList::PolymorphicPutByIdList(
+ PutKind putKind,
+ StructureStubInfo& stubInfo,
+ MacroAssemblerCodePtr initialSlowPath)
+ : m_kind(putKind)
+{
+ m_list.append(PutByIdAccess::fromStructureStubInfo(stubInfo, initialSlowPath));
+}
+
+PolymorphicPutByIdList* PolymorphicPutByIdList::from(
+ PutKind putKind,
+ StructureStubInfo& stubInfo,
+ MacroAssemblerCodePtr initialSlowPath)
+{
+ if (stubInfo.accessType == access_put_by_id_list)
+ return stubInfo.u.putByIdList.list;
+
+ ASSERT(stubInfo.accessType == access_put_by_id_replace
+ || stubInfo.accessType == access_put_by_id_transition_normal
+ || stubInfo.accessType == access_put_by_id_transition_direct);
+
+ PolymorphicPutByIdList* result =
+ new PolymorphicPutByIdList(putKind, stubInfo, initialSlowPath);
+
+ stubInfo.initPutByIdList(result);
+
+ return result;
+}
+
+PolymorphicPutByIdList::~PolymorphicPutByIdList() { }
+
+bool PolymorphicPutByIdList::isFull() const
+{
+ ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
+ return size() == POLYMORPHIC_LIST_CACHE_SIZE;
+}
+
+bool PolymorphicPutByIdList::isAlmostFull() const
+{
+ ASSERT(size() <= POLYMORPHIC_LIST_CACHE_SIZE);
+ return size() >= POLYMORPHIC_LIST_CACHE_SIZE - 1;
+}
+
+void PolymorphicPutByIdList::addAccess(const PutByIdAccess& putByIdAccess)
+{
+ ASSERT(!isFull());
+ // Make sure that the resizing optimizes for space, not time.
+ m_list.resize(m_list.size() + 1);
+ m_list.last() = putByIdAccess;
+}
+
+bool PolymorphicPutByIdList::visitWeak() const
+{
+ for (unsigned i = 0; i < size(); ++i) {
+ if (!at(i).visitWeak())
+ return false;
+ }
+ return true;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
new file mode 100644
index 000000000..60b632d52
--- /dev/null
+++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PolymorphicPutByIdList_h
+#define PolymorphicPutByIdList_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "MacroAssembler.h"
+#include "Opcode.h"
+#include "PutKind.h"
+#include "Structure.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+struct StructureStubInfo;
+
+class PutByIdAccess {
+public:
+ enum AccessType {
+ Invalid,
+ Transition,
+ Replace
+ };
+
+ PutByIdAccess()
+ : m_type(Invalid)
+ {
+ }
+
+ static PutByIdAccess transition(
+ JSGlobalData& globalData,
+ JSCell* owner,
+ Structure* oldStructure,
+ Structure* newStructure,
+ StructureChain* chain,
+ MacroAssemblerCodeRef stubRoutine)
+ {
+ PutByIdAccess result;
+ result.m_type = Transition;
+ result.m_oldStructure.set(globalData, owner, oldStructure);
+ result.m_newStructure.set(globalData, owner, newStructure);
+ result.m_chain.set(globalData, owner, chain);
+ result.m_stubRoutine = stubRoutine;
+ return result;
+ }
+
+ static PutByIdAccess replace(
+ JSGlobalData& globalData,
+ JSCell* owner,
+ Structure* structure,
+ MacroAssemblerCodeRef stubRoutine)
+ {
+ PutByIdAccess result;
+ result.m_type = Replace;
+ result.m_oldStructure.set(globalData, owner, structure);
+ result.m_stubRoutine = stubRoutine;
+ return result;
+ }
+
+ static PutByIdAccess fromStructureStubInfo(
+ StructureStubInfo&,
+ MacroAssemblerCodePtr initialSlowPath);
+
+ bool isSet() const { return m_type != Invalid; }
+ bool operator!() const { return !isSet(); }
+
+ AccessType type() const { return m_type; }
+
+ bool isTransition() const { return m_type == Transition; }
+ bool isReplace() const { return m_type == Replace; }
+
+ Structure* oldStructure() const
+ {
+ // Using this instead of isSet() to make this assertion robust against the possibility
+ // of additional access types being added.
+ ASSERT(isTransition() || isReplace());
+
+ return m_oldStructure.get();
+ }
+
+ Structure* structure() const
+ {
+ ASSERT(isReplace());
+ return m_oldStructure.get();
+ }
+
+ Structure* newStructure() const
+ {
+ ASSERT(isTransition());
+ return m_newStructure.get();
+ }
+
+ StructureChain* chain() const
+ {
+ ASSERT(isTransition());
+ return m_chain.get();
+ }
+
+ MacroAssemblerCodeRef stubRoutine() const
+ {
+ ASSERT(isTransition() || isReplace());
+ return m_stubRoutine;
+ }
+
+ bool visitWeak() const;
+
+private:
+ AccessType m_type;
+ WriteBarrier<Structure> m_oldStructure;
+ WriteBarrier<Structure> m_newStructure;
+ WriteBarrier<StructureChain> m_chain;
+ MacroAssemblerCodeRef m_stubRoutine;
+};
+
+class PolymorphicPutByIdList {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ // Initialize from a stub info; this will place one element in the list and it will
+ // be created by converting the stub info's put by id access information into our
+ // PutByIdAccess.
+ PolymorphicPutByIdList(
+ PutKind,
+ StructureStubInfo&,
+ MacroAssemblerCodePtr initialSlowPath);
+
+ // Either creates a new polymorphic put list, or returns the one that is already
+ // in place.
+ static PolymorphicPutByIdList* from(
+ PutKind,
+ StructureStubInfo&,
+ MacroAssemblerCodePtr initialSlowPath);
+
+ ~PolymorphicPutByIdList();
+
+ MacroAssemblerCodePtr currentSlowPathTarget() const
+ {
+ return m_list.last().stubRoutine().code();
+ }
+
+ void addAccess(const PutByIdAccess&);
+
+ bool isEmpty() const { return m_list.isEmpty(); }
+ unsigned size() const { return m_list.size(); }
+ bool isFull() const;
+ bool isAlmostFull() const; // True if adding an element would make isFull() true.
+ const PutByIdAccess& at(unsigned i) const { return m_list[i]; }
+ const PutByIdAccess& operator[](unsigned i) const { return m_list[i]; }
+
+ PutKind kind() const { return m_kind; }
+
+ bool visitWeak() const;
+
+private:
+ Vector<PutByIdAccess, 2> m_list;
+ PutKind m_kind;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // PolymorphicPutByIdList_h
+
diff --git a/Source/JavaScriptCore/bytecode/PredictedType.cpp b/Source/JavaScriptCore/bytecode/PredictedType.cpp
index a8118adf9..2b490c24e 100644
--- a/Source/JavaScriptCore/bytecode/PredictedType.cpp
+++ b/Source/JavaScriptCore/bytecode/PredictedType.cpp
@@ -29,6 +29,7 @@
#include "config.h"
#include "PredictedType.h"
+#include "JSArray.h"
#include "JSByteArray.h"
#include "JSFunction.h"
#include "ValueProfile.h"
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
index 45a5e614c..209d4cd5e 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp
@@ -27,27 +27,69 @@
#include "PutByIdStatus.h"
#include "CodeBlock.h"
+#include "LowLevelInterpreter.h"
#include "Structure.h"
#include "StructureChain.h"
namespace JSC {
+PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
+{
+ UNUSED_PARAM(profiledBlock);
+ UNUSED_PARAM(bytecodeIndex);
+ UNUSED_PARAM(ident);
+#if ENABLE(LLINT)
+ Instruction* instruction = profiledBlock->instructions().begin() + bytecodeIndex;
+
+ Structure* structure = instruction[4].u.structure.get();
+ if (!structure)
+ return PutByIdStatus(NoInformation, 0, 0, 0, notFound);
+
+ if (instruction[0].u.opcode == llint_op_put_by_id) {
+ size_t offset = structure->get(*profiledBlock->globalData(), ident);
+ if (offset == notFound)
+ return PutByIdStatus(NoInformation, 0, 0, 0, notFound);
+
+ return PutByIdStatus(SimpleReplace, structure, 0, 0, offset);
+ }
+
+ ASSERT(instruction[0].u.opcode == llint_op_put_by_id_transition_direct
+ || instruction[0].u.opcode == llint_op_put_by_id_transition_normal);
+
+ Structure* newStructure = instruction[6].u.structure.get();
+ StructureChain* chain = instruction[7].u.structureChain.get();
+ ASSERT(newStructure);
+ ASSERT(chain);
+
+ size_t offset = newStructure->get(*profiledBlock->globalData(), ident);
+ if (offset == notFound)
+ return PutByIdStatus(NoInformation, 0, 0, 0, notFound);
+
+ return PutByIdStatus(SimpleTransition, structure, newStructure, chain, offset);
+#else
+ return PutByIdStatus(NoInformation, 0, 0, 0, notFound);
+#endif
+}
+
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
+ if (!profiledBlock->numberOfStructureStubInfos())
+ return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
+
if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound);
StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
if (!stubInfo.seen)
- return PutByIdStatus(NoInformation, 0, 0, 0, notFound);
+ return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
switch (stubInfo.accessType) {
case access_unset:
- return PutByIdStatus(NoInformation, 0, 0, 0, notFound);
+ return computeFromLLInt(profiledBlock, bytecodeIndex, ident);
case access_put_by_id_replace: {
size_t offset = stubInfo.u.putByIdReplace.baseObjectStructure->get(
diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
index b33f4d09c..a6d95a449 100644
--- a/Source/JavaScriptCore/bytecode/PutByIdStatus.h
+++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h
@@ -93,6 +93,8 @@ public:
size_t offset() const { return m_offset; }
private:
+ static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, Identifier&);
+
State m_state;
Structure* m_oldStructure;
Structure* m_newStructure;
diff --git a/Source/JavaScriptCore/heap/BumpBlock.h b/Source/JavaScriptCore/bytecode/PutKind.h
index b9f271ca8..7a1dd642e 100644
--- a/Source/JavaScriptCore/heap/BumpBlock.h
+++ b/Source/JavaScriptCore/bytecode/PutKind.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,31 +23,14 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BumpBlock_h
-#define BumpBlock_h
-
-#include "HeapBlock.h"
+#ifndef PutKind_h
+#define PutKind_h
namespace JSC {
-class BumpSpace;
-
-class BumpBlock : public HeapBlock {
- friend class BumpSpace;
-public:
- BumpBlock(PageAllocationAligned& allocation)
- : HeapBlock(allocation)
- , m_offset(m_payload)
- , m_isPinned(false)
- {
- }
-
-private:
- void* m_offset;
- uintptr_t m_isPinned;
- char m_payload[1];
-};
+enum PutKind { Direct, NotDirect };
} // namespace JSC
-#endif
+#endif // PutKind_h
+
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.cpp b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
index 0dec25fb7..077f041f4 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.cpp
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.cpp
@@ -67,14 +67,14 @@ void SamplingFlags::stop()
total += s_flagCounts[i];
if (total) {
- printf("\nSamplingFlags: sample counts with flags set: (%lld total)\n", total);
+ dataLog("\nSamplingFlags: sample counts with flags set: (%lld total)\n", total);
for (unsigned i = 0; i <= 32; ++i) {
if (s_flagCounts[i])
- printf(" [ %02d ] : %lld\t\t(%03.2f%%)\n", i, s_flagCounts[i], (100.0 * s_flagCounts[i]) / total);
+ dataLog(" [ %02d ] : %lld\t\t(%03.2f%%)\n", i, s_flagCounts[i], (100.0 * s_flagCounts[i]) / total);
}
- printf("\n");
+ dataLog("\n");
} else
- printf("\nSamplingFlags: no samples.\n\n");
+ dataLog("\nSamplingFlags: no samples.\n\n");
}
uint64_t SamplingFlags::s_flagCounts[33];
@@ -151,7 +151,7 @@ void SamplingRegion::dump()
void SamplingRegion::dumpInternal()
{
if (!s_spectrum) {
- printf("\nSamplingRegion: was never sampled.\n\n");
+ dataLog("\nSamplingRegion: was never sampled.\n\n");
return;
}
@@ -161,10 +161,10 @@ void SamplingRegion::dumpInternal()
for (unsigned i = list.size(); i--;)
total += list[i].count;
- printf("\nSamplingRegion: sample counts for regions: (%lu samples)\n", total);
+ dataLog("\nSamplingRegion: sample counts for regions: (%lu samples)\n", total);
for (unsigned i = list.size(); i--;)
- printf(" %3.2lf%% %s\n", (100.0 * list[i].count) / total, list[i].key);
+ dataLog(" %3.2lf%% %s\n", (100.0 * list[i].count) / total, list[i].key);
}
#else // ENABLE(SAMPLING_REGIONS)
void SamplingRegion::dump() { }
@@ -210,7 +210,7 @@ bool SamplingThread::s_running = false;
unsigned SamplingThread::s_hertz = 10000;
ThreadIdentifier SamplingThread::s_samplingThread;
-void* SamplingThread::threadStartFunc(void*)
+void SamplingThread::threadStartFunc(void*)
{
while (s_running) {
sleepForMicroseconds(hertz2us(s_hertz));
@@ -225,8 +225,6 @@ void* SamplingThread::threadStartFunc(void*)
SamplingTool::sample();
#endif
}
-
- return 0;
}
@@ -243,7 +241,7 @@ void SamplingThread::stop()
{
ASSERT(s_running);
s_running = false;
- waitForThreadCompletion(s_samplingThread, 0);
+ waitForThreadCompletion(s_samplingThread);
}
@@ -373,10 +371,10 @@ void SamplingTool::dump(ExecState* exec)
// (2) Print Opcode sampling results.
- printf("\nBytecode samples [*]\n");
- printf(" sample %% of %% of | cti cti %%\n");
- printf("opcode count VM total | count of self\n");
- printf("------------------------------------------------------- | ----------------\n");
+ dataLog("\nBytecode samples [*]\n");
+ dataLog(" sample %% of %% of | cti cti %%\n");
+ dataLog("opcode count VM total | count of self\n");
+ dataLog("------------------------------------------------------- | ----------------\n");
for (int i = 0; i < numOpcodeIDs; ++i) {
long long count = opcodeSampleInfo[i].count;
@@ -391,18 +389,18 @@ void SamplingTool::dump(ExecState* exec)
double percentOfTotal = (static_cast<double>(count) * 100) / m_sampleCount;
long long countInCTIFunctions = opcodeSampleInfo[i].countInCTIFunctions;
double percentInCTIFunctions = (static_cast<double>(countInCTIFunctions) * 100) / count;
- fprintf(stdout, "%s:%s%-6lld %.3f%%\t%.3f%%\t | %-6lld %.3f%%\n", opcodeName, opcodePadding, count, percentOfVM, percentOfTotal, countInCTIFunctions, percentInCTIFunctions);
+ dataLog("%s:%s%-6lld %.3f%%\t%.3f%%\t | %-6lld %.3f%%\n", opcodeName, opcodePadding, count, percentOfVM, percentOfTotal, countInCTIFunctions, percentInCTIFunctions);
}
- printf("\n[*] Samples inside host code are not charged to any Bytecode.\n\n");
- printf("\tSamples inside VM:\t\t%lld / %lld (%.3f%%)\n", m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_opcodeSampleCount) * 100) / m_sampleCount);
- printf("\tSamples inside host code:\t%lld / %lld (%.3f%%)\n\n", m_sampleCount - m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_sampleCount - m_opcodeSampleCount) * 100) / m_sampleCount);
- printf("\tsample count:\tsamples inside this opcode\n");
- printf("\t%% of VM:\tsample count / all opcode samples\n");
- printf("\t%% of total:\tsample count / all samples\n");
- printf("\t--------------\n");
- printf("\tcti count:\tsamples inside a CTI function called by this opcode\n");
- printf("\tcti %% of self:\tcti count / sample count\n");
+ dataLog("\n[*] Samples inside host code are not charged to any Bytecode.\n\n");
+ dataLog("\tSamples inside VM:\t\t%lld / %lld (%.3f%%)\n", m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_opcodeSampleCount) * 100) / m_sampleCount);
+ dataLog("\tSamples inside host code:\t%lld / %lld (%.3f%%)\n\n", m_sampleCount - m_opcodeSampleCount, m_sampleCount, (static_cast<double>(m_sampleCount - m_opcodeSampleCount) * 100) / m_sampleCount);
+ dataLog("\tsample count:\tsamples inside this opcode\n");
+ dataLog("\t%% of VM:\tsample count / all opcode samples\n");
+ dataLog("\t%% of total:\tsample count / all samples\n");
+ dataLog("\t--------------\n");
+ dataLog("\tcti count:\tsamples inside a CTI function called by this opcode\n");
+ dataLog("\tcti %% of self:\tcti count / sample count\n");
#if ENABLE(CODEBLOCK_SAMPLING)
@@ -418,7 +416,7 @@ void SamplingTool::dump(ExecState* exec)
// (4) Print data from 'codeBlockSamples' array.
- printf("\nCodeBlock samples\n\n");
+ dataLog("\nCodeBlock samples\n\n");
for (int i = 0; i < scopeCount; ++i) {
ScriptSampleRecord* record = codeBlockSamples[i];
@@ -428,21 +426,21 @@ void SamplingTool::dump(ExecState* exec)
if (blockPercent >= 1) {
//Instruction* code = codeBlock->instructions().begin();
- printf("#%d: %s:%d: %d / %lld (%.3f%%)\n", i + 1, record->m_executable->sourceURL().utf8().data(), codeBlock->lineNumberForBytecodeOffset(0), record->m_sampleCount, m_sampleCount, blockPercent);
+ dataLog("#%d: %s:%d: %d / %lld (%.3f%%)\n", i + 1, record->m_executable->sourceURL().utf8().data(), codeBlock->lineNumberForBytecodeOffset(0), record->m_sampleCount, m_sampleCount, blockPercent);
if (i < 10) {
HashMap<unsigned,unsigned> lineCounts;
codeBlock->dump(exec);
- printf(" Opcode and line number samples [*]\n\n");
+ dataLog(" Opcode and line number samples [*]\n\n");
for (unsigned op = 0; op < record->m_size; ++op) {
int count = record->m_samples[op];
if (count) {
- printf(" [% 4d] has sample count: % 4d\n", op, count);
+ dataLog(" [% 4d] has sample count: % 4d\n", op, count);
unsigned line = codeBlock->lineNumberForBytecodeOffset(op);
lineCounts.set(line, (lineCounts.contains(line) ? lineCounts.get(line) : 0) + count);
}
}
- printf("\n");
+ dataLog("\n");
int linesCount = lineCounts.size();
Vector<LineCountInfo> lineCountInfo(linesCount);
@@ -455,12 +453,12 @@ void SamplingTool::dump(ExecState* exec)
qsort(lineCountInfo.begin(), linesCount, sizeof(LineCountInfo), compareLineCountInfoSampling);
for (lineno = 0; lineno < linesCount; ++lineno) {
- printf(" Line #%d has sample count %d.\n", lineCountInfo[lineno].line, lineCountInfo[lineno].count);
+ dataLog(" Line #%d has sample count %d.\n", lineCountInfo[lineno].line, lineCountInfo[lineno].count);
}
- printf("\n");
- printf(" [*] Samples inside host code are charged to the calling Bytecode.\n");
- printf(" Samples on a call / return boundary are not charged to a specific opcode or line.\n\n");
- printf(" Samples on a call / return boundary: %d / %d (%.3f%%)\n\n", record->m_sampleCount - record->m_opcodeSampleCount, record->m_sampleCount, (static_cast<double>(record->m_sampleCount - record->m_opcodeSampleCount) * 100) / record->m_sampleCount);
+ dataLog("\n");
+ dataLog(" [*] Samples inside host code are charged to the calling Bytecode.\n");
+ dataLog(" Samples on a call / return boundary are not charged to a specific opcode or line.\n\n");
+ dataLog(" Samples on a call / return boundary: %d / %d (%.3f%%)\n\n", record->m_sampleCount - record->m_opcodeSampleCount, record->m_sampleCount, (static_cast<double>(record->m_sampleCount - record->m_opcodeSampleCount) * 100) / record->m_sampleCount);
}
}
}
diff --git a/Source/JavaScriptCore/bytecode/SamplingTool.h b/Source/JavaScriptCore/bytecode/SamplingTool.h
index 32a44ad69..28fd528d0 100644
--- a/Source/JavaScriptCore/bytecode/SamplingTool.h
+++ b/Source/JavaScriptCore/bytecode/SamplingTool.h
@@ -223,7 +223,7 @@ namespace JSC {
JS_EXPORT_PRIVATE static void start(unsigned hertz=10000);
JS_EXPORT_PRIVATE static void stop();
- static void* threadStartFunc(void*);
+ static void threadStartFunc(void*);
};
class SamplingTool {
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
index ec18782d5..f2657b785 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.cpp
@@ -27,6 +27,7 @@
#include "StructureStubInfo.h"
#include "JSObject.h"
+#include "PolymorphicPutByIdList.h"
#include "ScopeChain.h"
namespace JSC {
@@ -45,6 +46,9 @@ void StructureStubInfo::deref()
delete polymorphicStructures;
return;
}
+ case access_put_by_id_list:
+ delete u.putByIdList.list;
+ return;
case access_get_by_id_self:
case access_get_by_id_proto:
case access_get_by_id_chain:
@@ -82,18 +86,14 @@ bool StructureStubInfo::visitWeakReferences()
break;
case access_get_by_id_self_list: {
PolymorphicAccessStructureList* polymorphicStructures = u.getByIdSelfList.structureList;
- if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize)) {
- delete polymorphicStructures;
+ if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize))
return false;
- }
break;
}
case access_get_by_id_proto_list: {
PolymorphicAccessStructureList* polymorphicStructures = u.getByIdProtoList.structureList;
- if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize)) {
- delete polymorphicStructures;
+ if (!polymorphicStructures->visitWeak(u.getByIdSelfList.listSize))
return false;
- }
break;
}
case access_put_by_id_transition_normal:
@@ -107,6 +107,10 @@ bool StructureStubInfo::visitWeakReferences()
if (!Heap::isMarked(u.putByIdReplace.baseObjectStructure.get()))
return false;
break;
+ case access_put_by_id_list:
+ if (!u.putByIdList.list->visitWeak())
+ return false;
+ break;
default:
// The rest of the instructions don't require references, so there is no need to
// do anything.
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
index d6b6092d0..8fad5c0cc 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -26,6 +26,8 @@
#ifndef StructureStubInfo_h
#define StructureStubInfo_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "CodeOrigin.h"
@@ -36,6 +38,8 @@
namespace JSC {
+ class PolymorphicPutByIdList;
+
enum AccessType {
access_get_by_id_self,
access_get_by_id_proto,
@@ -45,6 +49,7 @@ namespace JSC {
access_put_by_id_transition_normal,
access_put_by_id_transition_direct,
access_put_by_id_replace,
+ access_put_by_id_list,
access_unset,
access_get_by_id_generic,
access_put_by_id_generic,
@@ -75,6 +80,7 @@ namespace JSC {
case access_put_by_id_transition_normal:
case access_put_by_id_transition_direct:
case access_put_by_id_replace:
+ case access_put_by_id_list:
case access_put_by_id_generic:
return true;
default:
@@ -149,10 +155,16 @@ namespace JSC {
u.putByIdReplace.baseObjectStructure.set(globalData, owner, baseObjectStructure);
}
+ void initPutByIdList(PolymorphicPutByIdList* list)
+ {
+ accessType = access_put_by_id_list;
+ u.putByIdList.list = list;
+ }
+
void reset()
{
accessType = access_unset;
-
+ deref();
stubRoutine = MacroAssemblerCodeRef();
}
@@ -227,6 +239,9 @@ namespace JSC {
struct {
WriteBarrierBase<Structure> baseObjectStructure;
} putByIdReplace;
+ struct {
+ PolymorphicPutByIdList* list;
+ } putByIdList;
} u;
MacroAssemblerCodeRef stubRoutine;
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.cpp b/Source/JavaScriptCore/bytecode/ValueProfile.cpp
deleted file mode 100644
index 2d7770aed..000000000
--- a/Source/JavaScriptCore/bytecode/ValueProfile.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "ValueProfile.h"
-
-namespace JSC {
-
-#if ENABLE(VALUE_PROFILER)
-PredictedType ValueProfile::computeUpdatedPrediction()
-{
- for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
- JSValue value = JSValue::decode(m_buckets[i]);
- if (!value)
- continue;
-
- m_numberOfSamplesInPrediction++;
- mergePrediction(m_prediction, predictionFromValue(value));
-
- m_buckets[i] = JSValue::encode(JSValue());
- }
-
- return m_prediction;
-}
-#endif // ENABLE(VALUE_PROFILER)
-
-} // namespace JSC
diff --git a/Source/JavaScriptCore/bytecode/ValueProfile.h b/Source/JavaScriptCore/bytecode/ValueProfile.h
index 02a1d6bf9..73e363a8b 100644
--- a/Source/JavaScriptCore/bytecode/ValueProfile.h
+++ b/Source/JavaScriptCore/bytecode/ValueProfile.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,6 +29,10 @@
#ifndef ValueProfile_h
#define ValueProfile_h
+#include <wtf/Platform.h>
+
+#if ENABLE(VALUE_PROFILER)
+
#include "JSArray.h"
#include "PredictedType.h"
#include "Structure.h"
@@ -36,15 +40,14 @@
namespace JSC {
-#if ENABLE(VALUE_PROFILER)
-struct ValueProfile {
- static const unsigned logNumberOfBuckets = 0; // 1 bucket
- static const unsigned numberOfBuckets = 1 << logNumberOfBuckets;
+template<unsigned numberOfBucketsArgument>
+struct ValueProfileBase {
+ static const unsigned numberOfBuckets = numberOfBucketsArgument;
static const unsigned numberOfSpecFailBuckets = 1;
static const unsigned bucketIndexMask = numberOfBuckets - 1;
static const unsigned totalNumberOfBuckets = numberOfBuckets + numberOfSpecFailBuckets;
- ValueProfile()
+ ValueProfileBase()
: m_bytecodeOffset(-1)
, m_prediction(PredictNone)
, m_numberOfSamplesInPrediction(0)
@@ -53,7 +56,7 @@ struct ValueProfile {
m_buckets[i] = JSValue::encode(JSValue());
}
- ValueProfile(int bytecodeOffset)
+ ValueProfileBase(int bytecodeOffset)
: m_bytecodeOffset(bytecodeOffset)
, m_prediction(PredictNone)
, m_numberOfSamplesInPrediction(0)
@@ -103,7 +106,6 @@ struct ValueProfile {
return false;
}
-#ifndef NDEBUG
void dump(FILE* out)
{
fprintf(out,
@@ -123,10 +125,23 @@ struct ValueProfile {
}
}
}
-#endif
// Updates the prediction and returns the new one.
- PredictedType computeUpdatedPrediction();
+ PredictedType computeUpdatedPrediction()
+ {
+ for (unsigned i = 0; i < totalNumberOfBuckets; ++i) {
+ JSValue value = JSValue::decode(m_buckets[i]);
+ if (!value)
+ continue;
+
+ m_numberOfSamplesInPrediction++;
+ mergePrediction(m_prediction, predictionFromValue(value));
+
+ m_buckets[i] = JSValue::encode(JSValue());
+ }
+
+ return m_prediction;
+ }
int m_bytecodeOffset; // -1 for prologue
@@ -136,7 +151,32 @@ struct ValueProfile {
EncodedJSValue m_buckets[totalNumberOfBuckets];
};
-inline int getValueProfileBytecodeOffset(ValueProfile* valueProfile)
+struct MinimalValueProfile : public ValueProfileBase<0> {
+ MinimalValueProfile(): ValueProfileBase<0>() { }
+ MinimalValueProfile(int bytecodeOffset): ValueProfileBase<0>(bytecodeOffset) { }
+};
+
+template<unsigned logNumberOfBucketsArgument>
+struct ValueProfileWithLogNumberOfBuckets : public ValueProfileBase<1 << logNumberOfBucketsArgument> {
+ static const unsigned logNumberOfBuckets = logNumberOfBucketsArgument;
+
+ ValueProfileWithLogNumberOfBuckets()
+ : ValueProfileBase<1 << logNumberOfBucketsArgument>()
+ {
+ }
+ ValueProfileWithLogNumberOfBuckets(int bytecodeOffset)
+ : ValueProfileBase<1 << logNumberOfBucketsArgument>(bytecodeOffset)
+ {
+ }
+};
+
+struct ValueProfile : public ValueProfileWithLogNumberOfBuckets<0> {
+ ValueProfile(): ValueProfileWithLogNumberOfBuckets<0>() { }
+ ValueProfile(int bytecodeOffset): ValueProfileWithLogNumberOfBuckets<0>(bytecodeOffset) { }
+};
+
+template<typename T>
+inline int getValueProfileBytecodeOffset(T* valueProfile)
{
return valueProfile->m_bytecodeOffset;
}
@@ -158,9 +198,10 @@ inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
{
return rareCaseProfile->m_bytecodeOffset;
}
-#endif
-}
+} // namespace JSC
+
+#endif // ENABLE(VALUE_PROFILER)
-#endif
+#endif // ValueProfile_h
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
index 437dd53b0..6fa0ce96b 100644
--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp
@@ -32,8 +32,10 @@
#include "BytecodeGenerator.h"
#include "BatchedTransitionOptimizer.h"
+#include "JSActivation.h"
#include "JSFunction.h"
#include "Interpreter.h"
+#include "LowLevelInterpreter.h"
#include "ScopeChain.h"
#include "StrongInlines.h"
#include "UString.h"
@@ -413,7 +415,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN
if (!functionBody->captures(ident))
addVar(ident, varStack[i].second & DeclarationStacks::IsConstant);
}
-
+
if (m_shouldEmitDebugHooks)
codeBlock->m_numCapturedVars = codeBlock->m_numVars;
@@ -560,19 +562,6 @@ RegisterID* BytecodeGenerator::createLazyRegisterIfNecessary(RegisterID* reg)
return reg;
}
-bool BytecodeGenerator::isLocal(const Identifier& ident)
-{
- if (ident == propertyNames().thisIdentifier)
- return true;
-
- return shouldOptimizeLocals() && symbolTable().contains(ident.impl());
-}
-
-bool BytecodeGenerator::isLocalConstant(const Identifier& ident)
-{
- return symbolTable().get(ident.impl()).isReadOnly();
-}
-
RegisterID* BytecodeGenerator::newRegister()
{
m_calleeRegisters.append(m_calleeRegisters.size());
@@ -1174,15 +1163,8 @@ ResolveResult BytecodeGenerator::resolve(const Identifier& property)
}
// Cases where we cannot statically optimize the lookup.
- if (property == propertyNames().arguments || !canOptimizeNonLocals()) {
- if (shouldOptimizeLocals() && m_codeType == GlobalCode) {
- ScopeChainIterator iter = m_scopeChain->begin();
- JSObject* globalObject = iter->get();
- ASSERT((++iter) == m_scopeChain->end());
- return ResolveResult::globalResolve(globalObject);
- } else
- return ResolveResult::dynamicResolve(0);
- }
+ if (property == propertyNames().arguments || !canOptimizeNonLocals())
+ return ResolveResult::dynamicResolve(0);
ScopeChainIterator iter = m_scopeChain->begin();
ScopeChainIterator end = m_scopeChain->end();
@@ -1207,6 +1189,10 @@ ResolveResult BytecodeGenerator::resolve(const Identifier& property)
return ResolveResult::dynamicIndexedGlobalResolve(entry.getIndex(), depth, currentScope, flags);
return ResolveResult::indexedGlobalResolve(entry.getIndex(), currentScope, flags);
}
+#if !ASSERT_DISABLED
+ if (JSActivation* activation = jsDynamicCast<JSActivation*>(currentVariableObject))
+ ASSERT(activation->isValidScopedLookup(entry.getIndex()));
+#endif
return ResolveResult::lexicalResolve(entry.getIndex(), depth, flags);
}
bool scopeRequiresDynamicChecks = false;
@@ -1293,9 +1279,7 @@ RegisterID* BytecodeGenerator::emitResolve(RegisterID* dst, const ResolveResult&
#if ENABLE(JIT)
m_codeBlock->addGlobalResolveInfo(instructions().size());
#endif
-#if ENABLE(INTERPRETER)
m_codeBlock->addGlobalResolveInstruction(instructions().size());
-#endif
bool dynamic = resolveResult.isDynamic() && resolveResult.depth();
ValueProfile* profile = emitProfiledOpcode(dynamic ? op_resolve_global_dynamic : op_resolve_global);
instructions().append(dst->index());
@@ -1387,7 +1371,7 @@ RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, Register
#if ENABLE(JIT)
m_codeBlock->addGlobalResolveInfo(instructions().size());
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
m_codeBlock->addGlobalResolveInstruction(instructions().size());
#endif
ValueProfile* profile = emitProfiledOpcode(op_resolve_global);
@@ -1399,9 +1383,6 @@ RegisterID* BytecodeGenerator::emitResolveWithBase(RegisterID* baseDst, Register
return baseDst;
}
-
-
-
ValueProfile* profile = emitProfiledOpcode(op_resolve_with_base);
instructions().append(baseDst->index());
instructions().append(propDst->index());
@@ -1509,9 +1490,7 @@ void BytecodeGenerator::emitMethodCheck()
RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
-#if ENABLE(INTERPRETER)
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
ValueProfile* profile = emitProfiledOpcode(op_get_by_id);
instructions().append(dst->index());
@@ -1537,9 +1516,7 @@ RegisterID* BytecodeGenerator::emitGetArgumentsLength(RegisterID* dst, RegisterI
RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
-#if ENABLE(INTERPRETER)
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
emitOpcode(op_put_by_id);
instructions().append(base->index());
@@ -1555,9 +1532,7 @@ RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& p
RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
-#if ENABLE(INTERPRETER)
m_codeBlock->addPropertyAccessInstruction(instructions().size());
-#endif
emitOpcode(op_put_by_id);
instructions().append(base->index());
@@ -1838,7 +1813,11 @@ RegisterID* BytecodeGenerator::emitCall(OpcodeID opcodeID, RegisterID* dst, Regi
instructions().append(func->index()); // func
instructions().append(callArguments.argumentCountIncludingThis()); // argCount
instructions().append(callArguments.registerOffset()); // registerOffset
+#if ENABLE(LLINT)
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+#else
instructions().append(0);
+#endif
instructions().append(0);
if (dst != ignoredResult()) {
ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
@@ -1942,7 +1921,11 @@ RegisterID* BytecodeGenerator::emitConstruct(RegisterID* dst, RegisterID* func,
instructions().append(func->index()); // func
instructions().append(callArguments.argumentCountIncludingThis()); // argCount
instructions().append(callArguments.registerOffset()); // registerOffset
+#if ENABLE(LLINT)
+ instructions().append(m_codeBlock->addLLIntCallLinkInfo());
+#else
instructions().append(0);
+#endif
instructions().append(0);
if (dst != ignoredResult()) {
ValueProfile* profile = emitProfiledOpcode(op_call_put_result);
@@ -2203,7 +2186,11 @@ RegisterID* BytecodeGenerator::emitCatch(RegisterID* targetRegister, Label* star
{
m_usesExceptions = true;
#if ENABLE(JIT)
+#if ENABLE(LLINT)
+ HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(bitwise_cast<void*>(&llint_op_catch))) };
+#else
HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth, CodeLocationLabel() };
+#endif
#else
HandlerInfo info = { start->bind(0, 0), end->bind(0, 0), instructions().size(), m_dynamicScopeDepth + m_baseScopeDepth };
#endif
diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
index c9ec5d852..d61b42b76 100644
--- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
+++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h
@@ -262,9 +262,6 @@ namespace JSC {
// Returns the register storing "this"
RegisterID* thisRegister() { return &m_thisRegister; }
- bool isLocal(const Identifier&);
- bool isLocalConstant(const Identifier&);
-
// Returns the next available temporary register. Registers returned by
// newTemporary require a modified form of reference counting: any
// register with a refcount of 0 is considered "available", meaning that
diff --git a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
index 46ec698de..2d4181912 100644
--- a/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
+++ b/Source/JavaScriptCore/bytecompiler/NodesCodegen.cpp
@@ -142,7 +142,7 @@ RegisterID* ThisNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst
bool ResolveNode::isPure(BytecodeGenerator& generator) const
{
- return generator.isLocal(m_ident);
+ return generator.resolve(m_ident).isStatic();
}
RegisterID* ResolveNode::emitBytecode(BytecodeGenerator& generator, RegisterID* dst)
@@ -255,7 +255,7 @@ RegisterID* PropertyListNode::emitBytecode(BytecodeGenerator& generator, Registe
if (node->m_type == PropertyNode::Constant)
continue;
- GetterSetterPair pair(node, 0);
+ GetterSetterPair pair(node, static_cast<PropertyNode*>(0));
std::pair<GetterSetterMap::iterator, bool> result = map.add(node->name().impl(), pair);
if (!result.second)
result.first->second.second = node;
@@ -792,7 +792,7 @@ RegisterID* PrefixResolveNode::emitBytecode(BytecodeGenerator& generator, Regist
{
ResolveResult resolveResult = generator.resolve(m_ident);
if (RegisterID* local = resolveResult.local()) {
- if (generator.isLocalConstant(m_ident)) {
+ if (resolveResult.isReadOnly()) {
if (dst == generator.ignoredResult())
return 0;
RefPtr<RegisterID> r0 = generator.emitLoad(generator.finalDestination(dst), (m_operator == OpPlusPlus) ? 1.0 : -1.0);
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index 72c1759c7..ee0cc9ab7 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -48,10 +48,10 @@ namespace JSC { namespace DFG {
#define FLAG_FOR_MERGE_TO_SUCCESSORS 20
#define FLAG_FOR_STRUCTURE_CLOBBERING 21
-AbstractState::AbstractState(CodeBlock* codeBlock, Graph& graph)
- : m_codeBlock(codeBlock)
+AbstractState::AbstractState(Graph& graph)
+ : m_codeBlock(graph.m_codeBlock)
, m_graph(graph)
- , m_variables(codeBlock->numParameters(), graph.m_localVars)
+ , m_variables(m_codeBlock->numParameters(), graph.m_localVars)
, m_block(0)
{
size_t maxBlockSize = 0;
@@ -104,7 +104,16 @@ void AbstractState::initialize(Graph& graph)
BasicBlock* root = graph.m_blocks[0].get();
root->cfaShouldRevisit = true;
for (size_t i = 0; i < root->valuesAtHead.numberOfArguments(); ++i) {
- PredictedType prediction = graph[root->variablesAtHead.argument(i)].variableAccessData()->prediction();
+ Node& node = graph[root->variablesAtHead.argument(i)];
+ ASSERT(node.op == SetArgument);
+ if (!node.shouldGenerate()) {
+ // The argument is dead. We don't do any checks for such arguments, and so
+ // for the purpose of the analysis, they contain no value.
+ root->valuesAtHead.argument(i).clear();
+ continue;
+ }
+
+ PredictedType prediction = node.variableAccessData()->prediction();
if (isInt32Prediction(prediction))
root->valuesAtHead.argument(i).set(PredictInt32);
else if (isArrayPrediction(prediction))
@@ -153,14 +162,14 @@ bool AbstractState::endBasicBlock(MergeMode mergeMode)
if (mergeMode != DontMerge || !ASSERT_DISABLED) {
for (size_t argument = 0; argument < block->variablesAtTail.numberOfArguments(); ++argument) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Merging state for argument %zu.\n", argument);
+ dataLog(" Merging state for argument %zu.\n", argument);
#endif
changed |= mergeStateAtTail(block->valuesAtTail.argument(argument), m_variables.argument(argument), block->variablesAtTail.argument(argument));
}
for (size_t local = 0; local < block->variablesAtTail.numberOfLocals(); ++local) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Merging state for local %zu.\n", local);
+ dataLog(" Merging state for local %zu.\n", local);
#endif
changed |= mergeStateAtTail(block->valuesAtTail.local(local), m_variables.local(local), block->variablesAtTail.local(local));
}
@@ -196,10 +205,13 @@ bool AbstractState::execute(NodeIndex nodeIndex)
switch (node.op) {
case JSConstant:
case WeakJSConstant: {
- JSValue value = m_graph.valueOfJSConstant(m_codeBlock, nodeIndex);
- if (value.isCell())
- m_haveStructures = true;
- forNode(nodeIndex).set(value);
+ JSValue value = m_graph.valueOfJSConstant(nodeIndex);
+ // Have to be careful here! It's tempting to call set(value), but
+ // that would be wrong, since that would constitute a proof that this
+ // value will always have the same structure. The whole point of a value
+ // having a structure is that it may change in the future - for example
+ // between when we compile the code and when we run it.
+ forNode(nodeIndex).set(predictionFromValue(value));
break;
}
@@ -264,7 +276,7 @@ bool AbstractState::execute(NodeIndex nodeIndex)
case ValueAdd:
case ArithAdd: {
- if (m_graph.addShouldSpeculateInteger(node, m_codeBlock)) {
+ if (m_graph.addShouldSpeculateInteger(node)) {
forNode(node.child1()).filter(PredictInt32);
forNode(node.child2()).filter(PredictInt32);
forNode(nodeIndex).set(PredictInt32);
@@ -276,14 +288,18 @@ bool AbstractState::execute(NodeIndex nodeIndex)
forNode(nodeIndex).set(PredictDouble);
break;
}
- ASSERT(node.op == ValueAdd);
- clobberStructures(nodeIndex);
- forNode(nodeIndex).set(PredictString | PredictInt32 | PredictNumber);
+ if (node.op == ValueAdd) {
+ clobberStructures(nodeIndex);
+ forNode(nodeIndex).set(PredictString | PredictInt32 | PredictNumber);
+ break;
+ }
+ // We don't handle this yet. :-(
+ m_isValid = false;
break;
}
case ArithSub: {
- if (m_graph.addShouldSpeculateInteger(node, m_codeBlock)) {
+ if (m_graph.addShouldSpeculateInteger(node)) {
forNode(node.child1()).filter(PredictInt32);
forNode(node.child2()).filter(PredictInt32);
forNode(nodeIndex).set(PredictInt32);
@@ -937,7 +953,7 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
return false;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" It's live, node @%u.\n", nodeIndex);
+ dataLog(" It's live, node @%u.\n", nodeIndex);
#endif
switch (node.op) {
@@ -947,7 +963,7 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
// The block transfers the value from head to tail.
source = &inVariable;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Transfering from head to tail.\n");
+ dataLog(" Transfering from head to tail.\n");
#endif
break;
@@ -955,7 +971,7 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
// The block refines the value with additional speculations.
source = &forNode(nodeIndex);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Refining.\n");
+ dataLog(" Refining.\n");
#endif
break;
@@ -964,7 +980,7 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
// before and after setting it.
source = &forNode(node.child1());
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Setting.\n");
+ dataLog(" Setting.\n");
#endif
break;
@@ -978,7 +994,7 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
// Abstract execution did not change the output value of the variable, for this
// basic block, on this iteration.
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Not changed!\n");
+ dataLog(" Not changed!\n");
#endif
return false;
}
@@ -988,7 +1004,7 @@ inline bool AbstractState::mergeStateAtTail(AbstractValue& destination, Abstract
// true to indicate that the fixpoint must go on!
destination = *source;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Changed!\n");
+ dataLog(" Changed!\n");
#endif
return true;
}
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.h b/Source/JavaScriptCore/dfg/DFGAbstractState.h
index 015563485..256e7495f 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.h
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.h
@@ -92,7 +92,7 @@ public:
MergeToSuccessors
};
- AbstractState(CodeBlock*, Graph&);
+ AbstractState(Graph&);
~AbstractState();
diff --git a/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp b/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp
new file mode 100644
index 000000000..f55533a61
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGArithNodeFlagsInferencePhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGGraph.h"
+#include "DFGPhase.h"
+
+namespace JSC { namespace DFG {
+
+class ArithNodeFlagsInferencePhase : public Phase {
+public:
+ ArithNodeFlagsInferencePhase(Graph& graph)
+ : Phase(graph, "arithmetic node flags inference")
+ {
+ }
+
+ void run()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ m_count = 0;
+#endif
+ do {
+ m_changed = false;
+
+ // Up here we start with a backward pass because we suspect that to be
+ // more profitable.
+ propagateBackward();
+ if (!m_changed)
+ break;
+
+ m_changed = false;
+ propagateForward();
+ } while (m_changed);
+ }
+
+private:
+ bool isNotNegZero(NodeIndex nodeIndex)
+ {
+ if (!m_graph.isNumberConstant(nodeIndex))
+ return false;
+ double value = m_graph.valueOfNumberConstant(nodeIndex);
+ return !value && 1.0 / value < 0.0;
+ }
+
+ bool isNotZero(NodeIndex nodeIndex)
+ {
+ if (!m_graph.isNumberConstant(nodeIndex))
+ return false;
+ return !!m_graph.valueOfNumberConstant(nodeIndex);
+ }
+
+ void propagate(Node& node)
+ {
+ if (!node.shouldGenerate())
+ return;
+
+ NodeType op = node.op;
+ ArithNodeFlags flags = 0;
+
+ if (node.hasArithNodeFlags())
+ flags = node.rawArithNodeFlags();
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" %s @%u: %s ", Graph::opName(op), m_compileIndex, arithNodeFlagsAsString(flags));
+#endif
+
+ flags &= NodeUsedAsMask;
+
+ bool changed = false;
+
+ switch (op) {
+ case ValueToInt32:
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitLShift:
+ case BitRShift:
+ case BitURShift: {
+ // These operations are perfectly happy with truncated integers,
+ // so we don't want to propagate anything.
+ break;
+ }
+
+ case UInt32ToNumber: {
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithAdd:
+ case ValueAdd: {
+ if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
+ flags &= ~NodeNeedsNegZero;
+
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithSub: {
+ if (isNotZero(node.child1().index()) || isNotZero(node.child2().index()))
+ flags &= ~NodeNeedsNegZero;
+
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithMul:
+ case ArithDiv: {
+ // As soon as a multiply happens, we can easily end up in the part
+ // of the double domain where the point at which you do truncation
+ // can change the outcome. So, ArithMul always checks for overflow
+ // no matter what, and always forces its inputs to check as well.
+
+ flags |= NodeUsedAsNumber | NodeNeedsNegZero;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithMin:
+ case ArithMax: {
+ flags |= NodeUsedAsNumber;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case ArithAbs: {
+ flags &= ~NodeNeedsNegZero;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ break;
+ }
+
+ case PutByVal: {
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
+ changed |= m_graph[node.child3()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
+ break;
+ }
+
+ case GetByVal: {
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
+ break;
+ }
+
+ default:
+ flags |= NodeUsedAsNumber | NodeNeedsNegZero;
+ if (op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ changed |= m_graph[m_graph.m_varArgChildren[childIdx]].mergeArithNodeFlags(flags);
+ } else {
+ if (!node.child1())
+ break;
+ changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
+ if (!node.child2())
+ break;
+ changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
+ if (!node.child3())
+ break;
+ changed |= m_graph[node.child3()].mergeArithNodeFlags(flags);
+ }
+ break;
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("%s\n", changed ? "CHANGED" : "");
+#endif
+
+ m_changed |= changed;
+ }
+
+ void propagateForward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Propagating arithmetic node flags forward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
+ propagate(m_graph[m_compileIndex]);
+ }
+
+ void propagateBackward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Propagating arithmetic node flags backward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
+ propagate(m_graph[m_compileIndex]);
+ }
+
+ NodeIndex m_compileIndex;
+ bool m_changed;
+};
+
+void performArithNodeFlagsInference(Graph& graph)
+{
+ runPhase<ArithNodeFlagsInferencePhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h b/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h
new file mode 100644
index 000000000..64546e253
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGArithNodeFlagsInferencePhase.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGArithNodeFlagsInferencePhase_h
+#define DFGArithNodeFlagsInferencePhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// Determine which arithmetic nodes' results are only used in a context that
+// truncates to integer anyway. This is great for optimizing away checks for
+// overflow and negative zero. NB the way this phase integrates into the rest
+// of the DFG makes it non-optional. Instead of proving that a node is only
+// used in integer context, it actually does the opposite: finds nodes that
+// are used in non-integer contexts. Hence failing to run this phase will make
+// the compiler assume that all nodes are just used as integers!
+
+void performArithNodeFlagsInference(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGArithNodeFlagsInferencePhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
index 0d7dd3a27..00a226d4c 100644
--- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h
@@ -287,16 +287,6 @@ public:
return codeOrigin.inlineCallFrame->callee->jsExecutable()->isStrictMode();
}
- static CodeBlock* baselineCodeBlockForOriginAndBaselineCodeBlock(const CodeOrigin& codeOrigin, CodeBlock* baselineCodeBlock)
- {
- if (codeOrigin.inlineCallFrame) {
- ExecutableBase* executable = codeOrigin.inlineCallFrame->executable.get();
- ASSERT(executable->structure()->classInfo() == &FunctionExecutable::s_info);
- return static_cast<FunctionExecutable*>(executable)->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct);
- }
- return baselineCodeBlock;
- }
-
CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
{
return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 2a5d249b3..0e575db4e 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -45,10 +45,10 @@ namespace JSC { namespace DFG {
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
- ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock, Graph& graph)
- : m_globalData(globalData)
- , m_codeBlock(codeBlock)
- , m_profiledBlock(profiledBlock)
+ ByteCodeParser(Graph& graph)
+ : m_globalData(&graph.m_globalData)
+ , m_codeBlock(graph.m_codeBlock)
+ , m_profiledBlock(graph.m_profiledBlock)
, m_graph(graph)
, m_currentBlock(0)
, m_currentIndex(0)
@@ -57,10 +57,10 @@ public:
, m_constantNull(UINT_MAX)
, m_constantNaN(UINT_MAX)
, m_constant1(UINT_MAX)
- , m_constants(codeBlock->numberOfConstantRegisters())
- , m_numArguments(codeBlock->numParameters())
- , m_numLocals(codeBlock->m_numCalleeRegisters)
- , m_preservedVars(codeBlock->m_numVars)
+ , m_constants(m_codeBlock->numberOfConstantRegisters())
+ , m_numArguments(m_codeBlock->numParameters())
+ , m_numLocals(m_codeBlock->m_numCalleeRegisters)
+ , m_preservedVars(m_codeBlock->m_numVars)
, m_parameterSlots(0)
, m_numPassedVarArgs(0)
, m_globalResolveNumber(0)
@@ -69,7 +69,7 @@ public:
{
ASSERT(m_profiledBlock);
- for (int i = 0; i < codeBlock->m_numVars; ++i)
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
m_preservedVars.set(i);
}
@@ -108,6 +108,8 @@ private:
};
template<PhiStackType stackType>
void processPhiStack();
+
+ void fixVariableAccessPredictions();
// Add spill locations to nodes.
void allocateVirtualRegisters();
@@ -155,6 +157,22 @@ private:
{
setDirect(m_inlineStackTop->remapOperand(operand), value);
}
+
+ NodeIndex injectLazyOperandPrediction(NodeIndex nodeIndex)
+ {
+ Node& node = m_graph[nodeIndex];
+ ASSERT(node.op == GetLocal);
+ ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
+ PredictedType prediction =
+ m_inlineStackTop->m_lazyOperands.prediction(
+ LazyOperandValueProfileKey(m_currentIndex, node.local()));
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
+ nodeIndex, m_currentIndex, node.local(), predictionToString(prediction));
+#endif
+ node.variableAccessData()->predict(prediction);
+ return nodeIndex;
+ }
// Used in implementing get/set, above, where the operand is a local variable.
NodeIndex getLocal(unsigned operand)
@@ -170,12 +188,13 @@ private:
Node& flushChild = m_graph[nodePtr->child1()];
if (flushChild.op == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index());
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index()));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
return nodeIndex;
}
nodePtr = &flushChild;
}
+ ASSERT(nodePtr->op != Flush);
if (nodePtr->op == GetLocal)
return nodeIndex;
ASSERT(nodePtr->op == SetLocal);
@@ -190,7 +209,7 @@ private:
NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);
@@ -219,17 +238,20 @@ private:
Node& flushChild = m_graph[nodePtr->child1()];
if (flushChild.op == Phi) {
VariableAccessData* variableAccessData = flushChild.variableAccessData();
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index());
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodePtr->child1().index()));
m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
return nodeIndex;
}
nodePtr = &flushChild;
}
+
+ ASSERT(nodePtr->op != Flush);
+
if (nodePtr->op == SetArgument) {
// We're getting an argument in the first basic block; link
// the GetLocal to the SetArgument.
ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
- nodeIndex = addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex);
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
return nodeIndex;
}
@@ -245,7 +267,7 @@ private:
NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
- nodeIndex = addToGraph(GetLocal, OpInfo(variableAccessData), phi);
+ nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);
@@ -282,13 +304,10 @@ private:
if (nodeIndex != NoNode) {
Node& node = m_graph[nodeIndex];
- if (node.op == Flush || node.op == SetArgument) {
- // If a local has already been flushed, or if it's an argument in the
- // first basic block, then there is really no need to flush it. In fact
- // emitting a Flush instruction could just confuse things, since the
- // getArgument() code assumes that we never see a Flush of a SetArgument.
- return;
- }
+ if (node.op == Flush)
+ nodeIndex = node.child1().index();
+
+ ASSERT(m_graph[nodeIndex].op != Flush);
addToGraph(Flush, OpInfo(node.variableAccessData()), nodeIndex);
return;
@@ -585,11 +604,9 @@ private:
{
UNUSED_PARAM(nodeIndex);
- ValueProfile* profile = m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(bytecodeIndex);
- ASSERT(profile);
- PredictedType prediction = profile->computeUpdatedPrediction();
+ PredictedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
+ dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif
return prediction;
@@ -626,7 +643,7 @@ private:
return nodeIndex;
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+ dataLog("Making %s @%u safe at bc#%u because slow-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
switch (m_graph[nodeIndex].op) {
@@ -642,13 +659,13 @@ private:
if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
+ dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
m_graph[nodeIndex].mergeArithNodeFlags(NodeMayOverflow | NodeMayNegZero);
} else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making ArithMul @%u take faster slow case.\n", nodeIndex);
+ dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
m_graph[nodeIndex].mergeArithNodeFlags(NodeMayNegZero);
}
@@ -678,7 +695,7 @@ private:
return nodeIndex;
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
+ dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
// FIXME: It might be possible to make this more granular. The DFG certainly can
@@ -850,6 +867,11 @@ private:
VirtualRegister m_returnValue;
+ // Predictions about variable types collected from the profiled code block,
+ // which are based on OSR exit profiles that past DFG compilations of this
+ // code block had gathered.
+ LazyOperandValueProfileParser m_lazyOperands;
+
// Did we see any returns? We need to handle the (uncommon but necessary)
// case where a procedure that does not return was inlined.
bool m_didReturn;
@@ -912,13 +934,13 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for call at @%zu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ dataLog("Slow case count for call at @%zu bc#%u: %u/%u; exit profile: %d.\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_profiledBlock->executionEntryCount(), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
m_inlineStackTop->m_profiledBlock, m_currentIndex);
- if (m_graph.isFunctionConstant(m_codeBlock, callTarget))
+ if (m_graph.isFunctionConstant(callTarget))
callType = ConstantFunction;
else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
@@ -946,7 +968,7 @@ void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentIn
Intrinsic intrinsic;
bool certainAboutExpectedFunction;
if (callType == ConstantFunction) {
- expectedFunction = m_graph.valueOfFunctionConstant(m_codeBlock, callTarget);
+ expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
intrinsic = expectedFunction->executable()->intrinsicFor(kind);
certainAboutExpectedFunction = true;
} else {
@@ -1022,6 +1044,9 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// If we get here then it looks like we should definitely inline this code. Proceed
// with parsing the code to get bytecode, so that we can then parse the bytecode.
+ // Note that if LLInt is enabled, the bytecode will always be available. Also note
+ // that if LLInt is enabled, we may inline a code block that has never been JITted
+ // before!
CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
if (!codeBlock)
return false;
@@ -1029,7 +1054,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
ASSERT(canInlineFunctionFor(codeBlock, kind));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Inlining executable %p.\n", executable);
+ dataLog("Inlining executable %p.\n", executable);
#endif
// Now we know without a doubt that we are committed to inlining. So begin the process
@@ -1110,7 +1135,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// caller. It doesn't need to be linked to, but it needs outgoing links.
if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
+ dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
// For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
// for release builds because this block will never serve as a potential target
@@ -1122,7 +1147,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
m_currentBlock = m_graph.m_blocks.last().get();
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
+ dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
return true;
}
@@ -1149,7 +1174,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// Need to create a new basic block for the continuation at the caller.
OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
m_currentBlock = block.get();
ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
@@ -1161,7 +1186,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c
// At this point we return and continue to generate code for the caller, but
// in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Done inlining executable %p, continuing code generation in new block.\n", executable);
+ dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
return true;
}
@@ -1343,7 +1368,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
addToGraph(Jump, OpInfo(m_currentIndex));
else {
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
+ dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
#endif
}
return shouldContinueParsing;
@@ -1722,7 +1747,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_inlineStackTop->m_profiledBlock, m_currentIndex);
if (methodCallStatus.isSet()
- && !getByIdStatus.isSet()
+ && !getByIdStatus.wasSeenInJIT()
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
// It's monomorphic as far as we can tell, since the method_check was linked
// but the slow path (i.e. the normal get_by_id) never fired.
@@ -1767,7 +1792,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for GetById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ dataLog("Slow case count for GetById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
if (getByIdStatus.isSimpleDirect()
@@ -1791,7 +1816,9 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NEXT_OPCODE(op_get_by_id);
}
- case op_put_by_id: {
+ case op_put_by_id:
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal: {
NodeIndex value = get(currentInstruction[3].u.operand);
NodeIndex base = get(currentInstruction[1].u.operand);
unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
@@ -1807,7 +1834,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Slow case count for PutById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
+ dataLog("Slow case count for PutById @%zu bc#%u: %u; exit profile: %d\n", m_graph.size(), m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
#endif
if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
@@ -2186,12 +2213,12 @@ void ByteCodeParser::processPhiStack()
VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
+ dataLog(" Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
#endif
for (size_t i = 0; i < predecessors.size(); ++i) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Dealing with predecessor block %u.\n", predecessors[i]);
+ dataLog(" Dealing with predecessor block %u.\n", predecessors[i]);
#endif
BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
@@ -2201,7 +2228,7 @@ void ByteCodeParser::processPhiStack()
NodeIndex valueInPredecessor = var;
if (valueInPredecessor == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Did not find node, adding phi.\n");
+ dataLog(" Did not find node, adding phi.\n");
#endif
valueInPredecessor = addToGraph(Phi, OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))));
@@ -2213,7 +2240,7 @@ void ByteCodeParser::processPhiStack()
phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
} else if (m_graph[valueInPredecessor].op == GetLocal) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Found GetLocal @%u.\n", valueInPredecessor);
+ dataLog(" Found GetLocal @%u.\n", valueInPredecessor);
#endif
// We want to ensure that the VariableAccessDatas are identical between the
@@ -2225,7 +2252,7 @@ void ByteCodeParser::processPhiStack()
valueInPredecessor = m_graph[valueInPredecessor].child1().index();
} else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Found @%u.\n", valueInPredecessor);
+ dataLog(" Found @%u.\n", valueInPredecessor);
#endif
}
ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi || m_graph[valueInPredecessor].op == Flush || (m_graph[valueInPredecessor].op == SetArgument && stackType == ArgumentPhiStack));
@@ -2236,48 +2263,48 @@ void ByteCodeParser::processPhiStack()
Node* phiNode = &m_graph[entry.m_phi];
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
+ dataLog(" Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
#endif
if (phiNode->refCount()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Reffing @%u.\n", valueInPredecessor);
+ dataLog(" Reffing @%u.\n", valueInPredecessor);
#endif
m_graph.ref(valueInPredecessor);
}
if (!phiNode->child1()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
+ dataLog(" Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
phiNode->children.setChild1(NodeUse(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
continue;
}
if (!phiNode->child2()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
+ dataLog(" Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
phiNode->children.setChild2(NodeUse(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
continue;
}
if (!phiNode->child3()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
+ dataLog(" Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
phiNode->children.setChild3(NodeUse(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
continue;
}
@@ -2285,7 +2312,7 @@ void ByteCodeParser::processPhiStack()
NodeIndex newPhi = addToGraph(Phi, OpInfo(dataForPhi));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
+ dataLog(" Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
#endif
phiNode = &m_graph[entry.m_phi]; // reload after vector resize
@@ -2296,22 +2323,30 @@ void ByteCodeParser::processPhiStack()
newPhiNode.children = phiNode->children;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", newPhi);
- newPhiNode.dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", newPhi);
+ newPhiNode.dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Children of @%u: ", entry.m_phi);
- phiNode->dumpChildren(stdout);
- printf(".\n");
+ dataLog(" Children of @%u: ", entry.m_phi);
+ phiNode->dumpChildren(WTF::dataFile());
+ dataLog(".\n");
#endif
}
}
}
+void ByteCodeParser::fixVariableAccessPredictions()
+{
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
+ VariableAccessData* data = &m_graph.m_variableAccessData[i];
+ data->find()->predict(data->nonUnifiedPrediction());
+ }
+}
+
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
{
ASSERT(block->end != NoNode);
@@ -2324,7 +2359,7 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTa
case Jump:
node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
+ dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
#endif
break;
@@ -2332,13 +2367,13 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTa
node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
+ dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
#endif
break;
default:
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Marking basic block %p as linked.\n", block);
+ dataLog("Marking basic block %p as linked.\n", block);
#endif
break;
}
@@ -2414,6 +2449,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParse
, m_exitProfile(profiledBlock->exitProfile())
, m_callsiteBlockHead(callsiteBlockHead)
, m_returnValue(returnValueVR)
+ , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
, m_didReturn(false)
, m_didEarlyReturn(false)
, m_caller(byteCodeParser->m_inlineStackTop)
@@ -2495,7 +2531,7 @@ void ByteCodeParser::parseCodeBlock()
// The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ dataLog("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
ASSERT(m_currentIndex < limit);
@@ -2517,13 +2553,13 @@ void ByteCodeParser::parseCodeBlock()
// Change its bytecode begin and continue.
m_currentBlock = m_graph.m_blocks.last().get();
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
+ dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
#endif
m_currentBlock->bytecodeBegin = m_currentIndex;
} else {
OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
+ dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
m_currentBlock = block.get();
ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
@@ -2571,17 +2607,19 @@ bool ByteCodeParser::parse()
linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
determineReachability();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Processing local variable phis.\n");
+ dataLog("Processing local variable phis.\n");
#endif
m_currentProfilingIndex = m_currentIndex;
processPhiStack<LocalPhiStack>();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Processing argument phis.\n");
+ dataLog("Processing argument phis.\n");
#endif
processPhiStack<ArgumentPhiStack>();
+ fixVariableAccessPredictions();
+
m_graph.m_preservedVars = m_preservedVars;
m_graph.m_localVars = m_numLocals;
m_graph.m_parameterSlots = m_parameterSlots;
@@ -2589,15 +2627,13 @@ bool ByteCodeParser::parse()
return true;
}
-bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
+bool parse(Graph& graph)
{
#if DFG_DEBUG_LOCAL_DISBALE
UNUSED_PARAM(graph);
- UNUSED_PARAM(globalData);
- UNUSED_PARAM(codeBlock);
return false;
#else
- return ByteCodeParser(globalData, codeBlock, codeBlock->alternative(), graph).parse();
+ return ByteCodeParser(graph).parse();
#endif
}
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
index d4efe61db..558cf0167 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.h
@@ -39,7 +39,7 @@ namespace DFG {
// Populate the Graph with a basic block of code from the CodeBlock,
// starting at the provided bytecode index.
-bool parse(Graph&, JSGlobalData*, CodeBlock*);
+bool parse(Graph&);
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
new file mode 100644
index 000000000..ac1e26c19
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGCFAPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGAbstractState.h"
+#include "DFGGraph.h"
+#include "DFGPhase.h"
+
+namespace JSC { namespace DFG {
+
+class CFAPhase : public Phase {
+public:
+ CFAPhase(Graph& graph)
+ : Phase(graph, "control flow analysis")
+ , m_state(graph)
+ {
+ }
+
+ void run()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ m_count = 0;
+#endif
+
+ // This implements a pseudo-worklist-based forward CFA, except that the visit order
+ // of blocks is the bytecode program order (which is nearly topological), and
+ // instead of a worklist we just walk all basic blocks checking if cfaShouldRevisit
+ // is set to true. This is likely to balance the efficiency properties of both
+ // worklist-based and forward fixpoint-based approaches. Like a worklist-based
+ // approach, it won't visit code if it's meaningless to do so (nothing changed at
+ // the head of the block or the predecessors have not been visited). Like a forward
+ // fixpoint-based approach, it has a high probability of only visiting a block
+ // after all predecessors have been visited. Only loops will cause this analysis to
+ // revisit blocks, and the amount of revisiting is proportional to loop depth.
+
+ AbstractState::initialize(m_graph);
+
+ do {
+ m_changed = false;
+ performForwardCFA();
+ } while (m_changed);
+ }
+
+private:
+ void performBlockCFA(BlockIndex blockIndex)
+ {
+ BasicBlock* block = m_graph.m_blocks[blockIndex].get();
+ if (!block->cfaShouldRevisit)
+ return;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" Block #%u (bc#%u):\n", blockIndex, block->bytecodeBegin);
+#endif
+ m_state.beginBasicBlock(block);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" head vars: ");
+ dumpOperands(block->valuesAtHead, WTF::dataFile());
+ dataLog("\n");
+#endif
+ for (NodeIndex nodeIndex = block->begin; nodeIndex < block->end; ++nodeIndex) {
+ if (!m_graph[nodeIndex].shouldGenerate())
+ continue;
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" %s @%u: ", Graph::opName(m_graph[nodeIndex].op), nodeIndex);
+ m_state.dump(WTF::dataFile());
+ dataLog("\n");
+#endif
+ if (!m_state.execute(nodeIndex))
+ break;
+ }
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" tail regs: ");
+ m_state.dump(WTF::dataFile());
+ dataLog("\n");
+#endif
+ m_changed |= m_state.endBasicBlock(AbstractState::MergeToSuccessors);
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" tail vars: ");
+ dumpOperands(block->valuesAtTail, WTF::dataFile());
+ dataLog("\n");
+#endif
+ }
+
+ void performForwardCFA()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("CFA [%u]\n", ++m_count);
+#endif
+
+ for (BlockIndex block = 0; block < m_graph.m_blocks.size(); ++block)
+ performBlockCFA(block);
+ }
+
+private:
+ AbstractState m_state;
+
+ bool m_changed;
+};
+
// Entry point for the CFA phase: constructs a CFAPhase over the graph and
// runs it (runPhase handles the shared phase setup/teardown and logging).
void performCFA(Graph& graph)
{
    runPhase<CFAPhase>(graph);
}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.h b/Source/JavaScriptCore/dfg/DFGCFAPhase.h
new file mode 100644
index 000000000..2b626c81f
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCFAPhase_h
+#define DFGCFAPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// Global control flow analysis. This phase transforms the combination of type
+// predictions and type guards into type proofs, and flows them globally within
+// the code block. It's also responsible for identifying dead code, and in the
+// future should be used as a hook for constant propagation.
+
+void performCFA(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGCFAPhase_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
new file mode 100644
index 000000000..a3c27ebc1
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
@@ -0,0 +1,733 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGCSEPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGGraph.h"
+#include "DFGPhase.h"
+
+namespace JSC { namespace DFG {
+
// Block-local common subexpression elimination over the DFG. A node that
// recomputes the value of an earlier node in the same block is turned into a
// Phantom, and all later references to it are redirected (via m_replacements)
// to the earlier node. Heap-reading nodes are only matched when no intervening
// node can clobber the location they read.
class CSEPhase : public Phase {
public:
    CSEPhase(Graph& graph)
        : Phase(graph, "common subexpression elimination")
    {
        // Replacements are used to implement local common subexpression elimination.
        m_replacements.resize(m_graph.size());
        
        for (unsigned i = 0; i < m_graph.size(); ++i)
            m_replacements[i] = NoNode;
        
        for (unsigned i = 0; i < LastNodeId; ++i)
            m_lastSeen[i] = NoNode;
    }
    
    void run()
    {
        // CSE is local: each basic block is processed independently.
        for (unsigned block = 0; block < m_graph.m_blocks.size(); ++block)
            performBlockCSE(*m_graph.m_blocks[block]);
    }
    
private:
    
    // Strips a ValueToInt32 wrapper so a node and its int32-converted form
    // are treated as the same value when matching children.
    NodeIndex canonicalize(NodeIndex nodeIndex)
    {
        if (nodeIndex == NoNode)
            return NoNode;
        
        if (m_graph[nodeIndex].op == ValueToInt32)
            nodeIndex = m_graph[nodeIndex].child1().index();
        
        return nodeIndex;
    }
    NodeIndex canonicalize(NodeUse nodeUse)
    {
        return canonicalize(nodeUse.indexUnchecked());
    }
    
    // Computes where the search for a candidate for CSE should start. Don't call
    // this directly; call startIndex() instead as it does logging in debug mode.
    NodeIndex computeStartIndexForChildren(NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        // Cap the lookback window to bound compile time in huge blocks.
        const unsigned limit = 300;
        
        NodeIndex start = m_start;
        if (m_compileIndex - start > limit)
            start = m_compileIndex - limit;
        
        ASSERT(start >= m_start);
        
        // A matching node cannot precede any of its own operands, so raise
        // the start to the latest child. A NoNode child means the remaining
        // child slots are also unused, so we can return early.
        NodeIndex child = canonicalize(child1);
        if (child == NoNode)
            return start;
        
        if (start < child)
            start = child;
        
        child = canonicalize(child2);
        if (child == NoNode)
            return start;
        
        if (start < child)
            start = child;
        
        child = canonicalize(child3);
        if (child == NoNode)
            return start;
        
        if (start < child)
            start = child;
        
        return start;
    }
    
    // Logging wrapper around computeStartIndexForChildren().
    NodeIndex startIndexForChildren(NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex result = computeStartIndexForChildren(child1, child2, child3);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" lookback %u: ", result);
#endif
        return result;
    }
    
    // Start index derived from the children of the node currently being compiled.
    NodeIndex startIndex()
    {
        Node& node = m_graph[m_compileIndex];
        return startIndexForChildren(
            node.child1().indexUnchecked(),
            node.child2().indexUnchecked(),
            node.child3().indexUnchecked());
    }
    
    // For pure nodes we only need to search back as far as the last node with
    // the same opcode id, since any earlier identical node would itself have
    // been recorded in m_lastSeen.
    NodeIndex endIndexForPureCSE()
    {
        NodeIndex result = m_lastSeen[m_graph[m_compileIndex].op & NodeIdMask];
        if (result == NoNode)
            result = 0;
        else
            result++;
        ASSERT(result <= m_compileIndex);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" limit %u: ", result);
#endif
        return result;
    }
    
    // Finds an earlier node with the same opcode, arith flags, and (canonicalized)
    // children as 'node'. Safe only for nodes with no side effects and no heap
    // dependencies. Returns NoNode if there is no match in the window.
    NodeIndex pureCSE(Node& node)
    {
        NodeIndex child1 = canonicalize(node.child1());
        NodeIndex child2 = canonicalize(node.child2());
        NodeIndex child3 = canonicalize(node.child3());
        
        NodeIndex start = startIndex();
        for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
            Node& otherNode = m_graph[index];
            if (node.op != otherNode.op)
                continue;
            
            if (node.arithNodeFlagsForCompare() != otherNode.arithNodeFlagsForCompare())
                continue;
            
            // A NoNode child means no further child slots are used, so the
            // nodes match on all (used) children.
            NodeIndex otherChild = canonicalize(otherNode.child1());
            if (otherChild == NoNode)
                return index;
            if (otherChild != child1)
                continue;
            
            otherChild = canonicalize(otherNode.child2());
            if (otherChild == NoNode)
                return index;
            if (otherChild != child2)
                continue;
            
            otherChild = canonicalize(otherNode.child3());
            if (otherChild == NoNode)
                return index;
            if (otherChild != child3)
                continue;
            
            return index;
        }
        return NoNode;
    }
    
    // True if both operands are predicted to be numbers, which makes the
    // arithmetic/comparison nodes below effect-free.
    bool isPredictedNumerical(Node& node)
    {
        PredictedType left = m_graph[node.child1()].prediction();
        PredictedType right = m_graph[node.child2()].prediction();
        return isNumberPrediction(left) && isNumberPrediction(right);
    }
    
    // LogicalNot is pure when its operand is predicted boolean (or has no
    // prediction at all).
    bool logicalNotIsPure(Node& node)
    {
        PredictedType prediction = m_graph[node.child1()].prediction();
        return isBooleanPrediction(prediction) || !prediction;
    }
    
    // An indexed access is pure when the index is speculated integer and the
    // base is predicted to be an array type we can act on (mutable for puts).
    bool byValIsPure(Node& node)
    {
        return m_graph[node.child2()].shouldSpeculateInteger()
            && ((node.op == PutByVal || node.op == PutByValAlias)
                ? isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())
                : isActionableArrayPrediction(m_graph[node.child1()].prediction()));
    }
    
    // Conservative check: does the node at nodeIndex potentially clobber
    // arbitrary heap state, ending any load-elimination search?
    bool clobbersWorld(NodeIndex nodeIndex)
    {
        Node& node = m_graph[nodeIndex];
        if (node.op & NodeClobbersWorld)
            return true;
        if (!(node.op & NodeMightClobber))
            return false;
        // NodeMightClobber nodes clobber only when their speculations fail.
        switch (node.op) {
        case ValueAdd:
        case CompareLess:
        case CompareLessEq:
        case CompareGreater:
        case CompareGreaterEq:
        case CompareEq:
            return !isPredictedNumerical(node);
        case LogicalNot:
            return !logicalNotIsPure(node);
        case GetByVal:
            return !byValIsPure(node);
        default:
            ASSERT_NOT_REACHED();
            return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
        }
    }
    
    // Like pureCSE(), but for nodes that read the heap: the search stops at
    // the first potentially-clobbering node.
    NodeIndex impureCSE(Node& node)
    {
        NodeIndex child1 = canonicalize(node.child1());
        NodeIndex child2 = canonicalize(node.child2());
        NodeIndex child3 = canonicalize(node.child3());
        
        NodeIndex start = startIndex();
        for (NodeIndex index = m_compileIndex; index-- > start;) {
            Node& otherNode = m_graph[index];
            if (node.op == otherNode.op
                && node.arithNodeFlagsForCompare() == otherNode.arithNodeFlagsForCompare()) {
                NodeIndex otherChild = canonicalize(otherNode.child1());
                if (otherChild == NoNode)
                    return index;
                if (otherChild == child1) {
                    otherChild = canonicalize(otherNode.child2());
                    if (otherChild == NoNode)
                        return index;
                    if (otherChild == child2) {
                        otherChild = canonicalize(otherNode.child3());
                        if (otherChild == NoNode)
                            return index;
                        if (otherChild == child3)
                            return index;
                    }
                }
            }
            if (clobbersWorld(index))
                break;
        }
        return NoNode;
    }
    
    // Reuses a prior GetGlobalVar of the same variable, or forwards the value
    // stored by a prior PutGlobalVar to it, within the same global object.
    NodeIndex globalVarLoadElimination(unsigned varNumber, JSGlobalObject* globalObject)
    {
        NodeIndex start = startIndexForChildren();
        for (NodeIndex index = m_compileIndex; index-- > start;) {
            Node& node = m_graph[index];
            switch (node.op) {
            case GetGlobalVar:
                if (node.varNumber() == varNumber && codeBlock()->globalObjectFor(node.codeOrigin) == globalObject)
                    return index;
                break;
            case PutGlobalVar:
                if (node.varNumber() == varNumber && codeBlock()->globalObjectFor(node.codeOrigin) == globalObject)
                    return node.child1().index(); // Forward the stored value.
                break;
            default:
                break;
            }
            if (clobbersWorld(index))
                break;
        }
        return NoNode;
    }
    
    // Reuses a prior GetByVal of the same base/index, or forwards the value
    // stored by a prior PutByVal to the same base/index.
    NodeIndex getByValLoadElimination(NodeIndex child1, NodeIndex child2)
    {
        NodeIndex start = startIndexForChildren(child1, child2);
        for (NodeIndex index = m_compileIndex; index-- > start;) {
            Node& node = m_graph[index];
            switch (node.op) {
            case GetByVal:
                if (!byValIsPure(node))
                    return NoNode;
                if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
                    return index;
                break;
            case PutByVal:
            case PutByValAlias:
                if (!byValIsPure(node))
                    return NoNode;
                if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
                    return node.child3().index(); // Forward the stored value.
                // We must assume that the PutByVal will clobber the location we're getting from.
                // FIXME: We can do better; if we know that the PutByVal is accessing an array of a
                // different type than the GetByVal, then we know that they won't clobber each other.
                return NoNode;
            case PutStructure:
            case PutByOffset:
                // GetByVal currently always speculates that it's accessing an
                // array with an integer index, which means that it's impossible
                // for a structure change or a put to property storage to affect
                // the GetByVal.
                break;
            case ArrayPush:
                // A push cannot affect previously existing elements in the array.
                break;
            default:
                if (clobbersWorld(index))
                    return NoNode;
                break;
            }
        }
        return NoNode;
    }
    
    // True if an earlier CheckFunction already proved child1 is 'function'.
    // Uses the pure search window: nothing can invalidate a function identity
    // check within a block.
    bool checkFunctionElimination(JSFunction* function, NodeIndex child1)
    {
        NodeIndex start = startIndexForChildren(child1);
        for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
            Node& node = m_graph[index];
            if (node.op == CheckFunction && node.child1() == child1 && node.function() == function)
                return true;
        }
        return false;
    }
    
    // True if child1's structure is already known to be within structureSet,
    // either via a prior (superset-implied) CheckStructure or a PutStructure
    // that transitioned it into the set.
    bool checkStructureLoadElimination(const StructureSet& structureSet, NodeIndex child1)
    {
        NodeIndex start = startIndexForChildren(child1);
        for (NodeIndex index = m_compileIndex; index-- > start;) {
            Node& node = m_graph[index];
            switch (node.op) {
            case CheckStructure:
                if (node.child1() == child1
                    && structureSet.isSupersetOf(node.structureSet()))
                    return true;
                break;
                
            case PutStructure:
                if (node.child1() == child1
                    && structureSet.contains(node.structureTransitionData().newStructure))
                    return true;
                // A transition out of a structure we care about defeats the check.
                if (structureSet.contains(node.structureTransitionData().previousStructure))
                    return false;
                break;
                
            case PutByOffset:
                // Setting a property cannot change the structure.
                break;
                
            case PutByVal:
            case PutByValAlias:
                if (byValIsPure(node)) {
                    // If PutByVal speculates that it's accessing an array with an
                    // integer index, then it's impossible for it to cause a structure
                    // change.
                    break;
                }
                return false;
                
            default:
                if (clobbersWorld(index))
                    return false;
                break;
            }
        }
        return false;
    }
    
    // Reuses a prior GetByOffset of the same property on the same storage, or
    // forwards the value stored by a matching PutByOffset.
    NodeIndex getByOffsetLoadElimination(unsigned identifierNumber, NodeIndex child1)
    {
        NodeIndex start = startIndexForChildren(child1);
        for (NodeIndex index = m_compileIndex; index-- > start;) {
            Node& node = m_graph[index];
            switch (node.op) {
            case GetByOffset:
                if (node.child1() == child1
                    && m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber)
                    return index;
                break;
                
            case PutByOffset:
                // NOTE(review): for PutByOffset the storage operand appears to
                // be child2 and the stored value child3 — confirm against the
                // node construction in the byte code parser.
                if (m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber) {
                    if (node.child2() == child1)
                        return node.child3().index();
                    return NoNode; // Same property on possibly-aliasing storage.
                }
                break;
                
            case PutStructure:
                // Changing the structure cannot change the outcome of a property get.
                break;
                
            case PutByVal:
            case PutByValAlias:
                if (byValIsPure(node)) {
                    // If PutByVal speculates that it's accessing an array with an
                    // integer index, then it's impossible for it to cause a structure
                    // change.
                    break;
                }
                return NoNode;
                
            default:
                if (clobbersWorld(index))
                    return NoNode;
                break;
            }
        }
        return NoNode;
    }
    
    // Reuses a prior GetPropertyStorage for the same base object.
    NodeIndex getPropertyStorageLoadElimination(NodeIndex child1)
    {
        NodeIndex start = startIndexForChildren(child1);
        for (NodeIndex index = m_compileIndex; index-- > start;) {
            Node& node = m_graph[index];
            switch (node.op) {
            case GetPropertyStorage:
                if (node.child1() == child1)
                    return index;
                break;
                
            case PutByOffset:
            case PutStructure:
                // Changing the structure or putting to the storage cannot
                // change the property storage pointer.
                break;
                
            case PutByVal:
            case PutByValAlias:
                if (byValIsPure(node)) {
                    // If PutByVal speculates that it's accessing an array with an
                    // integer index, then it's impossible for it to cause a structure
                    // change.
                    break;
                }
                return NoNode;
                
            default:
                if (clobbersWorld(index))
                    return NoNode;
                break;
            }
        }
        return NoNode;
    }
    
    // Reuses a prior GetIndexedPropertyStorage for the same base, provided it
    // was performed under the same integer-index assumption.
    NodeIndex getIndexedPropertyStorageLoadElimination(NodeIndex child1, bool hasIntegerIndexPrediction)
    {
        NodeIndex start = startIndexForChildren(child1);
        for (NodeIndex index = m_compileIndex; index-- > start;) {
            Node& node = m_graph[index];
            switch (node.op) {
            case GetIndexedPropertyStorage: {
                PredictedType basePrediction = m_graph[node.child2()].prediction();
                // "Integer index" means: predicted int32, or no prediction at all.
                bool nodeHasIntegerIndexPrediction = !(!(basePrediction & PredictInt32) && basePrediction);
                if (node.child1() == child1 && hasIntegerIndexPrediction == nodeHasIntegerIndexPrediction)
                    return index;
                break;
            }
                
            case PutByOffset:
            case PutStructure:
                // Changing the structure or putting to the storage cannot
                // change the property storage pointer.
                break;
                
            case PutByValAlias:
                // PutByValAlias can't change the indexed storage pointer
                break;
                
            case PutByVal:
                if (isFixedIndexedStorageObjectPrediction(m_graph[node.child1()].prediction()) && byValIsPure(node))
                    break;
                return NoNode;
                
            default:
                if (clobbersWorld(index))
                    return NoNode;
                break;
            }
        }
        return NoNode;
    }
    
    // Reuses a prior GetScopeChain at the same depth. Scope chain lookups are
    // pure, so the pure search window applies.
    NodeIndex getScopeChainLoadElimination(unsigned depth)
    {
        NodeIndex start = startIndexForChildren();
        for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
            Node& node = m_graph[index];
            if (node.op == GetScopeChain
                && node.scopeChainDepth() == depth)
                return index;
        }
        return NoNode;
    }
    
    // Redirects 'child' to its recorded replacement, if any, ref'ing the
    // replacement when the consuming node will actually be generated.
    void performSubstitution(NodeUse& child, bool addRef = true)
    {
        // Check if this operand is actually unused.
        if (!child)
            return;
        
        // Check if there is any replacement.
        NodeIndex replacement = m_replacements[child.index()];
        if (replacement == NoNode)
            return;
        
        child.setIndex(replacement);
        
        // There is definitely a replacement. Assert that the replacement does not
        // have a replacement.
        ASSERT(m_replacements[child.index()] == NoNode);
        
        if (addRef)
            m_graph[child].ref();
    }
    
    // Turns the current node into a Phantom and records 'replacement' so that
    // later uses of the current node are redirected to it.
    void setReplacement(NodeIndex replacement)
    {
        if (replacement == NoNode)
            return;
        
        // Be safe. Don't try to perform replacements if the predictions don't
        // agree.
        if (m_graph[m_compileIndex].prediction() != m_graph[replacement].prediction())
            return;
        
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" Replacing @%u -> @%u", m_compileIndex, replacement);
#endif
        
        Node& node = m_graph[m_compileIndex];
        node.op = Phantom;
        node.setRefCount(1);
        
        // At this point we will eliminate all references to this node.
        m_replacements[m_compileIndex] = replacement;
    }
    
    // Kills a redundant check node (one with no value result) in place.
    void eliminate()
    {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" Eliminating @%u", m_compileIndex);
#endif
        
        Node& node = m_graph[m_compileIndex];
        ASSERT(node.refCount() == 1);
        ASSERT(node.mustGenerate());
        node.op = Phantom;
    }
    
    // Substitutes this node's children, then attempts to CSE the node itself
    // according to its opcode's purity/heap-dependence class.
    void performNodeCSE(Node& node)
    {
        bool shouldGenerate = node.shouldGenerate();
        
        // Substitution must run even for dead nodes so child indices stay
        // consistent, but dead nodes must not add refs.
        if (node.op & NodeHasVarArgs) {
            for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
                performSubstitution(m_graph.m_varArgChildren[childIdx], shouldGenerate);
        } else {
            performSubstitution(node.children.child1(), shouldGenerate);
            performSubstitution(node.children.child2(), shouldGenerate);
            performSubstitution(node.children.child3(), shouldGenerate);
        }
        
        if (!shouldGenerate)
            return;
        
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" %s @%u: ", Graph::opName(m_graph[m_compileIndex].op), m_compileIndex);
#endif
        
        // NOTE: there are some nodes that we deliberately don't CSE even though we
        // probably could, like StrCat and ToPrimitive. That's because there is no
        // evidence that doing CSE on these nodes would result in a performance
        // progression. Hence considering these nodes in CSE would just mean that this
        // code does more work with no win. Of course, we may want to reconsider this,
        // since StrCat is trivially CSE-able. It's not trivially doable for
        // ToPrimitive, but we could change that with some speculations if we really
        // needed to.
        
        switch (node.op) {
        
        // Handle the pure nodes. These nodes never have any side-effects.
        case BitAnd:
        case BitOr:
        case BitXor:
        case BitRShift:
        case BitLShift:
        case BitURShift:
        case ArithAdd:
        case ArithSub:
        case ArithMul:
        case ArithMod:
        case ArithDiv:
        case ArithAbs:
        case ArithMin:
        case ArithMax:
        case ArithSqrt:
        case GetByteArrayLength:
        case GetInt8ArrayLength:
        case GetInt16ArrayLength:
        case GetInt32ArrayLength:
        case GetUint8ArrayLength:
        case GetUint8ClampedArrayLength:
        case GetUint16ArrayLength:
        case GetUint32ArrayLength:
        case GetFloat32ArrayLength:
        case GetFloat64ArrayLength:
        case GetCallee:
        case GetStringLength:
        case StringCharAt:
        case StringCharCodeAt:
            setReplacement(pureCSE(node));
            break;
            
        case GetArrayLength:
            setReplacement(impureCSE(node));
            break;
            
        case GetScopeChain:
            setReplacement(getScopeChainLoadElimination(node.scopeChainDepth()));
            break;
            
        // Handle nodes that are conditionally pure: these are pure, and can
        // be CSE'd, so long as the prediction is the one we want.
        case ValueAdd:
        case CompareLess:
        case CompareLessEq:
        case CompareGreater:
        case CompareGreaterEq:
        case CompareEq: {
            if (isPredictedNumerical(node)) {
                NodeIndex replacementIndex = pureCSE(node);
                if (replacementIndex != NoNode && isPredictedNumerical(m_graph[replacementIndex]))
                    setReplacement(replacementIndex);
            }
            break;
        }
            
        case LogicalNot: {
            if (logicalNotIsPure(node)) {
                NodeIndex replacementIndex = pureCSE(node);
                if (replacementIndex != NoNode && logicalNotIsPure(m_graph[replacementIndex]))
                    setReplacement(replacementIndex);
            }
            break;
        }
            
        // Finally handle heap accesses. These are not quite pure, but we can still
        // optimize them provided that some subtle conditions are met.
        case GetGlobalVar:
            setReplacement(globalVarLoadElimination(node.varNumber(), codeBlock()->globalObjectFor(node.codeOrigin)));
            break;
            
        case GetByVal:
            if (byValIsPure(node))
                setReplacement(getByValLoadElimination(node.child1().index(), node.child2().index()));
            break;
            
        case PutByVal:
            // If the location provably holds the same value, the put cannot
            // change the array's shape, so downgrade to the cheaper alias form.
            if (byValIsPure(node) && getByValLoadElimination(node.child1().index(), node.child2().index()) != NoNode)
                node.op = PutByValAlias;
            break;
            
        case CheckStructure:
            if (checkStructureLoadElimination(node.structureSet(), node.child1().index()))
                eliminate();
            break;
            
        case CheckFunction:
            if (checkFunctionElimination(node.function(), node.child1().index()))
                eliminate();
            break;
            
        case GetIndexedPropertyStorage: {
            PredictedType basePrediction = m_graph[node.child2()].prediction();
            bool nodeHasIntegerIndexPrediction = !(!(basePrediction & PredictInt32) && basePrediction);
            setReplacement(getIndexedPropertyStorageLoadElimination(node.child1().index(), nodeHasIntegerIndexPrediction));
            break;
        }
            
        case GetPropertyStorage:
            setReplacement(getPropertyStorageLoadElimination(node.child1().index()));
            break;
            
        case GetByOffset:
            setReplacement(getByOffsetLoadElimination(m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber, node.child1().index()));
            break;
            
        default:
            // do nothing.
            break;
        }
        
        m_lastSeen[node.op & NodeIdMask] = m_compileIndex;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog("\n");
#endif
    }
    
    // Runs CSE over one basic block, in program order.
    void performBlockCSE(BasicBlock& block)
    {
        m_start = block.begin;
        NodeIndex end = block.end;
        for (m_compileIndex = m_start; m_compileIndex < end; ++m_compileIndex)
            performNodeCSE(m_graph[m_compileIndex]);
    }
    
    NodeIndex m_start;                           // First node of the current block.
    NodeIndex m_compileIndex;                    // Node currently being considered.
    Vector<NodeIndex, 16> m_replacements;        // node -> its CSE replacement, or NoNode.
    FixedArray<NodeIndex, LastNodeId> m_lastSeen; // opcode id -> last node with that opcode.
};
+
// Entry point for the CSE phase: constructs a CSEPhase over the graph and
// runs it (runPhase handles the shared phase setup/teardown and logging).
void performCSE(Graph& graph)
{
    runPhase<CSEPhase>(graph);
}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.h b/Source/JavaScriptCore/dfg/DFGCSEPhase.h
new file mode 100644
index 000000000..3f13f102b
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGCSEPhase_h
+#define DFGCSEPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// Block-local common subexpression elimination. This is an optional phase, but
+// it is rather profitable. It has fairly accurate heap modeling and will match
+// a wide range of subexpression similarities. It's known to produce big wins
+// on a few benchmarks, and is relatively cheap to run.
+
+void performCSE(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGCSEPhase_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h
index 2653c73b0..e339714e9 100644
--- a/Source/JavaScriptCore/dfg/DFGCapabilities.h
+++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h
@@ -111,6 +111,8 @@ inline bool canCompileOpcode(OpcodeID opcodeID)
case op_put_scoped_var:
case op_get_by_id:
case op_put_by_id:
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
case op_get_global_var:
case op_put_global_var:
case op_jmp:
diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h
index 14edff3a5..330504c3e 100644
--- a/Source/JavaScriptCore/dfg/DFGCommon.h
+++ b/Source/JavaScriptCore/dfg/DFGCommon.h
@@ -71,6 +71,8 @@
#define DFG_ENABLE_SUCCESS_STATS 0
// Used to enable conditionally supported opcodes that currently result in performance regressions.
#define DFG_ENABLE_RESTRICTIONS 1
+// Enable verification that the DFG is able to insert code for control flow edges.
+#define DFG_ENABLE_EDGE_CODE_VERIFICATION 0
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp
index 27a0dab75..124d7e637 100644
--- a/Source/JavaScriptCore/dfg/DFGDriver.cpp
+++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp
@@ -28,9 +28,13 @@
#if ENABLE(DFG_JIT)
+#include "DFGArithNodeFlagsInferencePhase.h"
#include "DFGByteCodeParser.h"
+#include "DFGCFAPhase.h"
+#include "DFGCSEPhase.h"
#include "DFGJITCompiler.h"
-#include "DFGPropagator.h"
+#include "DFGPredictionPropagationPhase.h"
+#include "DFGVirtualRegisterAllocationPhase.h"
namespace JSC { namespace DFG {
@@ -44,19 +48,28 @@ inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock
ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
+ dataLog("DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
#endif
- Graph dfg;
- if (!parse(dfg, &globalData, codeBlock))
+ Graph dfg(globalData, codeBlock);
+ if (!parse(dfg))
return false;
if (compileMode == CompileFunction)
- dfg.predictArgumentTypes(codeBlock);
-
- propagate(dfg, &globalData, codeBlock);
+ dfg.predictArgumentTypes();
+
+ performArithNodeFlagsInference(dfg);
+ performPredictionPropagation(dfg);
+ performCSE(dfg);
+ performVirtualRegisterAllocation(dfg);
+ performCFA(dfg);
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Graph after optimization:\n");
+ dfg.dump();
+#endif
- JITCompiler dataFlowJIT(&globalData, dfg, codeBlock);
+ JITCompiler dataFlowJIT(dfg);
if (compileMode == CompileFunction) {
ASSERT(jitCodeWithArityCheck);
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp
index c304dc8e7..b8eec93c7 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.cpp
+++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp
@@ -80,7 +80,7 @@ const char* Graph::nameOfVariableAccessData(VariableAccessData* variableAccessDa
static void printWhiteSpace(unsigned amount)
{
while (amount-- > 0)
- printf(" ");
+ dataLog(" ");
}
void Graph::dumpCodeOrigin(NodeIndex nodeIndex)
@@ -107,17 +107,17 @@ void Graph::dumpCodeOrigin(NodeIndex nodeIndex)
// Print the pops.
for (unsigned i = previousInlineStack.size(); i-- > indexOfDivergence;) {
printWhiteSpace(i * 2);
- printf("<-- %p\n", previousInlineStack[i].inlineCallFrame->executable.get());
+ dataLog("<-- %p\n", previousInlineStack[i].inlineCallFrame->executable.get());
}
// Print the pushes.
for (unsigned i = indexOfDivergence; i < currentInlineStack.size(); ++i) {
printWhiteSpace(i * 2);
- printf("--> %p\n", currentInlineStack[i].inlineCallFrame->executable.get());
+ dataLog("--> %p\n", currentInlineStack[i].inlineCallFrame->executable.get());
}
}
-void Graph::dump(NodeIndex nodeIndex, CodeBlock* codeBlock)
+void Graph::dump(NodeIndex nodeIndex)
{
Node& node = at(nodeIndex);
NodeType op = node.op;
@@ -150,64 +150,58 @@ void Graph::dump(NodeIndex nodeIndex, CodeBlock* codeBlock)
// $# - the index in the CodeBlock of a constant { for numeric constants the value is displayed | for integers, in both decimal and hex }.
// id# - the index in the CodeBlock of an identifier { if codeBlock is passed to dump(), the string representation is displayed }.
// var# - the index of a var on the global object, used by GetGlobalVar/PutGlobalVar operations.
- printf("% 4d:%s<%c%u:", (int)nodeIndex, skipped ? " skipped " : " ", mustGenerate ? '!' : ' ', refCount);
+ dataLog("% 4d:%s<%c%u:", (int)nodeIndex, skipped ? " skipped " : " ", mustGenerate ? '!' : ' ', refCount);
if (node.hasResult() && !skipped && node.hasVirtualRegister())
- printf("%u", node.virtualRegister());
+ dataLog("%u", node.virtualRegister());
else
- printf("-");
- printf(">\t%s(", opName(op));
+ dataLog("-");
+ dataLog(">\t%s(", opName(op));
bool hasPrinted = false;
if (op & NodeHasVarArgs) {
for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++) {
if (hasPrinted)
- printf(", ");
+ dataLog(", ");
else
hasPrinted = true;
- printf("@%u", m_varArgChildren[childIdx].index());
+ dataLog("@%u", m_varArgChildren[childIdx].index());
}
} else {
if (!!node.child1())
- printf("@%u", node.child1().index());
+ dataLog("@%u", node.child1().index());
if (!!node.child2())
- printf(", @%u", node.child2().index());
+ dataLog(", @%u", node.child2().index());
if (!!node.child3())
- printf(", @%u", node.child3().index());
+ dataLog(", @%u", node.child3().index());
hasPrinted = !!node.child1();
}
if (node.hasArithNodeFlags()) {
- printf("%s%s", hasPrinted ? ", " : "", arithNodeFlagsAsString(node.rawArithNodeFlags()));
+ dataLog("%s%s", hasPrinted ? ", " : "", arithNodeFlagsAsString(node.rawArithNodeFlags()));
hasPrinted = true;
}
if (node.hasVarNumber()) {
- printf("%svar%u", hasPrinted ? ", " : "", node.varNumber());
+ dataLog("%svar%u", hasPrinted ? ", " : "", node.varNumber());
hasPrinted = true;
}
if (node.hasIdentifier()) {
- if (codeBlock)
- printf("%sid%u{%s}", hasPrinted ? ", " : "", node.identifierNumber(), codeBlock->identifier(node.identifierNumber()).ustring().utf8().data());
- else
- printf("%sid%u", hasPrinted ? ", " : "", node.identifierNumber());
+ dataLog("%sid%u{%s}", hasPrinted ? ", " : "", node.identifierNumber(), m_codeBlock->identifier(node.identifierNumber()).ustring().utf8().data());
hasPrinted = true;
}
if (node.hasStructureSet()) {
for (size_t i = 0; i < node.structureSet().size(); ++i) {
- printf("%sstruct(%p)", hasPrinted ? ", " : "", node.structureSet()[i]);
+ dataLog("%sstruct(%p)", hasPrinted ? ", " : "", node.structureSet()[i]);
hasPrinted = true;
}
}
if (node.hasStructureTransitionData()) {
- printf("%sstruct(%p -> %p)", hasPrinted ? ", " : "", node.structureTransitionData().previousStructure, node.structureTransitionData().newStructure);
+ dataLog("%sstruct(%p -> %p)", hasPrinted ? ", " : "", node.structureTransitionData().previousStructure, node.structureTransitionData().newStructure);
hasPrinted = true;
}
if (node.hasStorageAccessData()) {
StorageAccessData& storageAccessData = m_storageAccessData[node.storageAccessDataIndex()];
- if (codeBlock)
- printf("%sid%u{%s}", hasPrinted ? ", " : "", storageAccessData.identifierNumber, codeBlock->identifier(storageAccessData.identifierNumber).ustring().utf8().data());
- else
- printf("%sid%u", hasPrinted ? ", " : "", storageAccessData.identifierNumber);
+ dataLog("%sid%u{%s}", hasPrinted ? ", " : "", storageAccessData.identifierNumber, m_codeBlock->identifier(storageAccessData.identifierNumber).ustring().utf8().data());
- printf(", %lu", static_cast<unsigned long>(storageAccessData.offset));
+ dataLog(", %lu", static_cast<unsigned long>(storageAccessData.offset));
hasPrinted = true;
}
ASSERT(node.hasVariableAccessData() == node.hasLocal());
@@ -215,85 +209,83 @@ void Graph::dump(NodeIndex nodeIndex, CodeBlock* codeBlock)
VariableAccessData* variableAccessData = node.variableAccessData();
int operand = variableAccessData->operand();
if (operandIsArgument(operand))
- printf("%sarg%u(%s)", hasPrinted ? ", " : "", operandToArgument(operand), nameOfVariableAccessData(variableAccessData));
+ dataLog("%sarg%u(%s)", hasPrinted ? ", " : "", operandToArgument(operand), nameOfVariableAccessData(variableAccessData));
else
- printf("%sr%u(%s)", hasPrinted ? ", " : "", operand, nameOfVariableAccessData(variableAccessData));
+ dataLog("%sr%u(%s)", hasPrinted ? ", " : "", operand, nameOfVariableAccessData(variableAccessData));
hasPrinted = true;
}
- if (node.hasConstantBuffer() && codeBlock) {
+ if (node.hasConstantBuffer()) {
if (hasPrinted)
- printf(", ");
- printf("%u:[", node.startConstant());
+ dataLog(", ");
+ dataLog("%u:[", node.startConstant());
for (unsigned i = 0; i < node.numConstants(); ++i) {
if (i)
- printf(", ");
- printf("%s", codeBlock->constantBuffer(node.startConstant())[i].description());
+ dataLog(", ");
+ dataLog("%s", m_codeBlock->constantBuffer(node.startConstant())[i].description());
}
- printf("]");
+ dataLog("]");
hasPrinted = true;
}
if (op == JSConstant) {
- printf("%s$%u", hasPrinted ? ", " : "", node.constantNumber());
- if (codeBlock) {
- JSValue value = valueOfJSConstant(codeBlock, nodeIndex);
- printf(" = %s", value.description());
- }
+ dataLog("%s$%u", hasPrinted ? ", " : "", node.constantNumber());
+ JSValue value = valueOfJSConstant(nodeIndex);
+ dataLog(" = %s", value.description());
hasPrinted = true;
}
if (op == WeakJSConstant) {
- printf("%s%p", hasPrinted ? ", " : "", node.weakConstant());
+ dataLog("%s%p", hasPrinted ? ", " : "", node.weakConstant());
hasPrinted = true;
}
if (node.isBranch() || node.isJump()) {
- printf("%sT:#%u", hasPrinted ? ", " : "", node.takenBlockIndex());
+ dataLog("%sT:#%u", hasPrinted ? ", " : "", node.takenBlockIndex());
hasPrinted = true;
}
if (node.isBranch()) {
- printf("%sF:#%u", hasPrinted ? ", " : "", node.notTakenBlockIndex());
+ dataLog("%sF:#%u", hasPrinted ? ", " : "", node.notTakenBlockIndex());
hasPrinted = true;
}
(void)hasPrinted;
- printf(")");
+ dataLog(")");
if (!skipped) {
if (node.hasVariableAccessData())
- printf(" predicting %s, double ratio %lf%s", predictionToString(node.variableAccessData()->prediction()), node.variableAccessData()->doubleVoteRatio(), node.variableAccessData()->shouldUseDoubleFormat() ? ", forcing double" : "");
+ dataLog(" predicting %s, double ratio %lf%s", predictionToString(node.variableAccessData()->prediction()), node.variableAccessData()->doubleVoteRatio(), node.variableAccessData()->shouldUseDoubleFormat() ? ", forcing double" : "");
else if (node.hasHeapPrediction())
- printf(" predicting %s", predictionToString(node.getHeapPrediction()));
+ dataLog(" predicting %s", predictionToString(node.getHeapPrediction()));
else if (node.hasVarNumber())
- printf(" predicting %s", predictionToString(getGlobalVarPrediction(node.varNumber())));
+ dataLog(" predicting %s", predictionToString(getGlobalVarPrediction(node.varNumber())));
}
- printf("\n");
+ dataLog("\n");
}
-void Graph::dump(CodeBlock* codeBlock)
+void Graph::dump()
{
for (size_t b = 0; b < m_blocks.size(); ++b) {
BasicBlock* block = m_blocks[b].get();
- printf("Block #%u (bc#%u): %s%s\n", (int)b, block->bytecodeBegin, block->isReachable ? "" : " (skipped)", block->isOSRTarget ? " (OSR target)" : "");
- printf(" vars before: ");
+ dataLog("Block #%u (bc#%u): %s%s\n", (int)b, block->bytecodeBegin, block->isReachable ? "" : " (skipped)", block->isOSRTarget ? " (OSR target)" : "");
+ dataLog(" vars before: ");
if (block->cfaHasVisited)
- dumpOperands(block->valuesAtHead, stdout);
+ dumpOperands(block->valuesAtHead, WTF::dataFile());
else
- printf("<empty>");
- printf("\n");
- printf(" var links: ");
- dumpOperands(block->variablesAtHead, stdout);
- printf("\n");
+ dataLog("<empty>");
+ dataLog("\n");
+ dataLog(" var links: ");
+ dumpOperands(block->variablesAtHead, WTF::dataFile());
+ dataLog("\n");
for (size_t i = block->begin; i < block->end; ++i)
- dump(i, codeBlock);
- printf(" vars after: ");
+ dump(i);
+ dataLog(" vars after: ");
if (block->cfaHasVisited)
- dumpOperands(block->valuesAtTail, stdout);
+ dumpOperands(block->valuesAtTail, WTF::dataFile());
else
- printf("<empty>");
- printf("\n");
+ dataLog("<empty>");
+ dataLog("\n");
}
- printf("Phi Nodes:\n");
+ dataLog("Phi Nodes:\n");
for (size_t i = m_blocks.last()->end; i < size(); ++i)
- dump(i, codeBlock);
+ dump(i);
}
// FIXME: Convert this to be iterative, not recursive.
@@ -334,22 +326,18 @@ void Graph::derefChildren(NodeIndex op)
DO_TO_CHILDREN(at(op), deref);
}
-void Graph::predictArgumentTypes(CodeBlock* codeBlock)
+void Graph::predictArgumentTypes()
{
- ASSERT(codeBlock);
- ASSERT(codeBlock->alternative());
-
- CodeBlock* profiledCodeBlock = codeBlock->alternative();
- ASSERT(codeBlock->numParameters() >= 1);
- for (size_t arg = 0; arg < static_cast<size_t>(codeBlock->numParameters()); ++arg) {
- ValueProfile* profile = profiledCodeBlock->valueProfileForArgument(arg);
+ ASSERT(m_codeBlock->numParameters() >= 1);
+ for (size_t arg = 0; arg < static_cast<size_t>(m_codeBlock->numParameters()); ++arg) {
+ ValueProfile* profile = m_profiledBlock->valueProfileForArgument(arg);
if (!profile)
continue;
at(m_arguments[arg]).variableAccessData()->predict(profile->computeUpdatedPrediction());
#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Argument [%zu] prediction: %s\n", arg, predictionToString(at(m_arguments[arg]).variableAccessData()->prediction()));
+ dataLog("Argument [%zu] prediction: %s\n", arg, predictionToString(at(m_arguments[arg]).variableAccessData()->prediction()));
#endif
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index ecd77b3a4..88d6a4eec 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -29,8 +29,10 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
+#include "DFGAssemblyHelpers.h"
#include "DFGBasicBlock.h"
#include "DFGNode.h"
+#include "MethodOfGettingAValueProfile.h"
#include "PredictionTracker.h"
#include "RegisterFile.h"
#include <wtf/BitVector.h>
@@ -71,6 +73,14 @@ struct ResolveGlobalData {
// Nodes that are 'dead' remain in the vector with refCount 0.
class Graph : public Vector<Node, 64> {
public:
+ Graph(JSGlobalData& globalData, CodeBlock* codeBlock)
+ : m_globalData(globalData)
+ , m_codeBlock(codeBlock)
+ , m_profiledBlock(codeBlock->alternative())
+ {
+ ASSERT(m_profiledBlock);
+ }
+
using Vector<Node, 64>::operator[];
using Vector<Node, 64>::at;
@@ -128,8 +138,8 @@ public:
}
// CodeBlock is optional, but may allow additional information to be dumped (e.g. Identifier names).
- void dump(CodeBlock* = 0);
- void dump(NodeIndex, CodeBlock* = 0);
+ void dump();
+ void dump(NodeIndex);
// Dump the code origin of the given node as a diff from the code origin of the
// preceding node.
@@ -147,12 +157,12 @@ public:
return m_predictions.getGlobalVarPrediction(varNumber);
}
- PredictedType getJSConstantPrediction(Node& node, CodeBlock* codeBlock)
+ PredictedType getJSConstantPrediction(Node& node)
{
- return predictionFromValue(node.valueOfJSConstant(codeBlock));
+ return predictionFromValue(node.valueOfJSConstant(m_codeBlock));
}
- bool addShouldSpeculateInteger(Node& add, CodeBlock* codeBlock)
+ bool addShouldSpeculateInteger(Node& add)
{
ASSERT(add.op == ValueAdd || add.op == ArithAdd || add.op == ArithSub);
@@ -160,16 +170,16 @@ public:
Node& right = at(add.child2());
if (left.hasConstant())
- return addImmediateShouldSpeculateInteger(codeBlock, add, right, left);
+ return addImmediateShouldSpeculateInteger(add, right, left);
if (right.hasConstant())
- return addImmediateShouldSpeculateInteger(codeBlock, add, left, right);
+ return addImmediateShouldSpeculateInteger(add, left, right);
return Node::shouldSpeculateInteger(left, right) && add.canSpeculateInteger();
}
- bool addShouldSpeculateInteger(NodeIndex nodeIndex, CodeBlock* codeBlock)
+ bool addShouldSpeculateInteger(NodeIndex nodeIndex)
{
- return addShouldSpeculateInteger(at(nodeIndex), codeBlock);
+ return addShouldSpeculateInteger(at(nodeIndex));
}
// Helper methods to check nodes for constants.
@@ -181,50 +191,50 @@ public:
{
return at(nodeIndex).hasConstant();
}
- bool isInt32Constant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ bool isInt32Constant(NodeIndex nodeIndex)
{
- return at(nodeIndex).isInt32Constant(codeBlock);
+ return at(nodeIndex).isInt32Constant(m_codeBlock);
}
- bool isDoubleConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ bool isDoubleConstant(NodeIndex nodeIndex)
{
- return at(nodeIndex).isDoubleConstant(codeBlock);
+ return at(nodeIndex).isDoubleConstant(m_codeBlock);
}
- bool isNumberConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ bool isNumberConstant(NodeIndex nodeIndex)
{
- return at(nodeIndex).isNumberConstant(codeBlock);
+ return at(nodeIndex).isNumberConstant(m_codeBlock);
}
- bool isBooleanConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ bool isBooleanConstant(NodeIndex nodeIndex)
{
- return at(nodeIndex).isBooleanConstant(codeBlock);
+ return at(nodeIndex).isBooleanConstant(m_codeBlock);
}
- bool isFunctionConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ bool isFunctionConstant(NodeIndex nodeIndex)
{
if (!isJSConstant(nodeIndex))
return false;
- if (!getJSFunction(valueOfJSConstant(codeBlock, nodeIndex)))
+ if (!getJSFunction(valueOfJSConstant(nodeIndex)))
return false;
return true;
}
// Helper methods get constant values from nodes.
- JSValue valueOfJSConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ JSValue valueOfJSConstant(NodeIndex nodeIndex)
{
- return at(nodeIndex).valueOfJSConstant(codeBlock);
+ return at(nodeIndex).valueOfJSConstant(m_codeBlock);
}
- int32_t valueOfInt32Constant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ int32_t valueOfInt32Constant(NodeIndex nodeIndex)
{
- return valueOfJSConstant(codeBlock, nodeIndex).asInt32();
+ return valueOfJSConstant(nodeIndex).asInt32();
}
- double valueOfNumberConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ double valueOfNumberConstant(NodeIndex nodeIndex)
{
- return valueOfJSConstant(codeBlock, nodeIndex).asNumber();
+ return valueOfJSConstant(nodeIndex).asNumber();
}
- bool valueOfBooleanConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ bool valueOfBooleanConstant(NodeIndex nodeIndex)
{
- return valueOfJSConstant(codeBlock, nodeIndex).asBoolean();
+ return valueOfJSConstant(nodeIndex).asBoolean();
}
- JSFunction* valueOfFunctionConstant(CodeBlock* codeBlock, NodeIndex nodeIndex)
+ JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex)
{
- JSCell* function = getJSFunction(valueOfJSConstant(codeBlock, nodeIndex));
+ JSCell* function = getJSFunction(valueOfJSConstant(nodeIndex));
ASSERT(function);
return asFunction(function);
}
@@ -234,7 +244,7 @@ public:
// This is O(n), and should only be used for verbose dumps.
const char* nameOfVariableAccessData(VariableAccessData*);
- void predictArgumentTypes(CodeBlock*);
+ void predictArgumentTypes();
StructureSet* addStructureSet(const StructureSet& structureSet)
{
@@ -249,14 +259,20 @@ public:
return &m_structureTransitionData.last();
}
- ValueProfile* valueProfileFor(NodeIndex nodeIndex, CodeBlock* profiledBlock)
+ CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
+ {
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, m_profiledBlock);
+ }
+
+ ValueProfile* valueProfileFor(NodeIndex nodeIndex)
{
if (nodeIndex == NoNode)
return 0;
Node& node = at(nodeIndex);
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node.codeOrigin);
- if (node.op == GetLocal) {
+ if (node.hasLocal()) {
if (!operandIsArgument(node.local()))
return 0;
int argument = operandToArgument(node.local());
@@ -270,6 +286,28 @@ public:
return 0;
}
+
+ MethodOfGettingAValueProfile methodOfGettingAValueProfileFor(NodeIndex nodeIndex)
+ {
+ if (nodeIndex == NoNode)
+ return MethodOfGettingAValueProfile();
+
+ Node& node = at(nodeIndex);
+ CodeBlock* profiledBlock = baselineCodeBlockFor(node.codeOrigin);
+
+ if (node.op == GetLocal) {
+ return MethodOfGettingAValueProfile::fromLazyOperand(
+ profiledBlock,
+ LazyOperandValueProfileKey(
+ node.codeOrigin.bytecodeIndex, node.local()));
+ }
+
+ return MethodOfGettingAValueProfile(valueProfileFor(nodeIndex));
+ }
+
+ JSGlobalData& m_globalData;
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_profiledBlock;
Vector< OwnPtr<BasicBlock> , 8> m_blocks;
Vector<NodeUse, 16> m_varArgChildren;
@@ -284,11 +322,11 @@ public:
unsigned m_parameterSlots;
private:
- bool addImmediateShouldSpeculateInteger(CodeBlock* codeBlock, Node& add, Node& variable, Node& immediate)
+ bool addImmediateShouldSpeculateInteger(Node& add, Node& variable, Node& immediate)
{
ASSERT(immediate.hasConstant());
- JSValue immediateValue = immediate.valueOfJSConstant(codeBlock);
+ JSValue immediateValue = immediate.valueOfJSConstant(m_codeBlock);
if (!immediateValue.isNumber())
return false;
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index ac5f314a1..7b2bbc788 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -116,7 +116,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
{
// Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
+ dataLog("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif
// Link all calls out from the JIT code to their respective functions.
@@ -134,27 +134,16 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
}
}
+
+ Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
+ codeOrigins.resize(m_exceptionChecks.size());
- unsigned numCallsFromInlineCode = 0;
for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
- if (m_exceptionChecks[i].m_codeOrigin.inlineCallFrame)
- numCallsFromInlineCode++;
- }
-
- if (numCallsFromInlineCode) {
- Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
- codeOrigins.resize(numCallsFromInlineCode);
-
- for (unsigned i = 0, j = 0; i < m_exceptionChecks.size(); ++i) {
- CallExceptionRecord& record = m_exceptionChecks[i];
- if (record.m_codeOrigin.inlineCallFrame) {
- unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
- codeOrigins[j].codeOrigin = record.m_codeOrigin;
- codeOrigins[j].callReturnOffset = returnAddressOffset;
- record.m_token.assertCodeOriginIndex(j);
- j++;
- }
- }
+ CallExceptionRecord& record = m_exceptionChecks[i];
+ unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
+ codeOrigins[i].codeOrigin = record.m_codeOrigin;
+ codeOrigins[i].callReturnOffset = returnAddressOffset;
+ record.m_token.assertCodeOriginIndex(i);
}
m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
@@ -212,6 +201,9 @@ void JITCompiler::compile(JITCode& entry)
SpeculativeJIT speculative(*this);
compileBody(speculative);
+ // Create OSR entry trampolines if necessary.
+ speculative.createOSREntries();
+
LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock);
link(linkBuffer);
speculative.linkOSREntries(linkBuffer);
@@ -251,7 +243,10 @@ void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
registerFileCheck.link(this);
move(stackPointerRegister, GPRInfo::argumentGPR0);
poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ CallBeginToken token = beginCall();
Call callRegisterFileCheck = call();
+ notifyCall(callRegisterFileCheck, CodeOrigin(0), token);
jump(fromRegisterFileCheck);
// The fast entry point into a function does not check the correct number of arguments
@@ -266,9 +261,14 @@ void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
branch32(AboveOrEqual, GPRInfo::regT1, Imm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
move(stackPointerRegister, GPRInfo::argumentGPR0);
poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ token = beginCall();
Call callArityCheck = call();
+ notifyCall(callArityCheck, CodeOrigin(0), token);
move(GPRInfo::regT0, GPRInfo::callFrameRegister);
jump(fromArityCheck);
+
+ // Create OSR entry trampolines if necessary.
+ speculative.createOSREntries();
// === Link ===
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index 5596876fc..a0c68fe4b 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -92,11 +92,7 @@ public:
ASSERT_UNUSED(codeOriginIndex, codeOriginIndex < UINT_MAX);
ASSERT_UNUSED(codeOriginIndex, codeOriginIndex == m_codeOriginIndex);
}
-
- void assertNoCodeOriginIndex() const
- {
- ASSERT(m_codeOriginIndex == UINT_MAX);
- }
+
private:
#if !ASSERT_DISABLED
unsigned m_codeOriginIndex;
@@ -191,8 +187,8 @@ struct PropertyAccessRecord {
// call to be linked).
class JITCompiler : public CCallHelpers {
public:
- JITCompiler(JSGlobalData* globalData, Graph& dfg, CodeBlock* codeBlock)
- : CCallHelpers(globalData, codeBlock)
+ JITCompiler(Graph& dfg)
+ : CCallHelpers(&dfg.m_globalData, dfg.m_codeBlock)
, m_graph(dfg)
, m_currentCodeOriginIndex(0)
{
@@ -205,22 +201,16 @@ public:
Graph& graph() { return m_graph; }
// Just get a token for beginning a call.
- CallBeginToken nextCallBeginToken(CodeOrigin codeOrigin)
+ CallBeginToken beginJSCall()
{
- if (!codeOrigin.inlineCallFrame)
- return CallBeginToken();
return CallBeginToken(m_currentCodeOriginIndex++);
}
// Get a token for beginning a call, and set the current code origin index in
// the call frame.
- CallBeginToken beginCall(CodeOrigin codeOrigin)
+ CallBeginToken beginCall()
{
- unsigned codeOriginIndex;
- if (!codeOrigin.inlineCallFrame)
- codeOriginIndex = UINT_MAX;
- else
- codeOriginIndex = m_currentCodeOriginIndex++;
+ unsigned codeOriginIndex = m_currentCodeOriginIndex++;
store32(TrustedImm32(codeOriginIndex), tagFor(static_cast<VirtualRegister>(RegisterFile::ArgumentCount)));
return CallBeginToken(codeOriginIndex);
}
@@ -254,21 +244,6 @@ public:
m_exceptionChecks.append(CallExceptionRecord(functionCall, exceptionCheck, codeOrigin, token));
}
- // Helper methods to check nodes for constants.
- bool isConstant(NodeIndex nodeIndex) { return graph().isConstant(nodeIndex); }
- bool isJSConstant(NodeIndex nodeIndex) { return graph().isJSConstant(nodeIndex); }
- bool isInt32Constant(NodeIndex nodeIndex) { return graph().isInt32Constant(codeBlock(), nodeIndex); }
- bool isDoubleConstant(NodeIndex nodeIndex) { return graph().isDoubleConstant(codeBlock(), nodeIndex); }
- bool isNumberConstant(NodeIndex nodeIndex) { return graph().isNumberConstant(codeBlock(), nodeIndex); }
- bool isBooleanConstant(NodeIndex nodeIndex) { return graph().isBooleanConstant(codeBlock(), nodeIndex); }
- bool isFunctionConstant(NodeIndex nodeIndex) { return graph().isFunctionConstant(codeBlock(), nodeIndex); }
- // Helper methods get constant values from nodes.
- JSValue valueOfJSConstant(NodeIndex nodeIndex) { return graph().valueOfJSConstant(codeBlock(), nodeIndex); }
- int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return graph().valueOfInt32Constant(codeBlock(), nodeIndex); }
- double valueOfNumberConstant(NodeIndex nodeIndex) { return graph().valueOfNumberConstant(codeBlock(), nodeIndex); }
- bool valueOfBooleanConstant(NodeIndex nodeIndex) { return graph().valueOfBooleanConstant(codeBlock(), nodeIndex); }
- JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return graph().valueOfFunctionConstant(codeBlock(), nodeIndex); }
-
// Helper methods to get predictions
PredictedType getPrediction(Node& node) { return node.prediction(); }
PredictedType getPrediction(NodeIndex nodeIndex) { return getPrediction(graph()[nodeIndex]); }
@@ -277,7 +252,7 @@ public:
#if USE(JSVALUE32_64)
void* addressOfDoubleConstant(NodeIndex nodeIndex)
{
- ASSERT(isNumberConstant(nodeIndex));
+ ASSERT(m_graph.isNumberConstant(nodeIndex));
unsigned constantIndex = graph()[nodeIndex].constantNumber();
return &(codeBlock()->constantRegister(FirstConstantRegisterIndex + constantIndex));
}
@@ -339,14 +314,6 @@ public:
#endif
}
- ValueProfile* valueProfileFor(NodeIndex nodeIndex)
- {
- if (nodeIndex == NoNode)
- return 0;
-
- return m_graph.valueProfileFor(nodeIndex, baselineCodeBlockFor(m_graph[nodeIndex].codeOrigin));
- }
-
private:
// Internal implementation to compile.
void compileEntry();
diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h
index 2b6ebdb7f..87dae7786 100644
--- a/Source/JavaScriptCore/dfg/DFGNode.h
+++ b/Source/JavaScriptCore/dfg/DFGNode.h
@@ -485,7 +485,7 @@ struct Node {
{
return variableAccessData()->local();
}
-
+
bool hasIdentifier()
{
switch (op) {
diff --git a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
index cbcd1319a..65f4cfcdd 100644
--- a/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSREntry.cpp
@@ -45,7 +45,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
ASSERT(codeBlock->numberOfDFGOSREntries());
#if ENABLE(JIT_VERBOSE_OSR)
- printf("OSR in %p(%p) from bc#%u\n", codeBlock, codeBlock->alternative(), bytecodeIndex);
+ dataLog("OSR in %p(%p) from bc#%u\n", codeBlock, codeBlock->alternative(), bytecodeIndex);
#endif
JSGlobalData* globalData = &exec->globalData();
@@ -80,9 +80,9 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
if (argument >= exec->argumentCountIncludingThis()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because argument %zu was not passed, expected ", argument);
- entry->m_expectedValues.argument(argument).dump(stdout);
- printf(".\n");
+ dataLog(" OSR failed because argument %zu was not passed, expected ", argument);
+ entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
+ dataLog(".\n");
#endif
return 0;
}
@@ -95,9 +95,9 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
if (!entry->m_expectedValues.argument(argument).validate(value)) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because argument %zu is %s, expected ", argument, value.description());
- entry->m_expectedValues.argument(argument).dump(stdout);
- printf(".\n");
+ dataLog(" OSR failed because argument %zu is %s, expected ", argument, value.description());
+ entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
+ dataLog(".\n");
#endif
return 0;
}
@@ -107,7 +107,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
if (entry->m_localsForcedDouble.get(local)) {
if (!exec->registers()[local].jsValue().isNumber()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because variable %zu is %s, expected number.\n", local, exec->registers()[local].jsValue().description());
+ dataLog(" OSR failed because variable %zu is %s, expected number.\n", local, exec->registers()[local].jsValue().description());
#endif
return 0;
}
@@ -115,9 +115,9 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
}
if (!entry->m_expectedValues.local(local).validate(exec->registers()[local].jsValue())) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because variable %zu is %s, expected ", local, exec->registers()[local].jsValue().description());
- entry->m_expectedValues.local(local).dump(stdout);
- printf(".\n");
+ dataLog(" OSR failed because variable %zu is %s, expected ", local, exec->registers()[local].jsValue().description());
+ entry->m_expectedValues.local(local).dump(WTF::dataFile());
+ dataLog(".\n");
#endif
return 0;
}
@@ -132,13 +132,13 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
if (!globalData->interpreter->registerFile().grow(&exec->registers()[codeBlock->m_numCalleeRegisters])) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR failed because stack growth failed.\n");
+ dataLog(" OSR failed because stack growth failed.\n");
#endif
return 0;
}
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR should succeed.\n");
+ dataLog(" OSR should succeed.\n");
#endif
#if USE(JSVALUE64)
@@ -158,7 +158,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
void* result = codeBlock->getJITCode().executableAddressAtOffset(entry->m_machineCodeOffset);
#if ENABLE(JIT_VERBOSE_OSR)
- printf(" OSR returning machine code address %p.\n", result);
+ dataLog(" OSR returning machine code address %p.\n", result);
#endif
return result;
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
index 113f2ea0d..95e4d8bf2 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp
@@ -33,7 +33,7 @@
namespace JSC { namespace DFG {
-OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, ValueProfile* valueProfile, MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
+OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex)
: m_jsValueSource(jsValueSource)
, m_valueProfile(valueProfile)
, m_check(check)
@@ -67,7 +67,7 @@ bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, Code
if (static_cast<double>(m_count) / dfgCodeBlock->speculativeFailCounter() <= Options::osrExitProminenceForFrequentExitSite)
return false;
- return AssemblyHelpers::baselineCodeBlockForOriginAndBaselineCodeBlock(m_codeOrigin, profiledCodeBlock)->addFrequentExitSite(FrequentExitSite(m_codeOrigin.bytecodeIndex, m_kind));
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(m_codeOrigin, profiledCodeBlock)->addFrequentExitSite(FrequentExitSite(m_codeOrigin.bytecodeIndex, m_kind));
}
} } // namespace JSC::DFG
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h
index 8e3fa6a5d..c28f7cbef 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExit.h
+++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h
@@ -37,6 +37,7 @@
#include "DFGGPRInfo.h"
#include "DFGOperands.h"
#include "MacroAssembler.h"
+#include "MethodOfGettingAValueProfile.h"
#include "ValueProfile.h"
#include "ValueRecovery.h"
#include <wtf/Vector.h>
@@ -82,12 +83,12 @@ private:
// This structure describes how to exit the speculative path by
// going into baseline code.
struct OSRExit {
- OSRExit(ExitKind, JSValueSource, ValueProfile*, MacroAssembler::Jump, SpeculativeJIT*, unsigned recoveryIndex = 0);
+ OSRExit(ExitKind, JSValueSource, MethodOfGettingAValueProfile, MacroAssembler::Jump, SpeculativeJIT*, unsigned recoveryIndex = 0);
MacroAssemblerCodeRef m_code;
JSValueSource m_jsValueSource;
- ValueProfile* m_valueProfile;
+ MethodOfGettingAValueProfile m_valueProfile;
CorrectableJumpPoint m_check;
NodeIndex m_nodeIndex;
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
index 1b88c4ffc..a195ee3ba 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp
@@ -48,12 +48,27 @@ void compileOSRExit(ExecState* exec)
uint32_t exitIndex = globalData->osrExitIndex;
OSRExit& exit = codeBlock->osrExit(exitIndex);
+ // Make sure all code on our inline stack is JIT compiled. This is necessary since
+ // we may opt to inline a code block even before it had ever been compiled by the
+ // JIT, but our OSR exit infrastructure currently only works if the target of the
+ // OSR exit is JIT code. This could be changed since there is nothing particularly
+ // hard about doing an OSR exit into the interpreter, but for now this seems to make
+ // sense in that if we're OSR exiting from inlined code of a DFG code block, then
+ // probably it's a good sign that the thing we're exiting into is hot. Even more
+ // interestingly, since the code was inlined, it may never otherwise get JIT
+ // compiled since the act of inlining it may ensure that it otherwise never runs.
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())
+ ->baselineCodeBlockFor(codeOrigin.inlineCallFrame->isCall ? CodeForCall : CodeForConstruct)
+ ->jitCompile(*globalData);
+ }
+
SpeculationRecovery* recovery = 0;
if (exit.m_recoveryIndex)
recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1);
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "Generating OSR exit #%u (bc#%u, @%u, %s) for code block %p.\n", exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, exitKindToString(exit.m_kind), codeBlock);
+ dataLog("Generating OSR exit #%u (bc#%u, @%u, %s) for code block %p.\n", exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, exitKindToString(exit.m_kind), codeBlock);
#endif
{
@@ -66,7 +81,7 @@ void compileOSRExit(ExecState* exec)
exit.m_code = patchBuffer.finalizeCode();
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "OSR exit code at [%p, %p).\n", patchBuffer.debugAddress(), static_cast<char*>(patchBuffer.debugAddress()) + patchBuffer.debugSize());
+ dataLog("OSR exit code at [%p, %p).\n", patchBuffer.debugAddress(), static_cast<char*>(patchBuffer.debugAddress()) + patchBuffer.debugSize());
#endif
}
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 4e33d7b02..a672234a3 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -36,15 +36,15 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
{
// 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "OSR exit for Node @%d (", (int)exit.m_nodeIndex);
+ dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
- fprintf(stderr, "bc#%u", codeOrigin.bytecodeIndex);
+ dataLog("bc#%u", codeOrigin.bytecodeIndex);
if (!codeOrigin.inlineCallFrame)
break;
- fprintf(stderr, " -> %p ", codeOrigin.inlineCallFrame->executable.get());
+ dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
}
- fprintf(stderr, ") at JIT offset 0x%x ", m_jit.debugOffset());
- exit.dump(stderr);
+ dataLog(") at JIT offset 0x%x ", m_jit.debugOffset());
+ exit.dump(WTF::dataFile());
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
@@ -83,6 +83,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
// 3) Refine some value profile, if appropriate.
if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
+ EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+
if (exit.m_jsValueSource.isAddress()) {
// Save a register so we can use it.
GPRReg scratch = GPRInfo::regT0;
@@ -91,16 +93,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(uint32_t)));
m_jit.store32(scratch, scratchBuffer);
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
- m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
+ m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
m_jit.load32(scratchBuffer, scratch);
} else if (exit.m_jsValueSource.hasKnownTag()) {
- m_jit.store32(AssemblyHelpers::Imm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
+ m_jit.store32(AssemblyHelpers::Imm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
} else {
- m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.tag);
- m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(exit.m_valueProfile->specFailBucket(0))->asBits.payload);
+ m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
+ m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
}
}
@@ -653,7 +655,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
m_jit.jump(GPRInfo::regT2);
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, " -> %p\n", jumpTarget);
+ dataLog(" -> %p\n", jumpTarget);
#endif
}
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
index 98c891ac7..f5e03973c 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp
@@ -36,15 +36,15 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
{
// 1) Pro-forma stuff.
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "OSR exit for Node @%d (", (int)exit.m_nodeIndex);
+ dataLog("OSR exit for Node @%d (", (int)exit.m_nodeIndex);
for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
- fprintf(stderr, "bc#%u", codeOrigin.bytecodeIndex);
+ dataLog("bc#%u", codeOrigin.bytecodeIndex);
if (!codeOrigin.inlineCallFrame)
break;
- fprintf(stderr, " -> %p ", codeOrigin.inlineCallFrame->executable.get());
+ dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get());
}
- fprintf(stderr, ") ");
- exit.dump(stderr);
+ dataLog(") ");
+ exit.dump(WTF::dataFile());
#endif
#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
@@ -88,14 +88,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
// 3) Refine some value profile, if appropriate.
if (!!exit.m_jsValueSource && !!exit.m_valueProfile) {
+ EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
+
+#if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE)
+ dataLog(" (have exit profile, bucket %p) ", bucket);
+#endif
+
if (exit.m_jsValueSource.isAddress()) {
// We can't be sure that we have a spare register. So use the tagTypeNumberRegister,
// since we know how to restore it.
m_jit.loadPtr(AssemblyHelpers::Address(exit.m_jsValueSource.asAddress()), GPRInfo::tagTypeNumberRegister);
- m_jit.storePtr(GPRInfo::tagTypeNumberRegister, exit.m_valueProfile->specFailBucket(0));
+ m_jit.storePtr(GPRInfo::tagTypeNumberRegister, bucket);
m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(TagTypeNumber)), GPRInfo::tagTypeNumberRegister);
} else
- m_jit.storePtr(exit.m_jsValueSource.gpr(), exit.m_valueProfile->specFailBucket(0));
+ m_jit.storePtr(exit.m_jsValueSource.gpr(), bucket);
}
// 4) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
@@ -184,24 +190,24 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
}
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, " ");
+ dataLog(" ");
if (numberOfPoisonedVirtualRegisters)
- fprintf(stderr, "Poisoned=%u ", numberOfPoisonedVirtualRegisters);
+ dataLog("Poisoned=%u ", numberOfPoisonedVirtualRegisters);
if (numberOfDisplacedVirtualRegisters)
- fprintf(stderr, "Displaced=%u ", numberOfDisplacedVirtualRegisters);
+ dataLog("Displaced=%u ", numberOfDisplacedVirtualRegisters);
if (haveUnboxedInt32s)
- fprintf(stderr, "UnboxedInt32 ");
+ dataLog("UnboxedInt32 ");
if (haveUnboxedDoubles)
- fprintf(stderr, "UnboxedDoubles ");
+ dataLog("UnboxedDoubles ");
if (haveUInt32s)
- fprintf(stderr, "UInt32 ");
+ dataLog("UInt32 ");
if (haveFPRs)
- fprintf(stderr, "FPR ");
+ dataLog("FPR ");
if (haveConstants)
- fprintf(stderr, "Constants ");
+ dataLog("Constants ");
if (haveUndefined)
- fprintf(stderr, "Undefined ");
- fprintf(stderr, " ");
+ dataLog("Undefined ");
+ dataLog(" ");
#endif
EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(m_jit.globalData()->scratchBufferForSize(sizeof(EncodedJSValue) * std::max(haveUInt32s ? 2u : 0u, numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters))));
@@ -624,7 +630,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
m_jit.jump(GPRInfo::regT1);
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "-> %p\n", jumpTarget);
+ dataLog("-> %p\n", jumpTarget);
#endif
}
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index 569b4fe86..165a21416 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -26,11 +26,10 @@
#include "config.h"
#include "DFGOperations.h"
-#if ENABLE(DFG_JIT)
-
#include "CodeBlock.h"
#include "DFGOSRExit.h"
#include "DFGRepatch.h"
+#include "HostCallReturnValue.h"
#include "GetterSetter.h"
#include "InlineASM.h"
#include "Interpreter.h"
@@ -38,6 +37,8 @@
#include "JSGlobalData.h"
#include "Operations.h"
+#if ENABLE(DFG_JIT)
+
#if CPU(X86_64)
#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
@@ -611,6 +612,70 @@ void DFG_OPERATION operationPutByIdDirectNonStrictOptimizeWithReturnAddress(Exec
stubInfo.seen = true;
}
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdStrictBuildList);
+void DFG_OPERATION operationPutByIdStrictBuildListWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue(base);
+ PutPropertySlot slot(true);
+
+ baseValue.put(exec, *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ dfgBuildPutByIdList(exec, baseValue, *propertyName, slot, stubInfo, NotDirect);
+}
+
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdNonStrictBuildList);
+void DFG_OPERATION operationPutByIdNonStrictBuildListWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue(base);
+ PutPropertySlot slot(false);
+
+ baseValue.put(exec, *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ dfgBuildPutByIdList(exec, baseValue, *propertyName, slot, stubInfo, NotDirect);
+}
+
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdDirectStrictBuildList);
+void DFG_OPERATION operationPutByIdDirectStrictBuildListWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
+ JSValue value = JSValue::decode(encodedValue);
+ PutPropertySlot slot(true);
+
+ ASSERT(base->isObject());
+ asObject(base)->putDirect(exec->globalData(), *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ dfgBuildPutByIdList(exec, base, *propertyName, slot, stubInfo, Direct);
+}
+
+V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(operationPutByIdDirectNonStrictBuildList);
+void DFG_OPERATION operationPutByIdDirectNonStrictBuildListWithReturnAddress(ExecState* exec, EncodedJSValue encodedValue, JSCell* base, Identifier* propertyName, ReturnAddressPtr returnAddress)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ NativeCallFrameTracer tracer(globalData, exec);
+
+ JSValue value = JSValue::decode(encodedValue);
+ PutPropertySlot slot(false);
+
+ ASSERT(base->isObject());
+ asObject(base)->putDirect(exec->globalData(), *propertyName, value, slot);
+
+ StructureStubInfo& stubInfo = exec->codeBlock()->getStubInfo(returnAddress);
+ dfgBuildPutByIdList(exec, base, *propertyName, slot, stubInfo, Direct);
+}
+
size_t DFG_OPERATION operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
JSGlobalData* globalData = &exec->globalData();
@@ -673,50 +738,6 @@ size_t DFG_OPERATION operationCompareStrictEq(ExecState* exec, EncodedJSValue en
return JSValue::strictEqual(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
}
-EncodedJSValue DFG_OPERATION getHostCallReturnValue();
-EncodedJSValue DFG_OPERATION getHostCallReturnValueWithExecState(ExecState*);
-
-#if CPU(X86_64)
-asm (
-".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
-HIDE_SYMBOL(getHostCallReturnValue) "\n"
-SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "mov -40(%r13), %r13\n"
- "mov %r13, %rdi\n"
- "jmp " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
-);
-#elif CPU(X86)
-asm (
-".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
-HIDE_SYMBOL(getHostCallReturnValue) "\n"
-SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "mov -40(%edi), %edi\n"
- "mov %edi, 4(%esp)\n"
- "jmp " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
-);
-#elif CPU(ARM_THUMB2)
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
-HIDE_SYMBOL(getHostCallReturnValue) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
-SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "ldr r5, [r5, #-40]" "\n"
- "cpy r0, r5" "\n"
- "b " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
-);
-#endif
-
-EncodedJSValue DFG_OPERATION getHostCallReturnValueWithExecState(ExecState* exec)
-{
- JSGlobalData* globalData = &exec->globalData();
- NativeCallFrameTracer tracer(globalData, exec);
-
- return JSValue::encode(exec->globalData().hostCallReturnValue);
-}
-
static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind)
{
ExecState* exec = execCallee->callerFrame();
@@ -724,6 +745,7 @@ static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializ
execCallee->setScopeChain(exec->scopeChain());
execCallee->setCodeBlock(0);
+ execCallee->clearReturnPC();
if (kind == CodeForCall) {
CallData callData;
@@ -1021,7 +1043,7 @@ void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void*
SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
CodeBlock* codeBlock = debugInfo->codeBlock;
CodeBlock* alternative = codeBlock->alternative();
- printf("Speculation failure in %p at @%u with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, success/fail %u/%u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->jitExecuteCounter() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? alternative->optimizationDelayCounter() : 0, codeBlock->speculativeSuccessCounter(), codeBlock->speculativeFailCounter());
+ dataLog("Speculation failure in %p at @%u with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, success/fail %u/%u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->jitExecuteCounter() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? alternative->optimizationDelayCounter() : 0, codeBlock->speculativeSuccessCounter(), codeBlock->speculativeFailCounter());
}
#endif
@@ -1029,3 +1051,52 @@ void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void*
} } // namespace JSC::DFG
#endif
+
+#if COMPILER(GCC)
+
+namespace JSC {
+
+#if CPU(X86_64)
+asm (
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "mov -40(%r13), %r13\n"
+ "mov %r13, %rdi\n"
+ "jmp " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
+);
+#elif CPU(X86)
+asm (
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "mov -40(%edi), %edi\n"
+ "mov %edi, 4(%esp)\n"
+ "jmp " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
+);
+#elif CPU(ARM_THUMB2)
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "ldr r5, [r5, #-40]" "\n"
+ "cpy r0, r5" "\n"
+ "b " SYMBOL_STRING_RELOCATION(getHostCallReturnValueWithExecState) "\n"
+);
+#endif
+
+extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState* exec)
+{
+ if (!exec)
+ return JSValue::encode(JSValue());
+ return JSValue::encode(exec->globalData().hostCallReturnValue);
+}
+
+} // namespace JSC
+
+#endif // COMPILER(GCC)
+
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index 5de9d3af3..cdb88de27 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -28,7 +28,8 @@
#if ENABLE(DFG_JIT)
-#include <dfg/DFGJITCompiler.h>
+#include "DFGJITCompiler.h"
+#include "PutKind.h"
namespace JSC {
@@ -36,8 +37,6 @@ struct GlobalResolveInfo;
namespace DFG {
-enum PutKind { Direct, NotDirect };
-
extern "C" {
#if CALLING_CONVENTION_IS_STDCALL
@@ -130,6 +129,10 @@ void DFG_OPERATION operationPutByIdStrictOptimize(ExecState*, EncodedJSValue enc
void DFG_OPERATION operationPutByIdNonStrictOptimize(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
void DFG_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
void DFG_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdStrictBuildList(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdNonStrictBuildList(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
+void DFG_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, EncodedJSValue encodedValue, JSCell* base, Identifier*);
// These comparisons return a boolean within a size_t such that the value is zero extended to fill the register.
size_t DFG_OPERATION operationCompareLess(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
size_t DFG_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2);
diff --git a/Source/JavaScriptCore/dfg/DFGPropagator.h b/Source/JavaScriptCore/dfg/DFGPhase.cpp
index e24c06b2b..bc1eabff4 100644
--- a/Source/JavaScriptCore/dfg/DFGPropagator.h
+++ b/Source/JavaScriptCore/dfg/DFGPhase.cpp
@@ -23,24 +23,26 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef DFGPropagator_h
-#define DFGPropagator_h
+#include "config.h"
+#include "DFGPhase.h"
#if ENABLE(DFG_JIT)
-#include <dfg/DFGGraph.h>
+namespace JSC { namespace DFG {
-namespace JSC {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+void Phase::beginPhase()
+{
+ dataLog("Beginning DFG phase %s.\n", m_name);
+ dataLog("Graph before %s:\n", m_name);
+ m_graph.dump(m_codeBlock);
+}
-class CodeBlock;
-class JSGlobalData;
-
-namespace DFG {
-
-// Propagate dynamic predictions from value sources to variables.
-void propagate(Graph&, JSGlobalData*, CodeBlock*);
+void Phase::endPhase()
+{
+}
+#endif
} } // namespace JSC::DFG
-#endif
-#endif
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGPhase.h b/Source/JavaScriptCore/dfg/DFGPhase.h
new file mode 100644
index 000000000..1d344c0c3
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGPhase.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGPhase_h
+#define DFGPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGCommon.h"
+#include "DFGGraph.h"
+
+namespace JSC { namespace DFG {
+
+class Phase {
+public:
+ Phase(Graph& graph, const char* name)
+ : m_graph(graph)
+ , m_name(name)
+ {
+ beginPhase();
+ }
+
+ ~Phase()
+ {
+ endPhase();
+ }
+
+ // Each phase must have a run() method.
+
+protected:
+ // Things you need to have a DFG compiler phase.
+ Graph& m_graph;
+
+ JSGlobalData& globalData() { return m_graph.m_globalData; }
+ CodeBlock* codeBlock() { return m_graph.m_codeBlock; }
+ CodeBlock* profiledBlock() { return m_graph.m_profiledBlock; }
+
+ const char* m_name;
+
+private:
+ // Call these hooks when starting and finishing.
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ void beginPhase();
+ void endPhase();
+#else // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ void beginPhase() { }
+ void endPhase() { }
+#endif // DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+};
+
+template<typename PhaseType>
+void runPhase(Graph& graph)
+{
+ PhaseType phase(graph);
+ phase.run();
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGPhase_h
+
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
new file mode 100644
index 000000000..b4c9e075a
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -0,0 +1,709 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGPredictionPropagationPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGGraph.h"
+#include "DFGPhase.h"
+
+namespace JSC { namespace DFG {
+
+class PredictionPropagationPhase : public Phase {
+public:
+ PredictionPropagationPhase(Graph& graph)
+ : Phase(graph, "prediction propagation")
+ {
+ }
+
+ void run()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ m_count = 0;
+#endif
+ // Two stage process: first propagate predictions, then propagate while doing double voting.
+
+ do {
+ m_changed = false;
+
+ // Forward propagation is near-optimal for both topologically-sorted and
+ // DFS-sorted code.
+ propagateForward();
+ if (!m_changed)
+ break;
+
+ // Backward propagation reduces the likelihood that pathological code will
+ // cause slowness. Loops (especially nested ones) resemble backward flow.
+ // This pass captures two cases: (1) it detects if the forward fixpoint
+ // found a sound solution and (2) short-circuits backward flow.
+ m_changed = false;
+ propagateBackward();
+ } while (m_changed);
+
+ do {
+ m_changed = false;
+ doRoundOfDoubleVoting();
+ propagateForward();
+ if (!m_changed)
+ break;
+
+ m_changed = false;
+ doRoundOfDoubleVoting();
+ propagateBackward();
+ } while (m_changed);
+
+ fixup();
+ }
+
+private:
+ bool setPrediction(PredictedType prediction)
+ {
+ ASSERT(m_graph[m_compileIndex].hasResult());
+
+ // setPrediction() is used when we know that there is no way that we can change
+ // our minds about what the prediction is going to be. There is no semantic
+ // difference between setPrediction() and mergePrediction() other than the
+ // increased checking to validate this property.
+ ASSERT(m_graph[m_compileIndex].prediction() == PredictNone || m_graph[m_compileIndex].prediction() == prediction);
+
+ return m_graph[m_compileIndex].predict(prediction);
+ }
+
+ bool mergePrediction(PredictedType prediction)
+ {
+ ASSERT(m_graph[m_compileIndex].hasResult());
+
+ return m_graph[m_compileIndex].predict(prediction);
+ }
+
+ void propagate(Node& node)
+ {
+ if (!node.shouldGenerate())
+ return;
+
+ NodeType op = node.op;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
+#endif
+
+ bool changed = false;
+
+ switch (op) {
+ case JSConstant:
+ case WeakJSConstant: {
+ changed |= setPrediction(predictionFromValue(m_graph.valueOfJSConstant(m_compileIndex)));
+ break;
+ }
+
+ case GetLocal: {
+ PredictedType prediction = node.variableAccessData()->prediction();
+ if (prediction)
+ changed |= mergePrediction(prediction);
+ break;
+ }
+
+ case SetLocal: {
+ changed |= node.variableAccessData()->predict(m_graph[node.child1()].prediction());
+ break;
+ }
+
+ case BitAnd:
+ case BitOr:
+ case BitXor:
+ case BitRShift:
+ case BitLShift:
+ case BitURShift:
+ case ValueToInt32: {
+ changed |= setPrediction(PredictInt32);
+ break;
+ }
+
+ case ArrayPop:
+ case ArrayPush: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case StringCharCodeAt: {
+ changed |= mergePrediction(PredictInt32);
+ break;
+ }
+
+ case ArithMod: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+ break;
+ }
+
+ case UInt32ToNumber: {
+ if (nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= setPrediction(PredictInt32);
+ else
+ changed |= setPrediction(PredictNumber);
+ break;
+ }
+
+ case ValueAdd: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (isNumberPrediction(left) && isNumberPrediction(right)) {
+ if (m_graph.addShouldSpeculateInteger(node))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ } else if (!(left & PredictNumber) || !(right & PredictNumber)) {
+ // left or right is definitely something other than a number.
+ changed |= mergePrediction(PredictString);
+ } else
+ changed |= mergePrediction(PredictString | PredictInt32 | PredictDouble);
+ }
+ break;
+ }
+
+ case ArithAdd:
+ case ArithSub: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (m_graph.addShouldSpeculateInteger(node))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+ break;
+ }
+
+ case ArithMul:
+ case ArithMin:
+ case ArithMax:
+ case ArithDiv: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ if (left && right) {
+ if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(PredictInt32);
+ else
+ changed |= mergePrediction(PredictDouble);
+ }
+ break;
+ }
+
+ case ArithSqrt: {
+ changed |= setPrediction(PredictDouble);
+ break;
+ }
+
+ case ArithAbs: {
+ PredictedType child = m_graph[node.child1()].prediction();
+ if (child) {
+ if (nodeCanSpeculateInteger(node.arithNodeFlags()))
+ changed |= mergePrediction(child);
+ else
+ changed |= setPrediction(PredictDouble);
+ }
+ break;
+ }
+
+ case LogicalNot:
+ case CompareLess:
+ case CompareLessEq:
+ case CompareGreater:
+ case CompareGreaterEq:
+ case CompareEq:
+ case CompareStrictEq:
+ case InstanceOf: {
+ changed |= setPrediction(PredictBoolean);
+ break;
+ }
+
+ case GetById: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ else if (codeBlock()->identifier(node.identifierNumber()) == globalData().propertyNames->length) {
+ // If there is no prediction from value profiles, check if we might be
+ // able to infer the type ourselves.
+ bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
+ bool isString = isStringPrediction(m_graph[node.child1()].prediction());
+ bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
+ bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
+ bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
+ bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
+ bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
+ bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
+ bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
+ bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
+ bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
+ bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
+ if (isArray || isString || isByteArray || isInt8Array || isInt16Array || isInt32Array || isUint8Array || isUint8ClampedArray || isUint16Array || isUint32Array || isFloat32Array || isFloat64Array)
+ changed |= mergePrediction(PredictInt32);
+ }
+ break;
+ }
+
+ case GetByIdFlush:
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+
+ case GetByVal: {
+ if (m_graph[node.child1()].shouldSpeculateUint32Array() || m_graph[node.child1()].shouldSpeculateFloat32Array() || m_graph[node.child1()].shouldSpeculateFloat64Array())
+ changed |= mergePrediction(PredictDouble);
+ else if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case GetPropertyStorage:
+ case GetIndexedPropertyStorage: {
+ changed |= setPrediction(PredictOther);
+ break;
+ }
+
+ case GetByOffset: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case Call:
+ case Construct: {
+ if (node.getHeapPrediction())
+ changed |= mergePrediction(node.getHeapPrediction());
+ break;
+ }
+
+ case ConvertThis: {
+ PredictedType prediction = m_graph[node.child1()].prediction();
+ if (prediction) {
+ if (prediction & ~PredictObjectMask) {
+ prediction &= PredictObjectMask;
+ prediction = mergePredictions(prediction, PredictObjectOther);
+ }
+ changed |= mergePrediction(prediction);
+ }
+ break;
+ }
+
+ case GetGlobalVar: {
+ PredictedType prediction = m_graph.getGlobalVarPrediction(node.varNumber());
+ if (prediction)
+ changed |= mergePrediction(prediction);
+ break;
+ }
+
+ case PutGlobalVar: {
+ changed |= m_graph.predictGlobalVar(node.varNumber(), m_graph[node.child1()].prediction());
+ break;
+ }
+
+ case GetScopedVar:
+ case Resolve:
+ case ResolveBase:
+ case ResolveBaseStrictPut:
+ case ResolveGlobal: {
+ PredictedType prediction = node.getHeapPrediction();
+ if (prediction)
+ changed |= mergePrediction(prediction);
+ break;
+ }
+
+ case GetScopeChain: {
+ changed |= setPrediction(PredictCellOther);
+ break;
+ }
+
+ case GetCallee: {
+ changed |= setPrediction(PredictFunction);
+ break;
+ }
+
+ case CreateThis:
+ case NewObject: {
+ changed |= setPrediction(PredictFinalObject);
+ break;
+ }
+
+ case NewArray:
+ case NewArrayBuffer: {
+ changed |= setPrediction(PredictArray);
+ break;
+ }
+
+ case NewRegexp: {
+ changed |= setPrediction(PredictObjectOther);
+ break;
+ }
+
+ case StringCharAt:
+ case StrCat: {
+ changed |= setPrediction(PredictString);
+ break;
+ }
+
+ case ToPrimitive: {
+ PredictedType child = m_graph[node.child1()].prediction();
+ if (child) {
+ if (isObjectPrediction(child)) {
+ // I'd love to fold this case into the case below, but I can't, because
+ // removing PredictObjectMask from something that only has an object
+ // prediction and nothing else means we have an ill-formed PredictedType
+ // (strong predict-none). This should be killed once we remove all traces
+ // of static (aka weak) predictions.
+ changed |= mergePrediction(PredictString);
+ } else if (child & PredictObjectMask) {
+ // Objects get turned into strings. So if the input has hints of objectness,
+                // the output will have hints of stringiness.
+ changed |= mergePrediction(mergePredictions(child & ~PredictObjectMask, PredictString));
+ } else
+ changed |= mergePrediction(child);
+ }
+ break;
+ }
+
+ case GetArrayLength:
+ case GetByteArrayLength:
+ case GetInt8ArrayLength:
+ case GetInt16ArrayLength:
+ case GetInt32ArrayLength:
+ case GetUint8ArrayLength:
+ case GetUint8ClampedArrayLength:
+ case GetUint16ArrayLength:
+ case GetUint32ArrayLength:
+ case GetFloat32ArrayLength:
+ case GetFloat64ArrayLength:
+ case GetStringLength: {
+ // This node should never be visible at this stage of compilation. It is
+ // inserted by fixup(), which follows this phase.
+ ASSERT_NOT_REACHED();
+ break;
+ }
+
+#ifndef NDEBUG
+ // These get ignored because they don't return anything.
+ case PutScopedVar:
+ case DFG::Jump:
+ case Branch:
+ case Breakpoint:
+ case Return:
+ case CheckHasInstance:
+ case Phi:
+ case Flush:
+ case Throw:
+ case ThrowReferenceError:
+ case ForceOSRExit:
+ case SetArgument:
+ case PutByVal:
+ case PutByValAlias:
+ case PutById:
+ case PutByIdDirect:
+ case CheckStructure:
+ case CheckFunction:
+ case PutStructure:
+ case PutByOffset:
+ break;
+
+        // These get ignored because they don't do anything.
+ case Phantom:
+ case InlineStart:
+ case Nop:
+ break;
+#else
+ default:
+ break;
+#endif
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("%s\n", predictionToString(m_graph[m_compileIndex].prediction()));
+#endif
+
+ m_changed |= changed;
+ }
+
+ void propagateForward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Propagating predictions forward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
+ propagate(m_graph[m_compileIndex]);
+ }
+
+ void propagateBackward()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Propagating predictions backward [%u]\n", ++m_count);
+#endif
+ for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
+ propagate(m_graph[m_compileIndex]);
+ }
+
+ void vote(NodeUse nodeUse, VariableAccessData::Ballot ballot)
+ {
+ switch (m_graph[nodeUse].op) {
+ case ValueToInt32:
+ case UInt32ToNumber:
+ nodeUse = m_graph[nodeUse].child1();
+ break;
+ default:
+ break;
+ }
+
+ if (m_graph[nodeUse].op == GetLocal)
+ m_graph[nodeUse].variableAccessData()->vote(ballot);
+ }
+
+ void vote(Node& node, VariableAccessData::Ballot ballot)
+ {
+ if (node.op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ vote(m_graph.m_varArgChildren[childIdx], ballot);
+ return;
+ }
+
+ if (!node.child1())
+ return;
+ vote(node.child1(), ballot);
+ if (!node.child2())
+ return;
+ vote(node.child2(), ballot);
+ if (!node.child3())
+ return;
+ vote(node.child3(), ballot);
+ }
+
+ void doRoundOfDoubleVoting()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Voting on double uses of locals [%u]\n", m_count);
+#endif
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i)
+ m_graph.m_variableAccessData[i].find()->clearVotes();
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex) {
+ Node& node = m_graph[m_compileIndex];
+ switch (node.op) {
+ case ValueAdd:
+ case ArithAdd:
+ case ArithSub: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ VariableAccessData::Ballot ballot;
+
+ if (isNumberPrediction(left) && isNumberPrediction(right)
+ && !m_graph.addShouldSpeculateInteger(node))
+ ballot = VariableAccessData::VoteDouble;
+ else
+ ballot = VariableAccessData::VoteValue;
+
+ vote(node.child1(), ballot);
+ vote(node.child2(), ballot);
+ break;
+ }
+
+ case ArithMul:
+ case ArithMin:
+ case ArithMax:
+ case ArithMod:
+ case ArithDiv: {
+ PredictedType left = m_graph[node.child1()].prediction();
+ PredictedType right = m_graph[node.child2()].prediction();
+
+ VariableAccessData::Ballot ballot;
+
+ if (isNumberPrediction(left) && isNumberPrediction(right) && !(Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child1()]) && node.canSpeculateInteger()))
+ ballot = VariableAccessData::VoteDouble;
+ else
+ ballot = VariableAccessData::VoteValue;
+
+ vote(node.child1(), ballot);
+ vote(node.child2(), ballot);
+ break;
+ }
+
+ case ArithAbs:
+ VariableAccessData::Ballot ballot;
+ if (!(m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger()))
+ ballot = VariableAccessData::VoteDouble;
+ else
+ ballot = VariableAccessData::VoteValue;
+
+ vote(node.child1(), ballot);
+ break;
+
+ case ArithSqrt:
+ vote(node.child1(), VariableAccessData::VoteDouble);
+ break;
+
+ case SetLocal: {
+ PredictedType prediction = m_graph[node.child1()].prediction();
+ if (isDoublePrediction(prediction))
+ node.variableAccessData()->vote(VariableAccessData::VoteDouble);
+ else if (!isNumberPrediction(prediction) || isInt32Prediction(prediction))
+ node.variableAccessData()->vote(VariableAccessData::VoteValue);
+ break;
+ }
+
+ default:
+ vote(node, VariableAccessData::VoteValue);
+ break;
+ }
+ }
+ for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i)
+ m_changed |= m_graph.m_variableAccessData[i].find()->tallyVotesForShouldUseDoubleFormat();
+ }
+
+ void fixupNode(Node& node)
+ {
+ if (!node.shouldGenerate())
+ return;
+
+ NodeType op = node.op;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
+#endif
+
+ switch (op) {
+ case GetById: {
+ if (!isInt32Prediction(m_graph[m_compileIndex].prediction()))
+ break;
+ if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
+ break;
+ bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
+ bool isString = isStringPrediction(m_graph[node.child1()].prediction());
+ bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
+ bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
+ bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
+ bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
+ bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
+ bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
+ bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
+ bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
+ bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
+ bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
+ if (!isArray && !isString && !isByteArray && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
+ break;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
+#endif
+ if (isArray)
+ node.op = GetArrayLength;
+ else if (isString)
+ node.op = GetStringLength;
+ else if (isByteArray)
+ node.op = GetByteArrayLength;
+ else if (isInt8Array)
+ node.op = GetInt8ArrayLength;
+ else if (isInt16Array)
+ node.op = GetInt16ArrayLength;
+ else if (isInt32Array)
+ node.op = GetInt32ArrayLength;
+ else if (isUint8Array)
+ node.op = GetUint8ArrayLength;
+ else if (isUint8ClampedArray)
+ node.op = GetUint8ClampedArrayLength;
+ else if (isUint16Array)
+ node.op = GetUint16ArrayLength;
+ else if (isUint32Array)
+ node.op = GetUint32ArrayLength;
+ else if (isFloat32Array)
+ node.op = GetFloat32ArrayLength;
+ else if (isFloat64Array)
+ node.op = GetFloat64ArrayLength;
+ else
+ ASSERT_NOT_REACHED();
+ m_graph.deref(m_compileIndex); // No longer MustGenerate
+ break;
+ }
+ case GetIndexedPropertyStorage: {
+ PredictedType basePrediction = m_graph[node.child2()].prediction();
+ if (!(basePrediction & PredictInt32) && basePrediction) {
+ node.op = Nop;
+ m_graph.clearAndDerefChild1(node);
+ m_graph.clearAndDerefChild2(node);
+ m_graph.clearAndDerefChild3(node);
+ node.setRefCount(0);
+ }
+ break;
+ }
+ case GetByVal:
+ case StringCharAt:
+ case StringCharCodeAt: {
+ if (!!node.child3() && m_graph[node.child3()].op == Nop)
+ node.children.child3() = NodeUse();
+ break;
+ }
+ default:
+ break;
+ }
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("\n");
+#endif
+ }
+
+ void fixup()
+ {
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ dataLog("Performing Fixup\n");
+#endif
+ for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
+ fixupNode(m_graph[m_compileIndex]);
+ }
+
+ NodeIndex m_compileIndex;
+ bool m_changed;
+
+#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
+ unsigned m_count;
+#endif
+};
+
+void performPredictionPropagation(Graph& graph)
+{
+ runPhase<PredictionPropagationPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
new file mode 100644
index 000000000..fe127136a
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGPredictionPropagationPhase_h
+#define DFGPredictionPropagationPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// Propagate predictions gathered at heap load sites by the value profiler, and
+// from slow path executions, to generate a prediction for each node in the graph.
+// This is a crucial phase of compilation, since before running this phase, we
+// have no idea what types any node (or most variables) could possibly have, unless
+// that node is either a heap load, a call, a GetLocal for an argument, or an
+// arithmetic op that had definitely taken slow path. Most nodes (even most
+// arithmetic nodes) do not qualify for any of these categories. But after running
+// this phase, we'll have full information for the expected type of each node.
+
+void performPredictionPropagation(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGPredictionPropagationPhase_h
diff --git a/Source/JavaScriptCore/dfg/DFGPropagator.cpp b/Source/JavaScriptCore/dfg/DFGPropagator.cpp
deleted file mode 100644
index f00f13e96..000000000
--- a/Source/JavaScriptCore/dfg/DFGPropagator.cpp
+++ /dev/null
@@ -1,1743 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "DFGPropagator.h"
-
-#if ENABLE(DFG_JIT)
-
-#include "DFGAbstractState.h"
-#include "DFGGraph.h"
-#include "DFGScoreBoard.h"
-#include <wtf/FixedArray.h>
-
-namespace JSC { namespace DFG {
-
-class Propagator {
-public:
- Propagator(Graph& graph, JSGlobalData& globalData, CodeBlock* codeBlock, CodeBlock* profiledBlock)
- : m_graph(graph)
- , m_globalData(globalData)
- , m_codeBlock(codeBlock)
- , m_profiledBlock(profiledBlock)
- {
- // Replacements are used to implement local common subexpression elimination.
- m_replacements.resize(m_graph.size());
-
- for (unsigned i = 0; i < m_graph.size(); ++i)
- m_replacements[i] = NoNode;
-
- for (unsigned i = 0; i < LastNodeId; ++i)
- m_lastSeen[i] = NoNode;
- }
-
- void fixpoint()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- m_graph.dump(m_codeBlock);
-#endif
-
- propagateArithNodeFlags();
- propagatePredictions();
- fixup();
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Graph after propagation fixup:\n");
- m_graph.dump(m_codeBlock);
-#endif
-
- localCSE();
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Graph after CSE:\n");
- m_graph.dump(m_codeBlock);
-#endif
-
- allocateVirtualRegisters();
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Graph after virtual register allocation:\n");
- m_graph.dump(m_codeBlock);
-#endif
-
- globalCFA();
-
-#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Graph after propagation:\n");
- m_graph.dump(m_codeBlock);
-#endif
- }
-
-private:
- bool isNotNegZero(NodeIndex nodeIndex)
- {
- if (!m_graph.isNumberConstant(m_codeBlock, nodeIndex))
- return false;
- double value = m_graph.valueOfNumberConstant(m_codeBlock, nodeIndex);
- return !value && 1.0 / value < 0.0;
- }
-
- bool isNotZero(NodeIndex nodeIndex)
- {
- if (!m_graph.isNumberConstant(m_codeBlock, nodeIndex))
- return false;
- return !!m_graph.valueOfNumberConstant(m_codeBlock, nodeIndex);
- }
-
- void propagateArithNodeFlags(Node& node)
- {
- if (!node.shouldGenerate())
- return;
-
- NodeType op = node.op;
- ArithNodeFlags flags = 0;
-
- if (node.hasArithNodeFlags())
- flags = node.rawArithNodeFlags();
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" %s @%u: %s ", Graph::opName(op), m_compileIndex, arithNodeFlagsAsString(flags));
-#endif
-
- flags &= NodeUsedAsMask;
-
- bool changed = false;
-
- switch (op) {
- case ValueToInt32:
- case BitAnd:
- case BitOr:
- case BitXor:
- case BitLShift:
- case BitRShift:
- case BitURShift: {
- // These operations are perfectly happy with truncated integers,
- // so we don't want to propagate anything.
- break;
- }
-
- case UInt32ToNumber: {
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithAdd:
- case ValueAdd: {
- if (isNotNegZero(node.child1().index()) || isNotNegZero(node.child2().index()))
- flags &= ~NodeNeedsNegZero;
-
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithSub: {
- if (isNotZero(node.child1().index()) || isNotZero(node.child2().index()))
- flags &= ~NodeNeedsNegZero;
-
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithMul:
- case ArithDiv: {
- // As soon as a multiply happens, we can easily end up in the part
- // of the double domain where the point at which you do truncation
- // can change the outcome. So, ArithMul always checks for overflow
- // no matter what, and always forces its inputs to check as well.
-
- flags |= NodeUsedAsNumber | NodeNeedsNegZero;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithMin:
- case ArithMax: {
- flags |= NodeUsedAsNumber;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- break;
- }
-
- case ArithAbs: {
- flags &= ~NodeNeedsNegZero;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- break;
- }
-
- case PutByVal: {
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
- changed |= m_graph[node.child3()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
- break;
- }
-
- case GetByVal: {
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags | NodeUsedAsNumber | NodeNeedsNegZero);
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags | NodeUsedAsNumber);
- break;
- }
-
- default:
- flags |= NodeUsedAsNumber | NodeNeedsNegZero;
- if (op & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
- changed |= m_graph[m_graph.m_varArgChildren[childIdx]].mergeArithNodeFlags(flags);
- } else {
- if (!node.child1())
- break;
- changed |= m_graph[node.child1()].mergeArithNodeFlags(flags);
- if (!node.child2())
- break;
- changed |= m_graph[node.child2()].mergeArithNodeFlags(flags);
- if (!node.child3())
- break;
- changed |= m_graph[node.child3()].mergeArithNodeFlags(flags);
- }
- break;
- }
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("%s\n", changed ? "CHANGED" : "");
-#endif
-
- m_changed |= changed;
- }
-
- void propagateArithNodeFlagsForward()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Propagating arithmetic node flags forward [%u]\n", ++m_count);
-#endif
- for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
- propagateArithNodeFlags(m_graph[m_compileIndex]);
- }
-
- void propagateArithNodeFlagsBackward()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Propagating arithmetic node flags backward [%u]\n", ++m_count);
-#endif
- for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
- propagateArithNodeFlags(m_graph[m_compileIndex]);
- }
-
- void propagateArithNodeFlags()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- m_count = 0;
-#endif
- do {
- m_changed = false;
-
- // Up here we start with a backward pass because we suspect that to be
- // more profitable.
- propagateArithNodeFlagsBackward();
- if (!m_changed)
- break;
-
- m_changed = false;
- propagateArithNodeFlagsForward();
- } while (m_changed);
- }
-
- bool setPrediction(PredictedType prediction)
- {
- ASSERT(m_graph[m_compileIndex].hasResult());
-
- // setPrediction() is used when we know that there is no way that we can change
- // our minds about what the prediction is going to be. There is no semantic
- // difference between setPrediction() and mergePrediction() other than the
- // increased checking to validate this property.
- ASSERT(m_graph[m_compileIndex].prediction() == PredictNone || m_graph[m_compileIndex].prediction() == prediction);
-
- return m_graph[m_compileIndex].predict(prediction);
- }
-
- bool mergePrediction(PredictedType prediction)
- {
- ASSERT(m_graph[m_compileIndex].hasResult());
-
- return m_graph[m_compileIndex].predict(prediction);
- }
-
- void propagateNodePredictions(Node& node)
- {
- if (!node.shouldGenerate())
- return;
-
- NodeType op = node.op;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" %s @%u: ", Graph::opName(op), m_compileIndex);
-#endif
-
- bool changed = false;
-
- switch (op) {
- case JSConstant:
- case WeakJSConstant: {
- changed |= setPrediction(predictionFromValue(m_graph.valueOfJSConstant(m_codeBlock, m_compileIndex)));
- break;
- }
-
- case GetLocal: {
- PredictedType prediction = node.variableAccessData()->prediction();
- if (prediction)
- changed |= mergePrediction(prediction);
- break;
- }
-
- case SetLocal: {
- changed |= node.variableAccessData()->predict(m_graph[node.child1()].prediction());
- break;
- }
-
- case BitAnd:
- case BitOr:
- case BitXor:
- case BitRShift:
- case BitLShift:
- case BitURShift:
- case ValueToInt32: {
- changed |= setPrediction(PredictInt32);
- break;
- }
-
- case ArrayPop:
- case ArrayPush: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
- break;
- }
-
- case StringCharCodeAt: {
- changed |= mergePrediction(PredictInt32);
- break;
- }
-
- case ArithMod: {
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
-
- if (left && right) {
- if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
- changed |= mergePrediction(PredictInt32);
- else
- changed |= mergePrediction(PredictDouble);
- }
- break;
- }
-
- case UInt32ToNumber: {
- if (nodeCanSpeculateInteger(node.arithNodeFlags()))
- changed |= setPrediction(PredictInt32);
- else
- changed |= setPrediction(PredictNumber);
- break;
- }
-
- case ValueAdd: {
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
-
- if (left && right) {
- if (isNumberPrediction(left) && isNumberPrediction(right)) {
- if (m_graph.addShouldSpeculateInteger(node, m_codeBlock))
- changed |= mergePrediction(PredictInt32);
- else
- changed |= mergePrediction(PredictDouble);
- } else if (!(left & PredictNumber) || !(right & PredictNumber)) {
- // left or right is definitely something other than a number.
- changed |= mergePrediction(PredictString);
- } else
- changed |= mergePrediction(PredictString | PredictInt32 | PredictDouble);
- }
- break;
- }
-
- case ArithAdd:
- case ArithSub: {
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
-
- if (left && right) {
- if (m_graph.addShouldSpeculateInteger(node, m_codeBlock))
- changed |= mergePrediction(PredictInt32);
- else
- changed |= mergePrediction(PredictDouble);
- }
- break;
- }
-
- case ArithMul:
- case ArithMin:
- case ArithMax:
- case ArithDiv: {
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
-
- if (left && right) {
- if (isInt32Prediction(mergePredictions(left, right)) && nodeCanSpeculateInteger(node.arithNodeFlags()))
- changed |= mergePrediction(PredictInt32);
- else
- changed |= mergePrediction(PredictDouble);
- }
- break;
- }
-
- case ArithSqrt: {
- changed |= setPrediction(PredictDouble);
- break;
- }
-
- case ArithAbs: {
- PredictedType child = m_graph[node.child1()].prediction();
- if (child) {
- if (nodeCanSpeculateInteger(node.arithNodeFlags()))
- changed |= mergePrediction(child);
- else
- changed |= setPrediction(PredictDouble);
- }
- break;
- }
-
- case LogicalNot:
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- case CompareEq:
- case CompareStrictEq:
- case InstanceOf: {
- changed |= setPrediction(PredictBoolean);
- break;
- }
-
- case GetById: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
- else if (m_codeBlock->identifier(node.identifierNumber()) == m_globalData.propertyNames->length) {
- // If there is no prediction from value profiles, check if we might be
- // able to infer the type ourselves.
- bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
- bool isString = isStringPrediction(m_graph[node.child1()].prediction());
- bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
- bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
- bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
- bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
- bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
- bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
- bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
- bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
- bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
- bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
- if (isArray || isString || isByteArray || isInt8Array || isInt16Array || isInt32Array || isUint8Array || isUint8ClampedArray || isUint16Array || isUint32Array || isFloat32Array || isFloat64Array)
- changed |= mergePrediction(PredictInt32);
- }
- break;
- }
-
- case GetByIdFlush:
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
- break;
-
- case GetByVal: {
- if (m_graph[node.child1()].shouldSpeculateUint32Array() || m_graph[node.child1()].shouldSpeculateFloat32Array() || m_graph[node.child1()].shouldSpeculateFloat64Array())
- changed |= mergePrediction(PredictDouble);
- else if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
- break;
- }
-
- case GetPropertyStorage:
- case GetIndexedPropertyStorage: {
- changed |= setPrediction(PredictOther);
- break;
- }
-
- case GetByOffset: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
- break;
- }
-
- case Call:
- case Construct: {
- if (node.getHeapPrediction())
- changed |= mergePrediction(node.getHeapPrediction());
- break;
- }
-
- case ConvertThis: {
- PredictedType prediction = m_graph[node.child1()].prediction();
- if (prediction) {
- if (prediction & ~PredictObjectMask) {
- prediction &= PredictObjectMask;
- prediction = mergePredictions(prediction, PredictObjectOther);
- }
- changed |= mergePrediction(prediction);
- }
- break;
- }
-
- case GetGlobalVar: {
- PredictedType prediction = m_graph.getGlobalVarPrediction(node.varNumber());
- if (prediction)
- changed |= mergePrediction(prediction);
- break;
- }
-
- case PutGlobalVar: {
- changed |= m_graph.predictGlobalVar(node.varNumber(), m_graph[node.child1()].prediction());
- break;
- }
-
- case GetScopedVar:
- case Resolve:
- case ResolveBase:
- case ResolveBaseStrictPut:
- case ResolveGlobal: {
- PredictedType prediction = node.getHeapPrediction();
- if (prediction)
- changed |= mergePrediction(prediction);
- break;
- }
-
- case GetScopeChain: {
- changed |= setPrediction(PredictCellOther);
- break;
- }
-
- case GetCallee: {
- changed |= setPrediction(PredictFunction);
- break;
- }
-
- case CreateThis:
- case NewObject: {
- changed |= setPrediction(PredictFinalObject);
- break;
- }
-
- case NewArray:
- case NewArrayBuffer: {
- changed |= setPrediction(PredictArray);
- break;
- }
-
- case NewRegexp: {
- changed |= setPrediction(PredictObjectOther);
- break;
- }
-
- case StringCharAt:
- case StrCat: {
- changed |= setPrediction(PredictString);
- break;
- }
-
- case ToPrimitive: {
- PredictedType child = m_graph[node.child1()].prediction();
- if (child) {
- if (isObjectPrediction(child)) {
- // I'd love to fold this case into the case below, but I can't, because
- // removing PredictObjectMask from something that only has an object
- // prediction and nothing else means we have an ill-formed PredictedType
- // (strong predict-none). This should be killed once we remove all traces
- // of static (aka weak) predictions.
- changed |= mergePrediction(PredictString);
- } else if (child & PredictObjectMask) {
- // Objects get turned into strings. So if the input has hints of objectness,
- // the output will have hinsts of stringiness.
- changed |= mergePrediction(mergePredictions(child & ~PredictObjectMask, PredictString));
- } else
- changed |= mergePrediction(child);
- }
- break;
- }
-
- case GetArrayLength:
- case GetByteArrayLength:
- case GetInt8ArrayLength:
- case GetInt16ArrayLength:
- case GetInt32ArrayLength:
- case GetUint8ArrayLength:
- case GetUint8ClampedArrayLength:
- case GetUint16ArrayLength:
- case GetUint32ArrayLength:
- case GetFloat32ArrayLength:
- case GetFloat64ArrayLength:
- case GetStringLength: {
- // This node should never be visible at this stage of compilation. It is
- // inserted by fixup(), which follows this phase.
- ASSERT_NOT_REACHED();
- break;
- }
-
-#ifndef NDEBUG
- // These get ignored because they don't return anything.
- case PutScopedVar:
- case DFG::Jump:
- case Branch:
- case Breakpoint:
- case Return:
- case CheckHasInstance:
- case Phi:
- case Flush:
- case Throw:
- case ThrowReferenceError:
- case ForceOSRExit:
- case SetArgument:
- case PutByVal:
- case PutByValAlias:
- case PutById:
- case PutByIdDirect:
- case CheckStructure:
- case CheckFunction:
- case PutStructure:
- case PutByOffset:
- break;
-
- // These gets ignored because it doesn't do anything.
- case Phantom:
- case InlineStart:
- case Nop:
- break;
-#else
- default:
- break;
-#endif
- }
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("%s\n", predictionToString(m_graph[m_compileIndex].prediction()));
-#endif
-
- m_changed |= changed;
- }
-
- void propagatePredictionsForward()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Propagating predictions forward [%u]\n", ++m_count);
-#endif
- for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
- propagateNodePredictions(m_graph[m_compileIndex]);
- }
-
- void propagatePredictionsBackward()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Propagating predictions backward [%u]\n", ++m_count);
-#endif
- for (m_compileIndex = m_graph.size(); m_compileIndex-- > 0;)
- propagateNodePredictions(m_graph[m_compileIndex]);
- }
-
- void vote(NodeUse nodeUse, VariableAccessData::Ballot ballot)
- {
- switch (m_graph[nodeUse].op) {
- case ValueToInt32:
- case UInt32ToNumber:
- nodeUse = m_graph[nodeUse].child1();
- break;
- default:
- break;
- }
-
- if (m_graph[nodeUse].op == GetLocal)
- m_graph[nodeUse].variableAccessData()->vote(ballot);
- }
-
- void vote(Node& node, VariableAccessData::Ballot ballot)
- {
- if (node.op & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
- vote(m_graph.m_varArgChildren[childIdx], ballot);
- return;
- }
-
- if (!node.child1())
- return;
- vote(node.child1(), ballot);
- if (!node.child2())
- return;
- vote(node.child2(), ballot);
- if (!node.child3())
- return;
- vote(node.child3(), ballot);
- }
-
- void doRoundOfDoubleVoting()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Voting on double uses of locals [%u]\n", m_count);
-#endif
- for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i)
- m_graph.m_variableAccessData[i].find()->clearVotes();
- for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex) {
- Node& node = m_graph[m_compileIndex];
- switch (node.op) {
- case ValueAdd:
- case ArithAdd:
- case ArithSub: {
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
-
- VariableAccessData::Ballot ballot;
-
- if (isNumberPrediction(left) && isNumberPrediction(right)
- && !m_graph.addShouldSpeculateInteger(node, m_codeBlock))
- ballot = VariableAccessData::VoteDouble;
- else
- ballot = VariableAccessData::VoteValue;
-
- vote(node.child1(), ballot);
- vote(node.child2(), ballot);
- break;
- }
-
- case ArithMul:
- case ArithMin:
- case ArithMax:
- case ArithMod:
- case ArithDiv: {
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
-
- VariableAccessData::Ballot ballot;
-
- if (isNumberPrediction(left) && isNumberPrediction(right) && !(Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child1()]) && node.canSpeculateInteger()))
- ballot = VariableAccessData::VoteDouble;
- else
- ballot = VariableAccessData::VoteValue;
-
- vote(node.child1(), ballot);
- vote(node.child2(), ballot);
- break;
- }
-
- case ArithAbs:
- VariableAccessData::Ballot ballot;
- if (!(m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger()))
- ballot = VariableAccessData::VoteDouble;
- else
- ballot = VariableAccessData::VoteValue;
-
- vote(node.child1(), ballot);
- break;
-
- case ArithSqrt:
- vote(node.child1(), VariableAccessData::VoteDouble);
- break;
-
- case SetLocal: {
- PredictedType prediction = m_graph[node.child1()].prediction();
- if (isDoublePrediction(prediction))
- node.variableAccessData()->vote(VariableAccessData::VoteDouble);
- else if (!isNumberPrediction(prediction) || isInt32Prediction(prediction))
- node.variableAccessData()->vote(VariableAccessData::VoteValue);
- break;
- }
-
- default:
- vote(node, VariableAccessData::VoteValue);
- break;
- }
- }
- for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i)
- m_changed |= m_graph.m_variableAccessData[i].find()->tallyVotesForShouldUseDoubleFormat();
- }
-
- void propagatePredictions()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- m_count = 0;
-#endif
- // Two stage process: first propagate predictions, then propagate while doing double voting.
-
- do {
- m_changed = false;
-
- // Forward propagation is near-optimal for both topologically-sorted and
- // DFS-sorted code.
- propagatePredictionsForward();
- if (!m_changed)
- break;
-
- // Backward propagation reduces the likelihood that pathological code will
- // cause slowness. Loops (especially nested ones) resemble backward flow.
- // This pass captures two cases: (1) it detects if the forward fixpoint
- // found a sound solution and (2) short-circuits backward flow.
- m_changed = false;
- propagatePredictionsBackward();
- } while (m_changed);
-
- do {
- m_changed = false;
- doRoundOfDoubleVoting();
- propagatePredictionsForward();
- if (!m_changed)
- break;
-
- m_changed = false;
- doRoundOfDoubleVoting();
- propagatePredictionsBackward();
- } while (m_changed);
- }
-
- void fixupNode(Node& node)
- {
- if (!node.shouldGenerate())
- return;
-
- NodeType op = node.op;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" %s @%u: ", Graph::opName(op), m_compileIndex);
-#endif
-
- switch (op) {
- case GetById: {
- if (!isInt32Prediction(m_graph[m_compileIndex].prediction()))
- break;
- if (m_codeBlock->identifier(node.identifierNumber()) != m_globalData.propertyNames->length)
- break;
- bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
- bool isString = isStringPrediction(m_graph[node.child1()].prediction());
- bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
- bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
- bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
- bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
- bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
- bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
- bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
- bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
- bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
- bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
- if (!isArray && !isString && !isByteArray && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
- break;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
-#endif
- if (isArray)
- node.op = GetArrayLength;
- else if (isString)
- node.op = GetStringLength;
- else if (isByteArray)
- node.op = GetByteArrayLength;
- else if (isInt8Array)
- node.op = GetInt8ArrayLength;
- else if (isInt16Array)
- node.op = GetInt16ArrayLength;
- else if (isInt32Array)
- node.op = GetInt32ArrayLength;
- else if (isUint8Array)
- node.op = GetUint8ArrayLength;
- else if (isUint8ClampedArray)
- node.op = GetUint8ClampedArrayLength;
- else if (isUint16Array)
- node.op = GetUint16ArrayLength;
- else if (isUint32Array)
- node.op = GetUint32ArrayLength;
- else if (isFloat32Array)
- node.op = GetFloat32ArrayLength;
- else if (isFloat64Array)
- node.op = GetFloat64ArrayLength;
- else
- ASSERT_NOT_REACHED();
- m_graph.deref(m_compileIndex); // No longer MustGenerate
- break;
- }
- case GetIndexedPropertyStorage: {
- PredictedType basePrediction = m_graph[node.child2()].prediction();
- if (!(basePrediction & PredictInt32) && basePrediction) {
- node.op = Nop;
- m_graph.clearAndDerefChild1(node);
- m_graph.clearAndDerefChild2(node);
- m_graph.clearAndDerefChild3(node);
- node.setRefCount(0);
- }
- break;
- }
- case GetByVal:
- case StringCharAt:
- case StringCharCodeAt: {
- if (!!node.child3() && m_graph[node.child3()].op == Nop)
- node.children.child3() = NodeUse();
- break;
- }
- default:
- break;
- }
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("\n");
-#endif
- }
-
- void fixup()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Performing Fixup\n");
-#endif
- for (m_compileIndex = 0; m_compileIndex < m_graph.size(); ++m_compileIndex)
- fixupNode(m_graph[m_compileIndex]);
- }
-
- NodeIndex canonicalize(NodeIndex nodeIndex)
- {
- if (nodeIndex == NoNode)
- return NoNode;
-
- if (m_graph[nodeIndex].op == ValueToInt32)
- nodeIndex = m_graph[nodeIndex].child1().index();
-
- return nodeIndex;
- }
- NodeIndex canonicalize(NodeUse nodeUse)
- {
- return canonicalize(nodeUse.indexUnchecked());
- }
-
- // Computes where the search for a candidate for CSE should start. Don't call
- // this directly; call startIndex() instead as it does logging in debug mode.
- NodeIndex computeStartIndexForChildren(NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
- {
- const unsigned limit = 300;
-
- NodeIndex start = m_start;
- if (m_compileIndex - start > limit)
- start = m_compileIndex - limit;
-
- ASSERT(start >= m_start);
-
- NodeIndex child = canonicalize(child1);
- if (child == NoNode)
- return start;
-
- if (start < child)
- start = child;
-
- child = canonicalize(child2);
- if (child == NoNode)
- return start;
-
- if (start < child)
- start = child;
-
- child = canonicalize(child3);
- if (child == NoNode)
- return start;
-
- if (start < child)
- start = child;
-
- return start;
- }
-
- NodeIndex startIndexForChildren(NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
- {
- NodeIndex result = computeStartIndexForChildren(child1, child2, child3);
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" lookback %u: ", result);
-#endif
- return result;
- }
-
- NodeIndex startIndex()
- {
- Node& node = m_graph[m_compileIndex];
- return startIndexForChildren(
- node.child1().indexUnchecked(),
- node.child2().indexUnchecked(),
- node.child3().indexUnchecked());
- }
-
- NodeIndex endIndexForPureCSE()
- {
- NodeIndex result = m_lastSeen[m_graph[m_compileIndex].op & NodeIdMask];
- if (result == NoNode)
- result = 0;
- else
- result++;
- ASSERT(result <= m_compileIndex);
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" limit %u: ", result);
-#endif
- return result;
- }
-
- NodeIndex pureCSE(Node& node)
- {
- NodeIndex child1 = canonicalize(node.child1());
- NodeIndex child2 = canonicalize(node.child2());
- NodeIndex child3 = canonicalize(node.child3());
-
- NodeIndex start = startIndex();
- for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
- Node& otherNode = m_graph[index];
- if (node.op != otherNode.op)
- continue;
-
- if (node.arithNodeFlagsForCompare() != otherNode.arithNodeFlagsForCompare())
- continue;
-
- NodeIndex otherChild = canonicalize(otherNode.child1());
- if (otherChild == NoNode)
- return index;
- if (otherChild != child1)
- continue;
-
- otherChild = canonicalize(otherNode.child2());
- if (otherChild == NoNode)
- return index;
- if (otherChild != child2)
- continue;
-
- otherChild = canonicalize(otherNode.child3());
- if (otherChild == NoNode)
- return index;
- if (otherChild != child3)
- continue;
-
- return index;
- }
- return NoNode;
- }
-
- bool isPredictedNumerical(Node& node)
- {
- PredictedType left = m_graph[node.child1()].prediction();
- PredictedType right = m_graph[node.child2()].prediction();
- return isNumberPrediction(left) && isNumberPrediction(right);
- }
-
- bool logicalNotIsPure(Node& node)
- {
- PredictedType prediction = m_graph[node.child1()].prediction();
- return isBooleanPrediction(prediction) || !prediction;
- }
-
- bool byValIsPure(Node& node)
- {
- return m_graph[node.child2()].shouldSpeculateInteger()
- && ((node.op == PutByVal || node.op == PutByValAlias)
- ? isActionableMutableArrayPrediction(m_graph[node.child1()].prediction())
- : isActionableArrayPrediction(m_graph[node.child1()].prediction()));
- }
-
- bool clobbersWorld(NodeIndex nodeIndex)
- {
- Node& node = m_graph[nodeIndex];
- if (node.op & NodeClobbersWorld)
- return true;
- if (!(node.op & NodeMightClobber))
- return false;
- switch (node.op) {
- case ValueAdd:
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- case CompareEq:
- return !isPredictedNumerical(node);
- case LogicalNot:
- return !logicalNotIsPure(node);
- case GetByVal:
- return !byValIsPure(node);
- default:
- ASSERT_NOT_REACHED();
- return true; // If by some oddity we hit this case in release build it's safer to have CSE assume the worst.
- }
- }
-
- NodeIndex impureCSE(Node& node)
- {
- NodeIndex child1 = canonicalize(node.child1());
- NodeIndex child2 = canonicalize(node.child2());
- NodeIndex child3 = canonicalize(node.child3());
-
- NodeIndex start = startIndex();
- for (NodeIndex index = m_compileIndex; index-- > start;) {
- Node& otherNode = m_graph[index];
- if (node.op == otherNode.op
- && node.arithNodeFlagsForCompare() == otherNode.arithNodeFlagsForCompare()) {
- NodeIndex otherChild = canonicalize(otherNode.child1());
- if (otherChild == NoNode)
- return index;
- if (otherChild == child1) {
- otherChild = canonicalize(otherNode.child2());
- if (otherChild == NoNode)
- return index;
- if (otherChild == child2) {
- otherChild = canonicalize(otherNode.child3());
- if (otherChild == NoNode)
- return index;
- if (otherChild == child3)
- return index;
- }
- }
- }
- if (clobbersWorld(index))
- break;
- }
- return NoNode;
- }
-
- NodeIndex globalVarLoadElimination(unsigned varNumber, JSGlobalObject* globalObject)
- {
- NodeIndex start = startIndexForChildren();
- for (NodeIndex index = m_compileIndex; index-- > start;) {
- Node& node = m_graph[index];
- switch (node.op) {
- case GetGlobalVar:
- if (node.varNumber() == varNumber && m_codeBlock->globalObjectFor(node.codeOrigin) == globalObject)
- return index;
- break;
- case PutGlobalVar:
- if (node.varNumber() == varNumber && m_codeBlock->globalObjectFor(node.codeOrigin) == globalObject)
- return node.child1().index();
- break;
- default:
- break;
- }
- if (clobbersWorld(index))
- break;
- }
- return NoNode;
- }
-
- NodeIndex getByValLoadElimination(NodeIndex child1, NodeIndex child2)
- {
- NodeIndex start = startIndexForChildren(child1, child2);
- for (NodeIndex index = m_compileIndex; index-- > start;) {
- Node& node = m_graph[index];
- switch (node.op) {
- case GetByVal:
- if (!byValIsPure(node))
- return NoNode;
- if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
- return index;
- break;
- case PutByVal:
- case PutByValAlias:
- if (!byValIsPure(node))
- return NoNode;
- if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
- return node.child3().index();
- // We must assume that the PutByVal will clobber the location we're getting from.
- // FIXME: We can do better; if we know that the PutByVal is accessing an array of a
- // different type than the GetByVal, then we know that they won't clobber each other.
- return NoNode;
- case PutStructure:
- case PutByOffset:
- // GetByVal currently always speculates that it's accessing an
- // array with an integer index, which means that it's impossible
- // for a structure change or a put to property storage to affect
- // the GetByVal.
- break;
- case ArrayPush:
- // A push cannot affect previously existing elements in the array.
- break;
- default:
- if (clobbersWorld(index))
- return NoNode;
- break;
- }
- }
- return NoNode;
- }
-
- bool checkFunctionElimination(JSFunction* function, NodeIndex child1)
- {
- NodeIndex start = startIndexForChildren(child1);
- for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
- Node& node = m_graph[index];
- if (node.op == CheckFunction && node.child1() == child1 && node.function() == function)
- return true;
- }
- return false;
- }
-
- bool checkStructureLoadElimination(const StructureSet& structureSet, NodeIndex child1)
- {
- NodeIndex start = startIndexForChildren(child1);
- for (NodeIndex index = m_compileIndex; index-- > start;) {
- Node& node = m_graph[index];
- switch (node.op) {
- case CheckStructure:
- if (node.child1() == child1
- && structureSet.isSupersetOf(node.structureSet()))
- return true;
- break;
-
- case PutStructure:
- if (node.child1() == child1
- && structureSet.contains(node.structureTransitionData().newStructure))
- return true;
- if (structureSet.contains(node.structureTransitionData().previousStructure))
- return false;
- break;
-
- case PutByOffset:
- // Setting a property cannot change the structure.
- break;
-
- case PutByVal:
- case PutByValAlias:
- if (byValIsPure(node)) {
- // If PutByVal speculates that it's accessing an array with an
- // integer index, then it's impossible for it to cause a structure
- // change.
- break;
- }
- return false;
-
- default:
- if (clobbersWorld(index))
- return false;
- break;
- }
- }
- return false;
- }
-
- NodeIndex getByOffsetLoadElimination(unsigned identifierNumber, NodeIndex child1)
- {
- NodeIndex start = startIndexForChildren(child1);
- for (NodeIndex index = m_compileIndex; index-- > start;) {
- Node& node = m_graph[index];
- switch (node.op) {
- case GetByOffset:
- if (node.child1() == child1
- && m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber)
- return index;
- break;
-
- case PutByOffset:
- if (m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber == identifierNumber) {
- if (node.child2() == child1)
- return node.child3().index();
- return NoNode;
- }
- break;
-
- case PutStructure:
- // Changing the structure cannot change the outcome of a property get.
- break;
-
- case PutByVal:
- case PutByValAlias:
- if (byValIsPure(node)) {
- // If PutByVal speculates that it's accessing an array with an
- // integer index, then it's impossible for it to cause a structure
- // change.
- break;
- }
- return NoNode;
-
- default:
- if (clobbersWorld(index))
- return NoNode;
- break;
- }
- }
- return NoNode;
- }
-
- NodeIndex getPropertyStorageLoadElimination(NodeIndex child1)
- {
- NodeIndex start = startIndexForChildren(child1);
- for (NodeIndex index = m_compileIndex; index-- > start;) {
- Node& node = m_graph[index];
- switch (node.op) {
- case GetPropertyStorage:
- if (node.child1() == child1)
- return index;
- break;
-
- case PutByOffset:
- case PutStructure:
- // Changing the structure or putting to the storage cannot
- // change the property storage pointer.
- break;
-
- case PutByVal:
- case PutByValAlias:
- if (byValIsPure(node)) {
- // If PutByVal speculates that it's accessing an array with an
- // integer index, then it's impossible for it to cause a structure
- // change.
- break;
- }
- return NoNode;
-
- default:
- if (clobbersWorld(index))
- return NoNode;
- break;
- }
- }
- return NoNode;
- }
-
- NodeIndex getIndexedPropertyStorageLoadElimination(NodeIndex child1, bool hasIntegerIndexPrediction)
- {
- NodeIndex start = startIndexForChildren(child1);
- for (NodeIndex index = m_compileIndex; index-- > start;) {
- Node& node = m_graph[index];
- switch (node.op) {
- case GetIndexedPropertyStorage: {
- PredictedType basePrediction = m_graph[node.child2()].prediction();
- bool nodeHasIntegerIndexPrediction = !(!(basePrediction & PredictInt32) && basePrediction);
- if (node.child1() == child1 && hasIntegerIndexPrediction == nodeHasIntegerIndexPrediction)
- return index;
- break;
- }
-
- case PutByOffset:
- case PutStructure:
- // Changing the structure or putting to the storage cannot
- // change the property storage pointer.
- break;
-
- case PutByValAlias:
- // PutByValAlias can't change the indexed storage pointer
- break;
-
- case PutByVal:
- if (isFixedIndexedStorageObjectPrediction(m_graph[node.child1()].prediction()) && byValIsPure(node))
- break;
- return NoNode;
-
- default:
- if (clobbersWorld(index))
- return NoNode;
- break;
- }
- }
- return NoNode;
- }
-
- NodeIndex getScopeChainLoadElimination(unsigned depth)
- {
- NodeIndex start = startIndexForChildren();
- for (NodeIndex index = endIndexForPureCSE(); index-- > start;) {
- Node& node = m_graph[index];
- if (node.op == GetScopeChain
- && node.scopeChainDepth() == depth)
- return index;
- }
- return NoNode;
- }
-
- void performSubstitution(NodeUse& child, bool addRef = true)
- {
- // Check if this operand is actually unused.
- if (!child)
- return;
-
- // Check if there is any replacement.
- NodeIndex replacement = m_replacements[child.index()];
- if (replacement == NoNode)
- return;
-
- child.setIndex(replacement);
-
- // There is definitely a replacement. Assert that the replacement does not
- // have a replacement.
- ASSERT(m_replacements[child.index()] == NoNode);
-
- if (addRef)
- m_graph[child].ref();
- }
-
- void setReplacement(NodeIndex replacement)
- {
- if (replacement == NoNode)
- return;
-
- // Be safe. Don't try to perform replacements if the predictions don't
- // agree.
- if (m_graph[m_compileIndex].prediction() != m_graph[replacement].prediction())
- return;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Replacing @%u -> @%u", m_compileIndex, replacement);
-#endif
-
- Node& node = m_graph[m_compileIndex];
- node.op = Phantom;
- node.setRefCount(1);
-
- // At this point we will eliminate all references to this node.
- m_replacements[m_compileIndex] = replacement;
- }
-
- void eliminate()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Eliminating @%u", m_compileIndex);
-#endif
-
- Node& node = m_graph[m_compileIndex];
- ASSERT(node.refCount() == 1);
- ASSERT(node.mustGenerate());
- node.op = Phantom;
- }
-
- void performNodeCSE(Node& node)
- {
- bool shouldGenerate = node.shouldGenerate();
-
- if (node.op & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
- performSubstitution(m_graph.m_varArgChildren[childIdx], shouldGenerate);
- } else {
- performSubstitution(node.children.child1(), shouldGenerate);
- performSubstitution(node.children.child2(), shouldGenerate);
- performSubstitution(node.children.child3(), shouldGenerate);
- }
-
- if (!shouldGenerate)
- return;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" %s @%u: ", Graph::opName(m_graph[m_compileIndex].op), m_compileIndex);
-#endif
-
- // NOTE: there are some nodes that we deliberately don't CSE even though we
- // probably could, like StrCat and ToPrimitive. That's because there is no
- // evidence that doing CSE on these nodes would result in a performance
- // progression. Hence considering these nodes in CSE would just mean that this
- // code does more work with no win. Of course, we may want to reconsider this,
- // since StrCat is trivially CSE-able. It's not trivially doable for
- // ToPrimitive, but we could change that with some speculations if we really
- // needed to.
-
- switch (node.op) {
-
- // Handle the pure nodes. These nodes never have any side-effects.
- case BitAnd:
- case BitOr:
- case BitXor:
- case BitRShift:
- case BitLShift:
- case BitURShift:
- case ArithAdd:
- case ArithSub:
- case ArithMul:
- case ArithMod:
- case ArithDiv:
- case ArithAbs:
- case ArithMin:
- case ArithMax:
- case ArithSqrt:
- case GetByteArrayLength:
- case GetInt8ArrayLength:
- case GetInt16ArrayLength:
- case GetInt32ArrayLength:
- case GetUint8ArrayLength:
- case GetUint8ClampedArrayLength:
- case GetUint16ArrayLength:
- case GetUint32ArrayLength:
- case GetFloat32ArrayLength:
- case GetFloat64ArrayLength:
- case GetCallee:
- case GetStringLength:
- case StringCharAt:
- case StringCharCodeAt:
- setReplacement(pureCSE(node));
- break;
-
- case GetArrayLength:
- setReplacement(impureCSE(node));
- break;
-
- case GetScopeChain:
- setReplacement(getScopeChainLoadElimination(node.scopeChainDepth()));
- break;
-
- // Handle nodes that are conditionally pure: these are pure, and can
- // be CSE'd, so long as the prediction is the one we want.
- case ValueAdd:
- case CompareLess:
- case CompareLessEq:
- case CompareGreater:
- case CompareGreaterEq:
- case CompareEq: {
- if (isPredictedNumerical(node)) {
- NodeIndex replacementIndex = pureCSE(node);
- if (replacementIndex != NoNode && isPredictedNumerical(m_graph[replacementIndex]))
- setReplacement(replacementIndex);
- }
- break;
- }
-
- case LogicalNot: {
- if (logicalNotIsPure(node)) {
- NodeIndex replacementIndex = pureCSE(node);
- if (replacementIndex != NoNode && logicalNotIsPure(m_graph[replacementIndex]))
- setReplacement(replacementIndex);
- }
- break;
- }
-
- // Finally handle heap accesses. These are not quite pure, but we can still
- // optimize them provided that some subtle conditions are met.
- case GetGlobalVar:
- setReplacement(globalVarLoadElimination(node.varNumber(), m_codeBlock->globalObjectFor(node.codeOrigin)));
- break;
-
- case GetByVal:
- if (byValIsPure(node))
- setReplacement(getByValLoadElimination(node.child1().index(), node.child2().index()));
- break;
-
- case PutByVal:
- if (byValIsPure(node) && getByValLoadElimination(node.child1().index(), node.child2().index()) != NoNode)
- node.op = PutByValAlias;
- break;
-
- case CheckStructure:
- if (checkStructureLoadElimination(node.structureSet(), node.child1().index()))
- eliminate();
- break;
-
- case CheckFunction:
- if (checkFunctionElimination(node.function(), node.child1().index()))
- eliminate();
- break;
-
- case GetIndexedPropertyStorage: {
- PredictedType basePrediction = m_graph[node.child2()].prediction();
- bool nodeHasIntegerIndexPrediction = !(!(basePrediction & PredictInt32) && basePrediction);
- setReplacement(getIndexedPropertyStorageLoadElimination(node.child1().index(), nodeHasIntegerIndexPrediction));
- break;
- }
-
- case GetPropertyStorage:
- setReplacement(getPropertyStorageLoadElimination(node.child1().index()));
- break;
-
- case GetByOffset:
- setReplacement(getByOffsetLoadElimination(m_graph.m_storageAccessData[node.storageAccessDataIndex()].identifierNumber, node.child1().index()));
- break;
-
- default:
- // do nothing.
- break;
- }
-
- m_lastSeen[node.op & NodeIdMask] = m_compileIndex;
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("\n");
-#endif
- }
-
- void performBlockCSE(BasicBlock& block)
- {
- m_start = block.begin;
- NodeIndex end = block.end;
- for (m_compileIndex = m_start; m_compileIndex < end; ++m_compileIndex)
- performNodeCSE(m_graph[m_compileIndex]);
- }
-
- void localCSE()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("Performing local CSE:");
-#endif
- for (unsigned block = 0; block < m_graph.m_blocks.size(); ++block)
- performBlockCSE(*m_graph.m_blocks[block]);
- }
-
- void allocateVirtualRegisters()
- {
-#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Preserved vars: ");
- m_graph.m_preservedVars.dump(stdout);
- printf("\n");
-#endif
- ScoreBoard scoreBoard(m_graph, m_graph.m_preservedVars);
- unsigned sizeExcludingPhiNodes = m_graph.m_blocks.last()->end;
- for (size_t i = 0; i < sizeExcludingPhiNodes; ++i) {
- Node& node = m_graph[i];
-
- if (!node.shouldGenerate())
- continue;
-
- // GetLocal nodes are effectively phi nodes in the graph, referencing
- // results from prior blocks.
- if (node.op != GetLocal) {
- // First, call use on all of the current node's children, then
- // allocate a VirtualRegister for this node. We do so in this
- // order so that if a child is on its last use, and a
- // VirtualRegister is freed, then it may be reused for node.
- if (node.op & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
- scoreBoard.use(m_graph.m_varArgChildren[childIdx]);
- } else {
- scoreBoard.use(node.child1());
- scoreBoard.use(node.child2());
- scoreBoard.use(node.child3());
- }
- }
-
- if (!node.hasResult())
- continue;
-
- node.setVirtualRegister(scoreBoard.allocate());
- // 'mustGenerate' nodes have their useCount artificially elevated,
- // call use now to account for this.
- if (node.mustGenerate())
- scoreBoard.use(i);
- }
-
- // 'm_numCalleeRegisters' is the number of locals and temporaries allocated
- // for the function (and checked for on entry). Since we perform a new and
- // different allocation of temporaries, more registers may now be required.
- unsigned calleeRegisters = scoreBoard.highWatermark() + m_graph.m_parameterSlots;
- if ((unsigned)m_codeBlock->m_numCalleeRegisters < calleeRegisters)
- m_codeBlock->m_numCalleeRegisters = calleeRegisters;
-#if DFG_ENABLE(DEBUG_VERBOSE)
- printf("Num callee registers: %u\n", calleeRegisters);
-#endif
- }
-
- void performBlockCFA(AbstractState& state, BlockIndex blockIndex)
- {
- BasicBlock* block = m_graph.m_blocks[blockIndex].get();
- if (!block->cfaShouldRevisit)
- return;
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" Block #%u (bc#%u):\n", blockIndex, block->bytecodeBegin);
-#endif
- state.beginBasicBlock(block);
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" head vars: ");
- dumpOperands(block->valuesAtHead, stdout);
- printf("\n");
-#endif
- for (NodeIndex nodeIndex = block->begin; nodeIndex < block->end; ++nodeIndex) {
- if (!m_graph[nodeIndex].shouldGenerate())
- continue;
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" %s @%u: ", Graph::opName(m_graph[nodeIndex].op), nodeIndex);
- state.dump(stdout);
- printf("\n");
-#endif
- if (!state.execute(nodeIndex))
- break;
- }
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" tail regs: ");
- state.dump(stdout);
- printf("\n");
-#endif
- m_changed |= state.endBasicBlock(AbstractState::MergeToSuccessors);
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf(" tail vars: ");
- dumpOperands(block->valuesAtTail, stdout);
- printf("\n");
-#endif
- }
-
- void performForwardCFA(AbstractState& state)
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- printf("CFA [%u]\n", ++m_count);
-#endif
-
- for (BlockIndex block = 0; block < m_graph.m_blocks.size(); ++block)
- performBlockCFA(state, block);
- }
-
- void globalCFA()
- {
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- m_count = 0;
-#endif
-
- // This implements a pseudo-worklist-based forward CFA, except that the visit order
- // of blocks is the bytecode program order (which is nearly topological), and
- // instead of a worklist we just walk all basic blocks checking if cfaShouldRevisit
- // is set to true. This is likely to balance the efficiency properties of both
- // worklist-based and forward fixpoint-based approaches. Like a worklist-based
- // approach, it won't visit code if it's meaningless to do so (nothing changed at
- // the head of the block or the predecessors have not been visited). Like a forward
- // fixpoint-based approach, it has a high probability of only visiting a block
- // after all predecessors have been visited. Only loops will cause this analysis to
- // revisit blocks, and the amount of revisiting is proportional to loop depth.
-
- AbstractState::initialize(m_graph);
-
- AbstractState state(m_codeBlock, m_graph);
-
- do {
- m_changed = false;
- performForwardCFA(state);
- } while (m_changed);
- }
-
- Graph& m_graph;
- JSGlobalData& m_globalData;
- CodeBlock* m_codeBlock;
- CodeBlock* m_profiledBlock;
-
- NodeIndex m_start;
- NodeIndex m_compileIndex;
-
-#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
- unsigned m_count;
-#endif
-
- bool m_changed;
-
- Vector<NodeIndex, 16> m_replacements;
- FixedArray<NodeIndex, LastNodeId> m_lastSeen;
-};
-
-void propagate(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock)
-{
- ASSERT(codeBlock);
- CodeBlock* profiledBlock = codeBlock->alternative();
- ASSERT(profiledBlock);
-
- Propagator propagator(graph, *globalData, codeBlock, profiledBlock);
- propagator.fixpoint();
-
-}
-
-} } // namespace JSC::DFG
-
-#endif // ENABLE(DFG_JIT)
-
diff --git a/Source/JavaScriptCore/dfg/DFGRegisterBank.h b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
index 11cc70931..85dc246f2 100644
--- a/Source/JavaScriptCore/dfg/DFGRegisterBank.h
+++ b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
@@ -232,11 +232,11 @@ public:
// For each register, print the VirtualRegister 'name'.
for (uint32_t i =0; i < NUM_REGS; ++i) {
if (m_data[i].name != InvalidVirtualRegister)
- fprintf(stderr, "[%02d]", m_data[i].name);
+ dataLog("[%02d]", m_data[i].name);
else
- fprintf(stderr, "[--]");
+ dataLog("[--]");
}
- fprintf(stderr, "\n");
+ dataLog("\n");
}
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
index f2928c290..edf3c9505 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -32,6 +32,7 @@
#include "DFGSpeculativeJIT.h"
#include "LinkBuffer.h"
#include "Operations.h"
+#include "PolymorphicPutByIdList.h"
#include "RepatchBuffer.h"
namespace JSC { namespace DFG {
@@ -361,7 +362,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
// place that we made it from. It just so happens to be the place that we are at
// right now!
stubJit.store32(
- MacroAssembler::TrustedImm32(exec->codeOriginIndexForDFGWithInlining()),
+ MacroAssembler::TrustedImm32(exec->codeOriginIndexForDFG()),
CCallHelpers::tagFor(static_cast<VirtualRegister>(RegisterFile::ArgumentCount)));
operationCall = stubJit.call();
@@ -492,7 +493,7 @@ void dfgBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const Identifi
dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
}
-static V_DFGOperation_EJCI appropriatePutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+static V_DFGOperation_EJCI appropriateGenericPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
{
if (slot.isStrictMode()) {
if (putKind == Direct)
@@ -504,6 +505,18 @@ static V_DFGOperation_EJCI appropriatePutByIdFunction(const PutPropertySlot &slo
return operationPutByIdNonStrict;
}
+static V_DFGOperation_EJCI appropriateListBuildingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+{
+ if (slot.isStrictMode()) {
+ if (putKind == Direct)
+ return operationPutByIdDirectStrictBuildList;
+ return operationPutByIdStrictBuildList;
+ }
+ if (putKind == Direct)
+ return operationPutByIdDirectNonStrictBuildList;
+ return operationPutByIdNonStrictBuildList;
+}
+
static void testPrototype(MacroAssembler &stubJit, GPRReg scratchGPR, JSValue prototype, MacroAssembler::JumpList& failureCases)
{
if (prototype.isNull())
@@ -515,7 +528,187 @@ static void testPrototype(MacroAssembler &stubJit, GPRReg scratchGPR, JSValue pr
failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(scratchGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(prototype.asCell()->structure())));
}
-static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier&, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static void emitPutReplaceStub(
+ ExecState* exec,
+ JSValue,
+ const Identifier&,
+ const PutPropertySlot& slot,
+ StructureStubInfo& stubInfo,
+ PutKind,
+ Structure* structure,
+ CodeLocationLabel failureLabel,
+ MacroAssemblerCodeRef& stubRoutine)
+{
+ JSGlobalData* globalData = &exec->globalData();
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ bool needToRestoreScratch = false;
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ GPRReg scratchGPR2;
+ const bool writeBarrierNeeded = true;
+#else
+ const bool writeBarrierNeeded = false;
+#endif
+
+ MacroAssembler stubJit;
+
+ if (scratchGPR == InvalidGPRReg && (writeBarrierNeeded || !structure->isUsingInlineStorage())) {
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
+ needToRestoreScratch = true;
+ stubJit.push(scratchGPR);
+ }
+
+ MacroAssembler::Jump badStructure = stubJit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure));
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ scratchGPR2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
+ stubJit.push(scratchGPR2);
+ SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratchGPR2, WriteBarrierForPropertyAccess);
+ stubJit.pop(scratchGPR2);
+#endif
+
+#if USE(JSVALUE64)
+ if (structure->isUsingInlineStorage())
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue)));
+ else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue)));
+ }
+#elif USE(JSVALUE32_64)
+ if (structure->isUsingInlineStorage()) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+ MacroAssembler::Jump success;
+ MacroAssembler::Jump failure;
+
+ if (needToRestoreScratch) {
+ stubJit.pop(scratchGPR);
+ success = stubJit.jump();
+
+ badStructure.link(&stubJit);
+ stubJit.pop(scratchGPR);
+ failure = stubJit.jump();
+ } else {
+ success = stubJit.jump();
+ failure = badStructure;
+ }
+
+ LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+ patchBuffer.link(failure, failureLabel);
+
+ stubRoutine = patchBuffer.finalizeCode();
+}
+
+static void emitPutTransitionStub(
+ ExecState* exec,
+ JSValue,
+ const Identifier&,
+ const PutPropertySlot& slot,
+ StructureStubInfo& stubInfo,
+ PutKind putKind,
+ Structure* structure,
+ Structure* oldStructure,
+ StructureChain* prototypeChain,
+ CodeLocationLabel failureLabel,
+ MacroAssemblerCodeRef& stubRoutine)
+{
+ JSGlobalData* globalData = &exec->globalData();
+
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
+#endif
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.valueGPR);
+ GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
+ bool needToRestoreScratch = false;
+
+ ASSERT(scratchGPR != baseGPR);
+
+ MacroAssembler stubJit;
+
+ MacroAssembler::JumpList failureCases;
+
+ if (scratchGPR == InvalidGPRReg) {
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
+ stubJit.push(scratchGPR);
+ needToRestoreScratch = true;
+ }
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
+
+ testPrototype(stubJit, scratchGPR, oldStructure->storedPrototype(), failureCases);
+
+ if (putKind == NotDirect) {
+ for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it)
+ testPrototype(stubJit, scratchGPR, (*it)->storedPrototype(), failureCases);
+ }
+
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ // Must always emit this write barrier, since the structure transition itself requires it.
+ GPRReg scratch2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
+ stubJit.push(scratch2);
+ SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
+ stubJit.pop(scratch2);
+#endif
+
+ stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+#if USE(JSVALUE64)
+ if (structure->isUsingInlineStorage())
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue)));
+ else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue)));
+ }
+#elif USE(JSVALUE32_64)
+ if (structure->isUsingInlineStorage()) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+ MacroAssembler::Jump success;
+ MacroAssembler::Jump failure;
+
+ if (needToRestoreScratch) {
+ stubJit.pop(scratchGPR);
+ success = stubJit.jump();
+
+ failureCases.link(&stubJit);
+ stubJit.pop(scratchGPR);
+ failure = stubJit.jump();
+ } else
+ success = stubJit.jump();
+
+ LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
+ if (needToRestoreScratch)
+ patchBuffer.link(failure, failureLabel);
+ else
+ patchBuffer.link(failureCases, failureLabel);
+
+ stubRoutine = patchBuffer.finalizeCode();
+}
+
+static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
CodeBlock* codeBlock = exec->codeBlock();
JSGlobalData* globalData = &exec->globalData();
@@ -545,99 +738,26 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier
StructureChain* prototypeChain = structure->prototypeChain(exec);
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.valueTagGPR);
-#endif
- GPRReg valueGPR = static_cast<GPRReg>(stubInfo.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.scratchGPR);
- bool needToRestoreScratch = false;
-
- ASSERT(scratchGPR != baseGPR);
-
- MacroAssembler stubJit;
-
- MacroAssembler::JumpList failureCases;
-
- if (scratchGPR == InvalidGPRReg) {
- scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
- stubJit.push(scratchGPR);
- needToRestoreScratch = true;
- }
-
- failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
-
- testPrototype(stubJit, scratchGPR, oldStructure->storedPrototype(), failureCases);
-
- if (putKind == NotDirect) {
- for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it)
- testPrototype(stubJit, scratchGPR, (*it)->storedPrototype(), failureCases);
- }
-
-#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
- // Must always emit this write barrier as the structure transition itself requires it
- GPRReg scratch2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
- stubJit.push(scratch2);
- SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
- stubJit.pop(scratch2);
-#endif
-
- stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
-#if USE(JSVALUE64)
- if (structure->isUsingInlineStorage())
- stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue)));
- else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
- stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue)));
- }
-#elif USE(JSVALUE32_64)
- if (structure->isUsingInlineStorage()) {
- stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- } else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
- stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- }
-#endif
-
- MacroAssembler::Jump success;
- MacroAssembler::Jump failure;
-
- if (needToRestoreScratch) {
- stubJit.pop(scratchGPR);
- success = stubJit.jump();
-
- failureCases.link(&stubJit);
- stubJit.pop(scratchGPR);
- failure = stubJit.jump();
- } else
- success = stubJit.jump();
-
- LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock);
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
- if (needToRestoreScratch)
- patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
- else
- patchBuffer.link(failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
-
- stubInfo.stubRoutine = patchBuffer.finalizeCode();
+ emitPutTransitionStub(
+ exec, baseValue, ident, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase),
+ stubInfo.stubRoutine);
RepatchBuffer repatchBuffer(codeBlock);
repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code()));
- repatchBuffer.relink(stubInfo.callReturnLocation, appropriatePutByIdFunction(slot, putKind));
+ repatchBuffer.relink(stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
stubInfo.initPutByIdTransition(*globalData, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
return true;
}
- dfgRepatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), appropriatePutByIdFunction(slot, putKind), false);
+ dfgRepatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), appropriateListBuildingPutByIdFunction(slot, putKind), false);
stubInfo.initPutByIdReplace(*globalData, codeBlock->ownerExecutable(), structure);
return true;
}
- // FIXME: should support the transition case!
return false;
}
@@ -645,7 +765,91 @@ void dfgRepatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& pro
{
bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind);
if (!cached)
- dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriatePutByIdFunction(slot, putKind));
+ dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+}
+
+static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSGlobalData* globalData = &exec->globalData();
+
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
+
+ if (!slot.isCacheable())
+ return false;
+ if (structure->isUncacheableDictionary())
+ return false;
+
+ // Optimize self access.
+ if (slot.base() == baseValue) {
+ PolymorphicPutByIdList* list;
+ MacroAssemblerCodeRef stubRoutine;
+
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary())
+ return false;
+
+ // Skip optimizing the case where the transition requires reallocating property storage.
+ if (oldStructure->propertyStorageCapacity() != structure->propertyStorageCapacity())
+ return false;
+
+ normalizePrototypeChain(exec, baseCell);
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ // We're now committed to creating the stub. Mogrify the meta-data accordingly.
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+
+ emitPutTransitionStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ CodeLocationLabel(list->currentSlowPathTarget()),
+ stubRoutine);
+
+ list->addAccess(
+ PutByIdAccess::transition(
+ *globalData, codeBlock->ownerExecutable(),
+ oldStructure, structure, prototypeChain,
+ stubRoutine));
+ } else {
+ // We're now committed to creating the stub. Mogrify the meta-data accordingly.
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
+
+ emitPutReplaceStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
+ structure, CodeLocationLabel(list->currentSlowPathTarget()), stubRoutine);
+
+ list->addAccess(
+ PutByIdAccess::replace(
+ *globalData, codeBlock->ownerExecutable(),
+ structure, stubRoutine));
+ }
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.deltaCallToStructCheck), CodeLocationLabel(stubRoutine.code()));
+
+ if (list->isFull())
+ repatchBuffer.relink(stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+
+ return true;
+ }
+
+ return false;
+}
+
+void dfgBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ bool cached = tryBuildPutByIdList(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
+ dfgRepatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
}
void dfgLinkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind)
@@ -687,14 +891,14 @@ void dfgResetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
V_DFGOperation_EJCI unoptimizedFunction = bitwise_cast<V_DFGOperation_EJCI>(MacroAssembler::readCallTarget(stubInfo.callReturnLocation).executableAddress());
V_DFGOperation_EJCI optimizedFunction;
- if (unoptimizedFunction == operationPutByIdStrict)
+ if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictBuildList)
optimizedFunction = operationPutByIdStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdNonStrict)
+ else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictBuildList)
optimizedFunction = operationPutByIdNonStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdDirectStrict)
+ else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictBuildList)
optimizedFunction = operationPutByIdDirectStrictOptimize;
else {
- ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict);
+ ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictBuildList);
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
repatchBuffer.relink(stubInfo.callReturnLocation, optimizedFunction);
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.h b/Source/JavaScriptCore/dfg/DFGRepatch.h
index f146128fb..83d4e976d 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.h
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.h
@@ -26,10 +26,12 @@
#ifndef DFGRepatch_h
#define DFGRepatch_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include <dfg/DFGJITCompiler.h>
-#include <dfg/DFGOperations.h>
+#include "DFGJITCompiler.h"
+#include "DFGOperations.h"
namespace JSC { namespace DFG {
@@ -37,6 +39,7 @@ void dfgRepatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlo
void dfgBuildGetByIDList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
void dfgBuildGetByIDProtoList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
void dfgRepatchPutByID(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void dfgBuildPutByIdList(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
void dfgLinkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr, CodeSpecializationKind);
void dfgResetGetByID(RepatchBuffer&, StructureStubInfo&);
void dfgResetPutByID(RepatchBuffer&, StructureStubInfo&);
diff --git a/Source/JavaScriptCore/dfg/DFGScoreBoard.h b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
index cc3272812..912b3e8fd 100644
--- a/Source/JavaScriptCore/dfg/DFGScoreBoard.h
+++ b/Source/JavaScriptCore/dfg/DFGScoreBoard.h
@@ -119,26 +119,26 @@ public:
#ifndef NDEBUG
void dump()
{
- printf(" USED: [ ");
+ dataLog(" USED: [ ");
for (unsigned i = 0; i < m_used.size(); ++i) {
if (!m_free.contains(i)) {
- printf("%d:", i);
+ dataLog("%d:", i);
if (m_used[i] == max())
- printf("local ");
+ dataLog("local ");
else
- printf("%d ", m_used[i]);
+ dataLog("%d ", m_used[i]);
}
}
- printf("]\n");
+ dataLog("]\n");
- printf(" FREE: [ ");
+ dataLog(" FREE: [ ");
for (unsigned i = 0; i < m_used.size(); ++i) {
if (m_free.contains(i) && m_used[i] != max()) {
ASSERT(!m_used[i]);
- printf("%d ", i);
+ dataLog("%d ", i);
}
}
- printf("]\n");
+ dataLog("]\n");
}
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index 77b3e54b1..8578337f5 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -430,33 +430,33 @@ static const char* dataFormatString(DataFormat format)
void SpeculativeJIT::dump(const char* label)
{
if (label)
- fprintf(stderr, "<%s>\n", label);
+ dataLog("<%s>\n", label);
- fprintf(stderr, " gprs:\n");
+ dataLog(" gprs:\n");
m_gprs.dump();
- fprintf(stderr, " fprs:\n");
+ dataLog(" fprs:\n");
m_fprs.dump();
- fprintf(stderr, " VirtualRegisters:\n");
+ dataLog(" VirtualRegisters:\n");
for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
GenerationInfo& info = m_generationInfo[i];
if (info.alive())
- fprintf(stderr, " % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
+ dataLog(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
else
- fprintf(stderr, " % 3d:[__][__]", i);
+ dataLog(" % 3d:[__][__]", i);
if (info.registerFormat() == DataFormatDouble)
- fprintf(stderr, ":fpr%d\n", info.fpr());
+ dataLog(":fpr%d\n", info.fpr());
else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
&& !(info.registerFormat() & DataFormatJS)
#endif
) {
ASSERT(info.gpr() != InvalidGPRReg);
- fprintf(stderr, ":%s\n", GPRInfo::debugName(info.gpr()));
+ dataLog(":%s\n", GPRInfo::debugName(info.gpr()));
} else
- fprintf(stderr, "\n");
+ dataLog("\n");
}
if (label)
- fprintf(stderr, "</%s>\n", label);
+ dataLog("</%s>\n", label);
}
#endif
@@ -468,13 +468,13 @@ void SpeculativeJIT::checkConsistency()
for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
if (iter.isLocked()) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
+ dataLog("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
failed = true;
}
}
for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
if (iter.isLocked()) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
+ dataLog("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
failed = true;
}
}
@@ -502,7 +502,7 @@ void SpeculativeJIT::checkConsistency()
GPRReg gpr = info.gpr();
ASSERT(gpr != InvalidGPRReg);
if (m_gprs.name(gpr) != virtualRegister) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
+ dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
failed = true;
}
break;
@@ -511,7 +511,7 @@ void SpeculativeJIT::checkConsistency()
FPRReg fpr = info.fpr();
ASSERT(fpr != InvalidFPRReg);
if (m_fprs.name(fpr) != virtualRegister) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
+ dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
failed = true;
}
break;
@@ -527,18 +527,18 @@ void SpeculativeJIT::checkConsistency()
GenerationInfo& info = m_generationInfo[virtualRegister];
#if USE(JSVALUE64)
if (iter.regID() != info.gpr()) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
failed = true;
}
#else
if (!(info.registerFormat() & DataFormatJS)) {
if (iter.regID() != info.gpr()) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
failed = true;
}
} else {
if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
failed = true;
}
}
@@ -552,7 +552,7 @@ void SpeculativeJIT::checkConsistency()
GenerationInfo& info = m_generationInfo[virtualRegister];
if (iter.regID() != info.fpr()) {
- fprintf(stderr, "DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
failed = true;
}
}
@@ -803,10 +803,8 @@ void SpeculativeJIT::compilePeepHoleDoubleBranch(Node& node, NodeIndex branchNod
SpeculateDoubleOperand op1(this, node.child1());
SpeculateDoubleOperand op2(this, node.child2());
- addBranch(m_jit.branchDouble(condition, op1.fpr(), op2.fpr()), taken);
-
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchDouble(condition, op1.fpr(), op2.fpr(), taken);
+ jump(notTaken);
}
void SpeculativeJIT::compilePeepHoleObjectEquality(Node& node, NodeIndex branchNodeIndex, const ClassInfo* classInfo, PredictionChecker predictionCheck)
@@ -835,9 +833,8 @@ void SpeculativeJIT::compilePeepHoleObjectEquality(Node& node, NodeIndex branchN
if (!predictionCheck(m_state.forNode(node.child2()).m_type))
speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2().index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
- addBranch(m_jit.branchPtr(condition, op1GPR, op2GPR), taken);
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchPtr(condition, op1GPR, op2GPR, taken);
+ jump(notTaken);
}
void SpeculativeJIT::compilePeepHoleIntegerBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition condition)
@@ -858,20 +855,18 @@ void SpeculativeJIT::compilePeepHoleIntegerBranch(Node& node, NodeIndex branchNo
if (isInt32Constant(node.child1().index())) {
int32_t imm = valueOfInt32Constant(node.child1().index());
SpeculateIntegerOperand op2(this, node.child2());
- addBranch(m_jit.branch32(condition, JITCompiler::Imm32(imm), op2.gpr()), taken);
+ branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
} else if (isInt32Constant(node.child2().index())) {
SpeculateIntegerOperand op1(this, node.child1());
int32_t imm = valueOfInt32Constant(node.child2().index());
- addBranch(m_jit.branch32(condition, op1.gpr(), JITCompiler::Imm32(imm)), taken);
+ branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
} else {
SpeculateIntegerOperand op1(this, node.child1());
SpeculateIntegerOperand op2(this, node.child2());
- addBranch(m_jit.branch32(condition, op1.gpr(), op2.gpr()), taken);
+ branch32(condition, op1.gpr(), op2.gpr(), taken);
}
- // Check for fall through, otherwise we need to jump.
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
}
// Returns true if the compare is fused with a subsequent branch.
@@ -957,13 +952,20 @@ void SpeculativeJIT::compile(BasicBlock& block)
m_lastSetOperand = std::numeric_limits<int>::max();
m_codeOriginForOSR = CodeOrigin();
+
+ if (DFG_ENABLE_EDGE_CODE_VERIFICATION) {
+ JITCompiler::Jump verificationSucceeded =
+ m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, Imm32(m_block));
+ m_jit.breakpoint();
+ verificationSucceeded.link(&m_jit);
+ }
for (; m_compileIndex < block.end; ++m_compileIndex) {
Node& node = at(m_compileIndex);
m_codeOriginForOSR = node.codeOrigin;
if (!node.shouldGenerate()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
+ dataLog("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
switch (node.op) {
case SetLocal:
@@ -990,7 +992,7 @@ void SpeculativeJIT::compile(BasicBlock& block)
} else {
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
+ dataLog("SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
m_jit.breakpoint();
@@ -1011,20 +1013,20 @@ void SpeculativeJIT::compile(BasicBlock& block)
#if DFG_ENABLE(DEBUG_VERBOSE)
if (node.hasResult()) {
GenerationInfo& info = m_generationInfo[node.virtualRegister()];
- fprintf(stderr, "-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)node.virtualRegister());
+ dataLog("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)node.virtualRegister());
if (info.registerFormat() != DataFormatNone) {
if (info.registerFormat() == DataFormatDouble)
- fprintf(stderr, ", %s", FPRInfo::debugName(info.fpr()));
+ dataLog(", %s", FPRInfo::debugName(info.fpr()));
#if USE(JSVALUE32_64)
else if (info.registerFormat() & DataFormatJS)
- fprintf(stderr, ", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
+ dataLog(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
#endif
else
- fprintf(stderr, ", %s", GPRInfo::debugName(info.gpr()));
+ dataLog(", %s", GPRInfo::debugName(info.gpr()));
}
- fprintf(stderr, " ");
+ dataLog(" ");
} else
- fprintf(stderr, " ");
+ dataLog(" ");
#endif
}
@@ -1032,14 +1034,14 @@ void SpeculativeJIT::compile(BasicBlock& block)
for (size_t i = 0; i < m_arguments.size(); ++i)
computeValueRecoveryFor(argumentToOperand(i)).dump(stderr);
- fprintf(stderr, " : ");
+ dataLog(" : ");
for (int operand = 0; operand < (int)m_variables.size(); ++operand)
computeValueRecoveryFor(operand).dump(stderr);
#endif
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "\n");
+ dataLog("\n");
#endif
// Make sure that the abstract state is rematerialized for the next node.
@@ -1071,144 +1073,155 @@ void SpeculativeJIT::checkArgumentTypes()
m_variables[i] = ValueSource(ValueInRegisterFile);
for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
- VariableAccessData* variableAccessData = at(m_jit.graph().m_arguments[i]).variableAccessData();
+ NodeIndex nodeIndex = m_jit.graph().m_arguments[i];
+ Node& node = at(nodeIndex);
+ ASSERT(node.op == SetArgument);
+ if (!node.shouldGenerate()) {
+ // The argument is dead. We don't do any checks for such arguments.
+ continue;
+ }
+
+ VariableAccessData* variableAccessData = node.variableAccessData();
VirtualRegister virtualRegister = variableAccessData->local();
PredictedType predictedType = variableAccessData->prediction();
+
+ JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
+
#if USE(JSVALUE64)
if (isInt32Prediction(predictedType))
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
else if (isArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
} else if (isByteArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
} else if (isBooleanPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
} else if (isInt8ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
} else if (isInt16ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
} else if (isInt32ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
} else if (isUint8ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
} else if (isUint8ClampedArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
} else if (isUint16ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
} else if (isUint32ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
} else if (isFloat32ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
} else if (isFloat64ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
}
#else
if (isInt32Prediction(predictedType))
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
else if (isArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
} else if (isByteArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSByteArray::s_info)));
} else if (isBooleanPrediction(predictedType))
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
else if (isInt8ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
} else if (isInt16ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
} else if (isInt32ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
} else if (isUint8ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
} else if (isUint8ClampedArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
} else if (isUint16ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
} else if (isUint32ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
} else if (isFloat32ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
} else if (isFloat64ArrayPrediction(predictedType)) {
GPRTemporary temp(this);
m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
+ speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
}
#endif
}
@@ -1218,6 +1231,9 @@ bool SpeculativeJIT::compile()
{
checkArgumentTypes();
+ if (DFG_ENABLE_EDGE_CODE_VERIFICATION)
+ m_jit.move(Imm32(0), GPRInfo::regT0);
+
ASSERT(!m_compileIndex);
for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block)
compile(*m_jit.graph().m_blocks[m_block]);
@@ -1225,13 +1241,37 @@ bool SpeculativeJIT::compile()
return true;
}
+void SpeculativeJIT::createOSREntries()
+{
+ for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
+ BasicBlock& block = *m_jit.graph().m_blocks[blockIndex];
+ if (!block.isOSRTarget)
+ continue;
+
+ // Currently we only need to create OSR entry trampolines when using edge code
+ // verification. But in the future, we'll need this for other things as well (like
+ // when we have global reg alloc).
+ // If we don't need OSR entry trampolin
+ if (!DFG_ENABLE_EDGE_CODE_VERIFICATION) {
+ m_osrEntryHeads.append(m_blockHeads[blockIndex]);
+ continue;
+ }
+
+ m_osrEntryHeads.append(m_jit.label());
+ m_jit.move(Imm32(blockIndex), GPRInfo::regT0);
+ m_jit.jump().linkTo(m_blockHeads[blockIndex], &m_jit);
+ }
+}
+
void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
{
+ unsigned osrEntryIndex = 0;
for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
BasicBlock& block = *m_jit.graph().m_blocks[blockIndex];
if (block.isOSRTarget)
- m_jit.noticeOSREntry(block, m_blockHeads[blockIndex], linkBuffer);
+ m_jit.noticeOSREntry(block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
}
+ ASSERT(osrEntryIndex == m_osrEntryHeads.size());
}
ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
@@ -1253,8 +1293,8 @@ ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSo
return ValueRecovery::alreadyInRegisterFileAsUnboxedDouble();
case HaveNode: {
- if (m_jit.isConstant(valueSource.nodeIndex()))
- return ValueRecovery::constant(m_jit.valueOfJSConstant(valueSource.nodeIndex()));
+ if (isConstant(valueSource.nodeIndex()))
+ return ValueRecovery::constant(valueOfJSConstant(valueSource.nodeIndex()));
Node* nodePtr = &at(valueSource.nodeIndex());
if (!nodePtr->shouldGenerate()) {
@@ -2184,7 +2224,7 @@ void SpeculativeJIT::compileSoftModulo(Node& node)
void SpeculativeJIT::compileAdd(Node& node)
{
- if (m_jit.graph().addShouldSpeculateInteger(node, m_jit.codeBlock())) {
+ if (m_jit.graph().addShouldSpeculateInteger(node)) {
if (isNumberConstant(node.child1().index())) {
int32_t imm1 = valueOfNumberConstantAsInt32(node.child1().index());
SpeculateIntegerOperand op2(this, node.child2());
@@ -2258,13 +2298,18 @@ void SpeculativeJIT::compileAdd(Node& node)
return;
}
- ASSERT(node.op == ValueAdd);
- compileValueAdd(node);
+ if (node.op == ValueAdd) {
+ compileValueAdd(node);
+ return;
+ }
+
+ // We don't handle this yet. :-(
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
}
void SpeculativeJIT::compileArithSub(Node& node)
{
- if (m_jit.graph().addShouldSpeculateInteger(node, m_jit.codeBlock())) {
+ if (m_jit.graph().addShouldSpeculateInteger(node)) {
if (isNumberConstant(node.child2().index())) {
SpeculateIntegerOperand op1(this, node.child1());
int32_t imm2 = valueOfNumberConstantAsInt32(node.child2().index());
@@ -2428,24 +2473,23 @@ bool SpeculativeJIT::compileStrictEqForConstant(Node& node, NodeUse value, JSVal
}
#if USE(JSVALUE64)
- addBranch(m_jit.branchPtr(condition, op1.gpr(), MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant)))), taken);
+ branchPtr(condition, op1.gpr(), MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))), taken);
#else
GPRReg payloadGPR = op1.payloadGPR();
GPRReg tagGPR = op1.tagGPR();
if (condition == MacroAssembler::Equal) {
// Drop down if not equal, go elsewhere if equal.
MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
- addBranch(m_jit.branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload())), taken);
+ branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
notEqual.link(&m_jit);
} else {
// Drop down if equal, go elsehwere if not equal.
- addBranch(m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag())), taken);
- addBranch(m_jit.branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload())), taken);
+ branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
+ branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
}
#endif
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
use(node.child1());
use(node.child2());
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index cfb2189cf..5d95b064f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -176,6 +176,7 @@ public:
SpeculativeJIT(JITCompiler&);
bool compile();
+ void createOSREntries();
void linkOSREntries(LinkBuffer&);
Node& at(NodeIndex nodeIndex)
@@ -462,7 +463,7 @@ private:
if (registerFormat == DataFormatCell) {
ASSERT(info.gpr() == target);
- if (node.isConstant()) {
+ if (node.hasConstant()) {
JSValue value = valueOfJSConstant(nodeIndex);
ASSERT(value.isCell());
m_jit.move(ImmPtr(value.asCell()), target);
@@ -734,15 +735,15 @@ private:
bool isKnownNotCell(NodeIndex);
// Checks/accessors for constant values.
- bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); }
- bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); }
- bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); }
- bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); }
- bool isNumberConstant(NodeIndex nodeIndex) { return m_jit.isNumberConstant(nodeIndex); }
- bool isBooleanConstant(NodeIndex nodeIndex) { return m_jit.isBooleanConstant(nodeIndex); }
- bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.isFunctionConstant(nodeIndex); }
- int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
- double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.valueOfNumberConstant(nodeIndex); }
+ bool isConstant(NodeIndex nodeIndex) { return m_jit.graph().isConstant(nodeIndex); }
+ bool isJSConstant(NodeIndex nodeIndex) { return m_jit.graph().isJSConstant(nodeIndex); }
+ bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.graph().isInt32Constant(nodeIndex); }
+ bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.graph().isDoubleConstant(nodeIndex); }
+ bool isNumberConstant(NodeIndex nodeIndex) { return m_jit.graph().isNumberConstant(nodeIndex); }
+ bool isBooleanConstant(NodeIndex nodeIndex) { return m_jit.graph().isBooleanConstant(nodeIndex); }
+ bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.graph().isFunctionConstant(nodeIndex); }
+ int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.graph().valueOfInt32Constant(nodeIndex); }
+ double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfNumberConstant(nodeIndex); }
int32_t valueOfNumberConstantAsInt32(NodeIndex nodeIndex)
{
if (isInt32Constant(nodeIndex))
@@ -752,9 +753,9 @@ private:
#if USE(JSVALUE32_64)
void* addressOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.addressOfDoubleConstant(nodeIndex); }
#endif
- JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); }
- bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.valueOfBooleanConstant(nodeIndex); }
- JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return m_jit.valueOfFunctionConstant(nodeIndex); }
+ JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfJSConstant(nodeIndex); }
+ bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfBooleanConstant(nodeIndex); }
+ JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfFunctionConstant(nodeIndex); }
bool isNullConstant(NodeIndex nodeIndex)
{
if (!isConstant(nodeIndex))
@@ -1407,7 +1408,7 @@ private:
JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
{
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
- CallBeginToken token = m_jit.beginCall(codeOrigin);
+ CallBeginToken token = m_jit.beginCall();
JITCompiler::Call call = m_jit.appendCall(function);
m_jit.addExceptionCheck(call, codeOrigin, token);
return call;
@@ -1466,6 +1467,161 @@ private:
return call;
}
#endif
+
+ void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BlockIndex destination)
+ {
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchDouble(cond, left, right), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchDouble(JITCompiler::invert(cond), left, right);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ void branchDoubleNonZero(FPRReg value, FPRReg scratch, BlockIndex destination)
+ {
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchDoubleZeroOrNaN(value, scratch);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T, typename U>
+ void branch32(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
+ {
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branch32(cond, left, right), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branch32(JITCompiler::invert(cond), left, right);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T, typename U>
+ void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
+ {
+ ASSERT(JITCompiler::isInvertible(cond));
+
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchTest32(cond, value, mask), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value, mask);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T>
+ void branchTest32(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
+ {
+ ASSERT(JITCompiler::isInvertible(cond));
+
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchTest32(cond, value), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T, typename U>
+ void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
+ {
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchPtr(cond, left, right), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchPtr(JITCompiler::invert(cond), left, right);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T, typename U>
+ void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
+ {
+ ASSERT(JITCompiler::isInvertible(cond));
+
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchTestPtr(cond, value, mask), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value, mask);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T>
+ void branchTestPtr(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
+ {
+ ASSERT(JITCompiler::isInvertible(cond));
+
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchTestPtr(cond, value), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T, typename U>
+ void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
+ {
+ ASSERT(JITCompiler::isInvertible(cond));
+
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchTest8(cond, value, mask), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value, mask);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ template<typename T>
+ void branchTest8(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
+ {
+ ASSERT(JITCompiler::isInvertible(cond));
+
+ if (!haveEdgeCodeToEmit(destination))
+ return addBranch(m_jit.branchTest8(cond, value), destination);
+
+ JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value);
+ emitEdgeCode(destination);
+ addBranch(m_jit.jump(), destination);
+ notTaken.link(&m_jit);
+ }
+
+ enum FallThroughMode {
+ AtFallThroughPoint,
+ ForceJump
+ };
+ void jump(BlockIndex destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
+ {
+ if (haveEdgeCodeToEmit(destination))
+ emitEdgeCode(destination);
+ if (destination == m_block + 1
+ && fallThroughMode == AtFallThroughPoint)
+ return;
+ addBranch(m_jit.jump(), destination);
+ }
+
+ inline bool haveEdgeCodeToEmit(BlockIndex)
+ {
+ return DFG_ENABLE_EDGE_CODE_VERIFICATION;
+ }
+ void emitEdgeCode(BlockIndex destination)
+ {
+ if (!DFG_ENABLE_EDGE_CODE_VERIFICATION)
+ return;
+ m_jit.move(Imm32(destination), GPRInfo::regT0);
+ }
void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
{
@@ -1565,7 +1721,7 @@ private:
template<typename T>
void emitAllocateJSFinalObject(T structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
{
- MarkedAllocator* allocator = &m_jit.globalData()->heap.allocatorForObject(sizeof(JSFinalObject));
+ MarkedAllocator* allocator = &m_jit.globalData()->heap.allocatorForObjectWithoutDestructor(sizeof(JSFinalObject));
m_jit.loadPtr(&allocator->m_firstFreeCell, resultGPR);
slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
@@ -1603,7 +1759,7 @@ private:
{
if (!m_compileOkay)
return;
- m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.valueProfileFor(nodeIndex), jumpToFail, this));
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this));
}
void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeUse nodeUse, MacroAssembler::Jump jumpToFail)
{
@@ -1626,7 +1782,7 @@ private:
if (!m_compileOkay)
return;
m_jit.codeBlock()->appendSpeculationRecovery(recovery);
- m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.valueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries()));
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries()));
}
void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeUse nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
@@ -1637,7 +1793,7 @@ private:
void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpeculativeJIT was terminated.\n");
+ dataLog("SpeculativeJIT was terminated.\n");
#endif
if (!m_compileOkay)
return;
@@ -1699,6 +1855,8 @@ private:
RegisterBank<FPRInfo> m_fprs;
Vector<MacroAssembler::Label> m_blockHeads;
+ Vector<MacroAssembler::Label> m_osrEntryHeads;
+
struct BranchRecord {
BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
: jump(jump)
@@ -2329,7 +2487,7 @@ inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
, m_arguments(jit.codeBlock()->numParameters())
, m_variables(jit.graph().m_localVars)
, m_lastSetOperand(std::numeric_limits<int>::max())
- , m_state(m_jit.codeBlock(), m_jit.graph())
+ , m_state(m_jit.graph())
{
}
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index a2cdec086..8c4d8c030 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -634,21 +634,20 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeUse operand, NodeIndex
notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR);
- addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)), taken);
+ branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), taken);
if (!isKnownCell(operand.index())) {
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken, ForceJump);
notCell.link(&m_jit);
// null or undefined?
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
m_jit.move(argTagGPR, resultGPR);
m_jit.or32(TrustedImm32(1), resultGPR);
- addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag)), taken);
+ branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken);
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
}
bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeUse operand, bool invert)
@@ -708,7 +707,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
flushRegisters();
callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
- addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ branchTest32(callResultCondition, resultGPR, taken);
} else {
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
@@ -721,10 +720,10 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
if (!isKnownInteger(node.child2().index()))
slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
- addBranch(m_jit.branch32(cond, arg1PayloadGPR, arg2PayloadGPR), taken);
+ branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken);
if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken, ForceJump);
slowPath.link(&m_jit);
@@ -732,12 +731,11 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
silentFillAllRegisters(resultGPR);
- addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ branchTest32(callResultCondition, resultGPR, taken);
}
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
@@ -825,13 +823,13 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
- addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR), invert ? notTaken : taken);
+ branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken);
silentSpillAllRegisters(resultPayloadGPR);
callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
silentFillAllRegisters(resultPayloadGPR);
- addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR), taken);
+ branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
} else {
// FIXME: Add fast paths for twoCells, number etc.
@@ -839,11 +837,10 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
silentFillAllRegisters(resultPayloadGPR);
- addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR), taken);
+ branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
}
void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
@@ -953,7 +950,7 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
- CallBeginToken token = m_jit.nextCallBeginToken(codeOrigin);
+ CallBeginToken token = m_jit.beginJSCall();
JITCompiler::Call fastCall = m_jit.nearCall();
m_jit.notifyCall(fastCall, codeOrigin, token);
@@ -963,11 +960,11 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
m_jit.poke(GPRInfo::argumentGPR0);
- token = m_jit.beginCall(codeOrigin);
+ token = m_jit.beginCall();
JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- token = m_jit.nextCallBeginToken(codeOrigin);
+ token = m_jit.beginJSCall();
JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
m_jit.notifyCall(theCall, codeOrigin, token);
@@ -984,7 +981,7 @@ template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecInt@%d ", nodeIndex);
+ dataLog("SpecInt@%d ", nodeIndex);
#endif
if (isKnownNotInteger(nodeIndex)) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
@@ -1080,7 +1077,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecDouble@%d ", nodeIndex);
+ dataLog("SpecDouble@%d ", nodeIndex);
#endif
if (isKnownNotNumber(nodeIndex)) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
@@ -1210,7 +1207,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecCell@%d ", nodeIndex);
+ dataLog("SpecCell@%d ", nodeIndex);
#endif
if (isKnownNotCell(nodeIndex)) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
@@ -1283,7 +1280,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecBool@%d ", nodeIndex);
+ dataLog("SpecBool@%d ", nodeIndex);
#endif
if (isKnownNotBoolean(nodeIndex)) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
@@ -1546,7 +1543,7 @@ void SpeculativeJIT::emitObjectOrOtherBranch(NodeUse nodeUse, BlockIndex taken,
MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag));
if (needSpeculationCheck)
speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valuePayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
- addBranch(m_jit.jump(), taken);
+ jump(taken, ForceJump);
notCell.link(&m_jit);
@@ -1557,8 +1554,7 @@ void SpeculativeJIT::emitObjectOrOtherBranch(NodeUse nodeUse, BlockIndex taken,
speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
noResult(m_compileIndex);
}
@@ -1579,9 +1575,8 @@ void SpeculativeJIT::emitBranch(Node& node)
notTaken = tmp;
}
- addBranch(m_jit.branchTest32(condition, value.gpr(), TrustedImm32(1)), taken);
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchTest32(condition, value.gpr(), TrustedImm32(1), taken);
+ jump(notTaken);
noResult(m_compileIndex);
} else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
@@ -1600,15 +1595,14 @@ void SpeculativeJIT::emitBranch(Node& node)
}
SpeculateIntegerOperand value(this, node.child1());
- addBranch(m_jit.branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr()), taken);
+ branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
} else {
SpeculateDoubleOperand value(this, node.child1());
FPRTemporary scratch(this);
- addBranch(m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()), taken);
+ branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
noResult(m_compileIndex);
} else {
@@ -1626,17 +1620,16 @@ void SpeculativeJIT::emitBranch(Node& node)
JITCompiler::Jump slowPath = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::BooleanTag));
fastPath.link(&m_jit);
- addBranch(m_jit.branchTest32(JITCompiler::Zero, valuePayloadGPR), notTaken);
- addBranch(m_jit.jump(), taken);
+ branchTest32(JITCompiler::Zero, valuePayloadGPR, notTaken);
+ jump(taken, ForceJump);
slowPath.link(&m_jit);
silentSpillAllRegisters(resultGPR);
callOperation(dfgConvertJSValueToBoolean, resultGPR, valueTagGPR, valuePayloadGPR);
silentFillAllRegisters(resultGPR);
- addBranch(m_jit.branchTest32(JITCompiler::NonZero, resultGPR), taken);
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchTest32(JITCompiler::NonZero, resultGPR, taken);
+ jump(notTaken);
noResult(m_compileIndex, UseChildrenCalledExplicitly);
}
@@ -2600,8 +2593,7 @@ void SpeculativeJIT::compile(Node& node)
case DFG::Jump: {
BlockIndex taken = node.takenBlockIndex();
- if (taken != (m_block + 1))
- addBranch(m_jit.jump(), taken);
+ jump(taken);
noResult(m_compileIndex);
break;
}
@@ -2622,9 +2614,8 @@ void SpeculativeJIT::compile(Node& node)
notTaken = tmp;
}
- addBranch(m_jit.branchTest32(condition, op.gpr()), taken);
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchTest32(condition, op.gpr(), taken);
+ jump(notTaken);
noResult(m_compileIndex);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index 0dc207f75..6d375f81f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -611,20 +611,19 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(NodeUse operand, NodeIndex
notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, argGPR, GPRInfo::tagMaskRegister);
m_jit.loadPtr(JITCompiler::Address(argGPR, JSCell::structureOffset()), resultGPR);
- addBranch(m_jit.branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)), taken);
+ branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), taken);
if (!isKnownCell(operand.index())) {
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken, ForceJump);
notCell.link(&m_jit);
m_jit.move(argGPR, resultGPR);
m_jit.andPtr(JITCompiler::TrustedImm32(~TagBitUndefined), resultGPR);
- addBranch(m_jit.branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))), taken);
+ branchPtr(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull)), taken);
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
}
bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, NodeUse operand, bool invert)
@@ -682,7 +681,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
flushRegisters();
callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
- addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ branchTest32(callResultCondition, resultGPR, taken);
} else {
GPRTemporary result(this, arg2);
GPRReg resultGPR = result.gpr();
@@ -695,10 +694,10 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
if (!isKnownInteger(node.child2().index()))
slowPath.append(m_jit.branchPtr(MacroAssembler::Below, arg2GPR, GPRInfo::tagTypeNumberRegister));
- addBranch(m_jit.branch32(cond, arg1GPR, arg2GPR), taken);
+ branch32(cond, arg1GPR, arg2GPR, taken);
if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken, ForceJump);
slowPath.link(&m_jit);
@@ -706,12 +705,11 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo
callOperation(helperFunction, resultGPR, arg1GPR, arg2GPR);
silentFillAllRegisters(resultGPR);
- addBranch(m_jit.branchTest32(callResultCondition, resultGPR), taken);
+ branchTest32(callResultCondition, resultGPR, taken);
}
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
@@ -798,13 +796,13 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
- addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR), invert ? notTaken : taken);
+ branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
silentSpillAllRegisters(resultGPR);
callOperation(operationCompareStrictEqCell, resultGPR, arg1GPR, arg2GPR);
silentFillAllRegisters(resultGPR);
- addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR), taken);
+ branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
} else {
m_jit.orPtr(arg1GPR, arg2GPR, resultGPR);
@@ -812,11 +810,11 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
JITCompiler::Jump numberCase = m_jit.branchTestPtr(JITCompiler::NonZero, resultGPR, GPRInfo::tagTypeNumberRegister);
- addBranch(m_jit.branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR), taken);
- addBranch(m_jit.jump(), notTaken);
+ branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, arg1GPR, arg2GPR, taken);
+ jump(notTaken, ForceJump);
twoCellsCase.link(&m_jit);
- addBranch(m_jit.branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR), invert ? notTaken : taken);
+ branchPtr(JITCompiler::Equal, arg1GPR, arg2GPR, invert ? notTaken : taken);
numberCase.link(&m_jit);
@@ -824,11 +822,10 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch
callOperation(operationCompareStrictEq, resultGPR, arg1GPR, arg2GPR);
silentFillAllRegisters(resultGPR);
- addBranch(m_jit.branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR), taken);
+ branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultGPR, taken);
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
}
void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
@@ -954,7 +951,7 @@ void SpeculativeJIT::emitCall(Node& node)
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
- CallBeginToken token = m_jit.nextCallBeginToken(codeOrigin);
+ CallBeginToken token = m_jit.beginJSCall();
JITCompiler::Call fastCall = m_jit.nearCall();
m_jit.notifyCall(fastCall, codeOrigin, token);
@@ -963,11 +960,11 @@ void SpeculativeJIT::emitCall(Node& node)
slowPath.link(&m_jit);
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- token = m_jit.beginCall(codeOrigin);
+ token = m_jit.beginCall();
JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
m_jit.addPtr(Imm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- token = m_jit.nextCallBeginToken(codeOrigin);
+ token = m_jit.beginJSCall();
JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
m_jit.notifyCall(theCall, codeOrigin, token);
@@ -984,7 +981,7 @@ template<bool strict>
GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecInt@%d ", nodeIndex);
+ dataLog("SpecInt@%d ", nodeIndex);
#endif
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
@@ -1129,7 +1126,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecDouble@%d ", nodeIndex);
+ dataLog("SpecDouble@%d ", nodeIndex);
#endif
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
@@ -1280,7 +1277,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecCell@%d ", nodeIndex);
+ dataLog("SpecCell@%d ", nodeIndex);
#endif
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
@@ -1353,7 +1350,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
{
#if DFG_ENABLE(DEBUG_VERBOSE)
- fprintf(stderr, "SpecBool@%d ", nodeIndex);
+ dataLog("SpecBool@%d ", nodeIndex);
#endif
Node& node = at(nodeIndex);
VirtualRegister virtualRegister = node.virtualRegister();
@@ -1633,7 +1630,7 @@ void SpeculativeJIT::emitObjectOrOtherBranch(NodeUse nodeUse, BlockIndex taken,
MacroAssembler::Jump notCell = m_jit.branchTestPtr(MacroAssembler::NonZero, valueGPR, GPRInfo::tagMaskRegister);
if (needSpeculationCheck)
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valueGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
- addBranch(m_jit.jump(), taken);
+ jump(taken, ForceJump);
notCell.link(&m_jit);
@@ -1642,8 +1639,7 @@ void SpeculativeJIT::emitObjectOrOtherBranch(NodeUse nodeUse, BlockIndex taken,
m_jit.andPtr(MacroAssembler::TrustedImm32(~TagBitUndefined), scratchGPR);
speculationCheck(BadType, JSValueRegs(valueGPR), nodeUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, scratchGPR, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(ValueNull))));
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
noResult(m_compileIndex);
}
@@ -1666,9 +1662,8 @@ void SpeculativeJIT::emitBranch(Node& node)
notTaken = tmp;
}
- addBranch(m_jit.branchTest32(condition, valueGPR, TrustedImm32(true)), taken);
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchTest32(condition, valueGPR, TrustedImm32(true), taken);
+ jump(notTaken);
noResult(m_compileIndex);
} else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
@@ -1687,15 +1682,14 @@ void SpeculativeJIT::emitBranch(Node& node)
}
SpeculateIntegerOperand value(this, node.child1());
- addBranch(m_jit.branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr()), taken);
+ branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
} else {
SpeculateDoubleOperand value(this, node.child1());
FPRTemporary scratch(this);
- addBranch(m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()), taken);
+ branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
}
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ jump(notTaken);
noResult(m_compileIndex);
} else {
@@ -1705,18 +1699,18 @@ void SpeculativeJIT::emitBranch(Node& node)
bool predictBoolean = isBooleanPrediction(m_jit.getPrediction(node.child1()));
if (predictBoolean) {
- addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false)))), notTaken);
- addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true)))), taken);
+ branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
+ branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true))), taken);
speculationCheck(BadType, JSValueRegs(valueGPR), node.child1(), m_jit.jump());
value.use();
} else {
- addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsNumber(0)))), notTaken);
- addBranch(m_jit.branchPtr(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister), taken);
+ branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsNumber(0))), notTaken);
+ branchPtr(MacroAssembler::AboveOrEqual, valueGPR, GPRInfo::tagTypeNumberRegister, taken);
if (!predictBoolean) {
- addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false)))), notTaken);
- addBranch(m_jit.branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true)))), taken);
+ branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(false))), notTaken);
+ branchPtr(MacroAssembler::Equal, valueGPR, MacroAssembler::ImmPtr(JSValue::encode(jsBoolean(true))), taken);
}
value.use();
@@ -1725,9 +1719,8 @@ void SpeculativeJIT::emitBranch(Node& node)
callOperation(dfgConvertJSValueToBoolean, resultGPR, valueGPR);
silentFillAllRegisters(resultGPR);
- addBranch(m_jit.branchTest32(MacroAssembler::NonZero, resultGPR), taken);
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchTest32(MacroAssembler::NonZero, resultGPR, taken);
+ jump(notTaken);
}
noResult(m_compileIndex, UseChildrenCalledExplicitly);
@@ -2630,8 +2623,7 @@ void SpeculativeJIT::compile(Node& node)
case DFG::Jump: {
BlockIndex taken = node.takenBlockIndex();
- if (taken != (m_block + 1))
- addBranch(m_jit.jump(), taken);
+ jump(taken);
noResult(m_compileIndex);
break;
}
@@ -2652,9 +2644,8 @@ void SpeculativeJIT::compile(Node& node)
notTaken = tmp;
}
- addBranch(m_jit.branchTest32(condition, op.gpr()), taken);
- if (notTaken != (m_block + 1))
- addBranch(m_jit.jump(), notTaken);
+ branchTest32(condition, op.gpr(), taken);
+ jump(notTaken);
noResult(m_compileIndex);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
index 3cc53748a..bd626f9fb 100644
--- a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
+++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h
@@ -70,6 +70,11 @@ public:
return mergePrediction(find()->m_prediction, prediction);
}
+ PredictedType nonUnifiedPrediction()
+ {
+ return m_prediction;
+ }
+
PredictedType prediction()
{
return find()->m_prediction;
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
new file mode 100644
index 000000000..c0b9fae65
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DFGVirtualRegisterAllocationPhase.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGGraph.h"
+#include "DFGScoreBoard.h"
+
+namespace JSC { namespace DFG {
+
+class VirtualRegisterAllocationPhase : public Phase {
+public:
+ VirtualRegisterAllocationPhase(Graph& graph)
+ : Phase(graph, "virtual register allocation")
+ {
+ }
+
+ void run()
+ {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Preserved vars: ");
+ m_graph.m_preservedVars.dump(WTF::dataFile());
+ dataLog("\n");
+#endif
+ ScoreBoard scoreBoard(m_graph, m_graph.m_preservedVars);
+ unsigned sizeExcludingPhiNodes = m_graph.m_blocks.last()->end;
+ for (size_t i = 0; i < sizeExcludingPhiNodes; ++i) {
+ Node& node = m_graph[i];
+
+ if (!node.shouldGenerate())
+ continue;
+
+ // GetLocal nodes are effectively phi nodes in the graph, referencing
+ // results from prior blocks.
+ if (node.op != GetLocal) {
+ // First, call use on all of the current node's children, then
+ // allocate a VirtualRegister for this node. We do so in this
+ // order so that if a child is on its last use, and a
+ // VirtualRegister is freed, then it may be reused for node.
+ if (node.op & NodeHasVarArgs) {
+ for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
+ scoreBoard.use(m_graph.m_varArgChildren[childIdx]);
+ } else {
+ scoreBoard.use(node.child1());
+ scoreBoard.use(node.child2());
+ scoreBoard.use(node.child3());
+ }
+ }
+
+ if (!node.hasResult())
+ continue;
+
+ node.setVirtualRegister(scoreBoard.allocate());
+ // 'mustGenerate' nodes have their useCount artificially elevated,
+ // call use now to account for this.
+ if (node.mustGenerate())
+ scoreBoard.use(i);
+ }
+
+ // 'm_numCalleeRegisters' is the number of locals and temporaries allocated
+ // for the function (and checked for on entry). Since we perform a new and
+ // different allocation of temporaries, more registers may now be required.
+ unsigned calleeRegisters = scoreBoard.highWatermark() + m_graph.m_parameterSlots;
+ if ((unsigned)codeBlock()->m_numCalleeRegisters < calleeRegisters)
+ codeBlock()->m_numCalleeRegisters = calleeRegisters;
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog("Num callee registers: %u\n", calleeRegisters);
+#endif
+ }
+};
+
+void performVirtualRegisterAllocation(Graph& graph)
+{
+ runPhase<VirtualRegisterAllocationPhase>(graph);
+}
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
new file mode 100644
index 000000000..abfa6ae64
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGVirtualRegisterAllocationPhase_h
+#define DFGVirtualRegisterAllocationPhase_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGPhase.h"
+
+namespace JSC { namespace DFG {
+
+class Graph;
+
+// Prior to running this phase, we have no idea where in the call frame nodes
+// will have their values spilled. This phase fixes that by giving each node
+// a spill slot. The spill slot index (i.e. the virtual register) is also used
+// for look-up tables for the linear scan register allocator that the backend
+// uses.
+
+void performVirtualRegisterAllocation(Graph&);
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGVirtualRegisterAllocationPhase_h
+
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.cpp b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
index a509f06e1..d63faebf3 100644
--- a/Source/JavaScriptCore/heap/ConservativeRoots.cpp
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.cpp
@@ -26,8 +26,8 @@
#include "config.h"
#include "ConservativeRoots.h"
-#include "BumpSpace.h"
-#include "BumpSpaceInlineMethods.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlineMethods.h"
#include "CodeBlock.h"
#include "DFGCodeBlocks.h"
#include "JSCell.h"
@@ -36,12 +36,12 @@
namespace JSC {
-ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks, BumpSpace* bumpSpace)
+ConservativeRoots::ConservativeRoots(const MarkedBlockSet* blocks, CopiedSpace* copiedSpace)
: m_roots(m_inlineRoots)
, m_size(0)
, m_capacity(inlineCapacity)
, m_blocks(blocks)
- , m_bumpSpace(bumpSpace)
+ , m_copiedSpace(copiedSpace)
{
}
@@ -72,9 +72,9 @@ inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter
{
markHook.mark(p);
- BumpBlock* block;
- if (m_bumpSpace->contains(p, block))
- m_bumpSpace->pin(block);
+ CopiedBlock* block;
+ if (m_copiedSpace->contains(p, block))
+ m_copiedSpace->pin(block);
MarkedBlock* candidate = MarkedBlock::blockFor(p);
if (filter.ruleOut(reinterpret_cast<Bits>(candidate))) {
diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.h b/Source/JavaScriptCore/heap/ConservativeRoots.h
index 40b0996d0..9d9e9ba0c 100644
--- a/Source/JavaScriptCore/heap/ConservativeRoots.h
+++ b/Source/JavaScriptCore/heap/ConservativeRoots.h
@@ -38,7 +38,7 @@ class Heap;
class ConservativeRoots {
public:
- ConservativeRoots(const MarkedBlockSet*, BumpSpace*);
+ ConservativeRoots(const MarkedBlockSet*, CopiedSpace*);
~ConservativeRoots();
void add(void* begin, void* end);
@@ -63,7 +63,7 @@ private:
size_t m_size;
size_t m_capacity;
const MarkedBlockSet* m_blocks;
- BumpSpace* m_bumpSpace;
+ CopiedSpace* m_copiedSpace;
JSCell* m_inlineRoots[inlineCapacity];
};
diff --git a/Source/JavaScriptCore/heap/CopiedAllocator.h b/Source/JavaScriptCore/heap/CopiedAllocator.h
new file mode 100644
index 000000000..c5ba50d78
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopiedAllocator.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CopiedAllocator_h
+#define CopiedAllocator_h
+
+#include "CopiedBlock.h"
+
+namespace JSC {
+
+class CopiedAllocator {
+public:
+ CopiedAllocator();
+ void* allocate(size_t);
+ bool fitsInCurrentBlock(size_t);
+ bool wasLastAllocation(void*, size_t);
+ void startedCopying();
+ void resetCurrentBlock(CopiedBlock*);
+ void resetLastAllocation(void*);
+ size_t currentUtilization();
+
+private:
+ CopiedBlock* currentBlock() { return m_currentBlock; }
+
+ char* m_currentOffset;
+ CopiedBlock* m_currentBlock;
+};
+
+inline CopiedAllocator::CopiedAllocator()
+ : m_currentOffset(0)
+ , m_currentBlock(0)
+{
+}
+
+inline void* CopiedAllocator::allocate(size_t bytes)
+{
+ ASSERT(m_currentOffset);
+ ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes)));
+ ASSERT(fitsInCurrentBlock(bytes));
+ void* ptr = static_cast<void*>(m_currentOffset);
+ m_currentOffset += bytes;
+ ASSERT(is8ByteAligned(ptr));
+ return ptr;
+}
+
+inline bool CopiedAllocator::fitsInCurrentBlock(size_t bytes)
+{
+ return m_currentOffset + bytes < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize && m_currentOffset + bytes > m_currentOffset;
+}
+
+inline bool CopiedAllocator::wasLastAllocation(void* ptr, size_t size)
+{
+ return static_cast<char*>(ptr) + size == m_currentOffset && ptr > m_currentBlock && ptr < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize;
+}
+
+inline void CopiedAllocator::startedCopying()
+{
+ if (m_currentBlock)
+ m_currentBlock->m_offset = static_cast<void*>(m_currentOffset);
+ m_currentOffset = 0;
+ m_currentBlock = 0;
+}
+
+inline void CopiedAllocator::resetCurrentBlock(CopiedBlock* newBlock)
+{
+ if (m_currentBlock)
+ m_currentBlock->m_offset = static_cast<void*>(m_currentOffset);
+ m_currentBlock = newBlock;
+ m_currentOffset = static_cast<char*>(newBlock->m_offset);
+}
+
+inline size_t CopiedAllocator::currentUtilization()
+{
+ return static_cast<size_t>(m_currentOffset - m_currentBlock->m_payload);
+}
+
+inline void CopiedAllocator::resetLastAllocation(void* ptr)
+{
+ m_currentOffset = static_cast<char*>(ptr);
+}
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/heap/CopiedBlock.h b/Source/JavaScriptCore/heap/CopiedBlock.h
new file mode 100644
index 000000000..a57c1150c
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopiedBlock.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CopiedBlock_h
+#define CopiedBlock_h
+
+#include "HeapBlock.h"
+#include "JSValue.h"
+#include "JSValueInlineMethods.h"
+
+namespace JSC {
+
+class CopiedSpace;
+
+class CopiedBlock : public HeapBlock {
+ friend class CopiedSpace;
+ friend class CopiedAllocator;
+public:
+ CopiedBlock(PageAllocationAligned& allocation)
+ : HeapBlock(allocation)
+ , m_offset(m_payload)
+ , m_isPinned(false)
+ {
+ ASSERT(is8ByteAligned(static_cast<void*>(m_payload)));
+#if USE(JSVALUE64)
+ memset(static_cast<void*>(m_payload), 0, static_cast<size_t>((reinterpret_cast<char*>(this) + allocation.size()) - m_payload));
+#else
+ JSValue emptyValue;
+ JSValue* limit = reinterpret_cast<JSValue*>(reinterpret_cast<char*>(this) + allocation.size());
+ for (JSValue* currentValue = reinterpret_cast<JSValue*>(m_payload); currentValue < limit; currentValue++)
+ *currentValue = emptyValue;
+#endif
+ }
+
+private:
+ void* m_offset;
+ uintptr_t m_isPinned;
+ uintptr_t m_padding;
+ uintptr_t m_dummy;
+ char m_payload[1];
+};
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/heap/BumpSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpace.cpp
index 3454631b0..3310d2c58 100644
--- a/Source/JavaScriptCore/heap/BumpSpaceInlineMethods.h
+++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp
@@ -23,21 +23,15 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BumpSpaceInlineMethods_h
-#define BumpSpaceInlineMethods_h
+#include "config.h"
+#include "CopiedSpace.h"
-#include "BumpBlock.h"
-#include "BumpSpace.h"
-#include "Heap.h"
-#include "HeapBlock.h"
-#include "JSGlobalData.h"
-#include <wtf/CheckedBoolean.h>
+#include "CopiedSpaceInlineMethods.h"
namespace JSC {
-inline BumpSpace::BumpSpace(Heap* heap)
+CopiedSpace::CopiedSpace(Heap* heap)
: m_heap(heap)
- , m_currentBlock(0)
, m_toSpace(0)
, m_fromSpace(0)
, m_totalMemoryAllocated(0)
@@ -47,92 +41,116 @@ inline BumpSpace::BumpSpace(Heap* heap)
{
}
-inline void BumpSpace::init()
+void CopiedSpace::init()
{
m_toSpace = &m_blocks1;
m_fromSpace = &m_blocks2;
- m_totalMemoryAllocated += s_blockSize * s_initialBlockNum;
+ m_totalMemoryAllocated += HeapBlock::s_blockSize * s_initialBlockNum;
if (!addNewBlock())
CRASH();
}
-inline bool BumpSpace::contains(void* ptr, BumpBlock*& result)
+CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
{
- BumpBlock* block = blockFor(ptr);
- result = block;
- return !m_toSpaceFilter.ruleOut(reinterpret_cast<Bits>(block)) && m_toSpaceSet.contains(block);
+ if (isOversize(bytes))
+ return tryAllocateOversize(bytes, outPtr);
+
+ m_totalMemoryUtilized += m_allocator.currentUtilization();
+ if (!addNewBlock()) {
+ *outPtr = 0;
+ return false;
+ }
+ *outPtr = m_allocator.allocate(bytes);
+ ASSERT(*outPtr);
+ return true;
}
-inline void BumpSpace::pin(BumpBlock* block)
+CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
{
- block->m_isPinned = true;
+ ASSERT(isOversize(bytes));
+
+ size_t blockSize = WTF::roundUpToMultipleOf<s_pageSize>(sizeof(CopiedBlock) + bytes);
+ PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, s_pageSize, OSAllocator::JSGCHeapPages);
+ if (!static_cast<bool>(allocation)) {
+ *outPtr = 0;
+ return false;
+ }
+ CopiedBlock* block = new (NotNull, allocation.base()) CopiedBlock(allocation);
+ m_oversizeBlocks.push(block);
+ ASSERT(is8ByteAligned(block->m_offset));
+
+ m_oversizeFilter.add(reinterpret_cast<Bits>(block));
+
+ m_totalMemoryAllocated += blockSize;
+ m_totalMemoryUtilized += bytes;
+
+ *outPtr = block->m_offset;
+ return true;
}
-inline void BumpSpace::startedCopying()
+CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
{
- DoublyLinkedList<HeapBlock>* temp = m_fromSpace;
- m_fromSpace = m_toSpace;
- m_toSpace = temp;
+ if (oldSize >= newSize)
+ return true;
+
+ void* oldPtr = *ptr;
+ ASSERT(!m_heap->globalData()->isInitializingObject());
- m_toSpaceFilter.reset();
+ if (isOversize(oldSize) || isOversize(newSize))
+ return tryReallocateOversize(ptr, oldSize, newSize);
- m_totalMemoryUtilized = 0;
+ if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
+ m_allocator.resetLastAllocation(oldPtr);
+ if (m_allocator.fitsInCurrentBlock(newSize)) {
+ m_totalMemoryUtilized += newSize - oldSize;
+ return m_allocator.allocate(newSize);
+ }
+ }
+ m_totalMemoryUtilized -= oldSize;
- ASSERT(!m_inCopyingPhase);
- ASSERT(!m_numberOfLoanedBlocks);
- m_inCopyingPhase = true;
+ void* result = 0;
+ if (!tryAllocate(newSize, &result)) {
+ *ptr = 0;
+ return false;
+ }
+ memcpy(result, oldPtr, oldSize);
+ *ptr = result;
+ return true;
}
-inline void BumpSpace::doneCopying()
+CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
{
- {
- MutexLocker locker(m_loanedBlocksLock);
- while (m_numberOfLoanedBlocks > 0)
- m_loanedBlocksCondition.wait(m_loanedBlocksLock);
- }
-
- ASSERT(m_inCopyingPhase);
- m_inCopyingPhase = false;
- while (!m_fromSpace->isEmpty()) {
- BumpBlock* block = static_cast<BumpBlock*>(m_fromSpace->removeHead());
- if (block->m_isPinned) {
- block->m_isPinned = false;
- m_toSpace->push(block);
- continue;
- }
+ ASSERT(isOversize(oldSize) || isOversize(newSize));
+ ASSERT(newSize > oldSize);
- m_toSpaceSet.remove(block);
- {
- MutexLocker locker(m_heap->m_freeBlockLock);
- m_heap->m_freeBlocks.push(block);
- m_heap->m_numberOfFreeBlocks++;
- }
+ void* oldPtr = *ptr;
+
+ void* newPtr = 0;
+ if (!tryAllocateOversize(newSize, &newPtr)) {
+ *ptr = 0;
+ return false;
}
+ memcpy(newPtr, oldPtr, oldSize);
- BumpBlock* curr = static_cast<BumpBlock*>(m_oversizeBlocks.head());
- while (curr) {
- BumpBlock* next = static_cast<BumpBlock*>(curr->next());
- if (!curr->m_isPinned) {
- m_oversizeBlocks.remove(curr);
- m_totalMemoryAllocated -= curr->m_allocation.size();
- m_totalMemoryUtilized -= curr->m_allocation.size() - sizeof(BumpBlock);
- curr->m_allocation.deallocate();
- } else
- curr->m_isPinned = false;
- curr = next;
+ if (isOversize(oldSize)) {
+ CopiedBlock* oldBlock = oversizeBlockFor(oldPtr);
+ m_oversizeBlocks.remove(oldBlock);
+ oldBlock->m_allocation.deallocate();
+ m_totalMemoryAllocated -= oldSize + sizeof(CopiedBlock);
}
+
+ m_totalMemoryUtilized -= oldSize;
- if (!(m_currentBlock = static_cast<BumpBlock*>(m_toSpace->head())))
- if (!addNewBlock())
- CRASH();
+ *ptr = newPtr;
+ return true;
}
-inline void BumpSpace::doneFillingBlock(BumpBlock* block)
+void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
ASSERT(block);
- ASSERT(block->m_offset < reinterpret_cast<char*>(block) + s_blockSize);
+ ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
ASSERT(m_inCopyingPhase);
if (block->m_offset == block->m_payload) {
@@ -161,27 +179,56 @@ inline void BumpSpace::doneFillingBlock(BumpBlock* block)
}
}
-inline void BumpSpace::recycleBlock(BumpBlock* block)
+void CopiedSpace::doneCopying()
{
{
- MutexLocker locker(m_heap->m_freeBlockLock);
- m_heap->m_freeBlocks.push(block);
- m_heap->m_numberOfFreeBlocks++;
+ MutexLocker locker(m_loanedBlocksLock);
+ while (m_numberOfLoanedBlocks > 0)
+ m_loanedBlocksCondition.wait(m_loanedBlocksLock);
}
- {
- MutexLocker locker(m_loanedBlocksLock);
- ASSERT(m_numberOfLoanedBlocks > 0);
- m_numberOfLoanedBlocks--;
- if (!m_numberOfLoanedBlocks)
- m_loanedBlocksCondition.signal();
+ ASSERT(m_inCopyingPhase);
+ m_inCopyingPhase = false;
+ while (!m_fromSpace->isEmpty()) {
+ CopiedBlock* block = static_cast<CopiedBlock*>(m_fromSpace->removeHead());
+ if (block->m_isPinned) {
+ block->m_isPinned = false;
+ m_toSpace->push(block);
+ continue;
+ }
+
+ m_toSpaceSet.remove(block);
+ {
+ MutexLocker locker(m_heap->m_freeBlockLock);
+ m_heap->m_freeBlocks.push(block);
+ m_heap->m_numberOfFreeBlocks++;
+ }
}
+
+ CopiedBlock* curr = static_cast<CopiedBlock*>(m_oversizeBlocks.head());
+ while (curr) {
+ CopiedBlock* next = static_cast<CopiedBlock*>(curr->next());
+ if (!curr->m_isPinned) {
+ m_oversizeBlocks.remove(curr);
+ m_totalMemoryAllocated -= curr->m_allocation.size();
+ m_totalMemoryUtilized -= curr->m_allocation.size() - sizeof(CopiedBlock);
+ curr->m_allocation.deallocate();
+ } else
+ curr->m_isPinned = false;
+ curr = next;
+ }
+
+ if (!m_toSpace->head()) {
+ if (!addNewBlock())
+ CRASH();
+ } else
+ m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
-inline CheckedBoolean BumpSpace::getFreshBlock(AllocationEffort allocationEffort, BumpBlock** outBlock)
+CheckedBoolean CopiedSpace::getFreshBlock(AllocationEffort allocationEffort, CopiedBlock** outBlock)
{
HeapBlock* heapBlock = 0;
- BumpBlock* block = 0;
+ CopiedBlock* block = 0;
{
MutexLocker locker(m_heap->m_freeBlockLock);
if (!m_heap->m_freeBlocks.isEmpty()) {
@@ -190,7 +237,7 @@ inline CheckedBoolean BumpSpace::getFreshBlock(AllocationEffort allocationEffort
}
}
if (heapBlock)
- block = new (NotNull, heapBlock) BumpBlock(heapBlock->m_allocation);
+ block = new (NotNull, heapBlock) CopiedBlock(heapBlock->m_allocation);
else if (allocationEffort == AllocationMustSucceed) {
if (!allocateNewBlock(&block)) {
*outBlock = 0;
@@ -209,192 +256,9 @@ inline CheckedBoolean BumpSpace::getFreshBlock(AllocationEffort allocationEffort
}
}
ASSERT(block);
- ASSERT(isPointerAligned(block->m_offset));
+ ASSERT(is8ByteAligned(block->m_offset));
*outBlock = block;
return true;
}
-inline CheckedBoolean BumpSpace::borrowBlock(BumpBlock** outBlock)
-{
- BumpBlock* block = 0;
- if (!getFreshBlock(AllocationMustSucceed, &block)) {
- *outBlock = 0;
- return false;
- }
-
- ASSERT(m_inCopyingPhase);
- MutexLocker locker(m_loanedBlocksLock);
- m_numberOfLoanedBlocks++;
-
- ASSERT(block->m_offset == block->m_payload);
- *outBlock = block;
- return true;
-}
-
-inline CheckedBoolean BumpSpace::addNewBlock()
-{
- BumpBlock* block = 0;
- if (!getFreshBlock(AllocationCanFail, &block))
- return false;
-
- m_toSpace->push(block);
- m_currentBlock = block;
- return true;
-}
-
-inline CheckedBoolean BumpSpace::allocateNewBlock(BumpBlock** outBlock)
-{
- PageAllocationAligned allocation = PageAllocationAligned::allocate(s_blockSize, s_blockSize, OSAllocator::JSGCHeapPages);
- if (!static_cast<bool>(allocation)) {
- *outBlock = 0;
- return false;
- }
-
- {
- MutexLocker locker(m_memoryStatsLock);
- m_totalMemoryAllocated += s_blockSize;
- }
-
- *outBlock = new (NotNull, allocation.base()) BumpBlock(allocation);
- return true;
-}
-
-inline bool BumpSpace::fitsInBlock(BumpBlock* block, size_t bytes)
-{
- return static_cast<char*>(block->m_offset) + bytes < reinterpret_cast<char*>(block) + s_blockSize && static_cast<char*>(block->m_offset) + bytes > block->m_offset;
-}
-
-inline bool BumpSpace::fitsInCurrentBlock(size_t bytes)
-{
- return fitsInBlock(m_currentBlock, bytes);
-}
-
-inline CheckedBoolean BumpSpace::tryAllocate(size_t bytes, void** outPtr)
-{
- ASSERT(!m_heap->globalData()->isInitializingObject());
-
- if (isOversize(bytes) || !fitsInCurrentBlock(bytes))
- return tryAllocateSlowCase(bytes, outPtr);
-
- *outPtr = allocateFromBlock(m_currentBlock, bytes);
- return true;
-}
-
-inline CheckedBoolean BumpSpace::tryAllocateOversize(size_t bytes, void** outPtr)
-{
- ASSERT(isOversize(bytes));
-
- size_t blockSize = WTF::roundUpToMultipleOf<s_pageSize>(sizeof(BumpBlock) + bytes);
- PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, s_pageSize, OSAllocator::JSGCHeapPages);
- if (!static_cast<bool>(allocation)) {
- *outPtr = 0;
- return false;
- }
- BumpBlock* block = new (NotNull, allocation.base()) BumpBlock(allocation);
- m_oversizeBlocks.push(block);
- ASSERT(isPointerAligned(block->m_offset));
-
- m_oversizeFilter.add(reinterpret_cast<Bits>(block));
-
- m_totalMemoryAllocated += blockSize;
- m_totalMemoryUtilized += bytes;
-
- *outPtr = block->m_offset;
- return true;
-}
-
-inline void* BumpSpace::allocateFromBlock(BumpBlock* block, size_t bytes)
-{
- ASSERT(!isOversize(bytes));
- ASSERT(fitsInBlock(block, bytes));
- ASSERT(isPointerAligned(block->m_offset));
-
- void* ptr = block->m_offset;
- ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + s_blockSize);
- block->m_offset = static_cast<void*>((static_cast<char*>(ptr) + bytes));
- ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + s_blockSize);
-
- ASSERT(isPointerAligned(ptr));
- return ptr;
-}
-
-inline CheckedBoolean BumpSpace::tryReallocate(void** ptr, size_t oldSize, size_t newSize)
-{
- if (oldSize >= newSize)
- return true;
-
- void* oldPtr = *ptr;
- ASSERT(!m_heap->globalData()->isInitializingObject());
-
- if (isOversize(oldSize) || isOversize(newSize))
- return tryReallocateOversize(ptr, oldSize, newSize);
-
- if (static_cast<char*>(oldPtr) + oldSize == m_currentBlock->m_offset && oldPtr > m_currentBlock && oldPtr < reinterpret_cast<char*>(m_currentBlock) + s_blockSize) {
- m_currentBlock->m_offset = oldPtr;
- if (fitsInCurrentBlock(newSize)) {
- m_totalMemoryUtilized += newSize - oldSize;
- return allocateFromBlock(m_currentBlock, newSize);
- }
- }
- m_totalMemoryUtilized -= oldSize;
-
- void* result = 0;
- if (!tryAllocate(newSize, &result)) {
- *ptr = 0;
- return false;
- }
- memcpy(result, oldPtr, oldSize);
- *ptr = result;
- return true;
-}
-
-inline CheckedBoolean BumpSpace::tryReallocateOversize(void** ptr, size_t oldSize, size_t newSize)
-{
- ASSERT(isOversize(oldSize) || isOversize(newSize));
- ASSERT(newSize > oldSize);
-
- void* oldPtr = *ptr;
-
- void* newPtr = 0;
- if (!tryAllocateOversize(newSize, &newPtr)) {
- *ptr = 0;
- return false;
- }
- memcpy(newPtr, oldPtr, oldSize);
-
- if (isOversize(oldSize)) {
- BumpBlock* oldBlock = oversizeBlockFor(oldPtr);
- m_oversizeBlocks.remove(oldBlock);
- oldBlock->m_allocation.deallocate();
- m_totalMemoryAllocated -= oldSize + sizeof(BumpBlock);
- }
-
- m_totalMemoryUtilized -= oldSize;
-
- *ptr = newPtr;
- return true;
-}
-
-inline bool BumpSpace::isOversize(size_t bytes)
-{
- return bytes > s_maxAllocationSize;
-}
-
-inline bool BumpSpace::isPinned(void* ptr)
-{
- return blockFor(ptr)->m_isPinned;
-}
-
-inline BumpBlock* BumpSpace::oversizeBlockFor(void* ptr)
-{
- return reinterpret_cast<BumpBlock*>(reinterpret_cast<size_t>(ptr) & s_pageMask);
-}
-
-inline BumpBlock* BumpSpace::blockFor(void* ptr)
-{
- return reinterpret_cast<BumpBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
-}
-
} // namespace JSC
-
-#endif
diff --git a/Source/JavaScriptCore/heap/BumpSpace.h b/Source/JavaScriptCore/heap/CopiedSpace.h
index 30e6b74fe..285e2b9a2 100644
--- a/Source/JavaScriptCore/heap/BumpSpace.h
+++ b/Source/JavaScriptCore/heap/CopiedSpace.h
@@ -23,9 +23,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef BumpSpace_h
-#define BumpSpace_h
+#ifndef CopiedSpace_h
+#define CopiedSpace_h
+#include "CopiedAllocator.h"
#include "HeapBlock.h"
#include "TinyBloomFilter.h"
#include <wtf/Assertions.h>
@@ -40,13 +41,13 @@
namespace JSC {
class Heap;
-class BumpBlock;
+class CopiedBlock;
class HeapBlock;
-class BumpSpace {
+class CopiedSpace {
friend class SlotVisitor;
public:
- BumpSpace(Heap*);
+ CopiedSpace(Heap*);
void init();
CheckedBoolean tryAllocate(size_t, void**);
@@ -56,42 +57,41 @@ public:
void doneCopying();
bool isInCopyPhase() { return m_inCopyingPhase; }
- void pin(BumpBlock*);
+ void pin(CopiedBlock*);
bool isPinned(void*);
- bool contains(void*, BumpBlock*&);
+ bool contains(void*, CopiedBlock*&);
size_t totalMemoryAllocated() { return m_totalMemoryAllocated; }
size_t totalMemoryUtilized() { return m_totalMemoryUtilized; }
- static BumpBlock* blockFor(void*);
+ static CopiedBlock* blockFor(void*);
private:
CheckedBoolean tryAllocateSlowCase(size_t, void**);
CheckedBoolean addNewBlock();
- CheckedBoolean allocateNewBlock(BumpBlock**);
- bool fitsInCurrentBlock(size_t);
+ CheckedBoolean allocateNewBlock(CopiedBlock**);
- static void* allocateFromBlock(BumpBlock*, size_t);
+ static void* allocateFromBlock(CopiedBlock*, size_t);
CheckedBoolean tryAllocateOversize(size_t, void**);
CheckedBoolean tryReallocateOversize(void**, size_t, size_t);
static bool isOversize(size_t);
- CheckedBoolean borrowBlock(BumpBlock**);
- CheckedBoolean getFreshBlock(AllocationEffort, BumpBlock**);
- void doneFillingBlock(BumpBlock*);
- void recycleBlock(BumpBlock*);
- static bool fitsInBlock(BumpBlock*, size_t);
- static BumpBlock* oversizeBlockFor(void* ptr);
+ CheckedBoolean borrowBlock(CopiedBlock**);
+ CheckedBoolean getFreshBlock(AllocationEffort, CopiedBlock**);
+ void doneFillingBlock(CopiedBlock*);
+ void recycleBlock(CopiedBlock*);
+ static bool fitsInBlock(CopiedBlock*, size_t);
+ static CopiedBlock* oversizeBlockFor(void* ptr);
Heap* m_heap;
- BumpBlock* m_currentBlock;
+ CopiedAllocator m_allocator;
TinyBloomFilter m_toSpaceFilter;
TinyBloomFilter m_oversizeFilter;
- HashSet<BumpBlock*> m_toSpaceSet;
+ HashSet<CopiedBlock*> m_toSpaceSet;
Mutex m_toSpaceLock;
Mutex m_memoryStatsLock;
@@ -112,12 +112,11 @@ private:
ThreadCondition m_loanedBlocksCondition;
size_t m_numberOfLoanedBlocks;
- static const size_t s_blockSize = 64 * KB;
static const size_t s_maxAllocationSize = 32 * KB;
static const size_t s_pageSize = 4 * KB;
static const size_t s_pageMask = ~(s_pageSize - 1);
static const size_t s_initialBlockNum = 16;
- static const size_t s_blockMask = ~(s_blockSize - 1);
+ static const size_t s_blockMask = ~(HeapBlock::s_blockSize - 1);
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
new file mode 100644
index 000000000..9a1f63cec
--- /dev/null
+++ b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CopiedSpaceInlineMethods_h
+#define CopiedSpaceInlineMethods_h
+
+#include "CopiedBlock.h"
+#include "CopiedSpace.h"
+#include "Heap.h"
+#include "HeapBlock.h"
+#include "JSGlobalData.h"
+#include <wtf/CheckedBoolean.h>
+
+namespace JSC {
+
+inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result)
+{
+ CopiedBlock* block = blockFor(ptr);
+ result = block;
+ return !m_toSpaceFilter.ruleOut(reinterpret_cast<Bits>(block)) && m_toSpaceSet.contains(block);
+}
+
+inline void CopiedSpace::pin(CopiedBlock* block)
+{
+ block->m_isPinned = true;
+}
+
+inline void CopiedSpace::startedCopying()
+{
+ DoublyLinkedList<HeapBlock>* temp = m_fromSpace;
+ m_fromSpace = m_toSpace;
+ m_toSpace = temp;
+
+ m_toSpaceFilter.reset();
+ m_allocator.startedCopying();
+
+ m_totalMemoryUtilized = 0;
+
+ ASSERT(!m_inCopyingPhase);
+ ASSERT(!m_numberOfLoanedBlocks);
+ m_inCopyingPhase = true;
+}
+
+inline void CopiedSpace::recycleBlock(CopiedBlock* block)
+{
+ {
+ MutexLocker locker(m_heap->m_freeBlockLock);
+ m_heap->m_freeBlocks.push(block);
+ m_heap->m_numberOfFreeBlocks++;
+ }
+
+ {
+ MutexLocker locker(m_loanedBlocksLock);
+ ASSERT(m_numberOfLoanedBlocks > 0);
+ m_numberOfLoanedBlocks--;
+ if (!m_numberOfLoanedBlocks)
+ m_loanedBlocksCondition.signal();
+ }
+}
+
+inline CheckedBoolean CopiedSpace::borrowBlock(CopiedBlock** outBlock)
+{
+ CopiedBlock* block = 0;
+ if (!getFreshBlock(AllocationMustSucceed, &block)) {
+ *outBlock = 0;
+ return false;
+ }
+
+ ASSERT(m_inCopyingPhase);
+ MutexLocker locker(m_loanedBlocksLock);
+ m_numberOfLoanedBlocks++;
+
+ ASSERT(block->m_offset == block->m_payload);
+ *outBlock = block;
+ return true;
+}
+
+inline CheckedBoolean CopiedSpace::addNewBlock()
+{
+ CopiedBlock* block = 0;
+ if (!getFreshBlock(AllocationCanFail, &block))
+ return false;
+
+ m_toSpace->push(block);
+ m_toSpaceFilter.add(reinterpret_cast<Bits>(block));
+ m_toSpaceSet.add(block);
+ m_allocator.resetCurrentBlock(block);
+ return true;
+}
+
+inline CheckedBoolean CopiedSpace::allocateNewBlock(CopiedBlock** outBlock)
+{
+ PageAllocationAligned allocation = PageAllocationAligned::allocate(HeapBlock::s_blockSize, HeapBlock::s_blockSize, OSAllocator::JSGCHeapPages);
+ if (!static_cast<bool>(allocation)) {
+ *outBlock = 0;
+ return false;
+ }
+
+ {
+ MutexLocker locker(m_memoryStatsLock);
+ m_totalMemoryAllocated += HeapBlock::s_blockSize;
+ }
+
+ *outBlock = new (NotNull, allocation.base()) CopiedBlock(allocation);
+ return true;
+}
+
+inline bool CopiedSpace::fitsInBlock(CopiedBlock* block, size_t bytes)
+{
+ return static_cast<char*>(block->m_offset) + bytes < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize && static_cast<char*>(block->m_offset) + bytes > block->m_offset;
+}
+
+inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
+{
+ ASSERT(!m_heap->globalData()->isInitializingObject());
+
+ if (isOversize(bytes) || !m_allocator.fitsInCurrentBlock(bytes))
+ return tryAllocateSlowCase(bytes, outPtr);
+
+ *outPtr = m_allocator.allocate(bytes);
+ ASSERT(*outPtr);
+ return true;
+}
+
+inline void* CopiedSpace::allocateFromBlock(CopiedBlock* block, size_t bytes)
+{
+ ASSERT(!isOversize(bytes));
+ ASSERT(fitsInBlock(block, bytes));
+ ASSERT(is8ByteAligned(block->m_offset));
+
+ void* ptr = block->m_offset;
+ ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
+ block->m_offset = static_cast<void*>((static_cast<char*>(ptr) + bytes));
+ ASSERT(block->m_offset >= block->m_payload && block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
+
+ ASSERT(is8ByteAligned(ptr));
+ return ptr;
+}
+
+inline bool CopiedSpace::isOversize(size_t bytes)
+{
+ return bytes > s_maxAllocationSize;
+}
+
+inline bool CopiedSpace::isPinned(void* ptr)
+{
+ return blockFor(ptr)->m_isPinned;
+}
+
+inline CopiedBlock* CopiedSpace::oversizeBlockFor(void* ptr)
+{
+ return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_pageMask);
+}
+
+inline CopiedBlock* CopiedSpace::blockFor(void* ptr)
+{
+ return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
+}
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/heap/GCAssertions.h b/Source/JavaScriptCore/heap/GCAssertions.h
new file mode 100644
index 000000000..f044df6f0
--- /dev/null
+++ b/Source/JavaScriptCore/heap/GCAssertions.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCAssertions_h
+#define GCAssertions_h
+
+#include "Assertions.h"
+
+#if ENABLE(GC_VALIDATION)
+#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { \
+ if (!(cell))\
+ CRASH();\
+ if (cell->unvalidatedStructure()->unvalidatedStructure() != cell->unvalidatedStructure()->unvalidatedStructure()->unvalidatedStructure())\
+ CRASH();\
+} while (0)
+
+#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do {\
+ ASSERT_GC_OBJECT_LOOKS_VALID(object); \
+ if (!object->inherits(classInfo)) \
+ CRASH();\
+} while (0)
+
+#else
+#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { (void)cell; } while (0)
+#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do { (void)object; (void)classInfo; } while (0)
+#endif
+
+#if COMPILER_SUPPORTS(HAS_TRIVIAL_DESTRUCTOR)
+#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass) COMPILE_ASSERT(__has_trivial_destructor(klass), klass##_has_trivial_destructor_check)
+#else
+#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass)
+#endif
+
+#endif // GCAssertions_h
diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp
index 9f5094a58..1333c7b2c 100644
--- a/Source/JavaScriptCore/heap/Heap.cpp
+++ b/Source/JavaScriptCore/heap/Heap.cpp
@@ -21,8 +21,8 @@
#include "config.h"
#include "Heap.h"
-#include "BumpSpace.h"
-#include "BumpSpaceInlineMethods.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlineMethods.h"
#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "GCActivityCallback.h"
@@ -77,7 +77,7 @@ struct GCTimer {
}
~GCTimer()
{
- printf("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000);
+ dataLog("%s: %.2lfms (avg. %.2lf, min. %.2lf, max. %.2lf)\n", m_name, m_time * 1000, m_time * 1000 / m_count, m_min*1000, m_max*1000);
}
double m_time;
double m_min;
@@ -127,7 +127,7 @@ struct GCCounter {
}
~GCCounter()
{
- printf("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
+ dataLog("%s: %zu values (avg. %zu, min. %zu, max. %zu)\n", m_name, m_total, m_total / m_count, m_min, m_max);
}
const char* m_name;
size_t m_count;
@@ -345,7 +345,7 @@ Heap::~Heap()
m_blockFreeingThreadShouldQuit = true;
m_freeBlockCondition.broadcast();
}
- waitForThreadCompletion(m_blockFreeingThread, 0);
+ waitForThreadCompletion(m_blockFreeingThread);
// The destroy function must already have been called, so assert this.
ASSERT(!m_globalData);
@@ -381,8 +381,8 @@ void Heap::destroy()
ASSERT(!size());
#if ENABLE(SIMPLE_HEAP_PROFILING)
- m_slotVisitor.m_visitedTypeCounts.dump(stderr, "Visited Type Counts");
- m_destroyedTypeCounts.dump(stderr, "Destroyed Type Counts");
+ m_slotVisitor.m_visitedTypeCounts.dump(WTF::dataFile(), "Visited Type Counts");
+ m_destroyedTypeCounts.dump(WTF::dataFile(), "Destroyed Type Counts");
#endif
releaseFreeBlocks();
@@ -407,10 +407,9 @@ void Heap::waitForRelativeTime(double relative)
waitForRelativeTimeWhileHoldingLock(relative);
}
-void* Heap::blockFreeingThreadStartFunc(void* heap)
+void Heap::blockFreeingThreadStartFunc(void* heap)
{
static_cast<Heap*>(heap)->blockFreeingThreadMain();
- return 0;
}
void Heap::blockFreeingThreadMain()
diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h
index 1d0ac5407..bcacee6d5 100644
--- a/Source/JavaScriptCore/heap/Heap.h
+++ b/Source/JavaScriptCore/heap/Heap.h
@@ -40,7 +40,7 @@
namespace JSC {
- class BumpSpace;
+ class CopiedSpace;
class CodeBlock;
class GCActivityCallback;
class GlobalCodeBlock;
@@ -50,6 +50,7 @@ namespace JSC {
class JSGlobalData;
class JSValue;
class LiveObjectIterator;
+ class LLIntOffsetsExtractor;
class MarkedArgumentBuffer;
class RegisterFile;
class UString;
@@ -95,8 +96,9 @@ namespace JSC {
// true if an allocation or collection is in progress
inline bool isBusy();
- MarkedAllocator& allocatorForObject(size_t bytes) { return m_objectSpace.allocatorFor(bytes); }
- void* allocate(size_t);
+ MarkedAllocator& firstAllocatorWithoutDestructors() { return m_objectSpace.firstAllocator(); }
+ MarkedAllocator& allocatorForObjectWithoutDestructor(size_t bytes) { return m_objectSpace.allocatorFor(bytes); }
+ MarkedAllocator& allocatorForObjectWithDestructor(size_t bytes) { return m_objectSpace.destructorAllocatorFor(bytes); }
CheckedBoolean tryAllocateStorage(size_t, void**);
CheckedBoolean tryReallocateStorage(void**, size_t, size_t);
@@ -136,12 +138,17 @@ namespace JSC {
void getConservativeRegisterRoots(HashSet<JSCell*>& roots);
private:
+ friend class CodeBlock;
+ friend class LLIntOffsetsExtractor;
friend class MarkedSpace;
friend class MarkedAllocator;
friend class MarkedBlock;
- friend class BumpSpace;
+ friend class CopiedSpace;
friend class SlotVisitor;
- friend class CodeBlock;
+ template<typename T> friend void* allocateCell(Heap&);
+
+ void* allocateWithDestructor(size_t);
+ void* allocateWithoutDestructor(size_t);
size_t waterMark();
size_t highWaterMark();
@@ -183,7 +190,7 @@ namespace JSC {
void waitForRelativeTimeWhileHoldingLock(double relative);
void waitForRelativeTime(double relative);
void blockFreeingThreadMain();
- static void* blockFreeingThreadStartFunc(void* heap);
+ static void blockFreeingThreadStartFunc(void* heap);
const HeapSize m_heapSize;
const size_t m_minBytesPerCycle;
@@ -193,7 +200,7 @@ namespace JSC {
OperationInProgress m_operationInProgress;
MarkedSpace m_objectSpace;
- BumpSpace m_storageSpace;
+ CopiedSpace m_storageSpace;
DoublyLinkedList<HeapBlock> m_freeBlocks;
size_t m_numberOfFreeBlocks;
@@ -334,10 +341,16 @@ namespace JSC {
return forEachProtectedCell(functor);
}
- inline void* Heap::allocate(size_t bytes)
+ inline void* Heap::allocateWithDestructor(size_t bytes)
+ {
+ ASSERT(isValidAllocation(bytes));
+ return m_objectSpace.allocateWithDestructor(bytes);
+ }
+
+ inline void* Heap::allocateWithoutDestructor(size_t bytes)
{
ASSERT(isValidAllocation(bytes));
- return m_objectSpace.allocate(bytes);
+ return m_objectSpace.allocateWithoutDestructor(bytes);
}
inline CheckedBoolean Heap::tryAllocateStorage(size_t bytes, void** outPtr)
diff --git a/Source/JavaScriptCore/heap/HeapBlock.h b/Source/JavaScriptCore/heap/HeapBlock.h
index b0ecb2059..591520d2b 100644
--- a/Source/JavaScriptCore/heap/HeapBlock.h
+++ b/Source/JavaScriptCore/heap/HeapBlock.h
@@ -28,6 +28,7 @@
#include <wtf/DoublyLinkedList.h>
#include <wtf/PageAllocationAligned.h>
+#include <wtf/StdLibExtras.h>
namespace JSC {
@@ -47,6 +48,8 @@ public:
HeapBlock* m_prev;
HeapBlock* m_next;
PageAllocationAligned m_allocation;
+
+ static const size_t s_blockSize = 64 * KB;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 9a3092396..129a7ab67 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -26,8 +26,8 @@
#include "config.h"
#include "MarkStack.h"
-#include "BumpSpace.h"
-#include "BumpSpaceInlineMethods.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlineMethods.h"
#include "ConservativeRoots.h"
#include "Heap.h"
#include "Options.h"
@@ -226,16 +226,15 @@ void MarkStackThreadSharedData::markingThreadMain()
slotVisitor.drainFromShared(SlotVisitor::SlaveDrain);
}
-void* MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
+void MarkStackThreadSharedData::markingThreadStartFunc(void* shared)
{
static_cast<MarkStackThreadSharedData*>(shared)->markingThreadMain();
- return 0;
}
#endif
MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData)
: m_globalData(globalData)
- , m_bumpSpace(&globalData->heap.m_storageSpace)
+ , m_copiedSpace(&globalData->heap.m_storageSpace)
, m_sharedMarkStack(m_segmentAllocator)
, m_numberOfActiveParallelMarkers(0)
, m_parallelMarkersShouldExit(false)
@@ -258,7 +257,7 @@ MarkStackThreadSharedData::~MarkStackThreadSharedData()
m_markingCondition.broadcast();
}
for (unsigned i = 0; i < m_markingThreads.size(); ++i)
- waitForThreadCompletion(m_markingThreads[i], 0);
+ waitForThreadCompletion(m_markingThreads[i]);
#endif
}
@@ -304,7 +303,7 @@ ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell
#endif
ASSERT(Heap::isMarked(cell));
-
+
if (isJSString(cell)) {
JSString::visitChildren(const_cast<JSCell*>(cell), visitor);
return;
@@ -402,7 +401,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
while (true) {
// Did we reach termination?
if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
- // Let any sleeping slaves know it's time for them to give their private BumpBlocks back
+ // Let any sleeping slaves know it's time for them to give their private CopiedBlocks back
m_shared.m_markingCondition.broadcast();
return;
}
@@ -459,32 +458,32 @@ void MarkStack::mergeOpaqueRoots()
void SlotVisitor::startCopying()
{
ASSERT(!m_copyBlock);
- if (!m_shared.m_bumpSpace->borrowBlock(&m_copyBlock))
+ if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
CRASH();
}
void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
{
- if (BumpSpace::isOversize(bytes)) {
- m_shared.m_bumpSpace->pin(BumpSpace::oversizeBlockFor(ptr));
+ if (CopiedSpace::isOversize(bytes)) {
+ m_shared.m_copiedSpace->pin(CopiedSpace::oversizeBlockFor(ptr));
return 0;
}
- if (m_shared.m_bumpSpace->isPinned(ptr))
+ if (m_shared.m_copiedSpace->isPinned(ptr))
return 0;
// The only time it's possible to have a null copy block is if we have just started copying.
if (!m_copyBlock)
startCopying();
- if (!BumpSpace::fitsInBlock(m_copyBlock, bytes)) {
+ if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
// We don't need to lock across these two calls because the master thread won't
// call doneCopying() because this thread is considered active.
- m_shared.m_bumpSpace->doneFillingBlock(m_copyBlock);
- if (!m_shared.m_bumpSpace->borrowBlock(&m_copyBlock))
+ m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
+ if (!m_shared.m_copiedSpace->borrowBlock(&m_copyBlock))
CRASH();
}
- return BumpSpace::allocateFromBlock(m_copyBlock, bytes);
+ return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
}
void SlotVisitor::copy(void** ptr, size_t bytes)
@@ -524,7 +523,7 @@ void SlotVisitor::doneCopying()
if (!m_copyBlock)
return;
- m_shared.m_bumpSpace->doneFillingBlock(m_copyBlock);
+ m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
m_copyBlock = 0;
}
diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h
index 6923cdd8a..0695b1b32 100644
--- a/Source/JavaScriptCore/heap/MarkStack.h
+++ b/Source/JavaScriptCore/heap/MarkStack.h
@@ -26,7 +26,7 @@
#ifndef MarkStack_h
#define MarkStack_h
-#include "BumpSpace.h"
+#include "CopiedSpace.h"
#include "HandleTypes.h"
#include "Options.h"
#include "JSValue.h"
@@ -178,11 +178,11 @@ namespace JSC {
#if ENABLE(PARALLEL_GC)
void markingThreadMain();
- static void* markingThreadStartFunc(void* heap);
+ static void markingThreadStartFunc(void* heap);
#endif
JSGlobalData* m_globalData;
- BumpSpace* m_bumpSpace;
+ CopiedSpace* m_copiedSpace;
MarkStackSegmentAllocator m_segmentAllocator;
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
index 8239fbaed..eb6d2c691 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp
@@ -97,11 +97,11 @@ MarkedBlock* MarkedAllocator::allocateBlock(AllocationEffort allocationEffort)
block = 0;
}
if (block)
- block = MarkedBlock::recycle(block, m_heap, m_cellSize);
+ block = MarkedBlock::recycle(block, m_heap, m_cellSize, m_cellsNeedDestruction);
else if (allocationEffort == AllocationCanFail)
return 0;
else
- block = MarkedBlock::create(m_heap, m_cellSize);
+ block = MarkedBlock::create(m_heap, m_cellSize, m_cellsNeedDestruction);
m_markedSpace->didAddBlock(block);
diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.h b/Source/JavaScriptCore/heap/MarkedAllocator.h
index 5644c691b..1c6af77a2 100644
--- a/Source/JavaScriptCore/heap/MarkedAllocator.h
+++ b/Source/JavaScriptCore/heap/MarkedAllocator.h
@@ -8,6 +8,7 @@ namespace JSC {
class Heap;
class MarkedSpace;
+class LLIntOffsetsExtractor;
namespace DFG {
class SpeculativeJIT;
@@ -22,6 +23,7 @@ public:
void reset();
void zapFreeList();
size_t cellSize() { return m_cellSize; }
+ bool cellsNeedDestruction() { return m_cellsNeedDestruction; }
void* allocate();
Heap* heap() { return m_heap; }
@@ -29,11 +31,11 @@ public:
void addBlock(MarkedBlock*);
void removeBlock(MarkedBlock*);
- void setHeap(Heap* heap) { m_heap = heap; }
- void setCellSize(size_t cellSize) { m_cellSize = cellSize; }
- void setMarkedSpace(MarkedSpace* space) { m_markedSpace = space; }
+ void init(Heap*, MarkedSpace*, size_t cellSize, bool cellsNeedDestruction);
private:
+ friend class LLIntOffsetsExtractor;
+
JS_EXPORT_PRIVATE void* allocateSlowCase();
void* tryAllocate();
void* tryAllocateHelper();
@@ -43,6 +45,7 @@ private:
MarkedBlock* m_currentBlock;
DoublyLinkedList<HeapBlock> m_blockList;
size_t m_cellSize;
+ bool m_cellsNeedDestruction;
Heap* m_heap;
MarkedSpace* m_markedSpace;
};
@@ -51,11 +54,20 @@ inline MarkedAllocator::MarkedAllocator()
: m_firstFreeCell(0)
, m_currentBlock(0)
, m_cellSize(0)
+ , m_cellsNeedDestruction(true)
, m_heap(0)
, m_markedSpace(0)
{
}
-
+
+inline void MarkedAllocator::init(Heap* heap, MarkedSpace* markedSpace, size_t cellSize, bool cellsNeedDestruction)
+{
+ m_heap = heap;
+ m_markedSpace = markedSpace;
+ m_cellSize = cellSize;
+ m_cellsNeedDestruction = cellsNeedDestruction;
+}
+
inline void* MarkedAllocator::allocate()
{
MarkedBlock::FreeCell* firstFreeCell = m_firstFreeCell;
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp
index dd9233300..75c21e7dd 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.cpp
+++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp
@@ -32,17 +32,17 @@
namespace JSC {
-MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize)
+MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
if (!static_cast<bool>(allocation))
CRASH();
- return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize);
+ return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction);
}
-MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize)
+MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
- return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize);
+ return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize, cellsNeedDestruction);
}
void MarkedBlock::destroy(MarkedBlock* block)
@@ -50,10 +50,11 @@ void MarkedBlock::destroy(MarkedBlock* block)
block->m_allocation.deallocate();
}
-MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize)
+MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
: HeapBlock(allocation)
, m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
, m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
+ , m_cellsNeedDestruction(cellsNeedDestruction)
, m_state(New) // All cells start out unmarked.
, m_heap(heap)
{
@@ -70,16 +71,16 @@ inline void MarkedBlock::callDestructor(JSCell* cell)
#if ENABLE(SIMPLE_HEAP_PROFILING)
m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif
- if (cell->classInfo() != &JSFinalObject::s_info)
- cell->methodTable()->destroy(cell);
+ cell->methodTable()->destroy(cell);
cell->zap();
}
-template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode>
+template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
MarkedBlock::FreeCell* MarkedBlock::specializedSweep()
{
ASSERT(blockState != Allocated && blockState != FreeListed);
+ ASSERT(destructorCallNeeded || sweepMode != SweepOnly);
// This produces a free list that is ordered in reverse through the block.
// This is fine, since the allocation code makes no assumptions about the
@@ -93,7 +94,7 @@ MarkedBlock::FreeCell* MarkedBlock::specializedSweep()
if (blockState == Zapped && !cell->isZapped())
continue;
- if (blockState != New)
+ if (destructorCallNeeded && blockState != New)
callDestructor(cell);
if (sweepMode == SweepToFreeList) {
@@ -111,10 +112,21 @@ MarkedBlock::FreeCell* MarkedBlock::sweep(SweepMode sweepMode)
{
HEAP_LOG_BLOCK_STATE_TRANSITION(this);
+ if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
+ return 0;
+
+ if (m_cellsNeedDestruction)
+ return sweepHelper<true>(sweepMode);
+ return sweepHelper<false>(sweepMode);
+}
+
+template<bool destructorCallNeeded>
+MarkedBlock::FreeCell* MarkedBlock::sweepHelper(SweepMode sweepMode)
+{
switch (m_state) {
case New:
ASSERT(sweepMode == SweepToFreeList);
- return specializedSweep<New, SweepToFreeList>();
+ return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
case FreeListed:
// Happens when a block transitions to fully allocated.
ASSERT(sweepMode == SweepToFreeList);
@@ -124,12 +136,12 @@ MarkedBlock::FreeCell* MarkedBlock::sweep(SweepMode sweepMode)
return 0;
case Marked:
return sweepMode == SweepToFreeList
- ? specializedSweep<Marked, SweepToFreeList>()
- : specializedSweep<Marked, SweepOnly>();
+ ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
+ : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
case Zapped:
return sweepMode == SweepToFreeList
- ? specializedSweep<Zapped, SweepToFreeList>()
- : specializedSweep<Zapped, SweepOnly>();
+ ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>()
+ : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>();
}
ASSERT_NOT_REACHED();
diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h
index 0a4ebe47e..5f70b69d4 100644
--- a/Source/JavaScriptCore/heap/MarkedBlock.h
+++ b/Source/JavaScriptCore/heap/MarkedBlock.h
@@ -26,6 +26,7 @@
#include "HeapBlock.h"
#include <wtf/Bitmap.h>
+#include <wtf/DataLog.h>
#include <wtf/DoublyLinkedList.h>
#include <wtf/HashFunctions.h>
#include <wtf/PageAllocationAligned.h>
@@ -36,9 +37,11 @@
#define HEAP_LOG_BLOCK_STATE_TRANSITIONS 0
#if HEAP_LOG_BLOCK_STATE_TRANSITIONS
-#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
- printf("%s:%d %s: block %s = %p, %d\n", \
- __FILE__, __LINE__, __FUNCTION__, #block, (block), (block)->m_state); \
+#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) do { \
+ dataLog( \
+ "%s:%d %s: block %s = %p, %d\n", \
+ __FILE__, __LINE__, __FUNCTION__, \
+ #block, (block), (block)->m_state); \
} while (false)
#else
#define HEAP_LOG_BLOCK_STATE_TRANSITION(block) ((void)0)
@@ -89,8 +92,8 @@ namespace JSC {
void returnValue() { }
};
- static MarkedBlock* create(Heap*, size_t cellSize);
- static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize);
+ static MarkedBlock* create(Heap*, size_t cellSize, bool cellsNeedDestruction);
+ static MarkedBlock* recycle(MarkedBlock*, Heap*, size_t cellSize, bool cellsNeedDestruction);
static void destroy(MarkedBlock*);
static bool isAtomAligned(const void*);
@@ -115,6 +118,7 @@ namespace JSC {
bool markCountIsZero(); // Faster than markCount().
size_t cellSize();
+ bool cellsNeedDestruction();
size_t size();
size_t capacity();
@@ -159,14 +163,15 @@ namespace JSC {
static const size_t atomAlignmentMask = atomSize - 1; // atomSize must be a power of two.
enum BlockState { New, FreeListed, Allocated, Marked, Zapped };
+ template<bool destructorCallNeeded> FreeCell* sweepHelper(SweepMode = SweepOnly);
typedef char Atom[atomSize];
- MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize);
+ MarkedBlock(PageAllocationAligned&, Heap*, size_t cellSize, bool cellsNeedDestruction);
Atom* atoms();
size_t atomNumber(const void*);
void callDestructor(JSCell*);
- template<BlockState, SweepMode> FreeCell* specializedSweep();
+ template<BlockState, SweepMode, bool destructorCallNeeded> FreeCell* specializedSweep();
#if ENABLE(GGC)
CardSet<bytesPerCard, blockSize> m_cards;
@@ -179,6 +184,7 @@ namespace JSC {
#else
WTF::Bitmap<atomsPerBlock, WTF::BitmapNotAtomic> m_marks;
#endif
+ bool m_cellsNeedDestruction;
BlockState m_state;
Heap* m_heap;
};
@@ -243,6 +249,11 @@ namespace JSC {
return m_atomsPerCell * atomSize;
}
+ inline bool MarkedBlock::cellsNeedDestruction()
+ {
+ return m_cellsNeedDestruction;
+ }
+
inline size_t MarkedBlock::size()
{
return markCount() * cellSize();
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp b/Source/JavaScriptCore/heap/MarkedSpace.cpp
index 87dc0493d..bf839011d 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.cpp
+++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp
@@ -36,15 +36,13 @@ MarkedSpace::MarkedSpace(Heap* heap)
, m_heap(heap)
{
for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
- allocatorFor(cellSize).setCellSize(cellSize);
- allocatorFor(cellSize).setHeap(heap);
- allocatorFor(cellSize).setMarkedSpace(this);
+ allocatorFor(cellSize).init(heap, this, cellSize, false);
+ destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
}
for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
- allocatorFor(cellSize).setCellSize(cellSize);
- allocatorFor(cellSize).setHeap(heap);
- allocatorFor(cellSize).setMarkedSpace(this);
+ allocatorFor(cellSize).init(heap, this, cellSize, false);
+ destructorAllocatorFor(cellSize).init(heap, this, cellSize, true);
}
}
@@ -53,20 +51,28 @@ void MarkedSpace::resetAllocators()
m_waterMark = 0;
m_nurseryWaterMark = 0;
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep)
+ for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
allocatorFor(cellSize).reset();
+ destructorAllocatorFor(cellSize).reset();
+ }
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep)
+ for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
allocatorFor(cellSize).reset();
+ destructorAllocatorFor(cellSize).reset();
+ }
}
void MarkedSpace::canonicalizeCellLivenessData()
{
- for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep)
+ for (size_t cellSize = preciseStep; cellSize <= preciseCutoff; cellSize += preciseStep) {
allocatorFor(cellSize).zapFreeList();
+ destructorAllocatorFor(cellSize).zapFreeList();
+ }
- for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep)
+ for (size_t cellSize = impreciseStep; cellSize <= impreciseCutoff; cellSize += impreciseStep) {
allocatorFor(cellSize).zapFreeList();
+ destructorAllocatorFor(cellSize).zapFreeList();
+ }
}
@@ -107,7 +113,7 @@ inline void TakeIfUnmarked::operator()(MarkedBlock* block)
if (!block->markCountIsZero())
return;
- m_markedSpace->allocatorFor(block->cellSize()).removeBlock(block);
+ m_markedSpace->allocatorFor(block).removeBlock(block);
m_empties.append(block);
}
diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h
index 21a0b48de..cfcf3f8dc 100644
--- a/Source/JavaScriptCore/heap/MarkedSpace.h
+++ b/Source/JavaScriptCore/heap/MarkedSpace.h
@@ -41,6 +41,7 @@ namespace JSC {
class Heap;
class JSCell;
class LiveObjectIterator;
+class LLIntOffsetsExtractor;
class WeakGCHandle;
class SlotVisitor;
@@ -51,8 +52,12 @@ public:
MarkedSpace(Heap*);
+ MarkedAllocator& firstAllocator();
MarkedAllocator& allocatorFor(size_t);
- void* allocate(size_t);
+ MarkedAllocator& allocatorFor(MarkedBlock*);
+ MarkedAllocator& destructorAllocatorFor(size_t);
+ void* allocateWithDestructor(size_t);
+ void* allocateWithoutDestructor(size_t);
void resetAllocators();
@@ -76,6 +81,8 @@ public:
void didConsumeFreeList(MarkedBlock*);
private:
+ friend class LLIntOffsetsExtractor;
+
// [ 32... 256 ]
static const size_t preciseStep = MarkedBlock::atomSize;
static const size_t preciseCutoff = 256;
@@ -86,8 +93,14 @@ private:
static const size_t impreciseCutoff = maxCellSize;
static const size_t impreciseCount = impreciseCutoff / impreciseStep;
- FixedArray<MarkedAllocator, preciseCount> m_preciseSizeClasses;
- FixedArray<MarkedAllocator, impreciseCount> m_impreciseSizeClasses;
+ struct Subspace {
+ FixedArray<MarkedAllocator, preciseCount> preciseAllocators;
+ FixedArray<MarkedAllocator, impreciseCount> impreciseAllocators;
+ };
+
+ Subspace m_destructorSpace;
+ Subspace m_normalSpace;
+
size_t m_waterMark;
size_t m_nurseryWaterMark;
Heap* m_heap;
@@ -120,27 +133,54 @@ template<typename Functor> inline typename Functor::ReturnType MarkedSpace::forE
return forEachCell(functor);
}
+inline MarkedAllocator& MarkedSpace::firstAllocator()
+{
+ return m_normalSpace.preciseAllocators[0];
+}
+
inline MarkedAllocator& MarkedSpace::allocatorFor(size_t bytes)
{
ASSERT(bytes && bytes <= maxCellSize);
if (bytes <= preciseCutoff)
- return m_preciseSizeClasses[(bytes - 1) / preciseStep];
- return m_impreciseSizeClasses[(bytes - 1) / impreciseStep];
+ return m_normalSpace.preciseAllocators[(bytes - 1) / preciseStep];
+ return m_normalSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+}
+
+inline MarkedAllocator& MarkedSpace::allocatorFor(MarkedBlock* block)
+{
+ if (block->cellsNeedDestruction())
+ return destructorAllocatorFor(block->cellSize());
+ return allocatorFor(block->cellSize());
}
-inline void* MarkedSpace::allocate(size_t bytes)
+inline MarkedAllocator& MarkedSpace::destructorAllocatorFor(size_t bytes)
+{
+ ASSERT(bytes && bytes <= maxCellSize);
+ if (bytes <= preciseCutoff)
+ return m_destructorSpace.preciseAllocators[(bytes - 1) / preciseStep];
+ return m_destructorSpace.impreciseAllocators[(bytes - 1) / impreciseStep];
+}
+
+inline void* MarkedSpace::allocateWithoutDestructor(size_t bytes)
{
return allocatorFor(bytes).allocate();
}
+inline void* MarkedSpace::allocateWithDestructor(size_t bytes)
+{
+ return destructorAllocatorFor(bytes).allocate();
+}
+
template <typename Functor> inline typename Functor::ReturnType MarkedSpace::forEachBlock(Functor& functor)
{
for (size_t i = 0; i < preciseCount; ++i) {
- m_preciseSizeClasses[i].forEachBlock(functor);
+ m_normalSpace.preciseAllocators[i].forEachBlock(functor);
+ m_destructorSpace.preciseAllocators[i].forEachBlock(functor);
}
for (size_t i = 0; i < impreciseCount; ++i) {
- m_impreciseSizeClasses[i].forEachBlock(functor);
+ m_normalSpace.impreciseAllocators[i].forEachBlock(functor);
+ m_destructorSpace.impreciseAllocators[i].forEachBlock(functor);
}
return functor.returnValue();
diff --git a/Source/JavaScriptCore/heap/PassWeak.h b/Source/JavaScriptCore/heap/PassWeak.h
new file mode 100644
index 000000000..b7aa7b10d
--- /dev/null
+++ b/Source/JavaScriptCore/heap/PassWeak.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PassWeak_h
+#define PassWeak_h
+
+#include "Assertions.h"
+#include "Handle.h"
+#include "NullPtr.h"
+#include "TypeTraits.h"
+
+namespace JSC {
+
+template<typename T> class Weak;
+template<typename T> class PassWeak;
+template<typename T> PassWeak<T> adoptWeak(HandleSlot);
+
+template<typename T> class PassWeak : public Handle<T> {
+ using Handle<T>::slot;
+ using Handle<T>::setSlot;
+
+public:
+ typedef typename Handle<T>::ExternalType ExternalType;
+
+ PassWeak() : Handle<T>() { }
+ PassWeak(std::nullptr_t) : Handle<T>() { }
+
+ PassWeak(JSGlobalData& globalData, ExternalType externalType = ExternalType(), WeakHandleOwner* weakOwner = 0, void* context = 0)
+ : Handle<T>(globalData.heap.handleHeap()->allocate())
+ {
+ HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
+ JSValue value = HandleTypes<T>::toJSValue(externalType);
+ HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
+ *slot() = value;
+ }
+
+ // It somewhat breaks the type system to allow transfer of ownership out of
+ // a const PassWeak. However, it makes it much easier to work with PassWeak
+ // temporaries, and we don't have a need to use real const PassWeaks anyway.
+ PassWeak(const PassWeak& o) : Handle<T>(o.leakHandle()) { }
+ template<typename U> PassWeak(const PassWeak<U>& o) : Handle<T>(o.leakHandle()) { }
+
+ ~PassWeak()
+ {
+ if (!slot())
+ return;
+ HandleHeap::heapFor(slot())->deallocate(slot());
+ setSlot(0);
+ }
+
+ ExternalType get() const { return HandleTypes<T>::getFromSlot(slot()); }
+
+ HandleSlot leakHandle() const WARN_UNUSED_RETURN;
+
+private:
+ friend PassWeak adoptWeak<T>(HandleSlot);
+
+ explicit PassWeak(HandleSlot slot) : Handle<T>(slot) { }
+};
+
+template<typename T> inline HandleSlot PassWeak<T>::leakHandle() const
+{
+ HandleSlot slot = this->slot();
+ const_cast<PassWeak<T>*>(this)->setSlot(0);
+ return slot;
+}
+
+template<typename T> PassWeak<T> adoptWeak(HandleSlot slot)
+{
+ return PassWeak<T>(slot);
+}
+
+template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, const PassWeak<U>& b)
+{
+ return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, const Weak<U>& b)
+{
+ return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const Weak<T>& a, const PassWeak<U>& b)
+{
+ return a.get() == b.get();
+}
+
+template<typename T, typename U> inline bool operator==(const PassWeak<T>& a, U* b)
+{
+ return a.get() == b;
+}
+
+template<typename T, typename U> inline bool operator==(T* a, const PassWeak<U>& b)
+{
+ return a == b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, const PassWeak<U>& b)
+{
+ return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, const Weak<U>& b)
+{
+ return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const Weak<T>& a, const PassWeak<U>& b)
+{
+ return a.get() != b.get();
+}
+
+template<typename T, typename U> inline bool operator!=(const PassWeak<T>& a, U* b)
+{
+ return a.get() != b;
+}
+
+template<typename T, typename U> inline bool operator!=(T* a, const PassWeak<U>& b)
+{
+ return a != b.get();
+}
+
+} // namespace JSC
+
+#endif // PassWeak_h
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index e49a9a637..6584db703 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -26,7 +26,7 @@
#ifndef SlotVisitor_h
#define SlotVisitor_h
-#include "BumpSpace.h"
+#include "CopiedSpace.h"
#include "MarkStack.h"
namespace JSC {
@@ -78,7 +78,7 @@ private:
donateSlow();
}
- BumpBlock* m_copyBlock;
+ CopiedBlock* m_copyBlock;
};
inline SlotVisitor::SlotVisitor(MarkStackThreadSharedData& shared)
diff --git a/Source/JavaScriptCore/heap/Weak.h b/Source/JavaScriptCore/heap/Weak.h
index f0c028d71..96fe1b58c 100644
--- a/Source/JavaScriptCore/heap/Weak.h
+++ b/Source/JavaScriptCore/heap/Weak.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,11 +30,14 @@
#include "Handle.h"
#include "HandleHeap.h"
#include "JSGlobalData.h"
+#include "PassWeak.h"
namespace JSC {
// A weakly referenced handle that becomes 0 when the value it points to is garbage collected.
template <typename T> class Weak : public Handle<T> {
+ WTF_MAKE_NONCOPYABLE(Weak);
+
using Handle<T>::slot;
using Handle<T>::setSlot;
@@ -46,11 +49,18 @@ public:
{
}
- Weak(JSGlobalData& globalData, ExternalType value = ExternalType(), WeakHandleOwner* weakOwner = 0, void* context = 0)
+ Weak(std::nullptr_t)
+ : Handle<T>()
+ {
+ }
+
+ Weak(JSGlobalData& globalData, ExternalType externalType = ExternalType(), WeakHandleOwner* weakOwner = 0, void* context = 0)
: Handle<T>(globalData.heap.handleHeap()->allocate())
{
HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
- set(value);
+ JSValue value = HandleTypes<T>::toJSValue(externalType);
+ HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
+ *slot() = value;
}
enum AdoptTag { Adopt };
@@ -59,23 +69,7 @@ public:
{
validateCell(get());
}
-
- Weak(const Weak& other)
- : Handle<T>()
- {
- if (!other.slot())
- return;
- setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
- }
- template <typename U> Weak(const Weak<U>& other)
- : Handle<T>()
- {
- if (!other.slot())
- return;
- setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
- }
-
enum HashTableDeletedValueTag { HashTableDeletedValue };
bool isHashTableDeletedValue() const { return slot() == hashTableDeletedValue(); }
Weak(HashTableDeletedValueTag)
@@ -83,6 +77,11 @@ public:
{
}
+ template<typename U> Weak(const PassWeak<U>& other)
+ : Handle<T>(other.leakHandle())
+ {
+ }
+
~Weak()
{
clear();
@@ -93,8 +92,12 @@ public:
Handle<T>::swap(other);
}
+ Weak& operator=(const PassWeak<T>&);
+
ExternalType get() const { return HandleTypes<T>::getFromSlot(slot()); }
+ PassWeak<T> release() { PassWeak<T> tmp = adoptWeak<T>(slot()); setSlot(0); return tmp; }
+
void clear()
{
if (!slot())
@@ -103,32 +106,6 @@ public:
setSlot(0);
}
- void set(JSGlobalData& globalData, ExternalType value, WeakHandleOwner* weakOwner = 0, void* context = 0)
- {
- if (!slot()) {
- setSlot(globalData.heap.handleHeap()->allocate());
- HandleHeap::heapFor(slot())->makeWeak(slot(), weakOwner, context);
- }
- ASSERT(HandleHeap::heapFor(slot())->hasWeakOwner(slot(), weakOwner));
- set(value);
- }
-
- template <typename U> Weak& operator=(const Weak<U>& other)
- {
- clear();
- if (other.slot())
- setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
- return *this;
- }
-
- Weak& operator=(const Weak& other)
- {
- clear();
- if (other.slot())
- setSlot(HandleHeap::heapFor(other.slot())->copyWeak(other.slot()));
- return *this;
- }
-
HandleSlot leakHandle()
{
ASSERT(HandleHeap::heapFor(slot())->hasFinalizer(slot()));
@@ -139,14 +116,6 @@ public:
private:
static HandleSlot hashTableDeletedValue() { return reinterpret_cast<HandleSlot>(-1); }
-
- void set(ExternalType externalType)
- {
- ASSERT(slot());
- JSValue value = HandleTypes<T>::toJSValue(externalType);
- HandleHeap::heapFor(slot())->writeBarrier(slot(), value);
- *slot() = value;
- }
};
template<class T> inline void swap(Weak<T>& a, Weak<T>& b)
@@ -154,6 +123,13 @@ template<class T> inline void swap(Weak<T>& a, Weak<T>& b)
a.swap(b);
}
+template<typename T> inline Weak<T>& Weak<T>::operator=(const PassWeak<T>& o)
+{
+ clear();
+ setSlot(o.leakHandle());
+ return *this;
+}
+
} // namespace JSC
namespace WTF {
@@ -162,7 +138,23 @@ template<typename T> struct VectorTraits<JSC::Weak<T> > : SimpleClassVectorTrait
static const bool canCompareWithMemcmp = false;
};
-template<typename P> struct HashTraits<JSC::Weak<P> > : SimpleClassHashTraits<JSC::Weak<P> > { };
+template<typename T> struct HashTraits<JSC::Weak<T> > : SimpleClassHashTraits<JSC::Weak<T> > {
+ typedef JSC::Weak<T> StorageType;
+
+ typedef std::nullptr_t EmptyValueType;
+ static EmptyValueType emptyValue() { return nullptr; }
+
+ typedef JSC::PassWeak<T> PassInType;
+ static void store(PassInType value, StorageType& storage) { storage = value; }
+
+ typedef JSC::PassWeak<T> PassOutType;
+ static PassOutType passOut(StorageType& value) { return value.release(); }
+ static PassOutType passOut(EmptyValueType) { return PassOutType(); }
+
+ typedef typename StorageType::ExternalType PeekType;
+ static PeekType peek(const StorageType& value) { return value.get(); }
+ static PeekType peek(EmptyValueType) { return PeekType(); }
+};
}
diff --git a/Source/JavaScriptCore/interpreter/AbstractPC.cpp b/Source/JavaScriptCore/interpreter/AbstractPC.cpp
index 863915bda..755a0e303 100644
--- a/Source/JavaScriptCore/interpreter/AbstractPC.cpp
+++ b/Source/JavaScriptCore/interpreter/AbstractPC.cpp
@@ -45,7 +45,8 @@ AbstractPC::AbstractPC(JSGlobalData& globalData, ExecState* exec)
}
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
+ UNUSED_PARAM(globalData);
m_pointer = exec->returnVPC();
m_mode = Interpreter;
#endif
diff --git a/Source/JavaScriptCore/interpreter/AbstractPC.h b/Source/JavaScriptCore/interpreter/AbstractPC.h
index dffaaf343..5ed74472e 100644
--- a/Source/JavaScriptCore/interpreter/AbstractPC.h
+++ b/Source/JavaScriptCore/interpreter/AbstractPC.h
@@ -60,7 +60,7 @@ public:
}
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
AbstractPC(Instruction* vPC)
: m_pointer(vPC)
, m_mode(Interpreter)
diff --git a/Source/JavaScriptCore/interpreter/CallFrame.cpp b/Source/JavaScriptCore/interpreter/CallFrame.cpp
index 3ef5bd26f..b0e5ea0f6 100644
--- a/Source/JavaScriptCore/interpreter/CallFrame.cpp
+++ b/Source/JavaScriptCore/interpreter/CallFrame.cpp
@@ -40,7 +40,7 @@ void CallFrame::dumpCaller()
JSValue function;
interpreter()->retrieveLastCaller(this, signedLineNumber, sourceID, urlString, function);
- printf("Callpoint => %s:%d\n", urlString.utf8().data(), signedLineNumber);
+ dataLog("Callpoint => %s:%d\n", urlString.utf8().data(), signedLineNumber);
}
RegisterFile* CallFrame::registerFile()
@@ -50,6 +50,29 @@ RegisterFile* CallFrame::registerFile()
#endif
+#if USE(JSVALUE32_64)
+unsigned CallFrame::bytecodeOffsetForNonDFGCode() const
+{
+ ASSERT(codeBlock());
+ return currentVPC() - codeBlock()->instructions().begin();
+}
+
+void CallFrame::setBytecodeOffsetForNonDFGCode(unsigned offset)
+{
+ ASSERT(codeBlock());
+ setCurrentVPC(codeBlock()->instructions().begin() + offset);
+}
+#else
+Instruction* CallFrame::currentVPC() const
+{
+ return codeBlock()->instructions().begin() + bytecodeOffsetForNonDFGCode();
+}
+void CallFrame::setCurrentVPC(Instruction* vpc)
+{
+ setBytecodeOffsetForNonDFGCode(vpc - codeBlock()->instructions().begin());
+}
+#endif
+
#if ENABLE(DFG_JIT)
bool CallFrame::isInlineCallFrameSlow()
{
@@ -96,15 +119,15 @@ CallFrame* CallFrame::trueCallFrame(AbstractPC pc)
if (pc.isSet()) {
ReturnAddressPtr currentReturnPC = pc.jitReturnAddress();
- if (!machineCodeBlock->codeOriginForReturn(currentReturnPC, codeOrigin))
- return this; // Not currently in inlined code.
+ bool hasCodeOrigin = machineCodeBlock->codeOriginForReturn(currentReturnPC, codeOrigin);
+ ASSERT_UNUSED(hasCodeOrigin, hasCodeOrigin);
} else {
- unsigned index = codeOriginIndexForDFGWithInlining();
- if (index == UINT_MAX)
- return this; // Not currently in inlined code.
-
+ unsigned index = codeOriginIndexForDFG();
codeOrigin = machineCodeBlock->codeOrigin(index);
}
+
+ if (!codeOrigin.inlineCallFrame)
+ return this; // Not currently in inlined code.
for (InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame; inlineCallFrame;) {
InlineCallFrame* nextInlineCallFrame = inlineCallFrame->caller.inlineCallFrame;
@@ -140,10 +163,10 @@ CallFrame* CallFrame::trueCallerFrame()
//
// machineCaller -> The caller according to the machine, which may be zero or
// more frames above the true caller due to inlining.
-
+
// Am I an inline call frame? If so, we're done.
if (isInlineCallFrame())
- return callerFrame();
+ return callerFrame()->removeHostCallFrameFlag();
// I am a machine call frame, so the question is: is my caller a machine call frame
// that has inlines or a machine call frame that doesn't?
@@ -153,10 +176,10 @@ CallFrame* CallFrame::trueCallerFrame()
ASSERT(!machineCaller->isInlineCallFrame());
// Figure out how we want to get the current code location.
- if (hasHostCallFrameFlag() || returnAddressIsInCtiTrampoline(returnPC()))
- return machineCaller->trueCallFrameFromVMCode();
+ if (!hasReturnPC() || returnAddressIsInCtiTrampoline(returnPC()))
+ return machineCaller->trueCallFrameFromVMCode()->removeHostCallFrameFlag();
- return machineCaller->trueCallFrame(returnPC());
+ return machineCaller->trueCallFrame(returnPC())->removeHostCallFrameFlag();
}
#endif
diff --git a/Source/JavaScriptCore/interpreter/CallFrame.h b/Source/JavaScriptCore/interpreter/CallFrame.h
index 4fadfab28..5bf2b9488 100644
--- a/Source/JavaScriptCore/interpreter/CallFrame.h
+++ b/Source/JavaScriptCore/interpreter/CallFrame.h
@@ -104,13 +104,30 @@ namespace JSC {
CallFrame* callerFrame() const { return this[RegisterFile::CallerFrame].callFrame(); }
#if ENABLE(JIT)
ReturnAddressPtr returnPC() const { return ReturnAddressPtr(this[RegisterFile::ReturnPC].vPC()); }
+ bool hasReturnPC() const { return !!this[RegisterFile::ReturnPC].vPC(); }
+ void clearReturnPC() { registers()[RegisterFile::ReturnPC] = static_cast<Instruction*>(0); }
#endif
AbstractPC abstractReturnPC(JSGlobalData& globalData) { return AbstractPC(globalData, this); }
- unsigned bytecodeOffsetForBaselineJIT() { return this[RegisterFile::ArgumentCount].tag(); }
+#if USE(JSVALUE32_64)
+ unsigned bytecodeOffsetForNonDFGCode() const;
+ void setBytecodeOffsetForNonDFGCode(unsigned offset);
+#else
+ unsigned bytecodeOffsetForNonDFGCode() const
+ {
+ ASSERT(codeBlock());
+ return this[RegisterFile::ArgumentCount].tag();
+ }
+
+ void setBytecodeOffsetForNonDFGCode(unsigned offset)
+ {
+ ASSERT(codeBlock());
+ this[RegisterFile::ArgumentCount].tag() = static_cast<int32_t>(offset);
+ }
+#endif
#if ENABLE(DFG_JIT)
InlineCallFrame* inlineCallFrame() const { return this[RegisterFile::ReturnPC].asInlineCallFrame(); }
- unsigned codeOriginIndexForDFGWithInlining() const { return this[RegisterFile::ArgumentCount].tag(); }
+ unsigned codeOriginIndexForDFG() const { return this[RegisterFile::ArgumentCount].tag(); }
#else
// This will never be called if !ENABLE(DFG_JIT) since all calls should be guarded by
// isInlineCallFrame(). But to make it easier to write code without having a bunch of
@@ -121,9 +138,22 @@ namespace JSC {
return 0;
}
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
Instruction* returnVPC() const { return this[RegisterFile::ReturnPC].vPC(); }
#endif
+#if USE(JSVALUE32_64)
+ Instruction* currentVPC() const
+ {
+ return bitwise_cast<Instruction*>(this[RegisterFile::ArgumentCount].tag());
+ }
+ void setCurrentVPC(Instruction* vpc)
+ {
+ this[RegisterFile::ArgumentCount].tag() = bitwise_cast<int32_t>(vpc);
+ }
+#else
+ Instruction* currentVPC() const;
+ void setCurrentVPC(Instruction* vpc);
+#endif
void setCallerFrame(CallFrame* callerFrame) { static_cast<Register*>(this)[RegisterFile::CallerFrame] = callerFrame; }
void setScopeChain(ScopeChainNode* scopeChain) { static_cast<Register*>(this)[RegisterFile::ScopeChain] = scopeChain; }
diff --git a/Source/JavaScriptCore/interpreter/Interpreter.cpp b/Source/JavaScriptCore/interpreter/Interpreter.cpp
index d42e869f1..336f109c0 100644
--- a/Source/JavaScriptCore/interpreter/Interpreter.cpp
+++ b/Source/JavaScriptCore/interpreter/Interpreter.cpp
@@ -45,7 +45,6 @@
#include "JSActivation.h"
#include "JSArray.h"
#include "JSByteArray.h"
-#include "JSFunction.h"
#include "JSNotAnObject.h"
#include "JSPropertyNameIterator.h"
#include "LiteralParser.h"
@@ -60,6 +59,7 @@
#include "Register.h"
#include "SamplingTool.h"
#include "StrictEvalActivation.h"
+#include "StrongInlines.h"
#include "UStringConcatenate.h"
#include <limits.h>
#include <stdio.h>
@@ -69,7 +69,7 @@
#include "JIT.h"
#endif
-#define WTF_USE_GCC_COMPUTED_GOTO_WORKAROUND (ENABLE(COMPUTED_GOTO_INTERPRETER) && !defined(__llvm__))
+#define WTF_USE_GCC_COMPUTED_GOTO_WORKAROUND ((ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER) || ENABLE(LLINT)) && !defined(__llvm__))
using namespace std;
@@ -83,7 +83,7 @@ static int depth(CodeBlock* codeBlock, ScopeChainNode* sc)
return sc->localDepth();
}
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
static NEVER_INLINE JSValue concatenateStrings(ExecState* exec, Register* strings, unsigned count)
{
return jsString(exec, strings, count);
@@ -365,7 +365,7 @@ NEVER_INLINE bool Interpreter::resolveThisAndProperty(CallFrame* callFrame, Inst
return false;
}
-#endif // ENABLE(INTERPRETER)
+#endif // ENABLE(CLASSIC_INTERPRETER)
ALWAYS_INLINE CallFrame* Interpreter::slideRegisterWindowForCall(CodeBlock* newCodeBlock, RegisterFile* registerFile, CallFrame* callFrame, size_t registerOffset, int argumentCountIncludingThis)
{
@@ -394,7 +394,7 @@ ALWAYS_INLINE CallFrame* Interpreter::slideRegisterWindowForCall(CodeBlock* newC
return newCallFrame;
}
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
static NEVER_INLINE bool isInvalidParamForIn(CallFrame* callFrame, JSValue value, JSValue& exceptionData)
{
if (value.isObject())
@@ -543,36 +543,61 @@ Interpreter::Interpreter()
#if !ASSERT_DISABLED
, m_initialized(false)
#endif
- , m_enabled(false)
+ , m_classicEnabled(false)
+{
+}
+
+Interpreter::~Interpreter()
{
+#if ENABLE(LLINT)
+ if (m_classicEnabled)
+ delete[] m_opcodeTable;
+#endif
}
-void Interpreter::initialize(bool canUseJIT)
+void Interpreter::initialize(LLInt::Data* llintData, bool canUseJIT)
{
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+ UNUSED_PARAM(llintData);
+ UNUSED_PARAM(canUseJIT);
+#if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER) || ENABLE(LLINT)
+#if !ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
+ // Having LLInt enabled, but not being able to use the JIT, and not having
+ // a computed goto interpreter, is not supported. Not because we cannot
+ // support it, but because I decided to draw the line at the number of
+ // permutations of execution engines that I wanted this code to grok.
+ ASSERT(canUseJIT);
+#endif
if (canUseJIT) {
+#if ENABLE(LLINT)
+ m_opcodeTable = llintData->opcodeMap();
+ for (int i = 0; i < numOpcodeIDs; ++i)
+ m_opcodeIDTable.add(m_opcodeTable[i], static_cast<OpcodeID>(i));
+#else
// If the JIT is present, don't use jump destinations for opcodes.
for (int i = 0; i < numOpcodeIDs; ++i) {
Opcode opcode = bitwise_cast<void*>(static_cast<uintptr_t>(i));
m_opcodeTable[i] = opcode;
}
+#endif
} else {
+#if ENABLE(LLINT)
+ m_opcodeTable = new Opcode[numOpcodeIDs];
+#endif
privateExecute(InitializeAndReturn, 0, 0);
for (int i = 0; i < numOpcodeIDs; ++i)
m_opcodeIDTable.add(m_opcodeTable[i], static_cast<OpcodeID>(i));
- m_enabled = true;
+ m_classicEnabled = true;
}
#else
- UNUSED_PARAM(canUseJIT);
-#if ENABLE(INTERPRETER)
- m_enabled = true;
+#if ENABLE(CLASSIC_INTERPRETER)
+ m_classicEnabled = true;
#else
- m_enabled = false;
+ m_classicEnabled = false;
#endif
-#endif // ENABLE(COMPUTED_GOTO_INTERPRETER)
+#endif // ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
#if !ASSERT_DISABLED
m_initialized = true;
#endif
@@ -592,10 +617,10 @@ void Interpreter::dumpCallFrame(CallFrame* callFrame)
void Interpreter::dumpRegisters(CallFrame* callFrame)
{
- printf("Register frame: \n\n");
- printf("-----------------------------------------------------------------------------\n");
- printf(" use | address | value \n");
- printf("-----------------------------------------------------------------------------\n");
+ dataLog("Register frame: \n\n");
+ dataLog("-----------------------------------------------------------------------------\n");
+ dataLog(" use | address | value \n");
+ dataLog("-----------------------------------------------------------------------------\n");
CodeBlock* codeBlock = callFrame->codeBlock();
const Register* it;
@@ -605,30 +630,30 @@ void Interpreter::dumpRegisters(CallFrame* callFrame)
it = callFrame->registers() - RegisterFile::CallFrameHeaderSize - codeBlock->numParameters();
v = (*it).jsValue();
#if USE(JSVALUE32_64)
- printf("[this] | %10p | %-16s 0x%llx \n", it, v.description(), JSValue::encode(v)); ++it;
+ dataLog("[this] | %10p | %-16s 0x%llx \n", it, v.description(), JSValue::encode(v)); ++it;
#else
- printf("[this] | %10p | %-16s %p \n", it, v.description(), JSValue::encode(v)); ++it;
+ dataLog("[this] | %10p | %-16s %p \n", it, v.description(), JSValue::encode(v)); ++it;
#endif
end = it + max(codeBlock->numParameters() - 1, 0); // - 1 to skip "this"
if (it != end) {
do {
v = (*it).jsValue();
#if USE(JSVALUE32_64)
- printf("[param] | %10p | %-16s 0x%llx \n", it, v.description(), JSValue::encode(v));
+ dataLog("[param] | %10p | %-16s 0x%llx \n", it, v.description(), JSValue::encode(v));
#else
- printf("[param] | %10p | %-16s %p \n", it, v.description(), JSValue::encode(v));
+ dataLog("[param] | %10p | %-16s %p \n", it, v.description(), JSValue::encode(v));
#endif
++it;
} while (it != end);
}
- printf("-----------------------------------------------------------------------------\n");
- printf("[CodeBlock] | %10p | %p \n", it, (*it).codeBlock()); ++it;
- printf("[ScopeChain] | %10p | %p \n", it, (*it).scopeChain()); ++it;
- printf("[CallerRegisters] | %10p | %d \n", it, (*it).i()); ++it;
- printf("[ReturnPC] | %10p | %p \n", it, (*it).vPC()); ++it;
- printf("[ArgumentCount] | %10p | %d \n", it, (*it).i()); ++it;
- printf("[Callee] | %10p | %p \n", it, (*it).function()); ++it;
- printf("-----------------------------------------------------------------------------\n");
+ dataLog("-----------------------------------------------------------------------------\n");
+ dataLog("[CodeBlock] | %10p | %p \n", it, (*it).codeBlock()); ++it;
+ dataLog("[ScopeChain] | %10p | %p \n", it, (*it).scopeChain()); ++it;
+ dataLog("[CallerRegisters] | %10p | %d \n", it, (*it).i()); ++it;
+ dataLog("[ReturnPC] | %10p | %p \n", it, (*it).vPC()); ++it;
+ dataLog("[ArgumentCount] | %10p | %d \n", it, (*it).i()); ++it;
+ dataLog("[Callee] | %10p | %p \n", it, (*it).function()); ++it;
+ dataLog("-----------------------------------------------------------------------------\n");
int registerCount = 0;
@@ -637,39 +662,41 @@ void Interpreter::dumpRegisters(CallFrame* callFrame)
do {
v = (*it).jsValue();
#if USE(JSVALUE32_64)
- printf("[r%2d] | %10p | %-16s 0x%llx \n", registerCount, it, v.description(), JSValue::encode(v));
+ dataLog("[r%2d] | %10p | %-16s 0x%llx \n", registerCount, it, v.description(), JSValue::encode(v));
#else
- printf("[r%2d] | %10p | %-16s %p \n", registerCount, it, v.description(), JSValue::encode(v));
+ dataLog("[r%2d] | %10p | %-16s %p \n", registerCount, it, v.description(), JSValue::encode(v));
#endif
++it;
++registerCount;
} while (it != end);
}
- printf("-----------------------------------------------------------------------------\n");
+ dataLog("-----------------------------------------------------------------------------\n");
end = it + codeBlock->m_numCalleeRegisters - codeBlock->m_numVars;
if (it != end) {
do {
v = (*it).jsValue();
#if USE(JSVALUE32_64)
- printf("[r%2d] | %10p | %-16s 0x%llx \n", registerCount, it, v.description(), JSValue::encode(v));
+ dataLog("[r%2d] | %10p | %-16s 0x%llx \n", registerCount, it, v.description(), JSValue::encode(v));
#else
- printf("[r%2d] | %10p | %-16s %p \n", registerCount, it, v.description(), JSValue::encode(v));
+ dataLog("[r%2d] | %10p | %-16s %p \n", registerCount, it, v.description(), JSValue::encode(v));
#endif
++it;
++registerCount;
} while (it != end);
}
- printf("-----------------------------------------------------------------------------\n");
+ dataLog("-----------------------------------------------------------------------------\n");
}
#endif
bool Interpreter::isOpcode(Opcode opcode)
{
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
- if (!m_enabled)
+#if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER) || ENABLE(LLINT)
+#if !ENABLE(LLINT)
+ if (!m_classicEnabled)
return opcode >= 0 && static_cast<OpcodeID>(bitwise_cast<uintptr_t>(opcode)) <= op_end;
+#endif
return opcode != HashTraits<Opcode>::emptyValue()
&& !HashTraits<Opcode>::isDeletedValue(opcode)
&& m_opcodeIDTable.contains(opcode);
@@ -724,13 +751,13 @@ NEVER_INLINE bool Interpreter::unwindCallFrame(CallFrame*& callFrame, JSValue ex
// the beginning of next instruction to execute. To get an offset
// inside the call instruction that triggered the exception we
// have to subtract 1.
-#if ENABLE(JIT) && ENABLE(INTERPRETER)
+#if ENABLE(JIT) && ENABLE(CLASSIC_INTERPRETER)
if (callerFrame->globalData().canUseJIT())
- bytecodeOffset = codeBlock->bytecodeOffset(callFrame->returnPC());
+ bytecodeOffset = codeBlock->bytecodeOffset(callerFrame, callFrame->returnPC());
else
bytecodeOffset = codeBlock->bytecodeOffset(callFrame->returnVPC()) - 1;
#elif ENABLE(JIT)
- bytecodeOffset = codeBlock->bytecodeOffset(callFrame->returnPC());
+ bytecodeOffset = codeBlock->bytecodeOffset(callerFrame, callFrame->returnPC());
#else
bytecodeOffset = codeBlock->bytecodeOffset(callFrame->returnVPC()) - 1;
#endif
@@ -790,6 +817,154 @@ static void appendSourceToError(CallFrame* callFrame, ErrorInstance* exception,
exception->putDirect(*globalData, globalData->propertyNames->message, jsString(globalData, message));
}
+static int getLineNumberForCallFrame(CallFrame* callFrame)
+{
+ callFrame = callFrame->removeHostCallFrameFlag();
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ if (!codeBlock)
+ return -1;
+#if ENABLE(CLASSIC_INTERPRETER)
+ if (!callFrame->globalData().canUseJIT())
+ return codeBlock->lineNumberForBytecodeOffset(callFrame->bytecodeOffsetForNonDFGCode() - 1);
+#endif
+#if ENABLE(JIT)
+#if ENABLE(DFG_JIT)
+ if (codeBlock->getJITType() == JITCode::DFGJIT)
+ return codeBlock->lineNumberForBytecodeOffset(codeBlock->codeOrigin(callFrame->codeOriginIndexForDFG()).bytecodeIndex);
+#endif
+ return codeBlock->lineNumberForBytecodeOffset(callFrame->bytecodeOffsetForNonDFGCode());
+#endif
+}
+
+static CallFrame* getCallerInfo(JSGlobalData* globalData, CallFrame* callFrame, int& lineNumber)
+{
+ UNUSED_PARAM(globalData);
+ unsigned bytecodeOffset = 0;
+ lineNumber = -1;
+ ASSERT(!callFrame->hasHostCallFrameFlag());
+ CallFrame* callerFrame = callFrame->codeBlock() ? callFrame->trueCallerFrame() : 0;
+ if (!callerFrame || callerFrame == CallFrame::noCaller() || !callerFrame->codeBlock())
+ return callerFrame;
+ bool callframeIsHost = callerFrame->addHostCallFrameFlag() == callFrame->callerFrame();
+ ASSERT(!callerFrame->hasHostCallFrameFlag());
+
+
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+
+ if (callframeIsHost) {
+ // Don't need to deal with inline callframes here as by definition we haven't
+ // inlined a call with an intervening native call frame.
+#if ENABLE(CLASSIC_INTERPRETER)
+ if (!globalData->canUseJIT()) {
+ bytecodeOffset = callerFrame->bytecodeOffsetForNonDFGCode();
+ lineNumber = callerCodeBlock->lineNumberForBytecodeOffset(bytecodeOffset - 1);
+ return callerFrame;
+ }
+#endif
+#if ENABLE(JIT)
+#if ENABLE(DFG_JIT)
+ if (callerCodeBlock && callerCodeBlock->getJITType() == JITCode::DFGJIT)
+ bytecodeOffset = callerCodeBlock->codeOrigin(callerFrame->codeOriginIndexForDFG()).bytecodeIndex;
+ else
+#endif
+ bytecodeOffset = callerFrame->bytecodeOffsetForNonDFGCode();
+#endif
+ } else {
+#if ENABLE(CLASSIC_INTERPRETER)
+ if (!globalData->canUseJIT()) {
+ bytecodeOffset = callerCodeBlock->bytecodeOffset(callFrame->returnVPC());
+ lineNumber = callerCodeBlock->lineNumberForBytecodeOffset(bytecodeOffset - 1);
+ return callerFrame;
+ }
+#endif
+#if ENABLE(JIT)
+ #if ENABLE(DFG_JIT)
+ if (callFrame->isInlineCallFrame()) {
+ InlineCallFrame* icf = callFrame->inlineCallFrame();
+ bytecodeOffset = icf->caller.bytecodeIndex;
+ if (InlineCallFrame* parentCallFrame = icf->caller.inlineCallFrame) {
+ FunctionExecutable* executable = static_cast<FunctionExecutable*>(parentCallFrame->executable.get());
+ CodeBlock* newCodeBlock = executable->baselineCodeBlockFor(parentCallFrame->isCall ? CodeForCall : CodeForConstruct);
+ ASSERT(newCodeBlock);
+ ASSERT(newCodeBlock->instructionCount() > bytecodeOffset);
+ callerCodeBlock = newCodeBlock;
+ }
+ } else if (callerCodeBlock && callerCodeBlock->getJITType() == JITCode::DFGJIT) {
+ CodeOrigin origin;
+ if (!callerCodeBlock->codeOriginForReturn(callFrame->returnPC(), origin))
+ ASSERT_NOT_REACHED();
+ bytecodeOffset = origin.bytecodeIndex;
+ if (InlineCallFrame* icf = origin.inlineCallFrame) {
+ FunctionExecutable* executable = static_cast<FunctionExecutable*>(icf->executable.get());
+ CodeBlock* newCodeBlock = executable->baselineCodeBlockFor(icf->isCall ? CodeForCall : CodeForConstruct);
+ ASSERT(newCodeBlock);
+ ASSERT(newCodeBlock->instructionCount() > bytecodeOffset);
+ callerCodeBlock = newCodeBlock;
+ }
+ } else
+ #endif
+ bytecodeOffset = callerCodeBlock->bytecodeOffset(callerFrame, callFrame->returnPC());
+#endif
+ }
+
+ lineNumber = callerCodeBlock->lineNumberForBytecodeOffset(bytecodeOffset);
+ return callerFrame;
+}
+
+static ALWAYS_INLINE const UString getSourceURLFromCallFrame(CallFrame* callFrame)
+{
+ ASSERT(!callFrame->hasHostCallFrameFlag());
+#if ENABLE(CLASSIC_INTERPRETER)
+#if ENABLE(JIT)
+ if (callFrame->globalData().canUseJIT())
+ return callFrame->codeBlock()->ownerExecutable()->sourceURL();
+#endif
+ return callFrame->codeBlock()->source()->url();
+
+#else
+ return callFrame->codeBlock()->ownerExecutable()->sourceURL();
+#endif
+}
+
+static StackFrameCodeType getStackFrameCodeType(CallFrame* callFrame)
+{
+ ASSERT(!callFrame->hasHostCallFrameFlag());
+
+ switch (callFrame->codeBlock()->codeType()) {
+ case EvalCode:
+ return StackFrameEvalCode;
+ case FunctionCode:
+ return StackFrameFunctionCode;
+ case GlobalCode:
+ return StackFrameGlobalCode;
+ }
+ ASSERT_NOT_REACHED();
+ return StackFrameGlobalCode;
+}
+
+void Interpreter::getStackTrace(JSGlobalData* globalData, int line, Vector<StackFrame>& results)
+{
+ CallFrame* callFrame = globalData->topCallFrame->removeHostCallFrameFlag()->trueCallFrameFromVMCode();
+ if (!callFrame || callFrame == CallFrame::noCaller())
+ return;
+
+ if (line == -1)
+ line = getLineNumberForCallFrame(callFrame);
+
+ while (callFrame && callFrame != CallFrame::noCaller()) {
+ UString sourceURL;
+ if (callFrame->codeBlock()) {
+ sourceURL = getSourceURLFromCallFrame(callFrame);
+ StackFrame s = { Strong<JSObject>(*globalData, callFrame->callee()), getStackFrameCodeType(callFrame), Strong<ExecutableBase>(*globalData, callFrame->codeBlock()->ownerExecutable()), line, sourceURL};
+ results.append(s);
+ } else {
+ StackFrame s = { Strong<JSObject>(*globalData, callFrame->callee()), StackFrameNativeCode, Strong<ExecutableBase>(), -1, UString()};
+ results.append(s);
+ }
+ callFrame = getCallerInfo(globalData, callFrame, line);
+ }
+}
+
NEVER_INLINE HandlerInfo* Interpreter::throwException(CallFrame*& callFrame, JSValue& exceptionValue, unsigned bytecodeOffset)
{
CodeBlock* codeBlock = callFrame->codeBlock();
@@ -808,7 +983,9 @@ NEVER_INLINE HandlerInfo* Interpreter::throwException(CallFrame*& callFrame, JSV
// FIXME: should only really be adding these properties to VM generated exceptions,
// but the inspector currently requires these for all thrown objects.
- addErrorInfo(callFrame, exception, codeBlock->lineNumberForBytecodeOffset(bytecodeOffset), codeBlock->ownerExecutable()->source());
+ Vector<StackFrame> stackTrace;
+ getStackTrace(&callFrame->globalData(), codeBlock->lineNumberForBytecodeOffset(bytecodeOffset), stackTrace);
+ addErrorInfo(callFrame, exception, codeBlock->lineNumberForBytecodeOffset(bytecodeOffset), codeBlock->ownerExecutable()->source(), stackTrace);
}
isInterrupt = isInterruptedExecutionException(exception) || isTerminatedExecutionException(exception);
@@ -1280,15 +1457,15 @@ JSValue Interpreter::execute(CallFrameClosure& closure)
m_reentryDepth++;
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (closure.newCallFrame->globalData().canUseJIT())
#endif
result = closure.functionExecutable->generatedJITCodeForCall().execute(&m_registerFile, closure.newCallFrame, closure.globalData);
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
else
#endif
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
result = privateExecute(Normal, &m_registerFile, closure.newCallFrame);
#endif
m_reentryDepth--;
@@ -1386,15 +1563,15 @@ JSValue Interpreter::execute(EvalExecutable* eval, CallFrame* callFrame, JSValue
m_reentryDepth++;
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (callFrame->globalData().canUseJIT())
#endif
result = eval->generatedJITCode().execute(&m_registerFile, newCallFrame, scopeChain->globalData);
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
else
#endif
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
result = privateExecute(Normal, &m_registerFile, newCallFrame);
#endif
m_reentryDepth--;
@@ -1437,7 +1614,7 @@ NEVER_INLINE void Interpreter::debug(CallFrame* callFrame, DebugHookID debugHook
}
}
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
NEVER_INLINE ScopeChainNode* Interpreter::createExceptionScope(CallFrame* callFrame, const Instruction* vPC)
{
int dst = vPC[1].u.operand;
@@ -1669,35 +1846,35 @@ NEVER_INLINE void Interpreter::uncacheGetByID(CodeBlock*, Instruction* vPC)
vPC[4] = 0;
}
-#endif // ENABLE(INTERPRETER)
+#endif // ENABLE(CLASSIC_INTERPRETER)
JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFile, CallFrame* callFrame)
{
// One-time initialization of our address tables. We have to put this code
// here because our labels are only in scope inside this function.
if (UNLIKELY(flag == InitializeAndReturn)) {
- #if ENABLE(COMPUTED_GOTO_INTERPRETER)
+ #if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
#define LIST_OPCODE_LABEL(id, length) &&id,
static Opcode labels[] = { FOR_EACH_OPCODE_ID(LIST_OPCODE_LABEL) };
for (size_t i = 0; i < WTF_ARRAY_LENGTH(labels); ++i)
m_opcodeTable[i] = labels[i];
#undef LIST_OPCODE_LABEL
- #endif // ENABLE(COMPUTED_GOTO_INTERPRETER)
+ #endif // ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
return JSValue();
}
ASSERT(m_initialized);
- ASSERT(m_enabled);
+ ASSERT(m_classicEnabled);
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
// Mixing Interpreter + JIT is not supported.
if (callFrame->globalData().canUseJIT())
#endif
ASSERT_NOT_REACHED();
#endif
-#if !ENABLE(INTERPRETER)
+#if !ENABLE(CLASSIC_INTERPRETER)
UNUSED_PARAM(registerFile);
UNUSED_PARAM(callFrame);
return JSValue();
@@ -1743,20 +1920,31 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi
#define SAMPLE(codeBlock, vPC)
#endif
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#define UPDATE_BYTECODE_OFFSET() \
+ do {\
+ callFrame->setBytecodeOffsetForNonDFGCode(vPC - codeBlock->instructions().data() + 1);\
+ } while (0)
+
+#if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
#define NEXT_INSTRUCTION() SAMPLE(codeBlock, vPC); goto *vPC->u.opcode
#if ENABLE(OPCODE_STATS)
- #define DEFINE_OPCODE(opcode) opcode: OpcodeStats::recordInstruction(opcode);
+ #define DEFINE_OPCODE(opcode) \
+ opcode:\
+ OpcodeStats::recordInstruction(opcode);\
+ UPDATE_BYTECODE_OFFSET();
#else
- #define DEFINE_OPCODE(opcode) opcode:
+ #define DEFINE_OPCODE(opcode) opcode: UPDATE_BYTECODE_OFFSET();
#endif
NEXT_INSTRUCTION();
#else
#define NEXT_INSTRUCTION() SAMPLE(codeBlock, vPC); goto interpreterLoopStart
#if ENABLE(OPCODE_STATS)
- #define DEFINE_OPCODE(opcode) case opcode: OpcodeStats::recordInstruction(opcode);
+ #define DEFINE_OPCODE(opcode) \
+ case opcode:\
+ OpcodeStats::recordInstruction(opcode);\
+ UPDATE_BYTECODE_OFFSET();
#else
- #define DEFINE_OPCODE(opcode) case opcode:
+ #define DEFINE_OPCODE(opcode) case opcode: UPDATE_BYTECODE_OFFSET();
#endif
while (1) { // iterator loop begins
interpreterLoopStart:;
@@ -3327,6 +3515,8 @@ skip_id_custom_self:
#if USE(GCC_COMPUTED_GOTO_WORKAROUND)
skip_put_by_id:
#endif
+ DEFINE_OPCODE(op_put_by_id_transition_direct)
+ DEFINE_OPCODE(op_put_by_id_transition_normal)
DEFINE_OPCODE(op_put_by_id_transition) {
/* op_put_by_id_transition base(r) property(id) value(r) oldStructure(sID) newStructure(sID) structureChain(chain) offset(n) direct(b)
@@ -4883,7 +5073,7 @@ skip_id_custom_self:
vPC += target;
NEXT_INSTRUCTION();
}
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
// Appease GCC
goto *(&&skip_new_scope);
#endif
@@ -4899,7 +5089,7 @@ skip_id_custom_self:
vPC += OPCODE_LENGTH(op_push_new_scope);
NEXT_INSTRUCTION();
}
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
skip_new_scope:
#endif
DEFINE_OPCODE(op_catch) {
@@ -5089,14 +5279,14 @@ skip_id_custom_self:
NEXT_INSTRUCTION();
}
}
-#if !ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if !ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
} // iterator loop ends
#endif
#undef NEXT_INSTRUCTION
#undef DEFINE_OPCODE
#undef CHECK_FOR_EXCEPTION
#undef CHECK_FOR_TIMEOUT
-#endif // ENABLE(INTERPRETER)
+#endif // ENABLE(CLASSIC_INTERPRETER)
}
JSValue Interpreter::retrieveArgumentsFromVMCode(CallFrame* callFrame, JSFunction* function) const
@@ -5155,15 +5345,15 @@ void Interpreter::retrieveLastCaller(CallFrame* callFrame, int& lineNumber, intp
if (!callerCodeBlock)
return;
unsigned bytecodeOffset = 0;
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (!callerFrame->globalData().canUseJIT())
bytecodeOffset = callerCodeBlock->bytecodeOffset(callFrame->returnVPC());
#if ENABLE(JIT)
else
- bytecodeOffset = callerCodeBlock->bytecodeOffset(callFrame->returnPC());
+ bytecodeOffset = callerCodeBlock->bytecodeOffset(callerFrame, callFrame->returnPC());
#endif
#else
- bytecodeOffset = callerCodeBlock->bytecodeOffset(callFrame->returnPC());
+ bytecodeOffset = callerCodeBlock->bytecodeOffset(callerFrame, callFrame->returnPC());
#endif
lineNumber = callerCodeBlock->lineNumberForBytecodeOffset(bytecodeOffset - 1);
sourceID = callerCodeBlock->ownerExecutable()->sourceID();
diff --git a/Source/JavaScriptCore/interpreter/Interpreter.h b/Source/JavaScriptCore/interpreter/Interpreter.h
index 884c4248e..51881a565 100644
--- a/Source/JavaScriptCore/interpreter/Interpreter.h
+++ b/Source/JavaScriptCore/interpreter/Interpreter.h
@@ -31,8 +31,10 @@
#include "ArgList.h"
#include "JSCell.h"
+#include "JSFunction.h"
#include "JSValue.h"
#include "JSObject.h"
+#include "LLIntData.h"
#include "Opcode.h"
#include "RegisterFile.h"
@@ -42,9 +44,10 @@ namespace JSC {
class CodeBlock;
class EvalExecutable;
+ class ExecutableBase;
class FunctionExecutable;
- class JSFunction;
class JSGlobalObject;
+ class LLIntOffsetsExtractor;
class ProgramExecutable;
class Register;
class ScopeChainNode;
@@ -62,6 +65,63 @@ namespace JSC {
WillExecuteStatement
};
+ enum StackFrameCodeType {
+ StackFrameGlobalCode,
+ StackFrameEvalCode,
+ StackFrameFunctionCode,
+ StackFrameNativeCode
+ };
+
+ struct StackFrame {
+ Strong<JSObject> callee;
+ StackFrameCodeType codeType;
+ Strong<ExecutableBase> executable;
+ int line;
+ UString sourceURL;
+ UString toString(CallFrame* callFrame) const
+ {
+ bool hasSourceURLInfo = !sourceURL.isNull() && !sourceURL.isEmpty();
+ bool hasLineInfo = line > -1;
+ String traceLine;
+ JSObject* stackFrameCallee = callee.get();
+
+ switch (codeType) {
+ case StackFrameEvalCode:
+ if (hasSourceURLInfo) {
+ traceLine = hasLineInfo ? String::format("eval code@%s:%d", sourceURL.ascii().data(), line)
+ : String::format("eval code@%s", sourceURL.ascii().data());
+ } else
+ traceLine = String::format("eval code");
+ break;
+ case StackFrameNativeCode: {
+ if (callee) {
+ UString functionName = getCalculatedDisplayName(callFrame, stackFrameCallee);
+ traceLine = String::format("%s@[native code]", functionName.ascii().data());
+ } else
+ traceLine = "[native code]";
+ break;
+ }
+ case StackFrameFunctionCode: {
+ UString functionName = getCalculatedDisplayName(callFrame, stackFrameCallee);
+ if (hasSourceURLInfo) {
+ traceLine = hasLineInfo ? String::format("%s@%s:%d", functionName.ascii().data(), sourceURL.ascii().data(), line)
+ : String::format("%s@%s", functionName.ascii().data(), sourceURL.ascii().data());
+ } else
+ traceLine = String::format("%s", functionName.ascii().data());
+ break;
+ }
+ case StackFrameGlobalCode:
+ if (hasSourceURLInfo) {
+ traceLine = hasLineInfo ? String::format("global code@%s:%d", sourceURL.ascii().data(), line)
+ : String::format("global code@%s", sourceURL.ascii().data());
+ } else
+ traceLine = String::format("global code");
+
+ }
+ return traceLine.impl();
+ }
+ };
+
class TopCallFrameSetter {
public:
TopCallFrameSetter(JSGlobalData& global, CallFrame* callFrame)
@@ -90,29 +150,31 @@ namespace JSC {
}
};
-#if PLATFORM(IOS)
// We use a smaller reentrancy limit on iPhone because of the high amount of
// stack space required on the web thread.
- enum { MaxLargeThreadReentryDepth = 93, MaxSmallThreadReentryDepth = 16 };
+#if PLATFORM(IOS)
+ enum { MaxLargeThreadReentryDepth = 64, MaxSmallThreadReentryDepth = 16 };
#else
enum { MaxLargeThreadReentryDepth = 256, MaxSmallThreadReentryDepth = 16 };
#endif // PLATFORM(IOS)
class Interpreter {
WTF_MAKE_FAST_ALLOCATED;
- friend class JIT;
friend class CachedCall;
+ friend class LLIntOffsetsExtractor;
+ friend class JIT;
public:
Interpreter();
+ ~Interpreter();
- void initialize(bool canUseJIT);
+ void initialize(LLInt::Data*, bool canUseJIT);
RegisterFile& registerFile() { return m_registerFile; }
Opcode getOpcode(OpcodeID id)
{
ASSERT(m_initialized);
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER) || ENABLE(LLINT)
return m_opcodeTable[id];
#else
return id;
@@ -122,9 +184,12 @@ namespace JSC {
OpcodeID getOpcodeID(Opcode opcode)
{
ASSERT(m_initialized);
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if ENABLE(LLINT)
+ ASSERT(isOpcode(opcode));
+ return m_opcodeIDTable.get(opcode);
+#elif ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
ASSERT(isOpcode(opcode));
- if (!m_enabled)
+ if (!m_classicEnabled)
return static_cast<OpcodeID>(bitwise_cast<uintptr_t>(opcode));
return m_opcodeIDTable.get(opcode);
@@ -132,6 +197,11 @@ namespace JSC {
return opcode;
#endif
}
+
+ bool classicEnabled()
+ {
+ return m_classicEnabled;
+ }
bool isOpcode(Opcode);
@@ -151,6 +221,8 @@ namespace JSC {
NEVER_INLINE HandlerInfo* throwException(CallFrame*&, JSValue&, unsigned bytecodeOffset);
NEVER_INLINE void debug(CallFrame*, DebugHookID, int firstLine, int lastLine);
+ static const UString getTraceLine(CallFrame*, StackFrameCodeType, const UString&, int);
+ JS_EXPORT_PRIVATE static void getStackTrace(JSGlobalData*, int line, Vector<StackFrame>& results);
void dumpSampleData(ExecState* exec);
void startSampling();
@@ -162,7 +234,7 @@ namespace JSC {
void endRepeatCall(CallFrameClosure&);
JSValue execute(CallFrameClosure&);
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
NEVER_INLINE bool resolve(CallFrame*, Instruction*, JSValue& exceptionValue);
NEVER_INLINE bool resolveSkip(CallFrame*, Instruction*, JSValue& exceptionValue);
NEVER_INLINE bool resolveGlobal(CallFrame*, Instruction*, JSValue& exceptionValue);
@@ -176,7 +248,7 @@ namespace JSC {
void uncacheGetByID(CodeBlock*, Instruction* vPC);
void tryCachePutByID(CallFrame*, CodeBlock*, Instruction*, JSValue baseValue, const PutPropertySlot&);
void uncachePutByID(CodeBlock*, Instruction* vPC);
-#endif // ENABLE(INTERPRETER)
+#endif // ENABLE(CLASSIC_INTERPRETER)
NEVER_INLINE bool unwindCallFrame(CallFrame*&, JSValue, unsigned& bytecodeOffset, CodeBlock*&);
@@ -199,7 +271,10 @@ namespace JSC {
RegisterFile m_registerFile;
-#if ENABLE(COMPUTED_GOTO_INTERPRETER)
+#if ENABLE(LLINT)
+ Opcode* m_opcodeTable; // Maps OpcodeID => Opcode for compiling
+ HashMap<Opcode, OpcodeID> m_opcodeIDTable; // Maps Opcode => OpcodeID for decompiling
+#elif ENABLE(COMPUTED_GOTO_CLASSIC_INTERPRETER)
Opcode m_opcodeTable[numOpcodeIDs]; // Maps OpcodeID => Opcode for compiling
HashMap<Opcode, OpcodeID> m_opcodeIDTable; // Maps Opcode => OpcodeID for decompiling
#endif
@@ -207,7 +282,7 @@ namespace JSC {
#if !ASSERT_DISABLED
bool m_initialized;
#endif
- bool m_enabled;
+ bool m_classicEnabled;
};
// This value must not be an object that would require this conversion (WebCore's global object).
diff --git a/Source/JavaScriptCore/interpreter/RegisterFile.h b/Source/JavaScriptCore/interpreter/RegisterFile.h
index e45b869a1..21ad7fbae 100644
--- a/Source/JavaScriptCore/interpreter/RegisterFile.h
+++ b/Source/JavaScriptCore/interpreter/RegisterFile.h
@@ -39,6 +39,7 @@ namespace JSC {
class ConservativeRoots;
class DFGCodeBlocks;
+ class LLIntOffsetsExtractor;
class RegisterFile {
WTF_MAKE_NONCOPYABLE(RegisterFile);
@@ -81,6 +82,8 @@ namespace JSC {
}
private:
+ friend class LLIntOffsetsExtractor;
+
bool growSlowCase(Register*);
void releaseExcessCapacity();
void addToCommittedByteCount(long);
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index bc8b816c8..7520913d0 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -89,12 +89,12 @@ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
}
-#if ENABLE(JIT) && ENABLE(ASSEMBLER)
-
namespace JSC {
typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
+#if ENABLE(JIT) && ENABLE(ASSEMBLER)
+
class ExecutableAllocator {
enum ProtectionSetting { Writable, Executable };
@@ -235,8 +235,8 @@ private:
#endif
};
-} // namespace JSC
-
#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
+} // namespace JSC
+
#endif // !defined(ExecutableAllocator)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 3fe631e3b..37a57e8b7 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -59,7 +59,7 @@ public:
: MetaAllocator(32) // round up all allocations to 32 bytes
{
m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-#if !ENABLE(INTERPRETER)
+#if !ENABLE(CLASSIC_INTERPRETER)
if (!m_reservation)
CRASH();
#endif
diff --git a/Source/JavaScriptCore/heap/BumpSpace.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
index 4eb0284dd..924bc7671 100644
--- a/Source/JavaScriptCore/heap/BumpSpace.cpp
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,26 +24,17 @@
*/
#include "config.h"
-#include "BumpSpace.h"
+#include "HostCallReturnValue.h"
-#include "BumpSpaceInlineMethods.h"
+#include "CallFrame.h"
+#include "InlineASM.h"
+#include "JSObject.h"
+#include "JSValueInlineMethods.h"
+#include "ScopeChain.h"
namespace JSC {
-CheckedBoolean BumpSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
-{
- if (isOversize(bytes))
- return tryAllocateOversize(bytes, outPtr);
-
- m_totalMemoryUtilized += static_cast<size_t>(static_cast<char*>(m_currentBlock->m_offset) - m_currentBlock->m_payload);
- if (!addNewBlock()) {
- *outPtr = 0;
- return false;
- }
- m_toSpaceFilter.add(reinterpret_cast<Bits>(m_currentBlock));
- m_toSpaceSet.add(m_currentBlock);
- *outPtr = allocateFromBlock(m_currentBlock, bytes);
- return true;
-}
+// Nothing to see here.
} // namespace JSC
+
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
new file mode 100644
index 000000000..12fe10b10
--- /dev/null
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef HostCallReturnValue_h
+#define HostCallReturnValue_h
+
+#include "JSValue.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/Platform.h>
+
+// Unfortunately this only works on GCC-like compilers. And it's currently only used
+// by LLInt and DFG, which also are restricted to GCC-like compilers. We should
+// probably fix that at some point.
+#if COMPILER(GCC)
+
+#if CALLING_CONVENTION_IS_STDCALL
+#define HOST_CALL_RETURN_VALUE_OPTION CDECL
+#else
+#define HOST_CALL_RETURN_VALUE_OPTION
+#endif
+
+namespace JSC {
+
+extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue();
+
+// This is a public declaration only to convince CLANG not to elide it.
+extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*);
+
+inline void initializeHostCallReturnValue()
+{
+ getHostCallReturnValueWithExecState(0);
+}
+
+}
+
+#else // COMPILER(GCC)
+
+namespace JSC {
+inline void initializeHostCallReturnValue() { }
+}
+
+#endif // COMPILER(GCC)
+
+#endif // HostCallReturnValue_h
+
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index c8584a316..2adc596ce 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -219,7 +219,7 @@ void JIT::privateCompileMainPass()
m_labels[m_bytecodeOffset] = label();
#if ENABLE(JIT_VERBOSE)
- printf("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+ dataLog("Old JIT emitting code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
@@ -325,6 +325,8 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_profile_will_call)
DEFINE_OP(op_push_new_scope)
DEFINE_OP(op_push_scope)
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
DEFINE_OP(op_put_by_id)
DEFINE_OP(op_put_by_index)
DEFINE_OP(op_put_by_val)
@@ -429,7 +431,7 @@ void JIT::privateCompileSlowCases()
#endif
#if ENABLE(JIT_VERBOSE)
- printf("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
+ dataLog("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
#endif
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
@@ -486,6 +488,8 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_post_inc)
DEFINE_SLOWCASE_OP(op_pre_dec)
DEFINE_SLOWCASE_OP(op_pre_inc)
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
DEFINE_SLOWCASE_OP(op_resolve_global)
@@ -525,6 +529,10 @@ void JIT::privateCompileSlowCases()
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
{
+#if ENABLE(JIT_VERBOSE_OSR)
+ printf("Compiling JIT code!\n");
+#endif
+
#if ENABLE(VALUE_PROFILER)
m_canBeOptimized = m_codeBlock->canCompileWithDFG();
#endif
@@ -693,8 +701,12 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
}
-#if ENABLE(DFG_JIT)
- if (m_canBeOptimized) {
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
+ if (m_canBeOptimized
+#if ENABLE(LLINT)
+ || true
+#endif
+ ) {
CompactJITCodeMap::Encoder jitCodeMapEncoder;
for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
if (m_labels[bytecodeOffset].isSet())
@@ -710,7 +722,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
CodeRef result = patchBuffer.finalizeCode();
#if ENABLE(JIT_VERBOSE)
- printf("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
+ dataLog("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif
return JITCode(result, JITCode::BaselineJIT);
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 8dd332893..a2bc4272a 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -335,7 +335,7 @@ namespace JSC {
void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
- template<typename ClassType, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
+ template<typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
void emitAllocateJSFunction(FunctionExecutable*, RegisterID scopeChain, RegisterID result, RegisterID storagePtr);
@@ -500,7 +500,7 @@ namespace JSC {
#if ENABLE(OPCODE_SAMPLING)
#error "OPCODE_SAMPLING is not yet supported"
#else
- static const int patchOffsetGetByIdSlowCaseCall = 56;
+ static const int patchOffsetGetByIdSlowCaseCall = 64;
#endif
static const int patchOffsetOpCallCompareToJump = 32;
static const int patchOffsetMethodCheckProtoObj = 32;
@@ -518,7 +518,7 @@ namespace JSC {
#if ENABLE(OPCODE_SAMPLING)
#error "OPCODE_SAMPLING is not yet supported"
#else
- static const int patchOffsetGetByIdSlowCaseCall = 56;
+ static const int patchOffsetGetByIdSlowCaseCall = 64;
#endif
static const int patchOffsetOpCallCompareToJump = 32;
static const int patchOffsetMethodCheckProtoObj = 32;
diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
index f63c4a1a8..3ae5ff234 100644
--- a/Source/JavaScriptCore/jit/JITCode.h
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -48,7 +48,7 @@ namespace JSC {
JITCode() { }
#endif
public:
- enum JITType { HostCallThunk, BaselineJIT, DFGJIT };
+ enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT };
static JITType bottomTierJIT()
{
@@ -66,8 +66,19 @@ namespace JSC {
return DFGJIT;
}
+ static bool isOptimizingJIT(JITType jitType)
+ {
+ return jitType == DFGJIT;
+ }
+
+ static bool isBaselineCode(JITType jitType)
+ {
+ return jitType == InterpreterThunk || jitType == BaselineJIT;
+ }
+
#if ENABLE(JIT)
JITCode()
+ : m_jitType(None)
{
}
@@ -75,6 +86,7 @@ namespace JSC {
: m_ref(ref)
, m_jitType(jitType)
{
+ ASSERT(jitType != None);
}
bool operator !() const
diff --git a/Source/JavaScriptCore/jit/JITDriver.h b/Source/JavaScriptCore/jit/JITDriver.h
index 4b8df4751..b204c7737 100644
--- a/Source/JavaScriptCore/jit/JITDriver.h
+++ b/Source/JavaScriptCore/jit/JITDriver.h
@@ -33,15 +33,21 @@
#include "BytecodeGenerator.h"
#include "DFGDriver.h"
#include "JIT.h"
+#include "LLIntEntrypoints.h"
namespace JSC {
template<typename CodeBlockType>
inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType)
{
+ if (jitType == codeBlock->getJITType())
+ return true;
+
if (!globalData.canUseJIT())
return true;
+ codeBlock->unlinkIncomingCalls();
+
bool dfgCompiled = false;
if (jitType == JITCode::DFGJIT)
dfgCompiled = DFG::tryCompile(globalData, codeBlock.get(), jitCode);
@@ -62,9 +68,14 @@ inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockTy
inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, SharedSymbolTable*& symbolTable, JITCode::JITType jitType)
{
+ if (jitType == codeBlock->getJITType())
+ return true;
+
if (!globalData.canUseJIT())
return true;
+ codeBlock->unlinkIncomingCalls();
+
bool dfgCompiled = false;
if (jitType == JITCode::DFGJIT)
dfgCompiled = DFG::tryCompileFunction(globalData, codeBlock.get(), jitCode, jitCodeWithArityCheck);
@@ -79,7 +90,6 @@ inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<Fun
}
jitCode = JIT::compile(&globalData, codeBlock.get(), &jitCodeWithArityCheck);
}
-
codeBlock->setJITCode(jitCode, jitCodeWithArityCheck);
return true;
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index 24baca41b..2edd3408f 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -64,7 +64,7 @@ ExceptionHandler genericThrow(JSGlobalData* globalData, ExecState* callFrame, JS
ExceptionHandler jitThrow(JSGlobalData* globalData, ExecState* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
{
- return genericThrow(globalData, callFrame, exceptionValue, callFrame->codeBlock()->bytecodeOffset(faultLocation));
+ return genericThrow(globalData, callFrame, exceptionValue, callFrame->codeBlock()->bytecodeOffset(callFrame, faultLocation));
}
}
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index dfcfbd499..e0310569d 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -265,8 +265,13 @@ ALWAYS_INLINE void JIT::restoreArgumentReference()
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
- if (m_bytecodeOffset)
+ if (m_bytecodeOffset) {
+#if USE(JSVALUE32_64)
+ storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
+#else
store32(Imm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
+#endif
+ }
storePtr(callFrameRegister, &m_globalData->topCallFrame);
}
@@ -402,9 +407,13 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
-template <typename ClassType, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
+template <typename ClassType, bool destructor, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, RegisterID result, RegisterID storagePtr)
{
- MarkedAllocator* allocator = &m_globalData->heap.allocatorForObject(sizeof(ClassType));
+ MarkedAllocator* allocator = 0;
+ if (destructor)
+ allocator = &m_globalData->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
+ else
+ allocator = &m_globalData->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
loadPtr(&allocator->m_firstFreeCell, result);
addSlowCase(branchTestPtr(Zero, result));
@@ -428,12 +437,12 @@ template <typename ClassType, typename StructureType> inline void JIT::emitAlloc
template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
{
- emitAllocateBasicJSObject<JSFinalObject>(structure, result, scratch);
+ emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
}
inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
{
- emitAllocateBasicJSObject<JSFunction>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);
+ emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr);
// store the function's scope chain
storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));
@@ -676,6 +685,9 @@ inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, Register
m_mappedVirtualRegisterIndex = virtualRegisterIndex;
m_mappedTag = tag;
m_mappedPayload = payload;
+
+ ASSERT(!canBeOptimized() || m_mappedPayload == regT0);
+ ASSERT(!canBeOptimized() || m_mappedTag == regT1);
}
inline void JIT::unmap(RegisterID registerID)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 8a2077e47..bc53d2cd8 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -80,7 +80,8 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Also initialize ReturnPC for use by lazy linking and exceptions.
preserveReturnAddressAfterCall(regT3);
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callLazyLinkCall = call();
restoreReturnAddressBeforeReturn(regT3);
@@ -99,7 +100,8 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Also initialize ReturnPC for use by lazy linking and exeptions.
preserveReturnAddressAfterCall(regT3);
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callLazyLinkConstruct = call();
restoreReturnAddressBeforeReturn(regT3);
@@ -118,6 +120,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callCompileCall = call();
restoreReturnAddressBeforeReturn(regT3);
@@ -140,6 +143,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callCompileConstruct = call();
restoreReturnAddressBeforeReturn(regT3);
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 99594c3f1..1a09302cf 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -79,7 +79,8 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Also initialize ReturnPC for use by lazy linking and exceptions.
preserveReturnAddressAfterCall(regT3);
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callLazyLinkCall = call();
restoreReturnAddressBeforeReturn(regT3);
@@ -98,7 +99,8 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
// Also initialize ReturnPC for use by lazy linking and exeptions.
preserveReturnAddressAfterCall(regT3);
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callLazyLinkConstruct = call();
restoreReturnAddressBeforeReturn(regT3);
@@ -117,6 +119,8 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock1 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callCompileCall = call();
restoreReturnAddressBeforeReturn(regT3);
@@ -139,6 +143,8 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
Jump hasCodeBlock2 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
restoreArgumentReference();
Call callCompileConstruct = call();
restoreReturnAddressBeforeReturn(regT3);
@@ -348,7 +354,8 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
// Set the return address.
move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
restoreReturnAddressBeforeReturn(regT1);
@@ -484,7 +491,8 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
+
+ storePtr(callFrameRegister, &m_globalData->topCallFrame);
// Set the return address.
move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
restoreReturnAddressBeforeReturn(regT1);
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 9fa29e2d9..99c038e55 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -526,8 +526,16 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
}
}
- Call callTarget;
-
+ // If we succeed in all of our checks, and the code was optimizable, then make sure we
+ // decrement the rare case counter.
+#if ENABLE(VALUE_PROFILER)
+ if (m_codeBlock->canCompileWithDFG()) {
+ sub32(
+ TrustedImm32(1),
+ AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
+ }
+#endif
+
// emit a call only if storage realloc is needed
bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
if (willNeedStorageRealloc) {
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 2c81a5ff6..1ee2915dc 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -493,6 +493,16 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
testPrototype((*it)->storedPrototype(), failureCases);
}
+ // If we succeed in all of our checks, and the code was optimizable, then make sure we
+ // decrement the rare case counter.
+#if ENABLE(VALUE_PROFILER)
+ if (m_codeBlock->canCompileWithDFG()) {
+ sub32(
+ TrustedImm32(1),
+ AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
+ }
+#endif
+
// Reallocate property storage if needed.
Call callTarget;
bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index 386d0dfa1..a0a816505 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -1446,6 +1446,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
JSValue baseValue = stackFrame.args[0].jsValue();
ASSERT(baseValue.isObject());
+
asObject(baseValue)->putDirect(callFrame->globalData(), ident, stackFrame.args[2].jsValue(), slot);
CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
@@ -1931,16 +1932,16 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
unsigned bytecodeIndex = stackFrame.args[0].int32();
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Entered optimize_from_loop with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->jitExecuteCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+ dataLog("%p: Entered optimize_from_loop with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock, codeBlock->jitExecuteCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
#endif
if (codeBlock->hasOptimizedReplacement()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Considering loop OSR into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+ dataLog("Considering loop OSR into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
#endif
if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement());
+ dataLog("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement());
#endif
codeBlock->reoptimize();
return;
@@ -1948,7 +1949,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
} else {
if (!codeBlock->shouldOptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Delaying optimization for %p (in loop) because of insufficient profiling.\n", codeBlock);
+ dataLog("Delaying optimization for %p (in loop) because of insufficient profiling.\n", codeBlock);
#endif
return;
}
@@ -1958,14 +1959,14 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
JSObject* error = codeBlock->compileOptimized(callFrame, scopeChain);
#if ENABLE(JIT_VERBOSE_OSR)
if (error)
- fprintf(stderr, "WARNING: optimized compilation from loop failed.\n");
+ dataLog("WARNING: optimized compilation from loop failed.\n");
#else
UNUSED_PARAM(error);
#endif
if (codeBlock->replacement() == codeBlock) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Optimizing %p from loop failed.\n", codeBlock);
+ dataLog("Optimizing %p from loop failed.\n", codeBlock);
#endif
ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
@@ -1979,7 +1980,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
if (void* address = DFG::prepareOSREntry(callFrame, optimizedCodeBlock, bytecodeIndex)) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Optimizing %p from loop succeeded, performing OSR after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+ dataLog("Optimizing %p from loop succeeded, performing OSR after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
#endif
codeBlock->optimizeSoon();
@@ -1989,7 +1990,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
}
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Optimizing %p from loop succeeded, OSR failed, after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+ dataLog("Optimizing %p from loop succeeded, OSR failed, after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
#endif
// Count the OSR failure as a speculation failure. If this happens a lot, then
@@ -1997,7 +1998,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
optimizedCodeBlock->countSpeculationFailure();
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Encountered loop OSR failure into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+ dataLog("Encountered loop OSR failure into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
#endif
// We are a lot more conservative about triggering reoptimization after OSR failure than
@@ -2010,7 +2011,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
// reoptimization trigger.
if (optimizedCodeBlock->shouldReoptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Triggering reoptimization of %p(%p) (in loop after OSR fail).\n", codeBlock, codeBlock->replacement());
+ dataLog("Triggering reoptimization of %p(%p) (in loop after OSR fail).\n", codeBlock, codeBlock->replacement());
#endif
codeBlock->reoptimize();
return;
@@ -2029,20 +2030,20 @@ DEFINE_STUB_FUNCTION(void, optimize_from_ret)
CodeBlock* codeBlock = callFrame->codeBlock();
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Entered optimize_from_ret with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->jitExecuteCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
+ dataLog("Entered optimize_from_ret with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->jitExecuteCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
#endif
if (codeBlock->hasOptimizedReplacement()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Returning from old JIT call frame with optimized replacement %p(%p), with success/fail %u/%u", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
+ dataLog("Returning from old JIT call frame with optimized replacement %p(%p), with success/fail %u/%u", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
CallFrame* callerFrame = callFrame->callerFrame();
if (callerFrame)
- printf(", callerFrame = %p, returnPC = %p, caller code block = %p", callerFrame, callFrame->returnPC().value(), callerFrame->codeBlock());
- printf("\n");
+ dataLog(", callerFrame = %p, returnPC = %p, caller code block = %p", callerFrame, callFrame->returnPC().value(), callerFrame->codeBlock());
+ dataLog("\n");
#endif
if (codeBlock->replacement()->shouldReoptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Triggering reoptimization of %p(%p) (in return).\n", codeBlock, codeBlock->replacement());
+ dataLog("Triggering reoptimization of %p(%p) (in return).\n", codeBlock, codeBlock->replacement());
#endif
codeBlock->reoptimize();
}
@@ -2053,7 +2054,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_ret)
if (!codeBlock->shouldOptimizeNow()) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Delaying optimization for %p (in return) because of insufficient profiling.\n", codeBlock);
+ dataLog("Delaying optimization for %p (in return) because of insufficient profiling.\n", codeBlock);
#endif
return;
}
@@ -2062,11 +2063,11 @@ DEFINE_STUB_FUNCTION(void, optimize_from_ret)
JSObject* error = codeBlock->compileOptimized(callFrame, scopeChain);
if (error)
- fprintf(stderr, "WARNING: optimized compilation from ret failed.\n");
+ dataLog("WARNING: optimized compilation from ret failed.\n");
if (codeBlock->replacement() == codeBlock) {
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Optimizing %p from return failed.\n", codeBlock);
+ dataLog("Optimizing %p from return failed.\n", codeBlock);
#endif
ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
@@ -2077,7 +2078,7 @@ DEFINE_STUB_FUNCTION(void, optimize_from_ret)
ASSERT(codeBlock->replacement()->getJITType() == JITCode::DFGJIT);
#if ENABLE(JIT_VERBOSE_OSR)
- printf("Optimizing %p from return succeeded after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
+ dataLog("Optimizing %p from return succeeded after a delay of %u.\n", codeBlock, codeBlock->optimizationDelayCounter());
#endif
codeBlock->optimizeSoon();
@@ -2186,45 +2187,13 @@ DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
return result;
}
-inline CallFrame* arityCheckFor(CallFrame* callFrame, RegisterFile* registerFile, CodeSpecializationKind kind)
-{
- JSFunction* callee = asFunction(callFrame->callee());
- ASSERT(!callee->isHostFunction());
- CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeFor(kind);
- int argumentCountIncludingThis = callFrame->argumentCountIncludingThis();
-
- // This ensures enough space for the worst case scenario of zero arguments passed by the caller.
- if (!registerFile->grow(callFrame->registers() + newCodeBlock->numParameters() + newCodeBlock->m_numCalleeRegisters))
- return 0;
-
- ASSERT(argumentCountIncludingThis < newCodeBlock->numParameters());
-
- // Too few arguments -- copy call frame and arguments, then fill in missing arguments with undefined.
- size_t delta = newCodeBlock->numParameters() - argumentCountIncludingThis;
- Register* src = callFrame->registers();
- Register* dst = callFrame->registers() + delta;
-
- int i;
- int end = -CallFrame::offsetFor(argumentCountIncludingThis);
- for (i = -1; i >= end; --i)
- dst[i] = src[i];
-
- end -= delta;
- for ( ; i >= end; --i)
- dst[i] = jsUndefined();
-
- CallFrame* newCallFrame = CallFrame::create(dst);
- ASSERT((void*)newCallFrame <= registerFile->end());
- return newCallFrame;
-}
-
DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
{
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- CallFrame* newCallFrame = arityCheckFor(callFrame, stackFrame.registerFile, CodeForCall);
+ CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.registerFile, CodeForCall);
if (!newCallFrame)
return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
@@ -2237,7 +2206,7 @@ DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
CallFrame* callFrame = stackFrame.callFrame;
- CallFrame* newCallFrame = arityCheckFor(callFrame, stackFrame.registerFile, CodeForConstruct);
+ CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.registerFile, CodeForConstruct);
if (!newCallFrame)
return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, createStackOverflowError(callFrame->callerFrame()));
@@ -2314,6 +2283,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
+
JSValue callee = callFrame->calleeAsValue();
CallData callData;
@@ -3600,15 +3570,15 @@ MacroAssemblerCodeRef JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerato
NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, NativeFunction constructor)
{
- std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
- if (!*entry.first->second)
- entry.first->second.set(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic));
- return entry.first->second.get();
+ std::pair<HostFunctionStubMap::iterator, bool> result = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>());
+ if (!result.first->second)
+ result.first->second = PassWeak<NativeExecutable>(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), constructor, NoIntrinsic));
+ return result.first->second.get();
}
NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
{
- std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Weak<NativeExecutable>());
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, PassWeak<NativeExecutable>());
if (!*entry.first->second) {
MacroAssemblerCodeRef code;
if (generator) {
@@ -3618,7 +3588,7 @@ NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFu
code = MacroAssemblerCodeRef();
} else
code = JIT::compileCTINativeCall(globalData, function);
- entry.first->second.set(*globalData, NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic));
+ entry.first->second = PassWeak<NativeExecutable>(*globalData, NativeExecutable::create(*globalData, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct()), callHostFunctionAsConstructor, intrinsic));
}
return entry.first->second.get();
}
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index fe5f522e9..890d99747 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -37,8 +37,6 @@
#include "ThunkGenerators.h"
#include <wtf/HashMap.h>
-#if ENABLE(JIT)
-
namespace JSC {
struct StructureStubInfo;
@@ -263,6 +261,8 @@ namespace JSC {
#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
+#if ENABLE(JIT)
+
#define STUB_ARGS_DECLARATION void** args
#define STUB_ARGS (args)
@@ -456,8 +456,8 @@ extern "C" {
void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
} // extern "C"
-} // namespace JSC
-
#endif // ENABLE(JIT)
+} // namespace JSC
+
#endif // JITStubs_h
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index d54dedc1a..05d1ce5ad 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -26,8 +26,10 @@
#ifndef JSInterfaceJIT_h
#define JSInterfaceJIT_h
+#include "BytecodeConventions.h"
#include "JITCode.h"
#include "JITStubs.h"
+#include "JSString.h"
#include "JSValue.h"
#include "MacroAssembler.h"
#include "RegisterFile.h"
diff --git a/Source/JavaScriptCore/jsc.cpp b/Source/JavaScriptCore/jsc.cpp
index 47ec8c608..9f207c510 100644
--- a/Source/JavaScriptCore/jsc.cpp
+++ b/Source/JavaScriptCore/jsc.cpp
@@ -27,7 +27,9 @@
#include "CurrentTime.h"
#include "ExceptionHelpers.h"
#include "InitializeThreading.h"
+#include "Interpreter.h"
#include "JSArray.h"
+#include "JSCTypedArrayStubs.h"
#include "JSFunction.h"
#include "JSLock.h"
#include "JSString.h"
@@ -78,6 +80,7 @@ static bool fillBufferWithContentsOfFile(const UString& fileName, Vector<char>&
static EncodedJSValue JSC_HOST_CALL functionPrint(ExecState*);
static EncodedJSValue JSC_HOST_CALL functionDebug(ExecState*);
+static EncodedJSValue JSC_HOST_CALL functionJSCStack(ExecState*);
static EncodedJSValue JSC_HOST_CALL functionGC(ExecState*);
#ifndef NDEBUG
static EncodedJSValue JSC_HOST_CALL functionReleaseExecutableMemory(ExecState*);
@@ -184,12 +187,24 @@ protected:
addFunction(globalData, "run", functionRun, 1);
addFunction(globalData, "load", functionLoad, 1);
addFunction(globalData, "checkSyntax", functionCheckSyntax, 1);
+ addFunction(globalData, "jscStack", functionJSCStack, 1);
addFunction(globalData, "readline", functionReadline, 0);
addFunction(globalData, "preciseTime", functionPreciseTime, 0);
#if ENABLE(SAMPLING_FLAGS)
addFunction(globalData, "setSamplingFlags", functionSetSamplingFlags, 1);
addFunction(globalData, "clearSamplingFlags", functionClearSamplingFlags, 1);
#endif
+
+#if ENABLE(COMMANDLINE_TYPEDARRAYS)
+ addConstructableFunction(globalData, "Uint8Array", constructJSUint8Array, 1);
+ addConstructableFunction(globalData, "Uint16Array", constructJSUint16Array, 1);
+ addConstructableFunction(globalData, "Uint32Array", constructJSUint32Array, 1);
+ addConstructableFunction(globalData, "Int8Array", constructJSInt8Array, 1);
+ addConstructableFunction(globalData, "Int16Array", constructJSInt16Array, 1);
+ addConstructableFunction(globalData, "Int32Array", constructJSInt32Array, 1);
+ addConstructableFunction(globalData, "Float32Array", constructJSFloat32Array, 1);
+ addConstructableFunction(globalData, "Float64Array", constructJSFloat64Array, 1);
+#endif
JSObject* array = constructEmptyArray(globalExec());
for (size_t i = 0; i < arguments.size(); ++i)
@@ -202,6 +217,12 @@ protected:
Identifier identifier(globalExec(), name);
putDirect(globalData, identifier, JSFunction::create(globalExec(), this, arguments, identifier, function));
}
+
+ void addConstructableFunction(JSGlobalData& globalData, const char* name, NativeFunction function, unsigned arguments)
+ {
+ Identifier identifier(globalExec(), name);
+ putDirect(globalData, identifier, JSFunction::create(globalExec(), this, arguments, identifier, function, function));
+ }
};
COMPILE_ASSERT(!IsInteger<GlobalObject>::value, WTF_IsInteger_GlobalObject_false);
ASSERT_CLASS_FITS_IN_CELL(GlobalObject);
@@ -252,6 +273,22 @@ EncodedJSValue JSC_HOST_CALL functionDebug(ExecState* exec)
return JSValue::encode(jsUndefined());
}
+EncodedJSValue JSC_HOST_CALL functionJSCStack(ExecState* exec)
+{
+ String trace = "--> Stack trace:\n";
+ Vector<StackFrame> stackTrace;
+ Interpreter::getStackTrace(&exec->globalData(), -1, stackTrace);
+ int i = 0;
+
+ for (Vector<StackFrame>::iterator iter = stackTrace.begin(); iter < stackTrace.end(); iter++) {
+ StackFrame level = *iter;
+ trace += String::format(" %i %s\n", i, level.toString(exec).utf8().data());
+ i++;
+ }
+ fprintf(stderr, "%s", trace.utf8().data());
+ return JSValue::encode(jsUndefined());
+}
+
EncodedJSValue JSC_HOST_CALL functionGC(ExecState* exec)
{
JSLock lock(SilenceAssertionsOnly);
diff --git a/Source/JavaScriptCore/llint/LLIntCommon.h b/Source/JavaScriptCore/llint/LLIntCommon.h
new file mode 100644
index 000000000..6b908eae2
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntCommon.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntCommon_h
+#define LLIntCommon_h
+
+// Print every instruction executed.
+#define LLINT_EXECUTION_TRACING 0
+
+// Print some information for some of the more subtle slow paths.
+#define LLINT_SLOW_PATH_TRACING 0
+
+// Disable inline allocation in the interpreter. This is great if you're changing
+// how the GC allocates.
+#define LLINT_ALWAYS_ALLOCATE_SLOW 0
+
+// Enable OSR into the JIT. Disabling this while the LLInt is enabled effectively
+// turns off all JIT'ing, since in LLInt's parlance, OSR subsumes any form of JIT
+// invocation.
+#if ENABLE(JIT)
+#define LLINT_OSR_TO_JIT 1
+#else
+#define LLINT_OSR_TO_JIT 0
+#endif
+
+#endif // LLIntCommon_h
+
diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp
new file mode 100644
index 000000000..c0fe78142
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntData.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntData.h"
+
+#if ENABLE(LLINT)
+
+#include "BytecodeConventions.h"
+#include "CodeType.h"
+#include "Instruction.h"
+#include "LowLevelInterpreter.h"
+#include "Opcode.h"
+
+namespace JSC { namespace LLInt {
+
+Data::Data()
+ : m_exceptionInstructions(new Instruction[maxOpcodeLength + 1])
+ , m_opcodeMap(new Opcode[numOpcodeIDs])
+{
+ for (int i = 0; i < maxOpcodeLength + 1; ++i)
+ m_exceptionInstructions[i].u.pointer = bitwise_cast<void*>(&llint_throw_from_slow_path_trampoline);
+#define OPCODE_ENTRY(opcode, length) m_opcodeMap[opcode] = bitwise_cast<void*>(&llint_##opcode);
+ FOR_EACH_OPCODE_ID(OPCODE_ENTRY);
+#undef OPCODE_ENTRY
+}
+
+#if COMPILER(CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmissing-noreturn"
+#endif
+void Data::performAssertions(JSGlobalData& globalData)
+{
+ UNUSED_PARAM(globalData);
+
+ // Assertions to match LowLevelInterpreter.asm. If you change any of this code, be
+ // prepared to change LowLevelInterpreter.asm as well!!
+ ASSERT(RegisterFile::CallFrameHeaderSize * 8 == 48);
+ ASSERT(RegisterFile::ArgumentCount * 8 == -48);
+ ASSERT(RegisterFile::CallerFrame * 8 == -40);
+ ASSERT(RegisterFile::Callee * 8 == -32);
+ ASSERT(RegisterFile::ScopeChain * 8 == -24);
+ ASSERT(RegisterFile::ReturnPC * 8 == -16);
+ ASSERT(RegisterFile::CodeBlock * 8 == -8);
+ ASSERT(CallFrame::argumentOffsetIncludingThis(0) == -RegisterFile::CallFrameHeaderSize - 1);
+#if CPU(BIG_ENDIAN)
+ ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 0);
+ ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 4);
+#else
+ ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 4);
+ ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
+#endif
+ ASSERT(JSValue::Int32Tag == -1);
+ ASSERT(JSValue::BooleanTag == -2);
+ ASSERT(JSValue::NullTag == -3);
+ ASSERT(JSValue::UndefinedTag == -4);
+ ASSERT(JSValue::CellTag == -5);
+ ASSERT(JSValue::EmptyValueTag == -6);
+ ASSERT(JSValue::DeletedValueTag == -7);
+ ASSERT(JSValue::LowestTag == -7);
+ ASSERT(StringType == 5);
+ ASSERT(ObjectType == 13);
+ ASSERT(MasqueradesAsUndefined == 1);
+ ASSERT(ImplementsHasInstance == 2);
+ ASSERT(ImplementsDefaultHasInstance == 8);
+ ASSERT(&globalData.heap.allocatorForObjectWithoutDestructor(sizeof(JSFinalObject)) - &globalData.heap.firstAllocatorWithoutDestructors() == 3);
+ ASSERT(FirstConstantRegisterIndex == 0x40000000);
+ ASSERT(GlobalCode == 0);
+ ASSERT(EvalCode == 1);
+ ASSERT(FunctionCode == 2);
+
+ // FIXME: make these assertions less horrible.
+#if !ASSERT_DISABLED
+ Vector<int> testVector;
+ testVector.resize(42);
+ ASSERT(bitwise_cast<size_t*>(&testVector)[0] == 42);
+ ASSERT(bitwise_cast<int**>(&testVector)[1] == testVector.begin());
+#endif
+
+ ASSERT(StringImpl::s_hashFlag8BitBuffer == 64);
+}
+#if COMPILER(CLANG)
+#pragma clang diagnostic pop
+#endif
+
+Data::~Data()
+{
+ delete[] m_exceptionInstructions;
+ delete[] m_opcodeMap;
+}
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h
new file mode 100644
index 000000000..ba8daedf1
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntData.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntData_h
+#define LLIntData_h
+
+#include "Opcode.h"
+#include <wtf/Platform.h>
+
+namespace JSC {
+
+class JSGlobalData;
+struct Instruction;
+
+namespace LLInt {
+
+#if ENABLE(LLINT)
+class Data {
+public:
+ Data();
+ ~Data();
+
+ void performAssertions(JSGlobalData&);
+
+ Instruction* exceptionInstructions()
+ {
+ return m_exceptionInstructions;
+ }
+
+ Opcode* opcodeMap()
+ {
+ return m_opcodeMap;
+ }
+private:
+ Instruction* m_exceptionInstructions;
+ Opcode* m_opcodeMap;
+};
+#else // ENABLE(LLINT)
+
+#if COMPILER(CLANG)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wmissing-noreturn"
+#endif
+
+class Data {
+public:
+ void performAssertions(JSGlobalData&) { }
+
+ Instruction* exceptionInstructions()
+ {
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+
+ Opcode* opcodeMap()
+ {
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+};
+
+#if COMPILER(CLANG)
+#pragma clang diagnostic pop
+#endif
+
+#endif // ENABLE(LLINT)
+
+} } // namespace JSC::LLInt
+
+#endif // LLIntData_h
+
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
new file mode 100644
index 000000000..f610f4b4c
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntEntrypoints.h"
+
+#if ENABLE(LLINT)
+
+#include "JITCode.h"
+#include "JSGlobalData.h"
+#include "LLIntThunks.h"
+#include "LowLevelInterpreter.h"
+
+namespace JSC { namespace LLInt {
+
+void getFunctionEntrypoint(JSGlobalData& globalData, CodeSpecializationKind kind, JITCode& jitCode, MacroAssemblerCodePtr& arityCheck)
+{
+ if (!globalData.canUseJIT()) {
+ if (kind == CodeForCall) {
+ jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_call_prologue))));
+ arityCheck = MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_call_arity_check));
+ return;
+ }
+
+ ASSERT(kind == CodeForConstruct);
+ jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_construct_prologue))));
+ arityCheck = MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_function_for_construct_arity_check));
+ return;
+ }
+
+ if (kind == CodeForCall) {
+ jitCode = JITCode(globalData.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk);
+ arityCheck = globalData.getCTIStub(functionForCallArityCheckThunkGenerator).code();
+ return;
+ }
+
+ ASSERT(kind == CodeForConstruct);
+ jitCode = JITCode(globalData.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk);
+ arityCheck = globalData.getCTIStub(functionForConstructArityCheckThunkGenerator).code();
+}
+
+void getEvalEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
+{
+ if (!globalData.canUseJIT()) {
+ jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_eval_prologue))));
+ return;
+ }
+
+ jitCode = JITCode(globalData.getCTIStub(evalEntryThunkGenerator), JITCode::InterpreterThunk);
+}
+
+void getProgramEntrypoint(JSGlobalData& globalData, JITCode& jitCode)
+{
+ if (!globalData.canUseJIT()) {
+ jitCode = JITCode::HostFunction(MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr(bitwise_cast<void*>(&llint_program_prologue))));
+ return;
+ }
+
+ jitCode = JITCode(globalData.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk);
+}
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.h b/Source/JavaScriptCore/llint/LLIntEntrypoints.h
new file mode 100644
index 000000000..dd7c27798
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoints.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntEntrypoints_h
+#define LLIntEntrypoints_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(LLINT)
+
+#include "CodeSpecializationKind.h"
+
+namespace JSC {
+
+class EvalCodeBlock;
+class JITCode;
+class JSGlobalData;
+class MacroAssemblerCodePtr;
+class MacroAssemblerCodeRef;
+class ProgramCodeBlock;
+
+namespace LLInt {
+
+void getFunctionEntrypoint(JSGlobalData&, CodeSpecializationKind, JITCode&, MacroAssemblerCodePtr& arityCheck);
+void getEvalEntrypoint(JSGlobalData&, JITCode&);
+void getProgramEntrypoint(JSGlobalData&, JITCode&);
+
+inline void getEntrypoint(JSGlobalData& globalData, EvalCodeBlock*, JITCode& jitCode)
+{
+ getEvalEntrypoint(globalData, jitCode);
+}
+
+inline void getEntrypoint(JSGlobalData& globalData, ProgramCodeBlock*, JITCode& jitCode)
+{
+ getProgramEntrypoint(globalData, jitCode);
+}
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
+
+#endif // LLIntEntrypoints_h
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
new file mode 100644
index 000000000..a7d1a965a
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntExceptions.h"
+
+#if ENABLE(LLINT)
+
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "Instruction.h"
+#include "JITExceptions.h"
+#include "LLIntCommon.h"
+#include "LowLevelInterpreter.h"
+
+namespace JSC { namespace LLInt {
+
+void interpreterThrowInCaller(ExecState* exec, ReturnAddressPtr pc)
+{
+ JSGlobalData* globalData = &exec->globalData();
+#if LLINT_SLOW_PATH_TRACING
+ dataLog("Throwing exception %s.\n", globalData->exception.description());
+#endif
+ genericThrow(
+ globalData, exec, globalData->exception,
+ exec->codeBlock()->bytecodeOffset(exec, pc));
+}
+
+Instruction* returnToThrowForThrownException(ExecState* exec)
+{
+ return exec->globalData().llintData.exceptionInstructions();
+}
+
+Instruction* returnToThrow(ExecState* exec, Instruction* pc)
+{
+ JSGlobalData* globalData = &exec->globalData();
+#if LLINT_SLOW_PATH_TRACING
+ dataLog("Throwing exception %s (returnToThrow).\n", globalData->exception.description());
+#endif
+ genericThrow(globalData, exec, globalData->exception, pc - exec->codeBlock()->instructions().begin());
+
+ return globalData->llintData.exceptionInstructions();
+}
+
+void* callToThrow(ExecState* exec, Instruction* pc)
+{
+ JSGlobalData* globalData = &exec->globalData();
+#if LLINT_SLOW_PATH_TRACING
+ dataLog("Throwing exception %s (callToThrow).\n", globalData->exception.description());
+#endif
+ genericThrow(globalData, exec, globalData->exception, pc - exec->codeBlock()->instructions().begin());
+
+ return bitwise_cast<void*>(&llint_throw_during_call_trampoline);
+}
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.h b/Source/JavaScriptCore/llint/LLIntExceptions.h
new file mode 100644
index 000000000..3baa3f4a5
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntExceptions_h
+#define LLIntExceptions_h
+
+#include <wtf/Platform.h>
+#include <wtf/StdLibExtras.h>
+
+#if ENABLE(LLINT)
+
+#include "MacroAssemblerCodeRef.h"
+
+namespace JSC {
+
+class ExecState;
+struct Instruction;
+
+namespace LLInt {
+
+// Throw the currently active exception in the context of the caller's call frame.
+void interpreterThrowInCaller(ExecState* callerFrame, ReturnAddressPtr);
+
+// Tells you where to jump to if you want to return-to-throw, after you've already
+// set up all information needed to throw the exception.
+Instruction* returnToThrowForThrownException(ExecState*);
+
+// Saves the current PC in the global data for safe-keeping, and gives you a PC
+// that you can tell the interpreter to go to, which when advanced between 1
+// and 9 slots will give you an "instruction" that threads to the interpreter's
+// exception handler. Note that if you give it the PC for exception handling,
+// it's smart enough to just return that PC without doing anything else; this
+// lets you thread exception handling through common helper functions used by
+// other helpers.
+Instruction* returnToThrow(ExecState*, Instruction*);
+
+// Use this when you're throwing to a call thunk.
+void* callToThrow(ExecState*, Instruction*);
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
+
+#endif // LLIntExceptions_h
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
new file mode 100644
index 000000000..9fe86fac4
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntOfflineAsmConfig_h
+#define LLIntOfflineAsmConfig_h
+
+#include "LLIntCommon.h"
+#include <wtf/Assertions.h>
+#include <wtf/InlineASM.h>
+#include <wtf/Platform.h>
+
+#if CPU(X86)
+#define OFFLINE_ASM_X86 1
+#else
+#define OFFLINE_ASM_X86 0
+#endif
+
+#if CPU(ARM_THUMB2)
+#define OFFLINE_ASM_ARMv7 1
+#else
+#define OFFLINE_ASM_ARMv7 0
+#endif
+
+#if !ASSERT_DISABLED
+#define OFFLINE_ASM_ASSERT_ENABLED 1
+#else
+#define OFFLINE_ASM_ASSERT_ENABLED 0
+#endif
+
+#if CPU(BIG_ENDIAN)
+#define OFFLINE_ASM_BIG_ENDIAN 1
+#else
+#define OFFLINE_ASM_BIG_ENDIAN 0
+#endif
+
+#if LLINT_OSR_TO_JIT
+#define OFFLINE_ASM_JIT_ENABLED 1
+#else
+#define OFFLINE_ASM_JIT_ENABLED 0
+#endif
+
+#if LLINT_EXECUTION_TRACING
+#define OFFLINE_ASM_EXECUTION_TRACING 1
+#else
+#define OFFLINE_ASM_EXECUTION_TRACING 0
+#endif
+
+#if LLINT_ALWAYS_ALLOCATE_SLOW
+#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 1
+#else
+#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 0
+#endif
+
+#if CPU(ARM_THUMB2)
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(name) "\n" \
+ ".thumb\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#else
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(name) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#endif
+
+#endif // LLIntOfflineAsmConfig_h
diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
new file mode 100644
index 000000000..5b76cd521
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "CodeBlock.h"
+#include "Executable.h"
+#include "Heap.h"
+#include "Interpreter.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "JSGlobalData.h"
+#include "JSGlobalObject.h"
+#include "JSObject.h"
+#include "JSPropertyNameIterator.h"
+#include "JSString.h"
+#include "JSTypeInfo.h"
+#include "JSVariableObject.h"
+#include "JumpTable.h"
+#include "LLIntOfflineAsmConfig.h"
+#include "MarkedSpace.h"
+#include "RegisterFile.h"
+#include "ScopeChain.h"
+#include "Structure.h"
+#include "StructureChain.h"
+#include "ValueProfile.h"
+#include <wtf/text/StringImpl.h>
+
+namespace JSC {
+
+#define OFFLINE_ASM_OFFSETOF(clazz, field) OBJECT_OFFSETOF(clazz, field)
+
+class LLIntOffsetsExtractor {
+public:
+ static const unsigned* dummy();
+};
+
+const unsigned* LLIntOffsetsExtractor::dummy()
+{
+// This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code
+// to create a table of offsets, sizes, and a header identifying what combination of
+// Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor
+// because the fields whose offsets we're extracting are mostly private. So we make their
+// classes friends with LLIntOffsetsExtractor, and include the header here, to get the C++
+// compiler to kindly step aside and yield to our best intentions.
+#include "LLIntDesiredOffsets.h"
+ return extractorTable;
+}
+
+} // namespace JSC
+
+int main(int, char**)
+{
+ // Out of an abundance of caution, make sure that LLIntOffsetsExtractor::dummy() is live,
+ // and the extractorTable is live, too.
+ printf("%p\n", JSC::LLIntOffsetsExtractor::dummy());
+ return 0;
+}
+
+
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
new file mode 100644
index 000000000..3203d25d2
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -0,0 +1,1558 @@
+/*
+ * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntSlowPaths.h"
+
+#if ENABLE(LLINT)
+
+#include "Arguments.h"
+#include "CallFrame.h"
+#include "CommonSlowPaths.h"
+#include "GetterSetter.h"
+#include "HostCallReturnValue.h"
+#include "Interpreter.h"
+#include "JIT.h"
+#include "JITDriver.h"
+#include "JSActivation.h"
+#include "JSByteArray.h"
+#include "JSGlobalObjectFunctions.h"
+#include "JSPropertyNameIterator.h"
+#include "JSStaticScopeObject.h"
+#include "JSString.h"
+#include "JSValue.h"
+#include "LLIntCommon.h"
+#include "LLIntExceptions.h"
+#include "LowLevelInterpreter.h"
+#include "Operations.h"
+
+namespace JSC { namespace LLInt {
+
+#define LLINT_BEGIN_NO_SET_PC() \
+ JSGlobalData& globalData = exec->globalData(); \
+ NativeCallFrameTracer tracer(&globalData, exec)
+
+#define LLINT_SET_PC_FOR_STUBS() \
+ exec->setCurrentVPC(pc + 1)
+
+#define LLINT_BEGIN() \
+ LLINT_BEGIN_NO_SET_PC(); \
+ LLINT_SET_PC_FOR_STUBS()
+
+#define LLINT_OP(index) (exec->uncheckedR(pc[index].u.operand))
+#define LLINT_OP_C(index) (exec->r(pc[index].u.operand))
+
+#define LLINT_RETURN_TWO(first, second) do { \
+ union { \
+ struct { \
+ void* a; \
+ void* b; \
+ } pair; \
+ int64_t i; \
+ } __rt_u; \
+ __rt_u.pair.a = first; \
+ __rt_u.pair.b = second; \
+ return __rt_u.i; \
+ } while (false)
+
+#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, exec)
+
+#define LLINT_THROW(exceptionToThrow) do { \
+ globalData.exception = (exceptionToThrow); \
+ pc = returnToThrow(exec, pc); \
+ LLINT_END_IMPL(); \
+ } while (false)
+
+#define LLINT_CHECK_EXCEPTION() do { \
+ if (UNLIKELY(globalData.exception)) { \
+ pc = returnToThrow(exec, pc); \
+ LLINT_END_IMPL(); \
+ } \
+ } while (false)
+
+#define LLINT_END() do { \
+ LLINT_CHECK_EXCEPTION(); \
+ LLINT_END_IMPL(); \
+ } while (false)
+
+#define LLINT_BRANCH(opcode, condition) do { \
+ bool __b_condition = (condition); \
+ LLINT_CHECK_EXCEPTION(); \
+ if (__b_condition) \
+ pc += pc[OPCODE_LENGTH(opcode) - 1].u.operand; \
+ else \
+ pc += OPCODE_LENGTH(opcode); \
+ LLINT_END_IMPL(); \
+ } while (false)
+
+#define LLINT_RETURN(value) do { \
+ JSValue __r_returnValue = (value); \
+ LLINT_CHECK_EXCEPTION(); \
+ LLINT_OP(1) = __r_returnValue; \
+ LLINT_END_IMPL(); \
+ } while (false)
+
+#define LLINT_RETURN_PROFILED(opcode, value) do { \
+ JSValue __rp_returnValue = (value); \
+ LLINT_CHECK_EXCEPTION(); \
+ LLINT_OP(1) = __rp_returnValue; \
+ pc[OPCODE_LENGTH(opcode) - 1].u.profile->m_buckets[0] = \
+ JSValue::encode(__rp_returnValue); \
+ LLINT_END_IMPL(); \
+ } while (false)
+
+#define LLINT_CALL_END_IMPL(exec, callTarget) LLINT_RETURN_TWO((callTarget), (exec))
+
+#define LLINT_CALL_THROW(exec, pc, exceptionToThrow) do { \
+ ExecState* __ct_exec = (exec); \
+ Instruction* __ct_pc = (pc); \
+ globalData.exception = (exceptionToThrow); \
+ LLINT_CALL_END_IMPL(__ct_exec, callToThrow(__ct_exec, __ct_pc)); \
+ } while (false)
+
+#define LLINT_CALL_CHECK_EXCEPTION(exec, pc) do { \
+ ExecState* __cce_exec = (exec); \
+ Instruction* __cce_pc = (pc); \
+ if (UNLIKELY(globalData.exception)) \
+ LLINT_CALL_END_IMPL(__cce_exec, callToThrow(__cce_exec, __cce_pc)); \
+ } while (false)
+
+#define LLINT_CALL_RETURN(exec, pc, callTarget) do { \
+ ExecState* __cr_exec = (exec); \
+ Instruction* __cr_pc = (pc); \
+ void* __cr_callTarget = (callTarget); \
+ LLINT_CALL_CHECK_EXCEPTION(__cr_exec->callerFrame(), __cr_pc); \
+ LLINT_CALL_END_IMPL(__cr_exec, __cr_callTarget); \
+ } while (false)
+
+extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* pc, int fromWhere, int operand)
+{
+ LLINT_BEGIN();
+ dataLog("%p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d\n",
+ exec->codeBlock(),
+ exec,
+ static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
+ exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode),
+ fromWhere,
+ operand,
+ pc[operand].u.operand);
+ LLINT_END();
+}
+
+// Debug-tracing hook: logs the JSValue held in one operand, including its raw
+// encoded tag/payload words. Called from the LLInt assembly, hence extern "C".
+extern "C" SlowPathReturnType llint_trace_value(ExecState* exec, Instruction* pc, int fromWhere, int operand)
+{
+    LLINT_BEGIN();
+    JSValue value = LLINT_OP_C(operand).jsValue();
+    // Crack the encoded value into two 32-bit words for the trace output.
+    // NOTE(review): the tag/payload field order mirrors the 32-bit value
+    // representation; on other layouts this simply prints the two halves of
+    // the encoded word -- confirm if exact labeling matters for your tracing.
+    union {
+        struct {
+            uint32_t tag;
+            uint32_t payload;
+        } bits;
+        EncodedJSValue asValue;
+    } u;
+    u.asValue = JSValue::encode(value);
+    // static_cast<size_t> matches %zu; a signed intptr_t would be a
+    // format-specifier mismatch (undefined behavior per the C rules).
+    dataLog("%p / %p: executing bc#%zu, op#%u: Trace(%d): %d: %d: %08x:%08x: %s\n",
+            exec->codeBlock(),
+            exec,
+            static_cast<size_t>(pc - exec->codeBlock()->instructions().begin()),
+            exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode),
+            fromWhere,
+            operand,
+            pc[operand].u.operand,
+            u.bits.tag,
+            u.bits.payload,
+            value.description());
+    LLINT_END();
+}
+
+// Debug trace emitted when entering a (non-function) code block's prologue.
+LLINT_SLOW_PATH_DECL(trace_prologue)
+{
+    LLINT_BEGIN();
+    dataLog("%p / %p: in prologue.\n", exec->codeBlock(), exec);
+    LLINT_END();
+}
+
+// Shared helper for the function prologue/arity-check trace hooks: logs the
+// callee's code block, register counts, and caller frame. The code block is
+// looked up via the executable because exec->codeBlock() may not be set yet.
+static void traceFunctionPrologue(ExecState* exec, const char* comment, CodeSpecializationKind kind)
+{
+    JSFunction* callee = asFunction(exec->callee());
+    FunctionExecutable* executable = callee->jsExecutable();
+    CodeBlock* codeBlock = &executable->generatedBytecodeFor(kind);
+    dataLog("%p / %p: in %s of function %p, executable %p; numVars = %u, numParameters = %u, numCalleeRegisters = %u, caller = %p.\n",
+            codeBlock, exec, comment, callee, executable,
+            codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeRegisters,
+            exec->callerFrame());
+}
+
+// The four hooks below are thin wrappers that tag the trace output with where
+// we are (call vs. construct prologue, call vs. construct arity check).
+LLINT_SLOW_PATH_DECL(trace_prologue_function_for_call)
+{
+    LLINT_BEGIN();
+    traceFunctionPrologue(exec, "call prologue", CodeForCall);
+    LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(trace_prologue_function_for_construct)
+{
+    LLINT_BEGIN();
+    traceFunctionPrologue(exec, "construct prologue", CodeForConstruct);
+    LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(trace_arityCheck_for_call)
+{
+    LLINT_BEGIN();
+    traceFunctionPrologue(exec, "call arity check", CodeForCall);
+    LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(trace_arityCheck_for_construct)
+{
+    LLINT_BEGIN();
+    traceFunctionPrologue(exec, "construct arity check", CodeForConstruct);
+    LLINT_END();
+}
+
+// Per-instruction debug trace: logs the bytecode index, opcode name, and the
+// current scope chain.
+LLINT_SLOW_PATH_DECL(trace)
+{
+    LLINT_BEGIN();
+    // Cast the pointer difference to size_t to match %zu; a signed intptr_t
+    // is a format-specifier mismatch (undefined behavior per the C rules).
+    dataLog("%p / %p: executing bc#%zu, %s, scope %p\n",
+            exec->codeBlock(),
+            exec,
+            static_cast<size_t>(pc - exec->codeBlock()->instructions().begin()),
+            opcodeNames[exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode)],
+            exec->scopeChain());
+    LLINT_END();
+}
+
+// Trace hook for specially-instrumented opcodes; also logs the return PC so
+// unusual control flow can be followed in the log.
+LLINT_SLOW_PATH_DECL(special_trace)
+{
+    LLINT_BEGIN();
+    // Cast the pointer difference to size_t to match %zu; a signed intptr_t
+    // is a format-specifier mismatch (undefined behavior per the C rules).
+    dataLog("%p / %p: executing special case bc#%zu, op#%u, return PC is %p\n",
+            exec->codeBlock(),
+            exec,
+            static_cast<size_t>(pc - exec->codeBlock()->instructions().begin()),
+            exec->globalData().interpreter->getOpcodeID(pc[0].u.opcode),
+            exec->returnPC().value());
+    LLINT_END();
+}
+
+// Central switch for whether the LLInt should tier up to the JIT. Edit this
+// to force JITting off without rebuilding the world.
+inline bool shouldJIT(ExecState* exec)
+{
+    JSGlobalData& globalData = exec->globalData();
+    return globalData.canUseJIT();
+}
+
+// Shared tier-up logic for function entry: compile the code block with the
+// baseline JIT (if allowed) and hand back the machine-code entry point, either
+// the normal prologue or the arity-check entry. Returning a null code pointer
+// tells the LLInt to keep interpreting.
+// NOTE(review): 'name' is only read under ENABLE(JIT_VERBOSE_OSR); expect an
+// unused-parameter warning when that is off -- consider UNUSED_PARAM(name).
+enum EntryKind { Prologue, ArityCheck };
+static SlowPathReturnType entryOSR(ExecState* exec, Instruction* pc, CodeBlock* codeBlock, const char *name, EntryKind kind)
+{
+#if ENABLE(JIT_VERBOSE_OSR)
+    dataLog("%p: Entered %s with executeCounter = %d\n", codeBlock, name, codeBlock->llintExecuteCounter());
+#endif
+
+    if (!shouldJIT(exec)) {
+        // JIT is off: make sure we don't keep hitting this slow path.
+        codeBlock->dontJITAnytimeSoon();
+        LLINT_RETURN_TWO(0, exec);
+    }
+    if (!codeBlock->jitCompile(exec->globalData())) {
+#if ENABLE(JIT_VERBOSE_OSR)
+        dataLog(" Code was already compiled.\n");
+#endif
+    }
+    codeBlock->jitSoon();
+    if (kind == Prologue)
+        LLINT_RETURN_TWO(codeBlock->getJITCode().executableAddressAtOffset(0), exec);
+    ASSERT(kind == ArityCheck);
+    LLINT_RETURN_TWO(codeBlock->getJITCodeWithArityCheck().executableAddress(), exec);
+}
+
+// The five entry-OSR slow paths below differ only in which code block they
+// tier up (the frame's own block vs. the callee's call/construct block) and
+// which entry point they request (Prologue vs. ArityCheck).
+LLINT_SLOW_PATH_DECL(entry_osr)
+{
+    return entryOSR(exec, pc, exec->codeBlock(), "entry_osr", Prologue);
+}
+
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_call)
+{
+    return entryOSR(exec, pc, &asFunction(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForCall), "entry_osr_function_for_call", Prologue);
+}
+
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct)
+{
+    return entryOSR(exec, pc, &asFunction(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForConstruct), "entry_osr_function_for_construct", Prologue);
+}
+
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_call_arityCheck)
+{
+    return entryOSR(exec, pc, &asFunction(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForCall), "entry_osr_function_for_call_arityCheck", ArityCheck);
+}
+
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct_arityCheck)
+{
+    return entryOSR(exec, pc, &asFunction(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForConstruct), "entry_osr_function_for_construct_arityCheck", ArityCheck);
+}
+
+// On-stack replacement at a loop back-edge: compile with the baseline JIT,
+// then map the current bytecode index to its machine-code offset and jump
+// into the JITed code mid-function. Returns (0, exec) to keep interpreting
+// when the JIT is unavailable.
+LLINT_SLOW_PATH_DECL(loop_osr)
+{
+    CodeBlock* codeBlock = exec->codeBlock();
+
+#if ENABLE(JIT_VERBOSE_OSR)
+    dataLog("%p: Entered loop_osr with executeCounter = %d\n", codeBlock, codeBlock->llintExecuteCounter());
+#endif
+
+    if (!shouldJIT(exec)) {
+        codeBlock->dontJITAnytimeSoon();
+        LLINT_RETURN_TWO(0, exec);
+    }
+
+    if (!codeBlock->jitCompile(exec->globalData())) {
+#if ENABLE(JIT_VERBOSE_OSR)
+        dataLog(" Code was already compiled.\n");
+#endif
+    }
+    codeBlock->jitSoon();
+
+    // Loop OSR only targets baseline code; the DFG has its own entry rules.
+    ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
+
+    // Find the machine-code offset for the current bytecode index via the
+    // compressed JIT code map.
+    Vector<BytecodeAndMachineOffset> map;
+    codeBlock->jitCodeMap()->decode(map);
+    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(map.begin(), map.size(), pc - codeBlock->instructions().begin());
+    ASSERT(mapping);
+    ASSERT(mapping->m_bytecodeIndex == static_cast<unsigned>(pc - codeBlock->instructions().begin()));
+
+    void* jumpTarget = codeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
+    ASSERT(jumpTarget);
+
+    LLINT_RETURN_TWO(jumpTarget, exec);
+}
+
+// Triggered when a code block gets hot at a return point: compile it so that
+// future invocations use the JIT, but finish the current activation in the
+// interpreter (no jump into machine code here).
+LLINT_SLOW_PATH_DECL(replace)
+{
+    CodeBlock* codeBlock = exec->codeBlock();
+
+#if ENABLE(JIT_VERBOSE_OSR)
+    dataLog("%p: Entered replace with executeCounter = %d\n", codeBlock, codeBlock->llintExecuteCounter());
+#endif
+
+    if (!shouldJIT(exec))
+        codeBlock->dontJITAnytimeSoon();
+    else {
+        if (!codeBlock->jitCompile(exec->globalData())) {
+#if ENABLE(JIT_VERBOSE_OSR)
+            dataLog(" Code was already compiled.\n");
+#endif
+        }
+        codeBlock->jitSoon();
+    }
+    LLINT_END_IMPL();
+}
+
+// Grow the register file (JS stack) to cover this frame's callee registers.
+// On failure, raise a stack-overflow error against the caller's frame (the
+// current frame is not fully set up) and redirect pc to the throw handler.
+LLINT_SLOW_PATH_DECL(register_file_check)
+{
+    LLINT_BEGIN();
+#if LLINT_SLOW_PATH_TRACING
+    dataLog("Checking stack height with exec = %p.\n", exec);
+    dataLog("CodeBlock = %p.\n", exec->codeBlock());
+    dataLog("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeRegisters);
+    dataLog("Num vars = %u.\n", exec->codeBlock()->m_numVars);
+    dataLog("Current end is at %p.\n", exec->globalData().interpreter->registerFile().end());
+#endif
+    // The inline fast path already checked; we only get here if growth is needed.
+    ASSERT(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters] > exec->globalData().interpreter->registerFile().end());
+    if (UNLIKELY(!globalData.interpreter->registerFile().grow(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters]))) {
+        ReturnAddressPtr returnPC = exec->returnPC();
+        exec = exec->callerFrame();
+        globalData.exception = createStackOverflowError(exec);
+        interpreterThrowInCaller(exec, returnPC);
+        pc = returnToThrowForThrownException(exec);
+    }
+    LLINT_END_IMPL();
+}
+
+// Arity-check slow paths: when the caller passed too few arguments, build a
+// new frame with the missing slots filled in. A null result from
+// arityCheckFor means the register file could not grow, i.e. stack overflow;
+// the non-zero first return word signals failure to the LLInt trampoline.
+// NOTE(review): the call and construct variants are identical except for the
+// CodeSpecializationKind -- a shared helper would remove the duplication, but
+// that requires the LLINT_BEGIN machinery, so it is left as-is here.
+LLINT_SLOW_PATH_DECL(slow_path_call_arityCheck)
+{
+    LLINT_BEGIN();
+    ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &globalData.interpreter->registerFile(), CodeForCall);
+    if (!newExec) {
+        // Report the overflow against the caller's frame.
+        ReturnAddressPtr returnPC = exec->returnPC();
+        exec = exec->callerFrame();
+        globalData.exception = createStackOverflowError(exec);
+        interpreterThrowInCaller(exec, returnPC);
+        LLINT_RETURN_TWO(bitwise_cast<void*>(static_cast<uintptr_t>(1)), exec);
+    }
+    LLINT_RETURN_TWO(0, newExec);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_construct_arityCheck)
+{
+    LLINT_BEGIN();
+    ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &globalData.interpreter->registerFile(), CodeForConstruct);
+    if (!newExec) {
+        // Report the overflow against the caller's frame.
+        ReturnAddressPtr returnPC = exec->returnPC();
+        exec = exec->callerFrame();
+        globalData.exception = createStackOverflowError(exec);
+        interpreterThrowInCaller(exec, returnPC);
+        LLINT_RETURN_TWO(bitwise_cast<void*>(static_cast<uintptr_t>(1)), exec);
+    }
+    LLINT_RETURN_TWO(0, newExec);
+}
+
+// Create the JSActivation for a function that needs one, push it on the scope
+// chain, and store it in the destination register.
+LLINT_SLOW_PATH_DECL(slow_path_create_activation)
+{
+    LLINT_BEGIN();
+#if LLINT_SLOW_PATH_TRACING
+    dataLog("Creating an activation, exec = %p!\n", exec);
+#endif
+    JSActivation* activation = JSActivation::create(globalData, exec, static_cast<FunctionExecutable*>(exec->codeBlock()->ownerExecutable()));
+    exec->setScopeChain(exec->scopeChain()->push(activation));
+    LLINT_RETURN(JSValue(activation));
+}
+
+// Materialize the 'arguments' object, storing it in both the named register
+// and the shadow "unmodified" register used for tear-off bookkeeping.
+LLINT_SLOW_PATH_DECL(slow_path_create_arguments)
+{
+    LLINT_BEGIN();
+    JSValue arguments = JSValue(Arguments::create(globalData, exec));
+    LLINT_CHECK_EXCEPTION();
+    exec->uncheckedR(pc[1].u.operand) = arguments;
+    exec->uncheckedR(unmodifiedArgumentsRegister(pc[1].u.operand)) = arguments;
+    LLINT_END();
+}
+
+// Allocate the 'this' object for [[Construct]]: use the prototype's cached
+// inheritor structure when the prototype is an object, otherwise fall back to
+// the global object's empty-object structure.
+LLINT_SLOW_PATH_DECL(slow_path_create_this)
+{
+    LLINT_BEGIN();
+    JSFunction* callee = asFunction(exec->callee());
+
+#if !ASSERT_DISABLED
+    ConstructData constructData;
+    ASSERT(callee->methodTable()->getConstructData(callee, constructData) == ConstructTypeJS);
+#endif
+
+    Structure* structure;
+    JSValue proto = LLINT_OP(2).jsValue();
+    if (proto.isObject())
+        structure = asObject(proto)->inheritorID(globalData);
+    else
+        structure = callee->scope()->globalObject->emptyObjectStructure();
+
+    LLINT_RETURN(constructEmptyObject(exec, structure));
+}
+
+// Convert a primitive 'this' value to its object wrapper (sloppy-mode
+// semantics); the fast path handles the already-an-object case.
+LLINT_SLOW_PATH_DECL(slow_path_convert_this)
+{
+    LLINT_BEGIN();
+    JSValue v1 = LLINT_OP(1).jsValue();
+    ASSERT(v1.isPrimitive());
+    LLINT_RETURN(v1.toThisObject(exec));
+}
+
+// Allocate a fresh empty object with the default structure.
+LLINT_SLOW_PATH_DECL(slow_path_new_object)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(constructEmptyObject(exec));
+}
+
+// Build an array from a contiguous run of registers starting at operand 2,
+// with operand 3 giving the element count.
+LLINT_SLOW_PATH_DECL(slow_path_new_array)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(constructArray(exec, bitwise_cast<JSValue*>(&LLINT_OP(2)), pc[3].u.operand));
+}
+
+// Build an array from a compile-time constant buffer stored on the code block.
+LLINT_SLOW_PATH_DECL(slow_path_new_array_buffer)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(constructArray(exec, exec->codeBlock()->constantBuffer(pc[2].u.operand), pc[3].u.operand));
+}
+
+// Instantiate a RegExp literal; an invalid pattern/flag combination throws a
+// SyntaxError at evaluation time.
+LLINT_SLOW_PATH_DECL(slow_path_new_regexp)
+{
+    LLINT_BEGIN();
+    RegExp* regExp = exec->codeBlock()->regexp(pc[2].u.operand);
+    if (!regExp->isValid())
+        LLINT_THROW(createSyntaxError(exec, "Invalid flag supplied to RegExp constructor."));
+    LLINT_RETURN(RegExpObject::create(globalData, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->regExpStructure(), regExp));
+}
+
+// Generic slow paths for the boolean/comparison opcodes. Each one reads its
+// operands as full JSValues and delegates to the shared JSValue comparison
+// helpers (which may call toPrimitive/valueOf and hence can throw). The
+// greater/greatereq forms are expressed as jsLess/jsLessEq with the operands
+// swapped and the 'leftFirst' template flag flipped to preserve evaluation
+// order side effects.
+LLINT_SLOW_PATH_DECL(slow_path_not)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(!LLINT_OP_C(2).jsValue().toBoolean(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_eq)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(JSValue::equal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_neq)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(!JSValue::equal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_stricteq)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(JSValue::strictEqual(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_nstricteq)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(!JSValue::strictEqual(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_less)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(jsLess<true>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_lesseq)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(jsLessEq<true>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_greater)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(jsLess<false>(exec, LLINT_OP_C(3).jsValue(), LLINT_OP_C(2).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_greatereq)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(jsLessEq<false>(exec, LLINT_OP_C(3).jsValue(), LLINT_OP_C(2).jsValue())));
+}
+
+// Increment/decrement and numeric-conversion slow paths. toNumber may invoke
+// valueOf and can therefore throw; LLINT_RETURN performs the exception check.
+// The post-inc/dec forms write the updated value back to operand 2 and return
+// the original (pre-update) number.
+LLINT_SLOW_PATH_DECL(slow_path_pre_inc)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP(1).jsValue().toNumber(exec) + 1));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_pre_dec)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP(1).jsValue().toNumber(exec) - 1));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_post_inc)
+{
+    LLINT_BEGIN();
+    double result = LLINT_OP(2).jsValue().toNumber(exec);
+    LLINT_OP(2) = jsNumber(result + 1);
+    LLINT_RETURN(jsNumber(result));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_post_dec)
+{
+    LLINT_BEGIN();
+    double result = LLINT_OP(2).jsValue().toNumber(exec);
+    LLINT_OP(2) = jsNumber(result - 1);
+    LLINT_RETURN(jsNumber(result));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_to_jsnumber)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toNumber(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_negate)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(-LLINT_OP_C(2).jsValue().toNumber(exec)));
+}
+
+// The '+' operator slow path: string concatenation when the left operand is a
+// string (and the right isn't an object needing toPrimitive), plain numeric
+// addition when both are numbers, otherwise the full ES5 ToPrimitive dance in
+// jsAddSlowCase.
+LLINT_SLOW_PATH_DECL(slow_path_add)
+{
+    LLINT_BEGIN();
+    JSValue v1 = LLINT_OP_C(2).jsValue();
+    JSValue v2 = LLINT_OP_C(3).jsValue();
+
+#if LLINT_SLOW_PATH_TRACING
+    dataLog("Trying to add %s", v1.description());
+    dataLog(" to %s.\n", v2.description());
+#endif
+
+    if (v1.isString() && !v2.isObject())
+        LLINT_RETURN(jsString(exec, asString(v1), v2.toString(exec)));
+
+    if (v1.isNumber() && v2.isNumber())
+        LLINT_RETURN(jsNumber(v1.asNumber() + v2.asNumber()));
+
+    LLINT_RETURN(jsAddSlowCase(exec, v1, v2));
+}
+
+// Arithmetic and bitwise slow paths. Operands are coerced with toNumber /
+// toInt32 / toUInt32 (each of which can throw via valueOf); shift counts are
+// masked to 5 bits per the ECMAScript shift semantics.
+LLINT_SLOW_PATH_DECL(slow_path_mul)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toNumber(exec) * LLINT_OP_C(3).jsValue().toNumber(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_sub)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toNumber(exec) - LLINT_OP_C(3).jsValue().toNumber(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_div)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toNumber(exec) / LLINT_OP_C(3).jsValue().toNumber(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_mod)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(fmod(LLINT_OP_C(2).jsValue().toNumber(exec), LLINT_OP_C(3).jsValue().toNumber(exec))));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_lshift)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toInt32(exec) << (LLINT_OP_C(3).jsValue().toUInt32(exec) & 31)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_rshift)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toInt32(exec) >> (LLINT_OP_C(3).jsValue().toUInt32(exec) & 31)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_urshift)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toUInt32(exec) >> (LLINT_OP_C(3).jsValue().toUInt32(exec) & 31)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_bitand)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toInt32(exec) & LLINT_OP_C(3).jsValue().toInt32(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_bitor)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toInt32(exec) | LLINT_OP_C(3).jsValue().toInt32(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_bitxor)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toInt32(exec) ^ LLINT_OP_C(3).jsValue().toInt32(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_bitnot)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsNumber(~LLINT_OP_C(2).jsValue().toInt32(exec)));
+}
+
+// Reached only when the instanceof RHS does not implement HasInstance (the
+// fast path filters the valid cases), so this unconditionally throws the
+// "invalid parameter" TypeError.
+LLINT_SLOW_PATH_DECL(slow_path_check_has_instance)
+{
+    LLINT_BEGIN();
+    JSValue baseVal = LLINT_OP_C(1).jsValue();
+#ifndef NDEBUG
+    TypeInfo typeInfo(UnspecifiedType);
+    ASSERT(!baseVal.isObject()
+        || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance());
+#endif
+    LLINT_THROW(createInvalidParamError(exec, "instanceof", baseVal));
+}
+
+// Full instanceof semantics: walks the prototype chain of the LHS value.
+LLINT_SLOW_PATH_DECL(slow_path_instanceof)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(CommonSlowPaths::opInstanceOfSlow(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue())));
+}
+
+// typeof and the type-predicate opcodes. Note that is_undefined must treat
+// masquerading cells (e.g. document.all) as undefined.
+LLINT_SLOW_PATH_DECL(slow_path_typeof)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsTypeStringForValue(exec, LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_is_undefined)
+{
+    LLINT_BEGIN();
+    JSValue v = LLINT_OP_C(2).jsValue();
+    LLINT_RETURN(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_is_boolean)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(LLINT_OP_C(2).jsValue().isBoolean()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_is_number)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(LLINT_OP_C(2).jsValue().isNumber()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_is_string)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(isJSString(LLINT_OP_C(2).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_is_object)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(jsIsObjectType(LLINT_OP_C(2).jsValue())));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_is_function)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(jsIsFunctionType(LLINT_OP_C(2).jsValue())));
+}
+
+// The 'in' operator: property-existence test (throws on non-object RHS).
+LLINT_SLOW_PATH_DECL(slow_path_in)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(jsBoolean(CommonSlowPaths::opIn(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
+}
+
+// Resolve an identifier by walking the full scope chain (throws a reference
+// error if not found); result is recorded in the value profile.
+LLINT_SLOW_PATH_DECL(slow_path_resolve)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN_PROFILED(op_resolve, CommonSlowPaths::opResolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
+}
+
+// Same as resolve, but skips the innermost pc[3] scope nodes (used when the
+// compiler knows activations/with-scopes to bypass).
+LLINT_SLOW_PATH_DECL(slow_path_resolve_skip)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN_PROFILED(
+        op_resolve_skip,
+        CommonSlowPaths::opResolveSkip(
+            exec,
+            exec->codeBlock()->identifier(pc[2].u.operand),
+            pc[3].u.operand));
+}
+
+// Look up a global variable and, when the lookup is cacheable (plain value
+// slot directly on a non-dictionary global object), patch the instruction's
+// inline cache (structure in pc[3], offset in pc[4]) so the fast path can hit
+// next time. Returns an empty JSValue after setting the exception when the
+// variable does not exist.
+static JSValue resolveGlobal(ExecState* exec, Instruction* pc)
+{
+    CodeBlock* codeBlock = exec->codeBlock();
+    JSGlobalObject* globalObject = codeBlock->globalObject();
+    ASSERT(globalObject->isGlobalObject());
+    int property = pc[2].u.operand;
+    Structure* structure = pc[3].u.structure.get();
+
+    // If the cached structure still matched, the fast path would have
+    // handled this; we only get here on a cache miss.
+    ASSERT_UNUSED(structure, structure != globalObject->structure());
+
+    Identifier& ident = codeBlock->identifier(property);
+    PropertySlot slot(globalObject);
+
+    if (globalObject->getPropertySlot(exec, ident, slot)) {
+        JSValue result = slot.getValue(exec, ident);
+        if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary()
+            && slot.slotBase() == globalObject) {
+            // Cache the structure (with a write barrier against the owner
+            // executable) and the property offset.
+            pc[3].u.structure.set(
+                exec->globalData(), codeBlock->ownerExecutable(), globalObject->structure());
+            pc[4] = slot.cachedOffset();
+        }
+
+        return result;
+    }
+
+    exec->globalData().exception = createUndefinedVariableError(exec, ident);
+    return JSValue();
+}
+
+// Cache-miss slow paths for global resolution; all three funnel through
+// resolveGlobal (or the generic opResolve for the dynamic fallback) and
+// record the result in the value profile.
+LLINT_SLOW_PATH_DECL(slow_path_resolve_global)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN_PROFILED(op_resolve_global, resolveGlobal(exec, pc));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_resolve_global_dynamic)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN_PROFILED(op_resolve_global_dynamic, resolveGlobal(exec, pc));
+}
+
+// Taken when dynamic scope injection (eval/with) forces a full scope-chain
+// resolve instead of the optimized global lookup.
+LLINT_SLOW_PATH_DECL(slow_path_resolve_for_resolve_global_dynamic)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN_PROFILED(op_resolve_global_dynamic, CommonSlowPaths::opResolve(exec, exec->codeBlock()->identifier(pc[2].u.operand)));
+}
+
+// Resolve the base object for an assignment. When pc[3] is set (strict-mode
+// put that must not create a global), a missing binding is an error rather
+// than an implicit global.
+LLINT_SLOW_PATH_DECL(slow_path_resolve_base)
+{
+    LLINT_BEGIN();
+    Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
+    if (pc[3].u.operand) {
+        JSValue base = JSC::resolveBase(exec, ident, exec->scopeChain(), true);
+        if (!base)
+            LLINT_THROW(createErrorForInvalidGlobalAssignment(exec, ident.ustring()));
+        LLINT_RETURN(base);
+    }
+
+    LLINT_RETURN_PROFILED(op_resolve_base, JSC::resolveBase(exec, ident, exec->scopeChain(), false));
+}
+
+// Strict-mode helper: verify the property already exists on the base object,
+// throwing the invalid-global-assignment error if not.
+LLINT_SLOW_PATH_DECL(slow_path_ensure_property_exists)
+{
+    LLINT_BEGIN();
+    JSObject* object = asObject(LLINT_OP(1).jsValue());
+    PropertySlot slot(object);
+    Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
+    if (!object->getPropertySlot(exec, ident, slot))
+        LLINT_THROW(createErrorForInvalidGlobalAssignment(exec, ident.ustring()));
+    LLINT_END();
+}
+
+// Resolve an identifier returning both the base object (operand 1) and the
+// resolved value (operand 2); used for compound/function-call resolution.
+LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base)
+{
+    LLINT_BEGIN();
+    JSValue result = CommonSlowPaths::opResolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), LLINT_OP(1));
+    LLINT_CHECK_EXCEPTION();
+    LLINT_OP(2) = result;
+    // FIXME: technically should have profiling, but we don't do it because the DFG won't use it.
+    LLINT_END();
+}
+
+// Like resolve_with_base, but the base is computed with 'this' semantics
+// (undefined/global for non-with scopes).
+LLINT_SLOW_PATH_DECL(slow_path_resolve_with_this)
+{
+    LLINT_BEGIN();
+    JSValue result = CommonSlowPaths::opResolveWithThis(exec, exec->codeBlock()->identifier(pc[3].u.operand), LLINT_OP(1));
+    LLINT_CHECK_EXCEPTION();
+    LLINT_OP(2) = result;
+    // FIXME: technically should have profiling, but we don't do it because the DFG won't use it.
+    LLINT_END();
+}
+
+// Generic get_by_id: perform the property load, then, when the access is a
+// cacheable value slot on the base cell itself, seed the instruction's inline
+// cache (structure in pc[4], byte offset in pc[5]) so the LLInt fast path can
+// hit. Also records the result in the opcode's value profile bucket.
+LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    Identifier& ident = codeBlock->identifier(pc[3].u.operand);
+    JSValue baseValue = LLINT_OP_C(2).jsValue();
+    PropertySlot slot(baseValue);
+
+    JSValue result = baseValue.get(exec, ident, slot);
+    LLINT_CHECK_EXCEPTION();
+    LLINT_OP(1) = result;
+
+    if (baseValue.isCell()
+        && slot.isCacheable()
+        && slot.slotBase() == baseValue
+        && slot.cachedPropertyType() == PropertySlot::Value) {
+
+        JSCell* baseCell = baseValue.asCell();
+        Structure* structure = baseCell->structure();
+
+        if (!structure->isUncacheableDictionary()
+            && !structure->typeInfo().prohibitsPropertyCaching()) {
+            // The offset is stored pre-scaled to bytes for the fast path.
+            pc[4].u.structure.set(
+                globalData, codeBlock->ownerExecutable(), structure);
+            pc[5].u.operand = slot.cachedOffset() * sizeof(JSValue);
+        }
+    }
+
+    pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result);
+    LLINT_END();
+}
+
+// Slow path for arguments.length when the fast arguments-object check fails;
+// falls back to a generic property load (no caching).
+LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    Identifier& ident = codeBlock->identifier(pc[3].u.operand);
+    JSValue baseValue = LLINT_OP(2).jsValue();
+    PropertySlot slot(baseValue);
+    LLINT_RETURN(baseValue.get(exec, ident, slot));
+}
+
+// Generic put_by_id: perform the store (direct put for 'put_by_id_direct',
+// i.e. pc[8] set; otherwise a full prototype-chain-respecting put), then try
+// to cache it. Replacing an existing property caches structure+offset; adding
+// a new property caches a structure *transition* (old structure, offset, new
+// structure, prototype chain) and repoints the opcode at the transition
+// variant. The opcode is temporarily reset to the generic put_by_id first
+// because the helpers called while filling the cache can GC and must not see
+// a half-initialized transition cache.
+LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    Identifier& ident = codeBlock->identifier(pc[2].u.operand);
+
+    JSValue baseValue = LLINT_OP_C(1).jsValue();
+    PutPropertySlot slot(codeBlock->isStrictMode());
+    if (pc[8].u.operand)
+        asObject(baseValue)->putDirect(globalData, ident, LLINT_OP_C(3).jsValue(), slot);
+    else
+        baseValue.put(exec, ident, LLINT_OP_C(3).jsValue(), slot);
+    LLINT_CHECK_EXCEPTION();
+
+    if (baseValue.isCell()
+        && slot.isCacheable()) {
+
+        JSCell* baseCell = baseValue.asCell();
+        Structure* structure = baseCell->structure();
+
+        if (!structure->isUncacheableDictionary()
+            && !structure->typeInfo().prohibitsPropertyCaching()
+            && baseCell == slot.base()) {
+
+            if (slot.type() == PutPropertySlot::NewProperty) {
+                // Only cache transitions that did not reallocate property
+                // storage, so the fast path can write in place.
+                if (!structure->isDictionary() && structure->previousID()->propertyStorageCapacity() == structure->propertyStorageCapacity()) {
+                    // This is needed because some of the methods we call
+                    // below may GC.
+                    pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id);
+
+                    normalizePrototypeChain(exec, baseCell);
+
+                    ASSERT(structure->previousID()->isObject());
+                    pc[4].u.structure.set(
+                        globalData, codeBlock->ownerExecutable(), structure->previousID());
+                    pc[5].u.operand = slot.cachedOffset() * sizeof(JSValue);
+                    pc[6].u.structure.set(
+                        globalData, codeBlock->ownerExecutable(), structure);
+                    StructureChain* chain = structure->prototypeChain(exec);
+                    ASSERT(chain);
+                    pc[7].u.structureChain.set(
+                        globalData, codeBlock->ownerExecutable(), chain);
+
+                    if (pc[8].u.operand)
+                        pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_direct);
+                    else
+                        pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_normal);
+                }
+            } else {
+                // Replace of an existing property: cache structure + offset.
+                pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id);
+                pc[4].u.structure.set(
+                    globalData, codeBlock->ownerExecutable(), structure);
+                pc[5].u.operand = slot.cachedOffset() * sizeof(JSValue);
+            }
+        }
+    }
+
+    LLINT_END();
+}
+
+// 'delete obj.prop': in strict mode a failed delete throws a TypeError;
+// otherwise the boolean result is returned.
+LLINT_SLOW_PATH_DECL(slow_path_del_by_id)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    JSObject* baseObject = LLINT_OP_C(2).jsValue().toObject(exec);
+    bool couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, codeBlock->identifier(pc[3].u.operand));
+    LLINT_CHECK_EXCEPTION();
+    if (!couldDelete && codeBlock->isStrictMode())
+        LLINT_THROW(createTypeError(exec, "Unable to delete property."));
+    LLINT_RETURN(jsBoolean(couldDelete));
+}
+
+// Shared subscripted-load helper: try the own-property fast case for string
+// subscripts, then indexed access (string rope index, byte array, generic
+// indexed get) for uint32 subscripts, and finally a full identifier lookup.
+// May throw via toString/getters; callers check the exception.
+inline JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript)
+{
+    if (LIKELY(baseValue.isCell() && subscript.isString())) {
+        if (JSValue result = baseValue.asCell()->fastGetOwnProperty(exec, asString(subscript)->value(exec)))
+            return result;
+    }
+
+    if (subscript.isUInt32()) {
+        uint32_t i = subscript.asUInt32();
+        if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i))
+            return asString(baseValue)->getIndex(exec, i);
+
+        if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i))
+            return asByteArray(baseValue)->getIndex(exec, i);
+
+        return baseValue.get(exec, i);
+    }
+
+    Identifier property(exec, subscript.toString(exec)->value(exec));
+    return baseValue.get(exec, property);
+}
+
+// obj[subscript] load, with the result recorded in the value profile.
+LLINT_SLOW_PATH_DECL(slow_path_get_by_val)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN_PROFILED(op_get_by_val, getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue()));
+}
+
+// arguments[i] when the arguments object wasn't materialized yet: create it
+// lazily (also filling the shadow register) and then do a normal getByVal.
+LLINT_SLOW_PATH_DECL(slow_path_get_argument_by_val)
+{
+    LLINT_BEGIN();
+    JSValue arguments = LLINT_OP(2).jsValue();
+    if (!arguments) {
+        arguments = Arguments::create(globalData, exec);
+        LLINT_CHECK_EXCEPTION();
+        LLINT_OP(2) = arguments;
+        exec->uncheckedR(unmodifiedArgumentsRegister(pc[2].u.operand)) = arguments;
+    }
+
+    LLINT_RETURN(getByVal(exec, arguments, LLINT_OP_C(3).jsValue()));
+}
+
+// for-in body load (obj[propertyName]) when the fast cached-structure path
+// misses.
+LLINT_SLOW_PATH_DECL(slow_path_get_by_pname)
+{
+    LLINT_BEGIN();
+    LLINT_RETURN(getByVal(exec, LLINT_OP(2).jsValue(), LLINT_OP(3).jsValue()));
+}
+
+// obj[subscript] = value. Integer subscripts take the array / byte-array
+// fast paths (in-bounds set, else generic indexed put); everything else is
+// converted to an identifier and stored with full put semantics.
+LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
+{
+    LLINT_BEGIN();
+
+    JSValue baseValue = LLINT_OP_C(1).jsValue();
+    JSValue subscript = LLINT_OP_C(2).jsValue();
+    JSValue value = LLINT_OP_C(3).jsValue();
+
+    if (LIKELY(subscript.isUInt32())) {
+        uint32_t i = subscript.asUInt32();
+        if (isJSArray(baseValue)) {
+            JSArray* jsArray = asArray(baseValue);
+            if (jsArray->canSetIndex(i))
+                jsArray->setIndex(globalData, i, value);
+            else
+                JSArray::putByIndex(jsArray, exec, i, value);
+            LLINT_END();
+        }
+        if (isJSByteArray(baseValue)
+            && asByteArray(baseValue)->canAccessIndex(i)) {
+            JSByteArray* jsByteArray = asByteArray(baseValue);
+            if (value.isInt32()) {
+                jsByteArray->setIndex(i, value.asInt32());
+                LLINT_END();
+            }
+            if (value.isNumber()) {
+                jsByteArray->setIndex(i, value.asNumber());
+                LLINT_END();
+            }
+        }
+        // Non-numeric value into a byte array, or a non-array base: generic
+        // indexed put.
+        baseValue.put(exec, i, value);
+        LLINT_END();
+    }
+
+    Identifier property(exec, subscript.toString(exec)->value(exec));
+    LLINT_CHECK_EXCEPTION();
+    PutPropertySlot slot(exec->codeBlock()->isStrictMode());
+    baseValue.put(exec, property, value, slot);
+    LLINT_END();
+}
+
+// 'delete obj[subscript]': indexed delete for uint32 subscripts, identifier
+// delete otherwise; strict mode turns a failed delete into a TypeError.
+// NOTE(review): toObject above can throw, but the first exception check only
+// happens on the non-integer branch -- confirm whether an explicit check
+// right after toObject was intended.
+LLINT_SLOW_PATH_DECL(slow_path_del_by_val)
+{
+    LLINT_BEGIN();
+    JSValue baseValue = LLINT_OP_C(2).jsValue();
+    JSObject* baseObject = baseValue.toObject(exec);
+
+    JSValue subscript = LLINT_OP_C(3).jsValue();
+
+    bool couldDelete;
+
+    uint32_t i;
+    if (subscript.getUInt32(i))
+        couldDelete = baseObject->methodTable()->deletePropertyByIndex(baseObject, exec, i);
+    else {
+        LLINT_CHECK_EXCEPTION();
+        Identifier property(exec, subscript.toString(exec)->value(exec));
+        LLINT_CHECK_EXCEPTION();
+        couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, property);
+    }
+
+    if (!couldDelete && exec->codeBlock()->isStrictMode())
+        LLINT_THROW(createTypeError(exec, "Unable to delete property."));
+
+    LLINT_RETURN(jsBoolean(couldDelete));
+}
+
+// Store at a compile-time-known integer index (array literal initialization).
+LLINT_SLOW_PATH_DECL(slow_path_put_by_index)
+{
+    LLINT_BEGIN();
+    LLINT_OP_C(1).jsValue().put(exec, pc[2].u.operand, LLINT_OP_C(3).jsValue());
+    LLINT_END();
+}
+
+// Install a getter/setter pair (from an object literal accessor definition)
+// as a single GetterSetter accessor property on the base object. Either
+// function may be undefined, but not both.
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter)
+{
+    LLINT_BEGIN();
+    ASSERT(LLINT_OP(1).jsValue().isObject());
+    JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+    GetterSetter* accessor = GetterSetter::create(exec);
+    LLINT_CHECK_EXCEPTION();
+
+    JSValue getter = LLINT_OP(3).jsValue();
+    JSValue setter = LLINT_OP(4).jsValue();
+    ASSERT(getter.isObject() || getter.isUndefined());
+    ASSERT(setter.isObject() || setter.isUndefined());
+    ASSERT(getter.isObject() || setter.isObject());
+
+    if (!getter.isUndefined())
+        accessor->setGetter(globalData, asObject(getter));
+    if (!setter.isUndefined())
+        accessor->setSetter(globalData, asObject(setter));
+    baseObj->putDirectAccessor(
+        globalData,
+        exec->codeBlock()->identifier(pc[2].u.operand),
+        accessor, Accessor);
+    LLINT_END();
+}
+
+// Pop pc[1] scopes off the scope chain (leaving a with/catch block via a
+// jump) and then take the relative branch in pc[2].
+LLINT_SLOW_PATH_DECL(slow_path_jmp_scopes)
+{
+    LLINT_BEGIN();
+    unsigned count = pc[1].u.operand;
+    ScopeChainNode* tmp = exec->scopeChain();
+    while (count--)
+        tmp = tmp->pop();
+    exec->setScopeChain(tmp);
+    pc += pc[2].u.operand;
+    LLINT_END();
+}
+
+// Conditional-branch slow paths. LLINT_BRANCH evaluates the condition (which
+// may throw via toBoolean/toPrimitive) and takes the jump accordingly. As
+// with the comparison opcodes, the greater/greatereq variants reuse
+// jsLess/jsLessEq with swapped operands and flipped 'leftFirst' flag to keep
+// the ECMAScript evaluation order.
+LLINT_SLOW_PATH_DECL(slow_path_jtrue)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jtrue, LLINT_OP_C(1).jsValue().toBoolean(exec));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jfalse)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jfalse, !LLINT_OP_C(1).jsValue().toBoolean(exec));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jless)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jless, jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jnless)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jnless, !jsLess<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jgreater)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jgreater, jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jngreater)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jngreater, !jsLess<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jlesseq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jlesseq, jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jnlesseq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jnlesseq, !jsLessEq<true>(exec, LLINT_OP_C(1).jsValue(), LLINT_OP_C(2).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jgreatereq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jgreatereq, jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_jngreatereq)
+{
+    LLINT_BEGIN();
+    LLINT_BRANCH(op_jngreatereq, !jsLessEq<false>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(1).jsValue()));
+}
+
+// Immediate switch when the scrutinee is a double: route through the jump
+// table only when the double is exactly an int32, otherwise fall through to
+// the default offset.
+LLINT_SLOW_PATH_DECL(slow_path_switch_imm)
+{
+    LLINT_BEGIN();
+    JSValue scrutinee = LLINT_OP_C(3).jsValue();
+    ASSERT(scrutinee.isDouble());
+    double value = scrutinee.asDouble();
+    int32_t intValue = static_cast<int32_t>(value);
+    int defaultOffset = pc[2].u.operand;
+    if (value == intValue) {
+        CodeBlock* codeBlock = exec->codeBlock();
+        pc += codeBlock->immediateSwitchJumpTable(pc[1].u.operand).offsetForValue(intValue, defaultOffset);
+    } else
+        pc += defaultOffset;
+    LLINT_END();
+}
+
+// String switch: non-string scrutinees always take the default branch.
+LLINT_SLOW_PATH_DECL(slow_path_switch_string)
+{
+    LLINT_BEGIN();
+    JSValue scrutinee = LLINT_OP_C(3).jsValue();
+    int defaultOffset = pc[2].u.operand;
+    if (!scrutinee.isString())
+        pc += defaultOffset;
+    else {
+        CodeBlock* codeBlock = exec->codeBlock();
+        pc += codeBlock->stringSwitchJumpTable(pc[1].u.operand).offsetForValue(asString(scrutinee)->value(exec).impl(), defaultOffset);
+    }
+    LLINT_END();
+}
+
+// Instantiate a function declaration in the current scope. The assertion
+// checks that if this code needs an activation, it has been created already
+// (the new function captures the current scope chain).
+LLINT_SLOW_PATH_DECL(slow_path_new_func)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    ASSERT(codeBlock->codeType() != FunctionCode
+        || !codeBlock->needsFullScopeChain()
+        || exec->uncheckedR(codeBlock->activationRegister()).jsValue());
+#if LLINT_SLOW_PATH_TRACING
+    dataLog("Creating function!\n");
+#endif
+    LLINT_RETURN(codeBlock->functionDecl(pc[2].u.operand)->make(exec, exec->scopeChain()));
+}
+
+// Instantiate a function expression. A named function expression gets an
+// extra static scope binding its own name (read-only, non-deletable) so the
+// function can refer to itself.
+LLINT_SLOW_PATH_DECL(slow_path_new_func_exp)
+{
+    LLINT_BEGIN();
+    CodeBlock* codeBlock = exec->codeBlock();
+    FunctionExecutable* function = codeBlock->functionExpr(pc[2].u.operand);
+    JSFunction* func = function->make(exec, exec->scopeChain());
+
+    if (!function->name().isNull()) {
+        JSStaticScopeObject* functionScopeObject = JSStaticScopeObject::create(exec, function->name(), func, ReadOnly | DontDelete);
+        func->setScope(globalData, func->scope()->push(functionScopeObject));
+    }
+
+    LLINT_RETURN(func);
+}
+
+// Invoke a non-JS callee (native host function) or throw when the callee is
+// not callable/constructible at all. The callee frame is partially set up
+// (scope from the caller, no code block, no return PC); on a host call the
+// native result is stashed in globalData.hostCallReturnValue and the
+// getHostCallReturnValue thunk is returned as the "machine code" target.
+static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind kind)
+{
+    ExecState* exec = execCallee->callerFrame();
+    JSGlobalData& globalData = exec->globalData();
+
+    execCallee->setScopeChain(exec->scopeChain());
+    execCallee->setCodeBlock(0);
+    execCallee->clearReturnPC();
+
+    if (kind == CodeForCall) {
+        CallData callData;
+        CallType callType = getCallData(callee, callData);
+
+        // A JS callee would have been handled by setUpCall, not here.
+        ASSERT(callType != CallTypeJS);
+
+        if (callType == CallTypeHost) {
+            globalData.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
+
+            LLINT_CALL_RETURN(execCallee, pc, reinterpret_cast<void*>(getHostCallReturnValue));
+        }
+
+#if LLINT_SLOW_PATH_TRACING
+        dataLog("Call callee is not a function: %s\n", callee.description());
+#endif
+
+        ASSERT(callType == CallTypeNone);
+        LLINT_CALL_THROW(exec, pc, createNotAFunctionError(exec, callee));
+    }
+
+    ASSERT(kind == CodeForConstruct);
+
+    ConstructData constructData;
+    ConstructType constructType = getConstructData(callee, constructData);
+
+    ASSERT(constructType != ConstructTypeJS);
+
+    if (constructType == ConstructTypeHost) {
+        globalData.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
+
+        LLINT_CALL_RETURN(execCallee, pc, reinterpret_cast<void*>(getHostCallReturnValue));
+    }
+
+#if LLINT_SLOW_PATH_TRACING
+    dataLog("Constructor callee is not a function: %s\n", callee.description());
+#endif
+
+    ASSERT(constructType == ConstructTypeNone);
+    LLINT_CALL_THROW(exec, pc, createNotAConstructorError(exec, callee));
+}
+
+inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = 0)
+{
+#if LLINT_SLOW_PATH_TRACING
+ dataLog("Performing call with recorded PC = %p\n", execCallee->callerFrame()->currentVPC());
+#endif
+
+ JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
+ if (!calleeAsFunctionCell)
+ return handleHostCall(execCallee, pc, calleeAsValue, kind);
+
+ JSFunction* callee = asFunction(calleeAsFunctionCell);
+ ScopeChainNode* scope = callee->scopeUnchecked();
+ JSGlobalData& globalData = *scope->globalData;
+ execCallee->setScopeChain(scope);
+ ExecutableBase* executable = callee->executable();
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = 0;
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeFor(kind).addressForCall();
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileFor(execCallee, callee->scope(), kind);
+ if (error)
+ LLINT_CALL_THROW(execCallee->callerFrame(), pc, error);
+ codeBlock = &functionExecutable->generatedBytecodeFor(kind);
+ ASSERT(codeBlock);
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
+ codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
+ else
+ codePtr = functionExecutable->generatedJITCodeFor(kind).addressForCall();
+ }
+
+ if (callLinkInfo) {
+ if (callLinkInfo->isOnList())
+ callLinkInfo->remove();
+ ExecState* execCaller = execCallee->callerFrame();
+ callLinkInfo->callee.set(globalData, execCaller->codeBlock()->ownerExecutable(), callee);
+ callLinkInfo->lastSeenCallee.set(globalData, execCaller->codeBlock()->ownerExecutable(), callee);
+ callLinkInfo->machineCodeTarget = codePtr;
+ if (codeBlock)
+ codeBlock->linkIncomingCall(callLinkInfo);
+ }
+
+ LLINT_CALL_RETURN(execCallee, pc, codePtr.executableAddress());
+}
+
+inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpecializationKind kind)
+{
+ // This needs to:
+ // - Set up a call frame.
+ // - Figure out what to call and compile it if necessary.
+ // - If possible, link the call's inline cache.
+ // - Return a tuple of machine code address to call and the new call frame.
+
+ JSValue calleeAsValue = LLINT_OP_C(1).jsValue();
+
+ ExecState* execCallee = exec + pc[3].u.operand;
+
+ execCallee->setArgumentCountIncludingThis(pc[2].u.operand);
+ execCallee->uncheckedR(RegisterFile::Callee) = calleeAsValue;
+ execCallee->setCallerFrame(exec);
+
+ ASSERT(pc[4].u.callLinkInfo);
+ return setUpCall(execCallee, pc, kind, calleeAsValue, pc[4].u.callLinkInfo);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_call)
+{
+ LLINT_BEGIN_NO_SET_PC();
+ return genericCall(exec, pc, CodeForCall);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_construct)
+{
+ LLINT_BEGIN_NO_SET_PC();
+ return genericCall(exec, pc, CodeForConstruct);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
+{
+ LLINT_BEGIN();
+ // This needs to:
+ // - Set up a call frame while respecting the variable arguments.
+ // - Figure out what to call and compile it if necessary.
+ // - Return a tuple of machine code address to call and the new call frame.
+
+ JSValue calleeAsValue = LLINT_OP_C(1).jsValue();
+
+ ExecState* execCallee = loadVarargs(
+ exec, &globalData.interpreter->registerFile(),
+ LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue(), pc[4].u.operand);
+ LLINT_CALL_CHECK_EXCEPTION(exec, pc);
+
+ execCallee->uncheckedR(RegisterFile::Callee) = calleeAsValue;
+ execCallee->setCallerFrame(exec);
+ exec->uncheckedR(RegisterFile::ArgumentCount).tag() = bitwise_cast<int32_t>(pc + OPCODE_LENGTH(op_call_varargs));
+
+ return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_call_eval)
+{
+ LLINT_BEGIN_NO_SET_PC();
+ JSValue calleeAsValue = LLINT_OP(1).jsValue();
+
+ ExecState* execCallee = exec + pc[3].u.operand;
+
+ execCallee->setArgumentCountIncludingThis(pc[2].u.operand);
+ execCallee->setCallerFrame(exec);
+ execCallee->uncheckedR(RegisterFile::Callee) = calleeAsValue;
+ execCallee->setScopeChain(exec->scopeChain());
+ execCallee->setReturnPC(bitwise_cast<Instruction*>(&llint_generic_return_point));
+ execCallee->setCodeBlock(0);
+ exec->uncheckedR(RegisterFile::ArgumentCount).tag() = bitwise_cast<int32_t>(pc + OPCODE_LENGTH(op_call_eval));
+
+ if (!isHostFunction(calleeAsValue, globalFuncEval))
+ return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
+
+ globalData.hostCallReturnValue = eval(execCallee);
+ LLINT_CALL_RETURN(execCallee, pc, reinterpret_cast<void*>(getHostCallReturnValue));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation)
+{
+ LLINT_BEGIN();
+ ASSERT(exec->codeBlock()->needsFullScopeChain());
+ JSValue activationValue = LLINT_OP(1).jsValue();
+ if (!activationValue) {
+ if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister(pc[2].u.operand)).jsValue()) {
+ if (!exec->codeBlock()->isStrictMode())
+ asArguments(v)->tearOff(exec);
+ }
+ LLINT_END();
+ }
+ JSActivation* activation = asActivation(activationValue);
+ activation->tearOff(globalData);
+ if (JSValue v = exec->uncheckedR(unmodifiedArgumentsRegister(pc[2].u.operand)).jsValue())
+ asArguments(v)->didTearOffActivation(globalData, activation);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_tear_off_arguments)
+{
+ LLINT_BEGIN();
+ ASSERT(exec->codeBlock()->usesArguments() && !exec->codeBlock()->needsFullScopeChain());
+ asArguments(exec->uncheckedR(unmodifiedArgumentsRegister(pc[1].u.operand)).jsValue())->tearOff(exec);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_strcat)
+{
+ LLINT_BEGIN();
+ LLINT_RETURN(jsString(exec, &LLINT_OP(2), pc[3].u.operand));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_to_primitive)
+{
+ LLINT_BEGIN();
+ LLINT_RETURN(LLINT_OP_C(2).jsValue().toPrimitive(exec));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_get_pnames)
+{
+ LLINT_BEGIN();
+ JSValue v = LLINT_OP(2).jsValue();
+ if (v.isUndefinedOrNull()) {
+ pc += pc[5].u.operand;
+ LLINT_END();
+ }
+
+ JSObject* o = v.toObject(exec);
+ Structure* structure = o->structure();
+ JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
+ if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(exec))
+ jsPropertyNameIterator = JSPropertyNameIterator::create(exec, o);
+
+ LLINT_OP(1) = JSValue(jsPropertyNameIterator);
+ LLINT_OP(2) = JSValue(o);
+ LLINT_OP(3) = Register::withInt(0);
+ LLINT_OP(4) = Register::withInt(jsPropertyNameIterator->size());
+
+ pc += OPCODE_LENGTH(op_get_pnames);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_next_pname)
+{
+ LLINT_BEGIN();
+ JSObject* base = asObject(LLINT_OP(2).jsValue());
+ JSString* property = asString(LLINT_OP(1).jsValue());
+ if (base->hasProperty(exec, Identifier(exec, property->value(exec)))) {
+ // Go to target.
+ pc += pc[6].u.operand;
+ } // Else, don't change the PC, so the interpreter will reloop.
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_push_scope)
+{
+ LLINT_BEGIN();
+ JSValue v = LLINT_OP(1).jsValue();
+ JSObject* o = v.toObject(exec);
+ LLINT_CHECK_EXCEPTION();
+
+ LLINT_OP(1) = o;
+ exec->setScopeChain(exec->scopeChain()->push(o));
+
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_pop_scope)
+{
+ LLINT_BEGIN();
+ exec->setScopeChain(exec->scopeChain()->pop());
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_push_new_scope)
+{
+ LLINT_BEGIN();
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSObject* scope = JSStaticScopeObject::create(exec, codeBlock->identifier(pc[2].u.operand), LLINT_OP(3).jsValue(), DontDelete);
+ exec->setScopeChain(exec->scopeChain()->push(scope));
+ LLINT_RETURN(scope);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_throw)
+{
+ LLINT_BEGIN();
+ LLINT_THROW(LLINT_OP_C(1).jsValue());
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_throw_reference_error)
+{
+ LLINT_BEGIN();
+ LLINT_THROW(createReferenceError(exec, LLINT_OP_C(1).jsValue().toString(exec)->value(exec)));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_debug)
+{
+ LLINT_BEGIN();
+ int debugHookID = pc[1].u.operand;
+ int firstLine = pc[2].u.operand;
+ int lastLine = pc[3].u.operand;
+
+ globalData.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
+
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_profile_will_call)
+{
+ LLINT_BEGIN();
+ (*Profiler::enabledProfilerReference())->willExecute(exec, LLINT_OP(1).jsValue());
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
+{
+ LLINT_BEGIN();
+ (*Profiler::enabledProfilerReference())->didExecute(exec, LLINT_OP(1).jsValue());
+ LLINT_END();
+}
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
new file mode 100644
index 000000000..fe684d306
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntSlowPaths_h
+#define LLIntSlowPaths_h
+
+#include <wtf/Platform.h>
+#include <wtf/StdLibExtras.h>
+
+#if ENABLE(LLINT)
+
+namespace JSC {
+
+class ExecState;
+struct Instruction;
+
+namespace LLInt {
+
+typedef int64_t SlowPathReturnType;
+
+extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand);
+extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand);
+
+#define LLINT_SLOW_PATH_DECL(name) \
+ extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc)
+
+LLINT_SLOW_PATH_DECL(trace_prologue);
+LLINT_SLOW_PATH_DECL(trace_prologue_function_for_call);
+LLINT_SLOW_PATH_DECL(trace_prologue_function_for_construct);
+LLINT_SLOW_PATH_DECL(trace_arityCheck_for_call);
+LLINT_SLOW_PATH_DECL(trace_arityCheck_for_construct);
+LLINT_SLOW_PATH_DECL(trace);
+LLINT_SLOW_PATH_DECL(special_trace);
+LLINT_SLOW_PATH_DECL(entry_osr);
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_call);
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct);
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_call_arityCheck);
+LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct_arityCheck);
+LLINT_SLOW_PATH_DECL(loop_osr);
+LLINT_SLOW_PATH_DECL(replace);
+LLINT_SLOW_PATH_DECL(register_file_check);
+LLINT_SLOW_PATH_DECL(slow_path_call_arityCheck);
+LLINT_SLOW_PATH_DECL(slow_path_construct_arityCheck);
+LLINT_SLOW_PATH_DECL(slow_path_create_activation);
+LLINT_SLOW_PATH_DECL(slow_path_create_arguments);
+LLINT_SLOW_PATH_DECL(slow_path_create_this);
+LLINT_SLOW_PATH_DECL(slow_path_convert_this);
+LLINT_SLOW_PATH_DECL(slow_path_new_object);
+LLINT_SLOW_PATH_DECL(slow_path_new_array);
+LLINT_SLOW_PATH_DECL(slow_path_new_array_buffer);
+LLINT_SLOW_PATH_DECL(slow_path_new_regexp);
+LLINT_SLOW_PATH_DECL(slow_path_not);
+LLINT_SLOW_PATH_DECL(slow_path_eq);
+LLINT_SLOW_PATH_DECL(slow_path_neq);
+LLINT_SLOW_PATH_DECL(slow_path_stricteq);
+LLINT_SLOW_PATH_DECL(slow_path_nstricteq);
+LLINT_SLOW_PATH_DECL(slow_path_less);
+LLINT_SLOW_PATH_DECL(slow_path_lesseq);
+LLINT_SLOW_PATH_DECL(slow_path_greater);
+LLINT_SLOW_PATH_DECL(slow_path_greatereq);
+LLINT_SLOW_PATH_DECL(slow_path_pre_inc);
+LLINT_SLOW_PATH_DECL(slow_path_pre_dec);
+LLINT_SLOW_PATH_DECL(slow_path_post_inc);
+LLINT_SLOW_PATH_DECL(slow_path_post_dec);
+LLINT_SLOW_PATH_DECL(slow_path_to_jsnumber);
+LLINT_SLOW_PATH_DECL(slow_path_negate);
+LLINT_SLOW_PATH_DECL(slow_path_add);
+LLINT_SLOW_PATH_DECL(slow_path_mul);
+LLINT_SLOW_PATH_DECL(slow_path_sub);
+LLINT_SLOW_PATH_DECL(slow_path_div);
+LLINT_SLOW_PATH_DECL(slow_path_mod);
+LLINT_SLOW_PATH_DECL(slow_path_lshift);
+LLINT_SLOW_PATH_DECL(slow_path_rshift);
+LLINT_SLOW_PATH_DECL(slow_path_urshift);
+LLINT_SLOW_PATH_DECL(slow_path_bitand);
+LLINT_SLOW_PATH_DECL(slow_path_bitor);
+LLINT_SLOW_PATH_DECL(slow_path_bitxor);
+LLINT_SLOW_PATH_DECL(slow_path_bitnot);
+LLINT_SLOW_PATH_DECL(slow_path_check_has_instance);
+LLINT_SLOW_PATH_DECL(slow_path_instanceof);
+LLINT_SLOW_PATH_DECL(slow_path_typeof);
+LLINT_SLOW_PATH_DECL(slow_path_is_undefined);
+LLINT_SLOW_PATH_DECL(slow_path_is_boolean);
+LLINT_SLOW_PATH_DECL(slow_path_is_number);
+LLINT_SLOW_PATH_DECL(slow_path_is_string);
+LLINT_SLOW_PATH_DECL(slow_path_is_object);
+LLINT_SLOW_PATH_DECL(slow_path_is_function);
+LLINT_SLOW_PATH_DECL(slow_path_in);
+LLINT_SLOW_PATH_DECL(slow_path_resolve);
+LLINT_SLOW_PATH_DECL(slow_path_resolve_skip);
+LLINT_SLOW_PATH_DECL(slow_path_resolve_global);
+LLINT_SLOW_PATH_DECL(slow_path_resolve_global_dynamic);
+LLINT_SLOW_PATH_DECL(slow_path_resolve_for_resolve_global_dynamic);
+LLINT_SLOW_PATH_DECL(slow_path_resolve_base);
+LLINT_SLOW_PATH_DECL(slow_path_ensure_property_exists);
+LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base);
+LLINT_SLOW_PATH_DECL(slow_path_resolve_with_this);
+LLINT_SLOW_PATH_DECL(slow_path_get_by_id);
+LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length);
+LLINT_SLOW_PATH_DECL(slow_path_put_by_id);
+LLINT_SLOW_PATH_DECL(slow_path_del_by_id);
+LLINT_SLOW_PATH_DECL(slow_path_get_by_val);
+LLINT_SLOW_PATH_DECL(slow_path_get_argument_by_val);
+LLINT_SLOW_PATH_DECL(slow_path_get_by_pname);
+LLINT_SLOW_PATH_DECL(slow_path_put_by_val);
+LLINT_SLOW_PATH_DECL(slow_path_del_by_val);
+LLINT_SLOW_PATH_DECL(slow_path_put_by_index);
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter);
+LLINT_SLOW_PATH_DECL(slow_path_jmp_scopes);
+LLINT_SLOW_PATH_DECL(slow_path_jtrue);
+LLINT_SLOW_PATH_DECL(slow_path_jfalse);
+LLINT_SLOW_PATH_DECL(slow_path_jless);
+LLINT_SLOW_PATH_DECL(slow_path_jnless);
+LLINT_SLOW_PATH_DECL(slow_path_jgreater);
+LLINT_SLOW_PATH_DECL(slow_path_jngreater);
+LLINT_SLOW_PATH_DECL(slow_path_jlesseq);
+LLINT_SLOW_PATH_DECL(slow_path_jnlesseq);
+LLINT_SLOW_PATH_DECL(slow_path_jgreatereq);
+LLINT_SLOW_PATH_DECL(slow_path_jngreatereq);
+LLINT_SLOW_PATH_DECL(slow_path_switch_imm);
+LLINT_SLOW_PATH_DECL(slow_path_switch_char);
+LLINT_SLOW_PATH_DECL(slow_path_switch_string);
+LLINT_SLOW_PATH_DECL(slow_path_new_func);
+LLINT_SLOW_PATH_DECL(slow_path_new_func_exp);
+LLINT_SLOW_PATH_DECL(slow_path_call);
+LLINT_SLOW_PATH_DECL(slow_path_construct);
+LLINT_SLOW_PATH_DECL(slow_path_call_varargs);
+LLINT_SLOW_PATH_DECL(slow_path_call_eval);
+LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation);
+LLINT_SLOW_PATH_DECL(slow_path_tear_off_arguments);
+LLINT_SLOW_PATH_DECL(slow_path_strcat);
+LLINT_SLOW_PATH_DECL(slow_path_to_primitive);
+LLINT_SLOW_PATH_DECL(slow_path_get_pnames);
+LLINT_SLOW_PATH_DECL(slow_path_next_pname);
+LLINT_SLOW_PATH_DECL(slow_path_push_scope);
+LLINT_SLOW_PATH_DECL(slow_path_pop_scope);
+LLINT_SLOW_PATH_DECL(slow_path_push_new_scope);
+LLINT_SLOW_PATH_DECL(slow_path_throw);
+LLINT_SLOW_PATH_DECL(slow_path_throw_reference_error);
+LLINT_SLOW_PATH_DECL(slow_path_debug);
+LLINT_SLOW_PATH_DECL(slow_path_profile_will_call);
+LLINT_SLOW_PATH_DECL(slow_path_profile_did_call);
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
+
+#endif // LLIntSlowPaths_h
+
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
new file mode 100644
index 000000000..ddb0c46c2
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntThunks.h"
+
+#if ENABLE(LLINT)
+
+#include "JSInterfaceJIT.h"
+#include "LinkBuffer.h"
+#include "LowLevelInterpreter.h"
+
+namespace JSC { namespace LLInt {
+
+static MacroAssemblerCodeRef generateThunkWithJumpTo(JSGlobalData* globalData, void (*target)())
+{
+ JSInterfaceJIT jit;
+
+ // FIXME: there's probably a better way to do it on X86, but I'm not sure I care.
+ jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0);
+ jit.jump(JSInterfaceJIT::regT0);
+
+ LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
+ return patchBuffer.finalizeCode();
+}
+
+MacroAssemblerCodeRef functionForCallEntryThunkGenerator(JSGlobalData* globalData)
+{
+ return generateThunkWithJumpTo(globalData, llint_function_for_call_prologue);
+}
+
+MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(JSGlobalData* globalData)
+{
+ return generateThunkWithJumpTo(globalData, llint_function_for_construct_prologue);
+}
+
+MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(JSGlobalData* globalData)
+{
+ return generateThunkWithJumpTo(globalData, llint_function_for_call_arity_check);
+}
+
+MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(JSGlobalData* globalData)
+{
+ return generateThunkWithJumpTo(globalData, llint_function_for_construct_arity_check);
+}
+
+MacroAssemblerCodeRef evalEntryThunkGenerator(JSGlobalData* globalData)
+{
+ return generateThunkWithJumpTo(globalData, llint_eval_prologue);
+}
+
+MacroAssemblerCodeRef programEntryThunkGenerator(JSGlobalData* globalData)
+{
+ return generateThunkWithJumpTo(globalData, llint_program_prologue);
+}
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.h b/Source/JavaScriptCore/llint/LLIntThunks.h
new file mode 100644
index 000000000..ee119e0b9
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntThunks.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntThunks_h
+#define LLIntThunks_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(LLINT)
+
+#include "MacroAssemblerCodeRef.h"
+
+namespace JSC {
+
+class JSGlobalData;
+
+namespace LLInt {
+
+MacroAssemblerCodeRef functionForCallEntryThunkGenerator(JSGlobalData*);
+MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(JSGlobalData*);
+MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(JSGlobalData*);
+MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(JSGlobalData*);
+MacroAssemblerCodeRef evalEntryThunkGenerator(JSGlobalData*);
+MacroAssemblerCodeRef programEntryThunkGenerator(JSGlobalData*);
+
+} } // namespace JSC::LLInt
+
+#endif // ENABLE(LLINT)
+
+#endif // LLIntThunks_h
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
new file mode 100644
index 000000000..a9f83f680
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -0,0 +1,2390 @@
+# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+
+# Crash course on the language that this is written in (which I just call
+# "assembly" even though it's more than that):
+#
+# - Mostly gas-style operand ordering. The last operand tends to be the
+# destination. So "a := b" is written as "mov b, a". But unlike gas,
+# comparisons are in-order, so "if (a < b)" is written as
+# "bilt a, b, ...".
+#
+# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
+# Currently this is just 32-bit so "i" and "p" are interchangeable
+# except when an op supports one but not the other.
+#
+# - In general, valid operands for macro invocations and instructions are
+# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
+# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
+# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
+# macros as operands. Instructions cannot take anonymous macros.
+#
+# - Labels must have names that begin with either "_" or ".". A "." label
+# is local and gets renamed before code gen to minimize namespace
+# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
+# may or may not be removed during code gen depending on whether the asm
+# conventions for C name mangling on the target platform mandate a "_"
+# prefix.
+#
+# - A "macro" is a lambda expression, which may be either anonymous or
+# named. But this has caveats. "macro" can take zero or more arguments,
+# which may be macros or any valid operands, but it can only return
+# code. But you can do Turing-complete things via continuation passing
+# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
+# that, since you'll just crash the assembler.
+#
+# - An "if" is a conditional on settings. Any identifier supplied in the
+# predicate of an "if" is assumed to be a #define that is available
+# during code gen. So you can't use "if" for computation in a macro, but
+# you can use it to select different pieces of code for different
+# platforms.
+#
+# - Arguments to macros follow lexical scoping rather than dynamic scoping.
+# Const's also follow lexical scoping and may override (hide) arguments
+# or other consts. All variables (arguments and constants) can be bound
+# to operands. Additionally, arguments (but not constants) can be bound
+# to macros.
+
+
+# Below we have a bunch of constant declarations. Each constant must have
+# a corresponding ASSERT() in LLIntData.cpp.
+
+# These declarations must match interpreter/RegisterFile.h.
+const CallFrameHeaderSize = 48
+const ArgumentCount = -48
+const CallerFrame = -40
+const Callee = -32
+const ScopeChain = -24
+const ReturnPC = -16
+const CodeBlock = -8
+
+const ThisArgumentOffset = -CallFrameHeaderSize - 8
+
+# Declare some aliases for the registers we will use.
+const PC = t4
+
+# Offsets needed for reasoning about value representation.
+if BIG_ENDIAN
+ const TagOffset = 0
+ const PayloadOffset = 4
+else
+ const TagOffset = 4
+ const PayloadOffset = 0
+end
+
+# Value representation constants.
+const Int32Tag = -1
+const BooleanTag = -2
+const NullTag = -3
+const UndefinedTag = -4
+const CellTag = -5
+const EmptyValueTag = -6
+const DeletedValueTag = -7
+const LowestTag = DeletedValueTag
+
+# Type constants.
+const StringType = 5
+const ObjectType = 13
+
+# Type flags constants.
+const MasqueradesAsUndefined = 1
+const ImplementsHasInstance = 2
+const ImplementsDefaultHasInstance = 8
+
+# Heap allocation constants.
+const JSFinalObjectSizeClassIndex = 3
+
+# Bytecode operand constants.
+const FirstConstantRegisterIndex = 0x40000000
+
+# Code type constants.
+const GlobalCode = 0
+const EvalCode = 1
+const FunctionCode = 2
+
+# The interpreter steals the tag word of the argument count.
+const LLIntReturnPC = ArgumentCount + TagOffset
+
+# This must match wtf/Vector.h.
+const VectorSizeOffset = 0
+const VectorBufferOffset = 4
+
+# String flags.
+const HashFlags8BitBuffer = 64
+
+# Utilities
+macro crash()
+ storei 0, 0xbbadbeef[]
+ move 0, t0
+ call t0
+end
+
+macro assert(assertion)
+ if ASSERT_ENABLED
+ assertion(.ok)
+ crash()
+ .ok:
+ end
+end
+
+macro preserveReturnAddressAfterCall(destinationRegister)
+ if ARMv7
+ move lr, destinationRegister
+ elsif X86
+ pop destinationRegister
+ else
+ error
+ end
+end
+
+macro restoreReturnAddressBeforeReturn(sourceRegister)
+ if ARMv7
+ move sourceRegister, lr
+ elsif X86
+ push sourceRegister
+ else
+ error
+ end
+end
+
+macro dispatch(advance)
+ addp advance * 4, PC
+ jmp [PC]
+end
+
+macro dispatchBranchWithOffset(pcOffset)
+ lshifti 2, pcOffset
+ addp pcOffset, PC
+ jmp [PC]
+end
+
+macro dispatchBranch(pcOffset)
+ loadi pcOffset, t0
+ dispatchBranchWithOffset(t0)
+end
+
+macro dispatchAfterCall()
+ loadi ArgumentCount + TagOffset[cfr], PC
+ jmp [PC]
+end
+
+macro cCall2(function, arg1, arg2)
+ if ARMv7
+ move arg1, t0
+ move arg2, t1
+ elsif X86
+ poke arg1, 0
+ poke arg2, 1
+ else
+ error
+ end
+ call function
+end
+
+# This barely works. arg3 and arg4 should probably be immediates.
+macro cCall4(function, arg1, arg2, arg3, arg4)
+ if ARMv7
+ move arg1, t0
+ move arg2, t1
+ move arg3, t2
+ move arg4, t3
+ elsif X86
+ poke arg1, 0
+ poke arg2, 1
+ poke arg3, 2
+ poke arg4, 3
+ else
+ error
+ end
+ call function
+end
+
+macro callSlowPath(slow_path)
+ cCall2(slow_path, cfr, PC)
+ move t0, PC
+ move t1, cfr
+end
+
+# Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
+# should be an immediate integer - any integer you like; use it to identify the place you're
+# debugging from. operand should likewise be an immediate, and should identify the operand
+# in the instruction stream you'd like to print out.
+macro traceOperand(fromWhere, operand)
+ cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+ move t0, PC
+ move t1, cfr
+end
+
+# Debugging operation if you'd like to print the value of an operand in the instruction
+# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
+# value.
+macro traceValue(fromWhere, operand)
+ cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+ move t0, PC
+ move t1, cfr
+end
+
+macro traceExecution()
+ if EXECUTION_TRACING
+ callSlowPath(_llint_trace)
+ end
+end
+
+# Call a slow_path for call opcodes.
+macro callCallSlowPath(advance, slow_path, action)
+ addp advance * 4, PC, t0
+ storep t0, ArgumentCount + TagOffset[cfr]
+ cCall2(slow_path, cfr, PC)
+ move t1, cfr
+ action(t0)
+end
+
+macro slowPathForCall(advance, slow_path)
+ callCallSlowPath(
+ advance,
+ slow_path,
+ macro (callee)
+ call callee
+ dispatchAfterCall()
+ end)
+end
+
+macro checkSwitchToJIT(increment, action)
+ if JIT_ENABLED
+ loadp CodeBlock[cfr], t0
+ baddis increment, CodeBlock::m_llintExecuteCounter[t0], .continue
+ action()
+ .continue:
+ end
+end
+
+macro checkSwitchToJITForLoop()
+ checkSwitchToJIT(
+ 1,
+ macro ()
+ storei PC, ArgumentCount + TagOffset[cfr]
+ cCall2(_llint_loop_osr, cfr, PC)
+ move t1, cfr
+ btpz t0, .recover
+ jmp t0
+ .recover:
+ loadi ArgumentCount + TagOffset[cfr], PC
+ end)
+end
+
+macro checkSwitchToJITForEpilogue()
+ checkSwitchToJIT(
+ 10,
+ macro ()
+ callSlowPath(_llint_replace)
+ end)
+end
+
+macro assertNotConstant(index)
+ assert(macro (ok) bilt index, FirstConstantRegisterIndex, ok end)
+end
+
+# The loadConstantOrVariable* family reads an operand (virtual register index)
+# as an 8-byte tag/payload JSValue pair — from the call frame for variables, or
+# from CodeBlock::m_constantRegisters when index >= FirstConstantRegisterIndex.
+#
+# Index, tag, and payload must be different registers. Index is not
+# changed.
+macro loadConstantOrVariable(index, tag, payload)
+ bigteq index, FirstConstantRegisterIndex, .constant
+ loadi TagOffset[cfr, index, 8], tag
+ loadi PayloadOffset[cfr, index, 8], payload
+ jmp .done
+.constant:
+ loadp CodeBlock[cfr], payload
+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
+ # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+ # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+ loadp TagOffset[payload, index, 8], tag
+ loadp PayloadOffset[payload, index, 8], payload
+.done:
+end
+
+# Index and payload may be the same register. Index may be clobbered.
+macro loadConstantOrVariable2Reg(index, tag, payload)
+ bigteq index, FirstConstantRegisterIndex, .constant
+ loadi TagOffset[cfr, index, 8], tag
+ loadi PayloadOffset[cfr, index, 8], payload
+ jmp .done
+.constant:
+ loadp CodeBlock[cfr], tag
+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[tag], tag
+ # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+ # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+ # Compute the element address by hand since index doubles as scratch here.
+ lshifti 3, index
+ addp index, tag
+ loadp PayloadOffset[tag], payload
+ loadp TagOffset[tag], tag
+.done:
+end
+
+# Like loadConstantOrVariable, but instead of loading the tag it invokes
+# tagCheck() on the tag's memory operand, letting callers fold the tag test
+# into the load (see loadConstantOrVariablePayload below).
+macro loadConstantOrVariablePayloadTagCustom(index, tagCheck, payload)
+ bigteq index, FirstConstantRegisterIndex, .constant
+ tagCheck(TagOffset[cfr, index, 8])
+ loadi PayloadOffset[cfr, index, 8], payload
+ jmp .done
+.constant:
+ loadp CodeBlock[cfr], payload
+ loadp CodeBlock::m_constantRegisters + VectorBufferOffset[payload], payload
+ # There is a bit of evil here: if the index contains a value >= FirstConstantRegisterIndex,
+ # then value << 3 will be equal to (value - FirstConstantRegisterIndex) << 3.
+ tagCheck(TagOffset[payload, index, 8])
+ loadp PayloadOffset[payload, index, 8], payload
+.done:
+end
+
+# Index and payload must be different registers. Index is not mutated. Use
+# this if you know what the tag of the variable should be. Doing the tag
+# test as part of loading the variable reduces register use, but may not
+# be faster than doing loadConstantOrVariable followed by a branch on the
+# tag.
+macro loadConstantOrVariablePayload(index, expectedTag, payload, slow)
+ loadConstantOrVariablePayloadTagCustom(
+ index,
+ macro (actualTag) bineq actualTag, expectedTag, slow end,
+ payload)
+end
+
+# Payload-only load with no tag check at all (the empty tagCheck macro).
+macro loadConstantOrVariablePayloadUnchecked(index, payload)
+ loadConstantOrVariablePayloadTagCustom(
+ index,
+ macro (actualTag) end,
+ payload)
+end
+
+macro writeBarrier(tag, payload)
+ # Nothing to do, since we don't have a generational or incremental collector.
+end
+
+# Records a tag/payload pair into a ValueProfile bucket so the JITs can see
+# the value shapes the LLInt observed; compiled out when the JIT is disabled.
+macro valueProfile(tag, payload, profile)
+ if JIT_ENABLED
+ storei tag, ValueProfile::m_buckets + TagOffset[profile]
+ storei payload, ValueProfile::m_buckets + PayloadOffset[profile]
+ end
+end
+
+
+# Indicate the beginning of LLInt.
+_llint_begin:
+ crash()
+
+
+# Entrypoints into the interpreter
+
+# CodeBlock getter for a function invoked via 'call': chase
+# callee -> executable -> codeBlockForCall.
+macro functionForCallCodeBlockGetter(targetRegister)
+ loadp Callee[cfr], targetRegister
+ loadp JSFunction::m_executable[targetRegister], targetRegister
+ loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
+end
+
+# CodeBlock getter for a function invoked via 'construct'.
+macro functionForConstructCodeBlockGetter(targetRegister)
+ loadp Callee[cfr], targetRegister
+ loadp JSFunction::m_executable[targetRegister], targetRegister
+ loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
+end
+
+# For program/eval code the caller already stored the CodeBlock in the frame.
+macro notFunctionCodeBlockGetter(targetRegister)
+ loadp CodeBlock[cfr], targetRegister
+end
+
+# Function frames must record their CodeBlock in the frame header themselves.
+macro functionCodeBlockSetter(sourceRegister)
+ storep sourceRegister, CodeBlock[cfr]
+end
+
+macro notFunctionCodeBlockSetter(sourceRegister)
+ # Nothing to do!
+end
+
+# Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
+# in t1. May also trigger prologue entry OSR.
+macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
+ preserveReturnAddressAfterCall(t2)
+
+ # Set up the call frame and check if we should OSR.
+ storep t2, ReturnPC[cfr]
+ if EXECUTION_TRACING
+ callSlowPath(traceSlowPath)
+ end
+ codeBlockGetter(t1)
+ if JIT_ENABLED
+ # Count 5 per entry; when the counter trips, ask the C++ OSR path for a
+ # JIT entrypoint (returned in t0; 0 means "stay in the interpreter").
+ baddis 5, CodeBlock::m_llintExecuteCounter[t1], .continue
+ cCall2(osrSlowPath, cfr, PC)
+ move t1, cfr
+ btpz t0, .recover
+ # Tail into the JIT code with the original return address restored.
+ loadp ReturnPC[cfr], t2
+ restoreReturnAddressBeforeReturn(t2)
+ jmp t0
+ .recover:
+ # The slow path clobbered t1; re-fetch the CodeBlock.
+ codeBlockGetter(t1)
+ .continue:
+ end
+ codeBlockSetter(t1)
+
+ # Set up the PC.
+ loadp CodeBlock::m_instructions[t1], t0
+ loadp CodeBlock::Instructions::m_instructions + VectorBufferOffset[t0], PC
+end
+
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+# Must call dispatch(0) after calling this.
+macro functionInitialization(profileArgSkip)
+ if JIT_ENABLED
+ # Profile the arguments. Unfortunately, we have no choice but to do this. This
+ # code is pretty horrendous because of the difference in ordering between
+ # arguments and value profiles, the desire to have a simple loop-down-to-zero
+ # loop, and the desire to use only three registers so as to preserve the PC and
+ # the code block. It is likely that this code should be rewritten in a more
+ # optimal way for architectures that have more than five registers available
+ # for arbitrary use in the interpreter.
+ loadi CodeBlock::m_numParameters[t1], t0
+ addi -profileArgSkip, t0 # Use addi because that's what has the peephole
+ assert(macro (ok) bigteq t0, 0, ok end)
+ btiz t0, .argumentProfileDone
+ loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
+ muli sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
+ negi t0
+ # t0 now counts up (by 8-byte argument slots) toward zero; t3 walks the
+ # profile array downward from one-past-the-last profile.
+ lshifti 3, t0
+ addp t2, t3
+ .argumentProfileLoop:
+ loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
+ subp sizeof ValueProfile, t3
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
+ loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
+ baddinz 8, t0, .argumentProfileLoop
+ .argumentProfileDone:
+ end
+
+ # Check stack height.
+ loadi CodeBlock::m_numCalleeRegisters[t1], t0
+ loadp CodeBlock::m_globalData[t1], t2
+ loadp JSGlobalData::interpreter[t2], t2 # FIXME: Can get to the RegisterFile from the JITStackFrame
+ lshifti 3, t0
+ addp t0, cfr, t0
+ bpaeq Interpreter::m_registerFile + RegisterFile::m_end[t2], t0, .stackHeightOK
+
+ # Stack height check failed - need to call a slow_path.
+ callSlowPath(_llint_register_file_check)
+.stackHeightOK:
+end
+
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+macro functionArityCheck(doneLabel, slow_path)
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
+ cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
+ move t1, cfr
+ btiz t0, .continue
+ # Arity fixup raised an exception: jump to the throw target recorded in the
+ # global data, using the unwound frame it designates.
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+.continue:
+ # Reload CodeBlock and PC, since the slow_path clobbered it.
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_instructions[t1], t0
+ loadp CodeBlock::Instructions::m_instructions + VectorBufferOffset[t0], PC
+ jmp doneLabel
+end
+
+# Entry for top-level program code: no arity check or argument profiling needed.
+_llint_program_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
+
+
+# Entry for eval code: identical shape to program entry.
+_llint_eval_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
+
+
+# Entry for a function call whose arguments already match the arity.
+_llint_function_for_call_prologue:
+ prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
+.functionForCallBegin:
+ functionInitialization(0)
+ dispatch(0)
+
+
+# Entry for a construct invocation; skips profiling of the 'this' slot (arg 1).
+_llint_function_for_construct_prologue:
+ prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
+.functionForConstructBegin:
+ functionInitialization(1)
+ dispatch(0)
+
+
+# Arity-check entries: fix up the argument count, then fall into the
+# corresponding Begin label above.
+_llint_function_for_call_arity_check:
+ prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
+ functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)
+
+
+_llint_function_for_construct_arity_check:
+ prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
+ functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)
+
+# Instruction implementations
+
+# op_enter: initialize the frame's first m_numVars locals to undefined
+# (UndefinedTag / 0 payload), counting down from m_numVars - 1.
+_llint_op_enter:
+ traceExecution()
+ loadp CodeBlock[cfr], t2
+ loadi CodeBlock::m_numVars[t2], t2
+ btiz t2, .opEnterDone
+ move UndefinedTag, t0
+ move 0, t1
+.opEnterLoop:
+ subi 1, t2
+ storei t0, TagOffset[cfr, t2, 8]
+ storei t1, PayloadOffset[cfr, t2, 8]
+ btinz t2, .opEnterLoop
+.opEnterDone:
+ dispatch(1)
+
+
+# op_create_activation: allocate the activation via the slow path, but only if
+# the target register still holds the empty value (i.e. not created yet).
+_llint_op_create_activation:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
+ callSlowPath(_llint_slow_path_create_activation)
+.opCreateActivationDone:
+ dispatch(2)
+
+
+# op_init_lazy_reg: mark a register as lazily-initialized by storing the
+# empty value into it.
+_llint_op_init_lazy_reg:
+ traceExecution()
+ loadi 4[PC], t0
+ storei EmptyValueTag, TagOffset[cfr, t0, 8]
+ storei 0, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+
+# op_create_arguments: same lazy pattern as create_activation — only call the
+# slow path if the arguments register is still empty.
+_llint_op_create_arguments:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
+ callSlowPath(_llint_slow_path_create_arguments)
+.opCreateArgumentsDone:
+ dispatch(2)
+
+
+# Inline fast-path allocator: pops a cell off the MarkedAllocator free list for
+# the given size class and initializes its header (class info, structure, null
+# inheritorID, inline property storage right after the JSObject). Branches to
+# slowCase if the free list is empty or inline allocation is disabled.
+macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, scratch1, scratch2, slowCase)
+ if ALWAYS_ALLOCATE_SLOW
+ jmp slowCase
+ else
+ const offsetOfMySizeClass =
+ JSGlobalData::heap +
+ Heap::m_objectSpace +
+ MarkedSpace::m_normalSpace +
+ MarkedSpace::Subspace::preciseAllocators +
+ sizeClassIndex * sizeof MarkedAllocator
+
+ # FIXME: we can get the global data in one load from the stack.
+ loadp CodeBlock[cfr], scratch1
+ loadp CodeBlock::m_globalData[scratch1], scratch1
+
+ # Get the object from the free list.
+ loadp offsetOfMySizeClass + MarkedAllocator::m_firstFreeCell[scratch1], result
+ btpz result, slowCase
+
+ # Remove the object from the free list.
+ # A free cell's first word links to the next free cell.
+ loadp [result], scratch2
+ storep scratch2, offsetOfMySizeClass + MarkedAllocator::m_firstFreeCell[scratch1]
+
+ # Initialize the object.
+ loadp classInfoOffset[scratch1], scratch2
+ storep scratch2, [result]
+ storep structure, JSCell::m_structure[result]
+ storep 0, JSObject::m_inheritorID[result]
+ # Property storage sits immediately after the JSObject itself.
+ addp sizeof JSObject, result, scratch1
+ storep scratch1, JSObject::m_propertyStorage[result]
+ end
+end
+
+# op_create_this: fast path requires the callee-prototype operand to be an
+# object cell with a cached inheritorID structure; then inline-allocate the
+# new final object. Anything else falls to the slow path.
+_llint_op_create_this:
+ traceExecution()
+ loadi 8[PC], t0
+ assertNotConstant(t0)
+ bineq TagOffset[cfr, t0, 8], CellTag, .opCreateThisSlow
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadp JSCell::m_structure[t0], t1
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t1], ObjectType, .opCreateThisSlow
+ loadp JSObject::m_inheritorID[t0], t2
+ btpz t2, .opCreateThisSlow
+ allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t2, t0, t1, t3, .opCreateThisSlow)
+ loadi 4[PC], t1
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opCreateThisSlow:
+ callSlowPath(_llint_slow_path_create_this)
+ dispatch(3)
+
+
+# op_get_callee: store the current frame's Callee cell into the destination.
+_llint_op_get_callee:
+ traceExecution()
+ loadi 4[PC], t0
+ loadp PayloadOffset + Callee[cfr], t1
+ storei CellTag, TagOffset[cfr, t0, 8]
+ storei t1, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+
+# op_convert_this: fast path when 'this' is already an object cell; otherwise
+# (primitives, non-object cells) the slow path performs the conversion.
+_llint_op_convert_this:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], CellTag, .opConvertThisSlow
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadp JSCell::m_structure[t0], t0
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
+ dispatch(2)
+
+.opConvertThisSlow:
+ callSlowPath(_llint_slow_path_convert_this)
+ dispatch(2)
+
+
+# op_new_object: inline-allocate an empty final object using the global
+# object's cached empty-object structure.
+_llint_op_new_object:
+ traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_emptyObjectStructure[t0], t1
+ allocateBasicJSObject(JSFinalObjectSizeClassIndex, JSGlobalData::jsFinalObjectClassInfo, t1, t0, t2, t3, .opNewObjectSlow)
+ loadi 4[PC], t1
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ dispatch(2)
+
+.opNewObjectSlow:
+ callSlowPath(_llint_slow_path_new_object)
+ dispatch(2)
+
+
+# Array and regexp creation are handled entirely in C++.
+_llint_op_new_array:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_array)
+ dispatch(4)
+
+
+_llint_op_new_array_buffer:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_array_buffer)
+ dispatch(4)
+
+
+_llint_op_new_regexp:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_regexp)
+ dispatch(3)
+
+
+# op_mov: copy a value (variable or constant) into the destination register.
+_llint_op_mov:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t0
+ loadConstantOrVariable(t1, t2, t3)
+ storei t2, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ dispatch(3)
+
+
+# op_not: fast path only for booleans — flip the payload's low bit.
+_llint_op_not:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ loadConstantOrVariable(t0, t2, t3)
+ bineq t2, BooleanTag, .opNotSlow
+ xori 1, t3
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opNotSlow:
+ callSlowPath(_llint_slow_path_not)
+ dispatch(3)
+
+
+# op_eq: fast path when both tags match and the operands are neither cells
+# (string equality etc.) nor doubles (tags below LowestTag) — then abstract
+# equality degenerates to a payload compare.
+_llint_op_eq:
+ traceExecution()
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, t3, .opEqSlow
+ bieq t2, CellTag, .opEqSlow
+ bib t2, LowestTag, .opEqSlow
+ loadi 4[PC], t2
+ cieq t0, t1, t0
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+.opEqSlow:
+ callSlowPath(_llint_slow_path_eq)
+ dispatch(4)
+
+
+# op_eq_null: a cell is "== null" iff its structure has the
+# MasqueradesAsUndefined flag; a non-cell iff its tag is Null or Undefined.
+_llint_op_eq_null:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t3
+ assertNotConstant(t0)
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ bineq t1, CellTag, .opEqNullImmediate
+ loadp JSCell::m_structure[t0], t1
+ tbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
+ jmp .opEqNullNotImmediate
+.opEqNullImmediate:
+ cieq t1, NullTag, t2
+ cieq t1, UndefinedTag, t1
+ ori t2, t1
+.opEqNullNotImmediate:
+ storei BooleanTag, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+
+# op_neq: same fast-path conditions as op_eq, with an inverted compare.
+_llint_op_neq:
+ traceExecution()
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, t3, .opNeqSlow
+ bieq t2, CellTag, .opNeqSlow
+ bib t2, LowestTag, .opNeqSlow
+ loadi 4[PC], t2
+ cineq t0, t1, t0
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+.opNeqSlow:
+ callSlowPath(_llint_slow_path_neq)
+ dispatch(4)
+
+
+# op_neq_null: logical negation of op_eq_null (tbz/cineq/andi instead of
+# tbnz/cieq/ori).
+_llint_op_neq_null:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t3
+ assertNotConstant(t0)
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ bineq t1, CellTag, .opNeqNullImmediate
+ loadp JSCell::m_structure[t0], t1
+ tbz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, t1
+ jmp .opNeqNullNotImmediate
+.opNeqNullImmediate:
+ cineq t1, NullTag, t2
+ cineq t1, UndefinedTag, t1
+ andi t2, t1
+.opNeqNullNotImmediate:
+ storei BooleanTag, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+
+# Shared body for op_stricteq/op_nstricteq. Fast path: tags equal, not doubles
+# (tag below LowestTag), and — for cells — neither operand is a string (string
+# strict equality needs a character compare in C++). Then strict equality is a
+# payload compare via equalityOperation.
+macro strictEq(equalityOperation, slow_path)
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, t3, .slow
+ bib t2, LowestTag, .slow
+ bineq t2, CellTag, .notString
+ loadp JSCell::m_structure[t0], t2
+ loadp JSCell::m_structure[t1], t3
+ bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
+ bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
+.notString:
+ loadi 4[PC], t2
+ equalityOperation(t0, t1, t0)
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(4)
+
+.slow:
+ callSlowPath(slow_path)
+ dispatch(4)
+end
+
+_llint_op_stricteq:
+ traceExecution()
+ strictEq(macro (left, right, result) cieq left, right, result end, _llint_slow_path_stricteq)
+
+
+_llint_op_nstricteq:
+ traceExecution()
+ strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
+
+
+# The relational comparisons are implemented entirely in C++ slow paths.
+_llint_op_less:
+ traceExecution()
+ callSlowPath(_llint_slow_path_less)
+ dispatch(4)
+
+
+_llint_op_lesseq:
+ traceExecution()
+ callSlowPath(_llint_slow_path_lesseq)
+ dispatch(4)
+
+
+_llint_op_greater:
+ traceExecution()
+ callSlowPath(_llint_slow_path_greater)
+ dispatch(4)
+
+
+_llint_op_greatereq:
+ traceExecution()
+ callSlowPath(_llint_slow_path_greatereq)
+ dispatch(4)
+
+
+# op_pre_inc: fast path for int32 values; baddio branches to the slow path on
+# signed overflow (the slow path handles the promotion to double).
+_llint_op_pre_inc:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreIncSlow
+ loadi PayloadOffset[cfr, t0, 8], t1
+ baddio 1, t1, .opPreIncSlow
+ storei t1, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+.opPreIncSlow:
+ callSlowPath(_llint_slow_path_pre_inc)
+ dispatch(2)
+
+
+# op_pre_dec: same as pre_inc with bsubio for the overflow-checked decrement.
+_llint_op_pre_dec:
+ traceExecution()
+ loadi 4[PC], t0
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPreDecSlow
+ loadi PayloadOffset[cfr, t0, 8], t1
+ bsubio 1, t1, .opPreDecSlow
+ storei t1, PayloadOffset[cfr, t0, 8]
+ dispatch(2)
+
+.opPreDecSlow:
+ callSlowPath(_llint_slow_path_post_inc)
+ dispatch(2)
+
+
+# op_post_inc: stores the old value in dst and the incremented value back in
+# src. If dst == src the whole operation is a no-op (nothing is stored).
+_llint_op_post_inc:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostIncSlow
+ bieq t0, t1, .opPostIncDone
+ loadi PayloadOffset[cfr, t0, 8], t2
+ move t2, t3
+ baddio 1, t3, .opPostIncSlow
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+.opPostIncDone:
+ dispatch(3)
+
+.opPostIncSlow:
+ callSlowPath(_llint_slow_path_post_inc)
+ dispatch(3)
+
+
+# op_post_dec: mirror of post_inc with an overflow-checked subtract.
+_llint_op_post_dec:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t0, 8], Int32Tag, .opPostDecSlow
+ bieq t0, t1, .opPostDecDone
+ loadi PayloadOffset[cfr, t0, 8], t2
+ move t2, t3
+ bsubio 1, t3, .opPostDecSlow
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+.opPostDecDone:
+ dispatch(3)
+
+.opPostDecSlow:
+ callSlowPath(_llint_slow_path_post_dec)
+ dispatch(3)
+
+
+# op_to_jsnumber: pass values that are already numeric (int32, or any tag
+# below EmptyValueTag, i.e. a double) straight through; otherwise convert in
+# the slow path.
+_llint_op_to_jsnumber:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ loadConstantOrVariable(t0, t2, t3)
+ bieq t2, Int32Tag, .opToJsnumberIsInt
+ biaeq t2, EmptyValueTag, .opToJsnumberSlow
+.opToJsnumberIsInt:
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opToJsnumberSlow:
+ callSlowPath(_llint_slow_path_to_jsnumber)
+ dispatch(3)
+
+
+# op_negate: for int32, negate the payload unless it is 0 or would overflow
+# (the btiz mask catches both 0 and INT_MIN). For doubles (tag <= LowestTag),
+# flip the sign bit in the high word.
+_llint_op_negate:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t3
+ loadConstantOrVariable(t0, t1, t2)
+ bineq t1, Int32Tag, .opNegateSrcNotInt
+ btiz t2, 0x7fffffff, .opNegateSlow
+ negi t2
+ storei Int32Tag, TagOffset[cfr, t3, 8]
+ storei t2, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+.opNegateSrcNotInt:
+ bia t1, LowestTag, .opNegateSlow
+ xori 0x80000000, t1
+ storei t1, TagOffset[cfr, t3, 8]
+ storei t2, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+.opNegateSlow:
+ callSlowPath(_llint_slow_path_negate)
+ dispatch(3)
+
+
+# Shared body for the arithmetic opcodes. Handles the int32 x int32 case via
+# integerOperationAndStore, and the mixed int/double cases by converting both
+# operands to doubles (ci2d for ints, fii2d to reassemble a double from its
+# payload/tag word pair) and running doubleOperation. Tags above LowestTag that
+# are not Int32Tag are non-numeric and go to the slow path.
+macro binaryOpCustomStore(integerOperationAndStore, doubleOperation, slow_path)
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t2, Int32Tag, .op1NotInt
+ bineq t3, Int32Tag, .op2NotInt
+ loadi 4[PC], t2
+ integerOperationAndStore(t3, t1, t0, .slow, t2)
+ dispatch(5)
+
+.op1NotInt:
+ # First operand is definitely not an int, the second operand could be anything.
+ bia t2, LowestTag, .slow
+ bib t3, LowestTag, .op1NotIntOp2Double
+ bineq t3, Int32Tag, .slow
+ ci2d t1, ft1
+ jmp .op1NotIntReady
+.op1NotIntOp2Double:
+ fii2d t1, t3, ft1
+.op1NotIntReady:
+ loadi 4[PC], t1
+ fii2d t0, t2, ft0
+ doubleOperation(ft1, ft0)
+ # Store the raw double directly over the 8-byte register slot.
+ stored ft0, [cfr, t1, 8]
+ dispatch(5)
+
+.op2NotInt:
+ # First operand is definitely an int, the second operand is definitely not.
+ loadi 4[PC], t2
+ bia t3, LowestTag, .slow
+ ci2d t0, ft0
+ fii2d t1, t3, ft1
+ doubleOperation(ft1, ft0)
+ stored ft0, [cfr, t2, 8]
+ dispatch(5)
+
+.slow:
+ callSlowPath(slow_path)
+ dispatch(5)
+end
+
+# Convenience wrapper for ops whose integer case just computes into 'right'
+# and stores an ordinary Int32-tagged result.
+macro binaryOp(integerOperation, doubleOperation, slow_path)
+ binaryOpCustomStore(
+ macro (int32Tag, left, right, slow, index)
+ integerOperation(left, right, slow)
+ storei int32Tag, TagOffset[cfr, index, 8]
+ storei right, PayloadOffset[cfr, index, 8]
+ end,
+ doubleOperation, slow_path)
+end
+
+_llint_op_add:
+ traceExecution()
+ binaryOp(
+ macro (left, right, slow) baddio left, right, slow end,
+ macro (left, right) addd left, right end,
+ _llint_slow_path_add)
+
+
+# op_mul's integer case must detect -0: if the product is zero and either
+# operand was negative, the true result is -0.0, which int32 cannot encode.
+_llint_op_mul:
+ traceExecution()
+ binaryOpCustomStore(
+ macro (int32Tag, left, right, slow, index)
+ const scratch = int32Tag # We know that we can reuse the int32Tag register since it has a constant.
+ move right, scratch
+ bmulio left, scratch, slow
+ btinz scratch, .done
+ bilt left, 0, slow
+ bilt right, 0, slow
+ .done:
+ storei Int32Tag, TagOffset[cfr, index, 8]
+ storei scratch, PayloadOffset[cfr, index, 8]
+ end,
+ macro (left, right) muld left, right end,
+ _llint_slow_path_mul)
+
+
+_llint_op_sub:
+ traceExecution()
+ binaryOp(
+ macro (left, right, slow) bsubio left, right, slow end,
+ macro (left, right) subd left, right end,
+ _llint_slow_path_sub)
+
+
+# op_div always divides in double; bcd2i stores an int32 result only when the
+# quotient converts back exactly, otherwise the raw double is stored.
+_llint_op_div:
+ traceExecution()
+ binaryOpCustomStore(
+ macro (int32Tag, left, right, slow, index)
+ ci2d left, ft0
+ ci2d right, ft1
+ divd ft0, ft1
+ bcd2i ft1, right, .notInt
+ storei int32Tag, TagOffset[cfr, index, 8]
+ storei right, PayloadOffset[cfr, index, 8]
+ jmp .done
+ .notInt:
+ stored ft1, [cfr, index, 8]
+ .done:
+ end,
+ macro (left, right) divd left, right end,
+ _llint_slow_path_div)
+
+
+_llint_op_mod:
+ traceExecution()
+ callSlowPath(_llint_slow_path_mod)
+ dispatch(4)
+
+
+# Shared body for the bitwise/shift opcodes: both operands must be int32;
+# 'operation' computes into the second register, and may bail to .slow.
+# 'advance' differs per opcode because some carry extra operand slots.
+macro bitOp(operation, slow_path, advance)
+ loadi 12[PC], t2
+ loadi 8[PC], t0
+ loadConstantOrVariable(t2, t3, t1)
+ loadConstantOrVariable2Reg(t0, t2, t0)
+ bineq t3, Int32Tag, .slow
+ bineq t2, Int32Tag, .slow
+ loadi 4[PC], t2
+ operation(t1, t0, .slow)
+ storei t3, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ dispatch(advance)
+
+.slow:
+ callSlowPath(slow_path)
+ dispatch(advance)
+end
+
+_llint_op_lshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) lshifti left, right end,
+ _llint_slow_path_lshift,
+ 4)
+
+
+_llint_op_rshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) rshifti left, right end,
+ _llint_slow_path_rshift,
+ 4)
+
+
+# op_urshift: an unsigned result with the sign bit set does not fit in int32,
+# so bail to the slow path (which will box it as a double).
+_llint_op_urshift:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow)
+ urshifti left, right
+ bilt right, 0, slow
+ end,
+ _llint_slow_path_urshift,
+ 4)
+
+
+_llint_op_bitand:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) andi left, right end,
+ _llint_slow_path_bitand,
+ 5)
+
+
+_llint_op_bitxor:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) xori left, right end,
+ _llint_slow_path_bitxor,
+ 5)
+
+
+_llint_op_bitor:
+ traceExecution()
+ bitOp(
+ macro (left, right, slow) ori left, right end,
+ _llint_slow_path_bitor,
+ 5)
+
+
+# op_bitnot is unary, so it loads its single operand directly rather than
+# going through bitOp.
+_llint_op_bitnot:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t0
+ loadConstantOrVariable(t1, t2, t3)
+ bineq t2, Int32Tag, .opBitnotSlow
+ noti t3
+ storei t2, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ dispatch(3)
+
+.opBitnotSlow:
+ callSlowPath(_llint_slow_path_bitnot)
+ dispatch(3)
+
+
+# op_check_has_instance: verify the operand is a cell whose type info has the
+# ImplementsHasInstance flag; otherwise the slow path throws.
+_llint_op_check_has_instance:
+ traceExecution()
+ loadi 4[PC], t1
+ loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
+ loadp JSCell::m_structure[t0], t0
+ btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsHasInstance, .opCheckHasInstanceSlow
+ dispatch(2)
+
+.opCheckHasInstanceSlow:
+ callSlowPath(_llint_slow_path_check_has_instance)
+ dispatch(2)
+
+
+_llint_op_instanceof:
+ traceExecution()
+ # Check that baseVal implements the default HasInstance behavior.
+ # FIXME: This should be deprecated.
+ loadi 12[PC], t1
+ loadConstantOrVariablePayloadUnchecked(t1, t0)
+ loadp JSCell::m_structure[t0], t0
+ btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opInstanceofSlow
+
+ # Actually do the work.
+ loadi 16[PC], t0
+ loadi 4[PC], t3
+ loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
+ loadp JSCell::m_structure[t1], t2
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
+ loadi 8[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
+
+ # Register state: t1 = prototype, t2 = value
+ # Walk the value's prototype chain until we hit the prototype (true) or
+ # fall off the end (false).
+ move 1, t0
+.opInstanceofLoop:
+ loadp JSCell::m_structure[t2], t2
+ loadi Structure::m_prototype + PayloadOffset[t2], t2
+ bpeq t2, t1, .opInstanceofDone
+ btinz t2, .opInstanceofLoop
+
+ move 0, t0
+.opInstanceofDone:
+ storei BooleanTag, TagOffset[cfr, t3, 8]
+ storei t0, PayloadOffset[cfr, t3, 8]
+ dispatch(5)
+
+.opInstanceofSlow:
+ callSlowPath(_llint_slow_path_instanceof)
+ dispatch(5)
+
+
+# The following opcodes are implemented entirely in C++ slow paths; the LLInt
+# only dispatches past the instruction afterwards.
+_llint_op_typeof:
+ traceExecution()
+ callSlowPath(_llint_slow_path_typeof)
+ dispatch(3)
+
+
+_llint_op_is_undefined:
+ traceExecution()
+ callSlowPath(_llint_slow_path_is_undefined)
+ dispatch(3)
+
+
+_llint_op_is_boolean:
+ traceExecution()
+ callSlowPath(_llint_slow_path_is_boolean)
+ dispatch(3)
+
+
+_llint_op_is_number:
+ traceExecution()
+ callSlowPath(_llint_slow_path_is_number)
+ dispatch(3)
+
+
+_llint_op_is_string:
+ traceExecution()
+ callSlowPath(_llint_slow_path_is_string)
+ dispatch(3)
+
+
+_llint_op_is_object:
+ traceExecution()
+ callSlowPath(_llint_slow_path_is_object)
+ dispatch(3)
+
+
+_llint_op_is_function:
+ traceExecution()
+ callSlowPath(_llint_slow_path_is_function)
+ dispatch(3)
+
+
+_llint_op_in:
+ traceExecution()
+ callSlowPath(_llint_slow_path_in)
+ dispatch(4)
+
+
+_llint_op_resolve:
+ traceExecution()
+ callSlowPath(_llint_slow_path_resolve)
+ dispatch(4)
+
+
+_llint_op_resolve_skip:
+ traceExecution()
+ callSlowPath(_llint_slow_path_resolve_skip)
+ dispatch(5)
+
+
+# Inline cache for resolving a global: the bytecode caches the global object's
+# Structure and the property-storage offset; if the cached structure still
+# matches, load the value directly, else go to 'slow' to (re)fill the cache.
+macro resolveGlobal(size, slow)
+ # Operands are as follows:
+ # 4[PC] Destination for the load.
+ # 8[PC] Property identifier index in the code block.
+ # 12[PC] Structure pointer, initialized to 0 by bytecode generator.
+ # 16[PC] Offset in global object, initialized to 0 by bytecode generator.
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSCell::m_structure[t0], t1
+ bpneq t1, 12[PC], slow
+ loadi 16[PC], t1
+ loadp JSObject::m_propertyStorage[t0], t0
+ loadi TagOffset[t0, t1, 8], t2
+ loadi PayloadOffset[t0, t1, 8], t3
+ loadi 4[PC], t0
+ storei t2, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ # The value profile pointer is always the instruction's last operand.
+ loadi (size - 1) * 4[PC], t0
+ valueProfile(t2, t3, t0)
+end
+
+_llint_op_resolve_global:
+ traceExecution()
+ resolveGlobal(6, .opResolveGlobalSlow)
+ dispatch(6)
+
+.opResolveGlobalSlow:
+ callSlowPath(_llint_slow_path_resolve_global)
+ dispatch(6)
+
+
+# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
+# scopes as they are traversed. scopeCheck() is called with two arguments: the register
+# holding the scope, and a register that can be used for scratch. Note that this does not
+# use t3, so you can hold stuff in t3 if need be.
+macro getScope(deBruijinIndexOperand, scopeCheck)
+ loadp ScopeChain + PayloadOffset[cfr], t0
+ loadi deBruijinIndexOperand, t2
+
+ btiz t2, .done
+
+ # Function code with a full scope chain may have a lazily-created activation:
+ # if the activation register is still empty, the first hop is skipped for
+ # free (no scopeCheck), since the activation scope doesn't exist yet.
+ loadp CodeBlock[cfr], t1
+ bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
+ btbz CodeBlock::m_needsFullScopeChain[t1], .loop
+
+ loadi CodeBlock::m_activationRegister[t1], t1
+
+ # Need to conditionally skip over one scope.
+ bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
+ scopeCheck(t0, t1)
+ loadp ScopeChainNode::next[t0], t0
+.noActivation:
+ subi 1, t2
+
+ btiz t2, .done
+.loop:
+ scopeCheck(t0, t1)
+ loadp ScopeChainNode::next[t0], t0
+ subi 1, t2
+ btinz t2, .loop
+
+.done:
+end
+
+# op_resolve_global_dynamic: like resolve_global, but first proves that every
+# intervening scope is an activation (by structure check against the cached
+# activationStructure); any other scope kind forces the fully dynamic path.
+_llint_op_resolve_global_dynamic:
+ traceExecution()
+ loadp JITStackFrame::globalData[sp], t3
+ loadp JSGlobalData::activationStructure[t3], t3
+ getScope(
+ 20[PC],
+ macro (scope, scratch)
+ loadp ScopeChainNode::object[scope], scratch
+ bpneq JSCell::m_structure[scratch], t3, .opResolveGlobalDynamicSuperSlow
+ end)
+ resolveGlobal(7, .opResolveGlobalDynamicSlow)
+ dispatch(7)
+
+.opResolveGlobalDynamicSuperSlow:
+ callSlowPath(_llint_slow_path_resolve_for_resolve_global_dynamic)
+ dispatch(7)
+
+.opResolveGlobalDynamicSlow:
+ callSlowPath(_llint_slow_path_resolve_global_dynamic)
+ dispatch(7)
+
+
+# op_get_scoped_var: walk the scope chain, then read a register out of the
+# variable object's register array.
+_llint_op_get_scoped_var:
+ traceExecution()
+ # Operands are as follows:
+ # 4[PC] Destination for the load.
+ # 8[PC] Index of register in the scope.
+ # 12[PC] De Bruijin index.
+ getScope(12[PC], macro (scope, scratch) end)
+ loadi 4[PC], t1
+ loadi 8[PC], t2
+ loadp ScopeChainNode::object[t0], t0
+ loadp JSVariableObject::m_registers[t0], t0
+ loadi TagOffset[t0, t2, 8], t3
+ loadi PayloadOffset[t0, t2, 8], t0
+ storei t3, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ loadi 16[PC], t1
+ valueProfile(t3, t0, t1)
+ dispatch(5)
+
+
+# op_put_scoped_var: store a value (variable or constant) into a register of
+# the resolved scope's variable object.
+_llint_op_put_scoped_var:
+ traceExecution()
+ getScope(8[PC], macro (scope, scratch) end)
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t3, t2)
+ loadi 4[PC], t1
+ writeBarrier(t3, t2)
+ loadp ScopeChainNode::object[t0], t0
+ loadp JSVariableObject::m_registers[t0], t0
+ storei t3, TagOffset[t0, t1, 8]
+ storei t2, PayloadOffset[t0, t1, 8]
+ dispatch(4)
+
+
+# op_get_global_var: direct indexed load from the global object's registers.
+_llint_op_get_global_var:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t3
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_registers[t0], t0
+ loadi TagOffset[t0, t1, 8], t2
+ loadi PayloadOffset[t0, t1, 8], t1
+ storei t2, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ loadi 12[PC], t3
+ valueProfile(t2, t1, t3)
+ dispatch(4)
+
+
+# op_put_global_var: direct indexed store into the global object's registers.
+_llint_op_put_global_var:
+ traceExecution()
+ loadi 8[PC], t1
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_registers[t0], t0
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 4[PC], t1
+ writeBarrier(t2, t3)
+ storei t2, TagOffset[t0, t1, 8]
+ storei t3, PayloadOffset[t0, t1, 8]
+ dispatch(3)
+
+
+# The remaining resolve variants are implemented entirely in C++ slow paths.
+_llint_op_resolve_base:
+ traceExecution()
+ callSlowPath(_llint_slow_path_resolve_base)
+ dispatch(5)
+
+
+_llint_op_ensure_property_exists:
+ traceExecution()
+ callSlowPath(_llint_slow_path_ensure_property_exists)
+ dispatch(3)
+
+
+_llint_op_resolve_with_base:
+ traceExecution()
+ callSlowPath(_llint_slow_path_resolve_with_base)
+ dispatch(5)
+
+
+_llint_op_resolve_with_this:
+ traceExecution()
+ callSlowPath(_llint_slow_path_resolve_with_this)
+ dispatch(5)
+
+
+_llint_op_get_by_id:
+ traceExecution()
+ # We only do monomorphic get_by_id caching for now, and we do not modify the
+ # opcode. We do, however, allow for the cache to change anytime if fails, since
+ # ping-ponging is free. At best we get lucky and the get_by_id will continue
+ # to take fast path on the new cache. At worst we take slow path, which is what
+ # we would have been doing anyway.
+ # Cache layout in the instruction stream: 16[PC] = expected Structure,
+ # 20[PC] = byte offset into property storage, 32[PC] = value profile.
+ loadi 8[PC], t0
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
+ loadi 20[PC], t2
+ loadp JSObject::m_propertyStorage[t3], t0
+ bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
+ loadi 4[PC], t1
+ loadi TagOffset[t0, t2], t3
+ loadi PayloadOffset[t0, t2], t2
+ storei t3, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ loadi 32[PC], t1
+ valueProfile(t3, t2, t1)
+ dispatch(9)
+
+.opGetByIdSlow:
+ callSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
+
+
+# op_get_arguments_length: fast path only when the lazily-created arguments
+# object has NOT been materialized (its register is still empty) — then the
+# length is just ArgumentCount - 1 (excluding 'this').
+_llint_op_get_arguments_length:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
+ loadi ArgumentCount + PayloadOffset[cfr], t2
+ subi 1, t2
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ storei t2, PayloadOffset[cfr, t1, 8]
+ dispatch(4)
+
+.opGetArgumentsLengthSlow:
+ callSlowPath(_llint_slow_path_get_arguments_length)
+ dispatch(4)
+
+
+_llint_op_put_by_id:
+ traceExecution()
+ loadi 4[PC], t3
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
+ loadi 12[PC], t2
+ loadp JSObject::m_propertyStorage[t0], t3
+ bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
+ loadi 20[PC], t1
+ loadConstantOrVariable2Reg(t2, t0, t2)
+ writeBarrier(t0, t2)
+ storei t0, TagOffset[t3, t1]
+ storei t2, PayloadOffset[t3, t1]
+ dispatch(9)
+
+.opPutByIdSlow:
+ callSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
+
+
+macro putByIdTransition(additionalChecks)
+ traceExecution()
+ loadi 4[PC], t3
+ loadi 16[PC], t1
+ loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
+ loadi 12[PC], t2
+ bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
+ additionalChecks(t1, t3, .opPutByIdSlow)
+ loadi 20[PC], t1
+ loadp JSObject::m_propertyStorage[t0], t3
+ addp t1, t3
+ loadConstantOrVariable2Reg(t2, t1, t2)
+ writeBarrier(t1, t2)
+ storei t1, TagOffset[t3]
+ loadi 24[PC], t1
+ storei t2, PayloadOffset[t3]
+ storep t1, JSCell::m_structure[t0]
+ dispatch(9)
+end
+
+_llint_op_put_by_id_transition_direct:
+ putByIdTransition(macro (oldStructure, scratch, slow) end)
+
+
+_llint_op_put_by_id_transition_normal:
+ putByIdTransition(
+ macro (oldStructure, scratch, slow)
+ const protoCell = oldStructure # Reusing the oldStructure register for the proto
+
+ loadp 28[PC], scratch
+ assert(macro (ok) btpnz scratch, ok end)
+ loadp StructureChain::m_vector[scratch], scratch
+ assert(macro (ok) btpnz scratch, ok end)
+ bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
+ .loop:
+ loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
+ loadp JSCell::m_structure[protoCell], oldStructure
+ bpneq oldStructure, [scratch], slow
+ addp 4, scratch
+ bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
+ .done:
+ end)
+
+
+_llint_op_del_by_id:
+ traceExecution()
+ callSlowPath(_llint_slow_path_del_by_id)
+ dispatch(4)
+
+
+_llint_op_get_by_val:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadi 8[PC], t2
+ loadi 12[PC], t3
+ loadp CodeBlock::m_globalData[t1], t1
+ loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
+ loadp JSGlobalData::jsArrayClassInfo[t1], t2
+ loadConstantOrVariablePayload(t3, Int32Tag, t1, .opGetByValSlow)
+ bpneq [t0], t2, .opGetByValSlow
+ loadp JSArray::m_storage[t0], t3
+ biaeq t1, JSArray::m_vectorLength[t0], .opGetByValSlow
+ loadi 4[PC], t0
+ loadi ArrayStorage::m_vector + TagOffset[t3, t1, 8], t2
+ loadi ArrayStorage::m_vector + PayloadOffset[t3, t1, 8], t1
+ bieq t2, EmptyValueTag, .opGetByValSlow
+ storei t2, TagOffset[cfr, t0, 8]
+ storei t1, PayloadOffset[cfr, t0, 8]
+ loadi 16[PC], t0
+ valueProfile(t2, t1, t0)
+ dispatch(5)
+
+.opGetByValSlow:
+ callSlowPath(_llint_slow_path_get_by_val)
+ dispatch(5)
+
+
+_llint_op_get_argument_by_val:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 12[PC], t1
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
+ loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
+ addi 1, t2
+ loadi ArgumentCount + PayloadOffset[cfr], t1
+ biaeq t2, t1, .opGetArgumentByValSlow
+ negi t2
+ loadi 4[PC], t3
+ loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
+ loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
+ storei t0, TagOffset[cfr, t3, 8]
+ storei t1, PayloadOffset[cfr, t3, 8]
+ dispatch(5)
+
+.opGetArgumentByValSlow:
+ callSlowPath(_llint_slow_path_get_argument_by_val)
+ dispatch(5)
+
+
+_llint_op_get_by_pname:
+ traceExecution()
+ loadi 12[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
+ loadi 16[PC], t0
+ bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
+ loadi 8[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
+ loadi 20[PC], t0
+ loadi PayloadOffset[cfr, t0, 8], t3
+ loadp JSCell::m_structure[t2], t0
+ bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
+ loadi 24[PC], t0
+ loadi [cfr, t0, 8], t0
+ subi 1, t0
+ biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
+ loadp JSObject::m_propertyStorage[t2], t2
+ loadi TagOffset[t2, t0, 8], t1
+ loadi PayloadOffset[t2, t0, 8], t3
+ loadi 4[PC], t0
+ storei t1, TagOffset[cfr, t0, 8]
+ storei t3, PayloadOffset[cfr, t0, 8]
+ dispatch(7)
+
+.opGetByPnameSlow:
+ callSlowPath(_llint_slow_path_get_by_pname)
+ dispatch(7)
+
+
+_llint_op_put_by_val:
+ traceExecution()
+ loadi 4[PC], t0
+ loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
+ loadi 8[PC], t0
+ loadConstantOrVariablePayload(t0, Int32Tag, t2, .opPutByValSlow)
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalData[t0], t0
+ loadp JSGlobalData::jsArrayClassInfo[t0], t0
+ bpneq [t1], t0, .opPutByValSlow
+ biaeq t2, JSArray::m_vectorLength[t1], .opPutByValSlow
+ loadp JSArray::m_storage[t1], t0
+ bieq ArrayStorage::m_vector + TagOffset[t0, t2, 8], EmptyValueTag, .opPutByValEmpty
+.opPutByValStoreResult:
+ loadi 12[PC], t3
+ loadConstantOrVariable2Reg(t3, t1, t3)
+ writeBarrier(t1, t3)
+ storei t1, ArrayStorage::m_vector + TagOffset[t0, t2, 8]
+ storei t3, ArrayStorage::m_vector + PayloadOffset[t0, t2, 8]
+ dispatch(4)
+
+.opPutByValEmpty:
+ addi 1, ArrayStorage::m_numValuesInVector[t0]
+ bib t2, ArrayStorage::m_length[t0], .opPutByValStoreResult
+ addi 1, t2, t1
+ storei t1, ArrayStorage::m_length[t0]
+ jmp .opPutByValStoreResult
+
+.opPutByValSlow:
+ callSlowPath(_llint_slow_path_put_by_val)
+ dispatch(4)
+
+
+_llint_op_del_by_val:
+ traceExecution()
+ callSlowPath(_llint_slow_path_del_by_val)
+ dispatch(4)
+
+
+_llint_op_put_by_index:
+ traceExecution()
+ callSlowPath(_llint_slow_path_put_by_index)
+ dispatch(4)
+
+
+_llint_op_put_getter_setter:
+ traceExecution()
+ callSlowPath(_llint_slow_path_put_getter_setter)
+ dispatch(5)
+
+
+_llint_op_loop:
+ nop
+_llint_op_jmp:
+ traceExecution()
+ dispatchBranch(4[PC])
+
+
+_llint_op_jmp_scopes:
+ traceExecution()
+ callSlowPath(_llint_slow_path_jmp_scopes)
+ dispatch(0)
+
+
+macro jumpTrueOrFalse(conditionOp, slow)
+ loadi 4[PC], t1
+ loadConstantOrVariablePayload(t1, BooleanTag, t0, .slow)
+ conditionOp(t0, .target)
+ dispatch(3)
+
+.target:
+ dispatchBranch(8[PC])
+
+.slow:
+ callSlowPath(slow)
+ dispatch(0)
+end
+
+_llint_op_loop_if_true:
+ nop
+_llint_op_jtrue:
+ traceExecution()
+ jumpTrueOrFalse(
+ macro (value, target) btinz value, target end,
+ _llint_slow_path_jtrue)
+
+
+_llint_op_loop_if_false:
+ nop
+_llint_op_jfalse:
+ traceExecution()
+ jumpTrueOrFalse(
+ macro (value, target) btiz value, target end,
+ _llint_slow_path_jfalse)
+
+
+macro equalNull(cellHandler, immediateHandler)
+ loadi 4[PC], t0
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ bineq t1, CellTag, .immediate
+ loadp JSCell::m_structure[t0], t2
+ cellHandler(Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ dispatch(3)
+
+.target:
+ dispatchBranch(8[PC])
+
+.immediate:
+ ori 1, t1
+ immediateHandler(t1, .target)
+ dispatch(3)
+end
+
+_llint_op_jeq_null:
+ traceExecution()
+ equalNull(
+ macro (value, target) btbnz value, MasqueradesAsUndefined, target end,
+ macro (value, target) bieq value, NullTag, target end)
+
+
+_llint_op_jneq_null:
+ traceExecution()
+ equalNull(
+ macro (value, target) btbz value, MasqueradesAsUndefined, target end,
+ macro (value, target) bineq value, NullTag, target end)
+
+
+_llint_op_jneq_ptr:
+ traceExecution()
+ loadi 4[PC], t0
+ loadi 8[PC], t1
+ bineq TagOffset[cfr, t0, 8], CellTag, .opJneqPtrBranch
+ bpeq PayloadOffset[cfr, t0, 8], t1, .opJneqPtrFallThrough
+.opJneqPtrBranch:
+ dispatchBranch(12[PC])
+.opJneqPtrFallThrough:
+ dispatch(4)
+
+
+macro compare(integerCompare, doubleCompare, slow_path)
+ loadi 4[PC], t2
+ loadi 8[PC], t3
+ loadConstantOrVariable(t2, t0, t1)
+ loadConstantOrVariable2Reg(t3, t2, t3)
+ bineq t0, Int32Tag, .op1NotInt
+ bineq t2, Int32Tag, .op2NotInt
+ integerCompare(t1, t3, .jumpTarget)
+ dispatch(4)
+
+.op1NotInt:
+ bia t0, LowestTag, .slow
+ bib t2, LowestTag, .op1NotIntOp2Double
+ bineq t2, Int32Tag, .slow
+ ci2d t3, ft1
+ jmp .op1NotIntReady
+.op1NotIntOp2Double:
+ fii2d t3, t2, ft1
+.op1NotIntReady:
+ fii2d t1, t0, ft0
+ doubleCompare(ft0, ft1, .jumpTarget)
+ dispatch(4)
+
+.op2NotInt:
+ ci2d t1, ft0
+ bia t2, LowestTag, .slow
+ fii2d t3, t2, ft1
+ doubleCompare(ft0, ft1, .jumpTarget)
+ dispatch(4)
+
+.jumpTarget:
+ dispatchBranch(12[PC])
+
+.slow:
+ callSlowPath(slow_path)
+ dispatch(0)
+end
+
+_llint_op_loop_if_less:
+ nop
+_llint_op_jless:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilt left, right, target end,
+ macro (left, right, target) bdlt left, right, target end,
+ _llint_slow_path_jless)
+
+
+_llint_op_jnless:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigteq left, right, target end,
+ macro (left, right, target) bdgtequn left, right, target end,
+ _llint_slow_path_jnless)
+
+
+_llint_op_loop_if_greater:
+ nop
+_llint_op_jgreater:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigt left, right, target end,
+ macro (left, right, target) bdgt left, right, target end,
+ _llint_slow_path_jgreater)
+
+
+_llint_op_jngreater:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilteq left, right, target end,
+ macro (left, right, target) bdltequn left, right, target end,
+ _llint_slow_path_jngreater)
+
+
+_llint_op_loop_if_lesseq:
+ nop
+_llint_op_jlesseq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilteq left, right, target end,
+ macro (left, right, target) bdlteq left, right, target end,
+ _llint_slow_path_jlesseq)
+
+
+_llint_op_jnlesseq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigt left, right, target end,
+ macro (left, right, target) bdgtun left, right, target end,
+ _llint_slow_path_jnlesseq)
+
+
+_llint_op_loop_if_greatereq:
+ nop
+_llint_op_jgreatereq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bigteq left, right, target end,
+ macro (left, right, target) bdgteq left, right, target end,
+ _llint_slow_path_jgreatereq)
+
+
+_llint_op_jngreatereq:
+ traceExecution()
+ compare(
+ macro (left, right, target) bilt left, right, target end,
+ macro (left, right, target) bdltun left, right, target end,
+ _llint_slow_path_jngreatereq)
+
+
+_llint_op_loop_hint:
+ traceExecution()
+ checkSwitchToJITForLoop()
+ dispatch(1)
+
+
+_llint_op_switch_imm:
+ traceExecution()
+ loadi 12[PC], t2
+ loadi 4[PC], t3
+ loadConstantOrVariable(t2, t1, t0)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_rareData[t2], t2
+ muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
+ loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
+ addp t3, t2
+ bineq t1, Int32Tag, .opSwitchImmNotInt
+ subi SimpleJumpTable::min[t2], t0
+ biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchImmFallThrough
+ loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t3
+ loadi [t3, t0, 4], t1
+ btiz t1, .opSwitchImmFallThrough
+ dispatchBranchWithOffset(t1)
+
+.opSwitchImmNotInt:
+ bib t1, LowestTag, .opSwitchImmSlow # Go to slow path if it's a double.
+.opSwitchImmFallThrough:
+ dispatchBranch(8[PC])
+
+.opSwitchImmSlow:
+ callSlowPath(_llint_slow_path_switch_imm)
+ dispatch(0)
+
+
+_llint_op_switch_char:
+    traceExecution()
+    loadi 12[PC], t2
+    loadi 4[PC], t3
+    loadConstantOrVariable(t2, t1, t0)
+    loadp CodeBlock[cfr], t2
+    loadp CodeBlock::m_rareData[t2], t2
+    muli sizeof SimpleJumpTable, t3
+    loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
+    addp t3, t2
+    bineq t1, CellTag, .opSwitchCharFallThrough
+    loadp JSCell::m_structure[t0], t1
+    bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
+    loadp JSString::m_value[t0], t0
+    bineq StringImpl::m_length[t0], 1, .opSwitchCharFallThrough
+    loadp StringImpl::m_data8[t0], t1
+    btinz StringImpl::m_hashAndFlags[t0], HashFlags8BitBuffer, .opSwitchChar8Bit
+    loadh [t1], t0
+    jmp .opSwitchCharReady
+.opSwitchChar8Bit:
+    loadb [t1], t0
+.opSwitchCharReady:
+    subi SimpleJumpTable::min[t2], t0
+    biaeq t0, SimpleJumpTable::branchOffsets + VectorSizeOffset[t2], .opSwitchCharFallThrough
+    loadp SimpleJumpTable::branchOffsets + VectorBufferOffset[t2], t2
+    loadi [t2, t0, 4], t1
+    btiz t1, .opSwitchCharFallThrough # Fixed copy/paste from op_switch_imm: jump to this opcode's fall-through label.
+    dispatchBranchWithOffset(t1)
+
+.opSwitchCharFallThrough:
+    dispatchBranch(8[PC])
+
+
+_llint_op_switch_string:
+ traceExecution()
+ callSlowPath(_llint_slow_path_switch_string)
+ dispatch(0)
+
+
+_llint_op_new_func:
+ traceExecution()
+ btiz 12[PC], .opNewFuncUnchecked
+ loadi 4[PC], t1
+ bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
+.opNewFuncUnchecked:
+ callSlowPath(_llint_slow_path_new_func)
+.opNewFuncDone:
+ dispatch(4)
+
+
+_llint_op_new_func_exp:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_func_exp)
+ dispatch(3)
+
+
+macro doCall(slow_path)
+ loadi 4[PC], t0
+ loadi 16[PC], t1
+ loadp LLIntCallLinkInfo::callee[t1], t2
+ loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
+ bineq t3, t2, .opCallSlow
+ loadi 12[PC], t3
+ addp 24, PC
+ lshifti 3, t3
+ addp cfr, t3 # t3 contains the new value of cfr
+ loadp JSFunction::m_scopeChain[t2], t0
+ storei t2, Callee + PayloadOffset[t3]
+ storei t0, ScopeChain + PayloadOffset[t3]
+ loadi 8 - 24[PC], t2
+ storei PC, ArgumentCount + TagOffset[cfr]
+ storep cfr, CallerFrame[t3]
+ storei t2, ArgumentCount + PayloadOffset[t3]
+ storei CellTag, Callee + TagOffset[t3]
+ storei CellTag, ScopeChain + TagOffset[t3]
+ move t3, cfr
+ call LLIntCallLinkInfo::machineCodeTarget[t1]
+ dispatchAfterCall()
+
+.opCallSlow:
+ slowPathForCall(6, slow_path)
+end
+
+_llint_op_call:
+ traceExecution()
+ doCall(_llint_slow_path_call)
+
+
+_llint_op_construct:
+ traceExecution()
+ doCall(_llint_slow_path_construct)
+
+
+_llint_op_call_varargs:
+ traceExecution()
+ slowPathForCall(6, _llint_slow_path_call_varargs)
+
+
+_llint_op_call_eval:
+ traceExecution()
+
+ # Eval is executed in one of two modes:
+ #
+ # 1) We find that we're really invoking eval() in which case the
+    #    execution is performed entirely inside the slow_path, and it
+ # returns the PC of a function that just returns the return value
+ # that the eval returned.
+ #
+ # 2) We find that we're invoking something called eval() that is not
+ # the real eval. Then the slow_path returns the PC of the thing to
+ # call, and we call it.
+ #
+ # This allows us to handle two cases, which would require a total of
+ # up to four pieces of state that cannot be easily packed into two
+ # registers (C functions can return up to two registers, easily):
+ #
+ # - The call frame register. This may or may not have been modified
+ # by the slow_path, but the convention is that it returns it. It's not
+ # totally clear if that's necessary, since the cfr is callee save.
+ # But that's our style in this here interpreter so we stick with it.
+ #
+ # - A bit to say if the slow_path successfully executed the eval and has
+ # the return value, or did not execute the eval but has a PC for us
+ # to call.
+ #
+ # - Either:
+ # - The JS return value (two registers), or
+ #
+ # - The PC to call.
+ #
+ # It turns out to be easier to just always have this return the cfr
+ # and a PC to call, and that PC may be a dummy thunk that just
+ # returns the JS value that the eval returned.
+
+ slowPathForCall(4, _llint_slow_path_call_eval)
+
+
+_llint_generic_return_point:
+ dispatchAfterCall()
+
+
+_llint_op_tear_off_activation:
+ traceExecution()
+ loadi 4[PC], t0
+ loadi 8[PC], t1
+ bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationCreated
+ bieq TagOffset[cfr, t1, 8], EmptyValueTag, .opTearOffActivationNotCreated
+.opTearOffActivationCreated:
+ callSlowPath(_llint_slow_path_tear_off_activation)
+.opTearOffActivationNotCreated:
+ dispatch(3)
+
+
+_llint_op_tear_off_arguments:
+ traceExecution()
+ loadi 4[PC], t0
+ subi 1, t0 # Get the unmodifiedArgumentsRegister
+ bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
+ callSlowPath(_llint_slow_path_tear_off_arguments)
+.opTearOffArgumentsNotCreated:
+ dispatch(2)
+
+
+macro doReturn()
+ loadp ReturnPC[cfr], t2
+ loadp CallerFrame[cfr], cfr
+ restoreReturnAddressBeforeReturn(t2)
+ ret
+end
+
+_llint_op_ret:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadi 4[PC], t2
+ loadConstantOrVariable(t2, t1, t0)
+ doReturn()
+
+
+_llint_op_call_put_result:
+ loadi 4[PC], t2
+ loadi 8[PC], t3
+ storei t1, TagOffset[cfr, t2, 8]
+ storei t0, PayloadOffset[cfr, t2, 8]
+ valueProfile(t1, t0, t3)
+ traceExecution() # Needs to be here because it would clobber t1, t0
+ dispatch(3)
+
+
+_llint_op_ret_object_or_this:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadi 4[PC], t2
+ loadConstantOrVariable(t2, t1, t0)
+ bineq t1, CellTag, .opRetObjectOrThisNotObject
+ loadp JSCell::m_structure[t0], t2
+ bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
+ doReturn()
+
+.opRetObjectOrThisNotObject:
+ loadi 8[PC], t2
+ loadConstantOrVariable(t2, t1, t0)
+ doReturn()
+
+
+_llint_op_method_check:
+ traceExecution()
+ # We ignore method checks and use normal get_by_id optimizations.
+ dispatch(1)
+
+
+_llint_op_strcat:
+ traceExecution()
+ callSlowPath(_llint_slow_path_strcat)
+ dispatch(4)
+
+
+_llint_op_to_primitive:
+ traceExecution()
+ loadi 8[PC], t2
+ loadi 4[PC], t3
+ loadConstantOrVariable(t2, t1, t0)
+ bineq t1, CellTag, .opToPrimitiveIsImm
+ loadp JSCell::m_structure[t0], t2
+ bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+.opToPrimitiveIsImm:
+ storei t1, TagOffset[cfr, t3, 8]
+ storei t0, PayloadOffset[cfr, t3, 8]
+ dispatch(3)
+
+.opToPrimitiveSlowCase:
+ callSlowPath(_llint_slow_path_to_primitive)
+ dispatch(3)
+
+
+_llint_op_get_pnames:
+ traceExecution()
+ callSlowPath(_llint_slow_path_get_pnames)
+ dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
+
+
+_llint_op_next_pname:
+ traceExecution()
+ loadi 12[PC], t1
+ loadi 16[PC], t2
+ loadi PayloadOffset[cfr, t1, 8], t0
+ bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
+ loadi 20[PC], t2
+ loadi PayloadOffset[cfr, t2, 8], t2
+ loadp JSPropertyNameIterator::m_jsStrings[t2], t3
+ loadi [t3, t0, 8], t3
+ addi 1, t0
+ storei t0, PayloadOffset[cfr, t1, 8]
+ loadi 4[PC], t1
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ loadi 8[PC], t3
+ loadi PayloadOffset[cfr, t3, 8], t3
+ loadp JSCell::m_structure[t3], t1
+ bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
+ loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
+ loadp StructureChain::m_vector[t0], t0
+ btpz [t0], .opNextPnameTarget
+.opNextPnameCheckPrototypeLoop:
+ bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
+ loadp Structure::m_prototype + PayloadOffset[t1], t2
+ loadp JSCell::m_structure[t2], t1
+ bpneq t1, [t0], .opNextPnameSlow
+ addp 4, t0
+ btpnz [t0], .opNextPnameCheckPrototypeLoop
+.opNextPnameTarget:
+ dispatchBranch(24[PC])
+
+.opNextPnameEnd:
+ dispatch(7)
+
+.opNextPnameSlow:
+ callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
+ dispatch(0)
+
+
+_llint_op_push_scope:
+ traceExecution()
+ callSlowPath(_llint_slow_path_push_scope)
+ dispatch(2)
+
+
+_llint_op_pop_scope:
+ traceExecution()
+ callSlowPath(_llint_slow_path_pop_scope)
+ dispatch(1)
+
+
+_llint_op_push_new_scope:
+ traceExecution()
+ callSlowPath(_llint_slow_path_push_new_scope)
+ dispatch(4)
+
+
+_llint_op_catch:
+ # This is where we end up from the JIT's throw trampoline (because the
+ # machine code return address will be set to _llint_op_catch), and from
+ # the interpreter's throw trampoline (see _llint_throw_trampoline).
+ # The JIT throwing protocol calls for the cfr to be in t0. The throwing
+ # code must have known that we were throwing to the interpreter, and have
+ # set JSGlobalData::targetInterpreterPCForThrow.
+ move t0, cfr
+ loadp JITStackFrame::globalData[sp], t3
+ loadi JSGlobalData::targetInterpreterPCForThrow[t3], PC
+ loadi JSGlobalData::exception + PayloadOffset[t3], t0
+ loadi JSGlobalData::exception + TagOffset[t3], t1
+ storei 0, JSGlobalData::exception + PayloadOffset[t3]
+ storei EmptyValueTag, JSGlobalData::exception + TagOffset[t3]
+ loadi 4[PC], t2
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei t1, TagOffset[cfr, t2, 8]
+ traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
+ dispatch(2)
+
+
+_llint_op_throw:
+ traceExecution()
+ callSlowPath(_llint_slow_path_throw)
+ dispatch(2)
+
+
+_llint_op_throw_reference_error:
+ traceExecution()
+ callSlowPath(_llint_slow_path_throw_reference_error)
+ dispatch(2)
+
+
+_llint_op_jsr:
+ traceExecution()
+ loadi 4[PC], t0
+ addi 3 * 4, PC, t1
+ storei t1, [cfr, t0, 8]
+ dispatchBranch(8[PC])
+
+
+_llint_op_sret:
+ traceExecution()
+ loadi 4[PC], t0
+ loadp [cfr, t0, 8], PC
+ dispatch(0)
+
+
+_llint_op_debug:
+ traceExecution()
+ callSlowPath(_llint_slow_path_debug)
+ dispatch(4)
+
+
+_llint_op_profile_will_call:
+ traceExecution()
+ loadp JITStackFrame::enabledProfilerReference[sp], t0
+ btpz [t0], .opProfileWillCallDone
+ callSlowPath(_llint_slow_path_profile_will_call)
+.opProfileWillCallDone:
+ dispatch(2)
+
+
+_llint_op_profile_did_call:
+    traceExecution()
+    loadp JITStackFrame::enabledProfilerReference[sp], t0
+    btpz [t0], .opProfileDidCallDone # Fixed copy/paste: previously jumped to .opProfileWillCallDone in the preceding opcode.
+    callSlowPath(_llint_slow_path_profile_did_call)
+.opProfileDidCallDone:
+    dispatch(2)
+
+
+_llint_op_end:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ loadi 4[PC], t0
+ loadi TagOffset[cfr, t0, 8], t1
+ loadi PayloadOffset[cfr, t0, 8], t0
+ doReturn()
+
+
+_llint_throw_from_slow_path_trampoline:
+ # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so
+ # the throw target is not necessarily interpreted code, we come to here.
+ # This essentially emulates the JIT's throwing protocol.
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+
+
+_llint_throw_during_call_trampoline:
+ preserveReturnAddressAfterCall(t2)
+ loadp JITStackFrame::globalData[sp], t1
+ loadp JSGlobalData::callFrameForThrow[t1], t0
+ jmp JSGlobalData::targetMachinePCForThrow[t1]
+
+
+# Lastly, make sure that we can link even though we don't support all opcodes.
+# These opcodes should never arise when using LLInt or either JIT. We assert
+# as much.
+
+macro notSupported()
+ if ASSERT_ENABLED
+ crash()
+ else
+ # We should use whatever the smallest possible instruction is, just to
+ # ensure that there is a gap between instruction labels. If multiple
+ # smallest instructions exist, we should pick the one that is most
+    # likely to result in execution being halted. Currently that is the break
+ # instruction on all architectures we're interested in. (Break is int3
+ # on Intel, which is 1 byte, and bkpt on ARMv7, which is 2 bytes.)
+ break
+ end
+end
+
+_llint_op_get_array_length:
+ notSupported()
+
+_llint_op_get_by_id_chain:
+ notSupported()
+
+_llint_op_get_by_id_custom_chain:
+ notSupported()
+
+_llint_op_get_by_id_custom_proto:
+ notSupported()
+
+_llint_op_get_by_id_custom_self:
+ notSupported()
+
+_llint_op_get_by_id_generic:
+ notSupported()
+
+_llint_op_get_by_id_getter_chain:
+ notSupported()
+
+_llint_op_get_by_id_getter_proto:
+ notSupported()
+
+_llint_op_get_by_id_getter_self:
+ notSupported()
+
+_llint_op_get_by_id_proto:
+ notSupported()
+
+_llint_op_get_by_id_self:
+ notSupported()
+
+_llint_op_get_string_length:
+ notSupported()
+
+_llint_op_put_by_id_generic:
+ notSupported()
+
+_llint_op_put_by_id_replace:
+ notSupported()
+
+_llint_op_put_by_id_transition:
+ notSupported()
+
+
+# Indicate the end of LLInt.
+_llint_end:
+ crash()
+
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
new file mode 100644
index 000000000..b95a50082
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LowLevelInterpreter.h"
+
+#if ENABLE(LLINT)
+
+#include "LLIntOfflineAsmConfig.h"
+#include <wtf/InlineASM.h>
+
+// This is a file generated by offlineasm, which contains all of the assembly code
+// for the interpreter, as compiled from LowLevelInterpreter.asm.
+#include "LLIntAssembly.h"
+
+#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.h b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
new file mode 100644
index 000000000..e5a54a45d
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LowLevelInterpreter_h
+#define LowLevelInterpreter_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(LLINT)
+
+#include "Opcode.h"
+
+#define LLINT_INSTRUCTION_DECL(opcode, length) extern "C" void llint_##opcode();
+ FOR_EACH_OPCODE_ID(LLINT_INSTRUCTION_DECL);
+#undef LLINT_INSTRUCTION_DECL
+
+extern "C" void llint_begin();
+extern "C" void llint_end();
+extern "C" void llint_program_prologue();
+extern "C" void llint_eval_prologue();
+extern "C" void llint_function_for_call_prologue();
+extern "C" void llint_function_for_construct_prologue();
+extern "C" void llint_function_for_call_arity_check();
+extern "C" void llint_function_for_construct_arity_check();
+extern "C" void llint_generic_return_point();
+extern "C" void llint_throw_from_slow_path_trampoline();
+extern "C" void llint_throw_during_call_trampoline();
+
+#endif // ENABLE(LLINT)
+
+#endif // LowLevelInterpreter_h
diff --git a/Source/JavaScriptCore/offlineasm/armv7.rb b/Source/JavaScriptCore/offlineasm/armv7.rb
new file mode 100644
index 000000000..eb8df6869
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/armv7.rb
@@ -0,0 +1,1032 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "ast"
+require "opt"
+
class Node
    # Map this node's double-precision VFP register operand ("d<N>") to the
    # spelling of its low single-precision half ("s<2N>"). Used when an
    # instruction (e.g. vmov/vcvt) needs the single-register view of a double.
    def armV7Single
        doubleOperand = armV7Operand
        raise "Bogus register name #{doubleOperand}" unless doubleOperand =~ /^d/
        # $~.post_match is the digits after the leading "d" from the regexp above.
        "s" + ($~.post_match.to_i * 2).to_s
    end
end
+
# A machine register reserved for compiler-introduced temporaries. It never
# appears in offlineasm source; the lowering passes hand these out when they
# need scratch space.
class SpecialRegister < NoChildren
    def initialize(name)
        @name = name
    end

    # The literal assembler spelling of this register (e.g. "r9" or "d7").
    def armV7Operand
        @name
    end

    # A special register is a plain register operand and nothing else, so
    # register? answers true and every other operand-kind predicate is false.
    def register?
        true
    end

    def address?
        false
    end

    def immediate?
        false
    end

    def label?
        false
    end
end
+
+ARMv7_EXTRA_GPRS = [SpecialRegister.new("r9"), SpecialRegister.new("r8"), SpecialRegister.new("r3")]
+ARMv7_EXTRA_FPRS = [SpecialRegister.new("d7")]
+ARMv7_SCRATCH_FPR = SpecialRegister.new("d8")
+
# Emit code that loads the 32-bit constant +value+ into +register+.
# Small non-negative values use a single movw; small bitwise-negated values
# use mvn; everything else is synthesized with a movw/movt pair.
def armV7MoveImmediate(value, register)
    # Currently we only handle the simple cases, and fall back to mov/movt for the complex ones.
    if value >= 0 && value < 256
        $asm.puts "movw #{register.armV7Operand}, \##{value}"
    elsif (~value) >= 0 && (~value) < 256
        $asm.puts "mvn #{register.armV7Operand}, \##{~value}"
    else
        # Ruby integers are arbitrary precision, so explicitly truncate each
        # half to 16 bits. Without the mask on the movt operand, a negative
        # value (e.g. -300) would emit an invalid negative immediate.
        $asm.puts "movw #{register.armV7Operand}, \##{value & 0xffff}"
        if (value & 0xffff0000) != 0
            $asm.puts "movt #{register.armV7Operand}, \##{(value >> 16) & 0xffff}"
        end
    end
end
+
class RegisterID
    # Fixed assignment from offlineasm register names to ARMv7 GPRs.
    # t0-t2 alias the argument/return registers; cfr lives in r5.
    ARMV7_GPR_MAPPING = {
        "t0" => "r0", "a0" => "r0", "r0" => "r0",
        "t1" => "r1", "a1" => "r1", "r1" => "r1",
        "t2" => "r2", "a2" => "r2",
        "a3" => "r3",
        "t3" => "r4",
        "t4" => "r7",
        "cfr" => "r5",
        "lr" => "lr",
        "sp" => "sp"
    }

    # The assembler spelling of this register, or an error for names the
    # ARMv7 backend does not support.
    def armV7Operand
        ARMV7_GPR_MAPPING.fetch(name) {
            raise "Bad register #{name} for ARMv7 at #{codeOriginString}"
        }
    end
end
+
class FPRegisterID
    # Fixed assignment from offlineasm FP register names to VFP double
    # registers; "fr" (the FP return register) aliases ft0/d0.
    ARMV7_FPR_MAPPING = {
        "ft0" => "d0", "fr" => "d0",
        "ft1" => "d1",
        "ft2" => "d2",
        "ft3" => "d3",
        "ft4" => "d4",
        "ft5" => "d5"
    }

    # The assembler spelling of this FP register, or an error for names the
    # ARMv7 backend does not support.
    def armV7Operand
        ARMV7_FPR_MAPPING.fetch(name) {
            raise "Bad register #{name} for ARMv7 at #{codeOriginString}"
        }
    end
end
+
class Immediate
    # Render an immediate operand. By the time code is emitted, the lowering
    # passes guarantee every remaining immediate fits in an unsigned byte;
    # anything else indicates a lowering bug.
    def armV7Operand
        unless (0..255).include? value
            raise "Invalid immediate #{value} at #{codeOriginString}"
        end
        "\##{value}"
    end
end
+
class Address
    # Render a base-plus-offset operand. The lowering passes keep offsets
    # within this range; anything outside it indicates a lowering bug.
    def armV7Operand
        unless (-0xff..0xfff).include? offset.value
            raise "Bad offset at #{codeOriginString}"
        end
        "[#{base.armV7Operand}, \##{offset.value}]"
    end
end
+
class BaseIndex
    # Render a base+scaled-index operand. Any nonzero offset must already have
    # been folded into the base by armV7LowerMalformedAddresses.
    def armV7Operand
        unless offset.value == 0
            raise "Bad offset at #{codeOriginString}"
        end
        "[#{base.armV7Operand}, #{index.armV7Operand}, lsl \##{scaleShift}]"
    end
end
+
class AbsoluteAddress
    # Absolute addresses are always rewritten into register-relative form by
    # armV7LowerMalformedAddresses; reaching this point is a lowering bug.
    def armV7Operand
        raise "Unconverted absolute address at #{codeOriginString}"
    end
end
+
+#
+# Lowering of branch ops. For example:
+#
+# baddiz foo, bar, baz
+#
+# will become:
+#
+# addi foo, bar
+# bz baz
+#
+
# Split fused arithmetic-and-branch ops into a flag-setting arithmetic op
# followed by a plain condition branch (see the example in the banner above).
# NOTE: the case arms rely on Ruby's regexp globals ($1, $~) being set by the
# immediately preceding `when /.../` match — do not reorder statements here.
def armV7LowerBranchOps(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when /^b(addi|subi|ori|addp)/
                # $1 is the arithmetic op, $~.post_match the condition suffix.
                op = $1
                branch = "b" + $~.post_match

                # Switch to the flag-setting ("s") variant of the op.
                case op
                when "addi", "addp"
                    op = "addis"
                when "subi"
                    op = "subis"
                when "ori"
                    op = "oris"
                end

                newList << Instruction.new(node.codeOrigin, op, node.operands[0..-2])
                newList << Instruction.new(node.codeOrigin, branch, [node.operands[-1]])
            when "bmulio"
                # Multiply-and-branch-on-overflow: do a 64-bit smull and branch
                # if the high word differs from the sign-extension of the low
                # word (i.e. the product did not fit in 32 bits).
                tmp1 = Tmp.new(node.codeOrigin, :gpr)
                tmp2 = Tmp.new(node.codeOrigin, :gpr)
                newList << Instruction.new(node.codeOrigin, "smulli", [node.operands[0], node.operands[1], node.operands[1], tmp1])
                newList << Instruction.new(node.codeOrigin, "rshifti", [node.operands[-2], Immediate.new(node.codeOrigin, 31), tmp2])
                newList << Instruction.new(node.codeOrigin, "bineq", [tmp1, tmp2, node.operands[-1]])
            when /^bmuli/
                # Other multiply-and-branch forms: multiply, then test-and-branch
                # on the result.
                condition = $~.post_match
                newList << Instruction.new(node.codeOrigin, "muli", node.operands[0..-2])
                newList << Instruction.new(node.codeOrigin, "bti" + condition, [node.operands[-2], node.operands[-1]])
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of shift ops. For example:
+#
+# lshifti foo, bar
+#
+# will become:
+#
+# andi foo, 31, tmp
+# lshifti tmp, bar
+#
+
# Legalize a shift-amount operand: immediates pass through untouched, while a
# register amount is masked to the low five bits (0..31) in a fresh temporary,
# with the masking instruction appended to +list+.
def armV7SanitizeShift(operand, list)
    return operand if operand.immediate?

    maskedAmount = Tmp.new(operand.codeOrigin, :gpr)
    list << Instruction.new(operand.codeOrigin, "andi", [operand, Immediate.new(operand.codeOrigin, 31), maskedAmount])
    maskedAmount
end
+
# Lower shift instructions so register shift amounts are masked to 0..31
# first (see armV7SanitizeShift). For example:
#
#     lshifti foo, bar
#
# becomes:
#
#     andi foo, 31, tmp
#     lshifti tmp, bar
def armV7LowerShiftOps(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "lshifti", "rshifti", "urshifti"
                if node.operands.size == 2
                    # Two-operand form: shift operands[1] by operands[0].
                    newList << Instruction.new(node.codeOrigin, node.opcode, [armV7SanitizeShift(node.operands[0], newList), node.operands[1]])
                else
                    # Three-operand form. Validate the arity *before* indexing
                    # operands[2]; the original raised only after already
                    # emitting a malformed instruction.
                    raise "Wrong number of operands for shift at #{node.codeOriginString}" unless node.operands.size == 3
                    newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], armV7SanitizeShift(node.operands[1], newList), node.operands[2]])
                end
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of malformed addresses. For example:
+#
+# loadp 10000[foo], bar
+#
+# will become:
+#
+# move 10000, tmp
+# addp foo, tmp
+# loadp 0[tmp], bar
+#
+
class Node
    # Default recursion for address legalization: rewrite malformed addresses
    # inside this node's children, appending fix-up instructions to +list+.
    def armV7LowerMalformedAddressesRecurse(list)
        mapChildren {
            | node |
            node.armV7LowerMalformedAddressesRecurse(list)
        }
    end
end

class Address
    # An offset outside the encodable -0xff..0xfff range is materialized into
    # a temporary base register, leaving a zero-offset Address behind.
    def armV7LowerMalformedAddressesRecurse(list)
        if offset.value < -0xff or offset.value > 0xfff
            tmp = Tmp.new(codeOrigin, :gpr)
            list << Instruction.new(codeOrigin, "move", [offset, tmp])
            list << Instruction.new(codeOrigin, "addp", [base, tmp])
            Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
        else
            self
        end
    end
end

class BaseIndex
    # Base+index operands cannot also carry an offset, so any nonzero offset
    # is folded into a temporary base register.
    def armV7LowerMalformedAddressesRecurse(list)
        if offset.value != 0
            tmp = Tmp.new(codeOrigin, :gpr)
            list << Instruction.new(codeOrigin, "move", [offset, tmp])
            list << Instruction.new(codeOrigin, "addp", [base, tmp])
            BaseIndex.new(codeOrigin, tmp, index, scale, Immediate.new(codeOrigin, 0))
        else
            self
        end
    end
end

class AbsoluteAddress
    # Absolute addresses are never directly encodable: always load the address
    # into a temporary and use a zero-offset Address through it.
    def armV7LowerMalformedAddressesRecurse(list)
        tmp = Tmp.new(codeOrigin, :gpr)
        list << Instruction.new(codeOrigin, "move", [address, tmp])
        Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
    end
end
+
# Run the malformed-address rewrite over a whole instruction list. Fix-up
# instructions are appended to the output list before the instruction they
# service, because the recursion receives the output list itself.
def armV7LowerMalformedAddresses(list)
    list.each_with_object([]) {
        | node, lowered |
        lowered << node.armV7LowerMalformedAddressesRecurse(lowered)
    }
end
+
+#
+# Lowering of malformed addresses in double loads and stores. For example:
+#
+# loadd [foo, bar, 8], baz
+#
+# becomes:
+#
+# leap [foo, bar, 8], tmp
+# loadd [tmp], baz
+#
+
class Node
    # By default an operand needs no rewriting for double loads/stores.
    def armV7DoubleAddress(list)
        self
    end
end

class BaseIndex
    # vldr/vstr cannot encode base+index operands: compute the effective
    # address into a temporary with leap first, then use a plain zero-offset
    # Address through it.
    def armV7DoubleAddress(list)
        tmp = Tmp.new(codeOrigin, :gpr)
        list << Instruction.new(codeOrigin, "leap", [self, tmp])
        Address.new(codeOrigin, tmp, Immediate.new(codeOrigin, 0))
    end
end
+
# Legalize the address operand of double loads/stores (see armV7DoubleAddress):
# loadd/stored must end up with a simple Address operand.
def armV7LowerMalformedAddressesDouble(list)
    newList = []
    list.each {
        | node |
        unless node.is_a? Instruction
            newList << node
            next
        end
        case node.opcode
        when "loadd"
            # Source address is operand 0.
            newList << Instruction.new(node.codeOrigin, "loadd", [node.operands[0].armV7DoubleAddress(newList), node.operands[1]])
        when "stored"
            # Destination address is operand 1.
            newList << Instruction.new(node.codeOrigin, "stored", [node.operands[0], node.operands[1].armV7DoubleAddress(newList)])
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of misplaced immediates. For example:
+#
+# storei 0, [foo]
+#
+# will become:
+#
+# move 0, tmp
+# storei tmp, [foo]
+#
+
# Store instructions cannot take immediate source operands: rewrite every
# immediate operand of storei/storep into a move through a fresh temporary.
def armV7LowerMisplacedImmediates(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction and ["storei", "storep"].include? node.opcode
            rewrittenOperands = node.operands.map {
                | operand |
                if operand.is_a? Immediate
                    tmp = Tmp.new(operand.codeOrigin, :gpr)
                    newList << Instruction.new(operand.codeOrigin, "move", [operand, tmp])
                    tmp
                else
                    operand
                end
            }
            newList << Instruction.new(node.codeOrigin, node.opcode, rewrittenOperands)
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of malformed immediates except when used in a "move" instruction.
+# For example:
+#
+# addp 642641, foo
+#
+# will become:
+#
+# move 642641, tmp
+# addp tmp, foo
+#
+
class Node
    # Default recursion for immediate legalization: descend into children.
    def armV7LowerMalformedImmediatesRecurse(list)
        mapChildren {
            | node |
            node.armV7LowerMalformedImmediatesRecurse(list)
        }
    end
end

class Address
    # Address offsets are not data-processing immediates; they are handled by
    # armV7LowerMalformedAddresses instead, so leave them alone here.
    def armV7LowerMalformedImmediatesRecurse(list)
        self
    end
end

class BaseIndex
    # Same as Address: nothing inside a BaseIndex is subject to the 0..255 rule.
    def armV7LowerMalformedImmediatesRecurse(list)
        self
    end
end

class AbsoluteAddress
    # Absolute addresses are legalized by the address passes; nothing to do.
    def armV7LowerMalformedImmediatesRecurse(list)
        self
    end
end

class Immediate
    # An immediate outside the unsigned-byte range is loaded into a temporary
    # register via a separate move, which can synthesize any constant.
    def armV7LowerMalformedImmediatesRecurse(list)
        if value < 0 or value > 255
            tmp = Tmp.new(codeOrigin, :gpr)
            list << Instruction.new(codeOrigin, "move", [self, tmp])
            tmp
        else
            self
        end
    end
end
+
# Legalize immediates everywhere except in "move" (which can synthesize any
# constant; see armV7MoveImmediate). For example:
#
#     addp 642641, foo
#
# becomes:
#
#     move 642641, tmp
#     addp tmp, foo
#
# Small negative add/sub immediates are instead flipped into the opposite
# operation with the negated (positive) constant, and multiplies always take
# their constant through a register because muls has no immediate form.
def armV7LowerMalformedImmediates(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "move"
                newList << node
            when "addi", "addp", "addis", "subi", "subp", "subis"
                # Fixed: the original tested `value >= 255`, which can never
                # hold together with `value < 0`, making this branch dead.
                # The intent is to accept small negatives down to -255.
                if node.operands[0].is_a? Immediate and
                        node.operands[0].value < 0 and
                        node.operands[0].value >= -255 and
                        node.operands.size == 2
                    # Flip add<->sub and negate the constant so it becomes a
                    # legal 0..255 immediate; preserve the "s" suffix if any.
                    if node.opcode =~ /add/
                        newOpcode = "sub" + node.opcode[-1..-1]
                    else
                        newOpcode = "add" + node.opcode[-1..-1]
                    end
                    # Fixed: Immediate.new takes codeOrigin first, as at every
                    # other call site in this file.
                    newList << Instruction.new(node.codeOrigin, newOpcode,
                                               [Immediate.new(node.codeOrigin, -node.operands[0].value)] + node.operands[1..-1])
                else
                    newList << node.armV7LowerMalformedImmediatesRecurse(newList)
                end
            when "muli"
                if node.operands[0].is_a? Immediate
                    # muls has no immediate form, so force the constant into a
                    # register unconditionally. Fixed: use node.codeOrigin (a
                    # bare `codeOrigin` is undefined at this scope and raised
                    # NameError).
                    tmp = Tmp.new(node.codeOrigin, :gpr)
                    newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp])
                    newList << Instruction.new(node.codeOrigin, "muli", [tmp] + node.operands[1..-1])
                else
                    newList << node.armV7LowerMalformedImmediatesRecurse(newList)
                end
            else
                newList << node.armV7LowerMalformedImmediatesRecurse(newList)
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of misplaced addresses. For example:
+#
+# addi foo, [bar]
+#
+# will become:
+#
+# loadi [bar], tmp
+# addi foo, tmp
+# storei tmp, [bar]
+#
+# Another example:
+#
+# addi [foo], bar
+#
+# will become:
+#
+# loadi [foo], tmp
+# addi tmp, bar
+#
+
# If +operand+ is an address, load its value into a fresh temporary (the load
# goes into +preList+) and, when +needStore+ is set, schedule a write-back of
# the temporary into +postList+. Non-address operands pass through untouched.
# The "d" suffix selects an FPR temporary; everything else uses a GPR.
def armV7AsRegister(preList, postList, operand, suffix, needStore)
    return operand unless operand.address?

    tmpKind = (suffix == "d") ? :fpr : :gpr
    tmp = Tmp.new(operand.codeOrigin, tmpKind)
    preList << Instruction.new(operand.codeOrigin, "load" + suffix, [operand, tmp])
    postList << Instruction.new(operand.codeOrigin, "store" + suffix, [tmp, operand]) if needStore
    tmp
end
+
# Apply armV7AsRegister to every operand. Only the last operand — by
# convention the destination — gets its temporary written back to memory.
def armV7AsRegisters(preList, postList, operands, suffix)
    lastIndex = operands.size - 1
    operands.each_with_index.map {
        | operand, index |
        armV7AsRegister(preList, postList, operand, suffix, index == lastIndex)
    }
end
+
# Rewrite instructions whose operands are memory addresses into load/op/store
# sequences (see the examples in the banner above). The suffix passed to
# armV7AsRegisters selects the load/store width: "i" word, "b" byte,
# "bs" signed byte, "d" double, "p" pointer.
def armV7LowerMisplacedAddresses(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            # Write-backs for the destination operand accumulate here and are
            # appended after the rewritten instruction.
            postInstructions = []
            case node.opcode
            when "addi", "addp", "addis", "andi", "andp", "lshifti", "muli", "negi", "noti", "ori", "oris",
                "orp", "rshifti", "urshifti", "subi", "subp", "subis", "xori", "xorp", /^bi/, /^bp/, /^bti/,
                /^btp/, /^ci/, /^cp/, /^ti/
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           armV7AsRegisters(newList, postInstructions, node.operands, "i"))
            when "bbeq", "bbneq", "bba", "bbaeq", "bbb", "bbbeq", "btbo", "btbz", "btbnz", "tbz", "tbnz",
                "tbo"
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           armV7AsRegisters(newList, postInstructions, node.operands, "b"))
            when "bbgt", "bbgteq", "bblt", "bblteq", "btbs", "tbs"
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           armV7AsRegisters(newList, postInstructions, node.operands, "bs"))
            when "addd", "divd", "subd", "muld", "sqrtd", /^bd/
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           armV7AsRegisters(newList, postInstructions, node.operands, "d"))
            when "jmp", "call"
                # Branch targets are only ever read, so no write-back is needed.
                newList << Instruction.new(node.codeOrigin,
                                           node.opcode,
                                           [armV7AsRegister(newList, postInstructions, node.operands[0], "p", false)])
            else
                newList << node
            end
            newList += postInstructions
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lowering of register reuse in compare instructions. For example:
+#
+# cieq t0, t1, t0
+#
+# will become:
+#
+# mov tmp, t0
+# cieq tmp, t1, t0
+#
+
# Compare/test-set instructions zero their destination register before the
# comparison (see emitArmV7Compare/emitArmV7TestSet), so the destination must
# not alias a source. When it does, copy the aliased source into a temporary
# first (see the example in the banner above).
def armV7LowerRegisterReuse(list)
    newList = []
    list.each {
        | node |
        if node.is_a? Instruction
            case node.opcode
            when "cieq", "cineq", "cia", "ciaeq", "cib", "cibeq", "cigt", "cigteq", "cilt", "cilteq",
                "cpeq", "cpneq", "cpa", "cpaeq", "cpb", "cpbeq", "cpgt", "cpgteq", "cplt", "cplteq",
                "tio", "tis", "tiz", "tinz", "tbo", "tbs", "tbz", "tbnz"
                if node.operands.size == 2
                    # Two-operand form: operands[1] is both source and destination.
                    if node.operands[0] == node.operands[1]
                        tmp = Tmp.new(node.codeOrigin, :gpr)
                        newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp])
                        newList << Instruction.new(node.codeOrigin, node.opcode, [tmp, node.operands[1]])
                    else
                        newList << node
                    end
                else
                    raise "Wrong number of arguments at #{node.codeOriginString}" unless node.operands.size == 3
                    # Three-operand form: operands[2] is the destination; copy
                    # whichever source aliases it into a temporary.
                    if node.operands[0] == node.operands[2]
                        tmp = Tmp.new(node.codeOrigin, :gpr)
                        newList << Instruction.new(node.codeOrigin, "move", [node.operands[0], tmp])
                        newList << Instruction.new(node.codeOrigin, node.opcode, [tmp, node.operands[1], node.operands[2]])
                    elsif node.operands[1] == node.operands[2]
                        tmp = Tmp.new(node.codeOrigin, :gpr)
                        newList << Instruction.new(node.codeOrigin, "move", [node.operands[1], tmp])
                        newList << Instruction.new(node.codeOrigin, node.opcode, [node.operands[0], tmp, node.operands[2]])
                    else
                        newList << node
                    end
                end
            else
                newList << node
            end
        else
            newList << node
        end
    }
    newList
end
+
+#
+# Lea support.
+#
+
class Address
    # Emit an effective-address computation into +destination+; uses the
    # shorter two-operand adds when the destination doubles as the base.
    def armV7EmitLea(destination)
        if destination == base
            $asm.puts "adds #{destination.armV7Operand}, \##{offset.value}"
        else
            $asm.puts "adds #{destination.armV7Operand}, #{base.armV7Operand}, \##{offset.value}"
        end
    end
end

class BaseIndex
    # Base+scaled-index lea; the lowering passes guarantee a zero offset here.
    def armV7EmitLea(destination)
        raise "Malformed BaseIndex, offset should be zero at #{codeOriginString}" unless offset.value == 0
        $asm.puts "add.w #{destination.armV7Operand}, #{base.armV7Operand}, #{index.armV7Operand}, lsl \##{scaleShift}"
    end
end
+
+# FIXME: we could support AbsoluteAddress for lea, but we don't.
+
+#
+# Actual lowering code follows.
+#
+
class Sequence
    # Drive the full ARMv7 lowering pipeline over this sequence and emit
    # assembly. The pass order matters: each pass assumes the invariants
    # established by the ones before it (e.g. register assignment must come
    # last, after every pass that introduces temporaries).
    def lowerARMv7
        myList = @list

        # Verify that we will only see instructions and labels.
        myList.each {
            | node |
            unless node.is_a? Instruction or
                    node.is_a? Label or
                    node.is_a? LocalLabel or
                    node.is_a? Skip
                raise "Unexpected #{node.inspect} at #{node.codeOrigin}"
            end
        }

        myList = armV7LowerBranchOps(myList)
        myList = armV7LowerShiftOps(myList)
        myList = armV7LowerMalformedAddresses(myList)
        myList = armV7LowerMalformedAddressesDouble(myList)
        myList = armV7LowerMisplacedImmediates(myList)
        myList = armV7LowerMalformedImmediates(myList)
        myList = armV7LowerMisplacedAddresses(myList)
        myList = armV7LowerRegisterReuse(myList)
        # Bind the Tmp nodes created by the passes above to real registers.
        myList = assignRegistersToTemporaries(myList, :gpr, ARMv7_EXTRA_GPRS)
        myList = assignRegistersToTemporaries(myList, :fpr, ARMv7_EXTRA_FPRS)
        myList.each {
            | node |
            node.lower("ARMv7")
        }
    end
end
+
# Render a list of operands as a comma-separated assembler operand string.
def armV7Operands(operands)
    operands.collect {
        | operand |
        operand.armV7Operand
    }.join(", ")
end
+
# offlineasm puts the destination operand last; ARM assembly puts it first.
# Move the final operand to the front before rendering.
def armV7FlippedOperands(operands)
    flipped = [operands[-1]] + operands[0...-1]
    armV7Operands(flipped)
end
+
# Emit a data-processing op, preferring the compact two-operand Thumb encoding
# (+opcode2+) when possible and falling back to the three-operand form
# (+opcode3+). An immediate source forces the three-operand form with the
# destination repeated as the first source.
def emitArmV7Compact(opcode2, opcode3, operands)
    if operands.size == 3
        $asm.puts "#{opcode3} #{armV7FlippedOperands(operands)}"
    else
        raise unless operands.size == 2
        raise unless operands[1].is_a? RegisterID
        if operands[0].is_a? Immediate
            $asm.puts "#{opcode3} #{operands[1].armV7Operand}, #{operands[1].armV7Operand}, #{operands[0].armV7Operand}"
        else
            $asm.puts "#{opcode2} #{armV7FlippedOperands(operands)}"
        end
    end
end

# Emit a three-operand op; a two-operand input is expanded with the
# destination repeated as the first source.
def emitArmV7(opcode, operands)
    if operands.size == 3
        $asm.puts "#{opcode} #{armV7FlippedOperands(operands)}"
    else
        raise unless operands.size == 2
        $asm.puts "#{opcode} #{operands[1].armV7Operand}, #{operands[1].armV7Operand}, #{operands[0].armV7Operand}"
    end
end

# Emit a double-precision compare followed by a conditional branch: vcmpe.f64
# sets the VFP flags, vmrs copies them into APSR so the branch can test them.
def emitArmV7DoubleBranch(branchOpcode, operands)
    $asm.puts "vcmpe.f64 #{armV7Operands(operands[0..1])}"
    $asm.puts "vmrs apsr_nzcv, fpscr"
    $asm.puts "#{branchOpcode} #{operands[2].asmLabel}"
end
+
# Emit the flag-setting part of a test instruction: tst value, mask. With two
# operands the mask is implicitly all ones, in which case we test the value
# against itself (shorter encoding than tst.w with an immediate).
def emitArmV7Test(operands)
    value = operands[0]
    case operands.size
    when 2
        # Fixed: use the value's codeOrigin — a bare `codeOrigin` is not
        # defined at this (top-level method) scope and raised NameError.
        mask = Immediate.new(value.codeOrigin, -1)
    when 3
        mask = operands[1]
    else
        raise "Expected 2 or 3 operands but got #{operands.size} at #{value.codeOriginString}"
    end

    if mask.is_a? Immediate and mask.value == -1
        $asm.puts "tst #{value.armV7Operand}, #{value.armV7Operand}"
    elsif mask.is_a? Immediate
        $asm.puts "tst.w #{value.armV7Operand}, #{mask.armV7Operand}"
    else
        $asm.puts "tst #{value.armV7Operand}, #{mask.armV7Operand}"
    end
end
+
# Materialize a comparison result as 0/1 in operands[2] using an IT block:
# zero the destination, compare the two sources, then conditionally set the
# destination to one under condition +code+.
def emitArmV7Compare(operands, code)
    destination = operands[2].armV7Operand
    $asm.puts "movs #{destination}, \#0"
    $asm.puts "cmp #{operands[0].armV7Operand}, #{operands[1].armV7Operand}"
    $asm.puts "it #{code}"
    $asm.puts "mov#{code} #{destination}, \#1"
end
+
# Materialize a test result as 0/1 in the last operand: zero it, set the
# flags with emitArmV7Test, then conditionally set it to one under +code+.
def emitArmV7TestSet(operands, code)
    $asm.puts "movs #{operands[-1].armV7Operand}, \#0"
    emitArmV7Test(operands)
    $asm.puts "it #{code}"
    $asm.puts "mov#{code} #{operands[-1].armV7Operand}, \#1"
end
+
class Instruction
    # Emit ARMv7 (Thumb-2) assembly for one offlineasm instruction. This runs
    # after all the lowering passes in Sequence#lowerARMv7, so operands are
    # assumed to already be legal for ARMv7: small immediates, simple
    # addresses, and no destination/source aliasing in compares.
    def lowerARMv7
        $asm.comment codeOriginString
        case opcode
        when "addi", "addp", "addis"
            if opcode == "addis"
                suffix = "s"
            else
                suffix = ""
            end
            if operands.size == 3 and operands[0].is_a? Immediate
                raise unless operands[1].is_a? RegisterID
                raise unless operands[2].is_a? RegisterID
                # add of 0 degenerates to a register move (or nothing at all),
                # but only when the flags are not needed.
                if operands[0].value == 0 and suffix.empty?
                    unless operands[1] == operands[2]
                        $asm.puts "mov #{operands[2].armV7Operand}, #{operands[1].armV7Operand}"
                    end
                else
                    $asm.puts "adds #{operands[2].armV7Operand}, #{operands[1].armV7Operand}, #{operands[0].armV7Operand}"
                end
            elsif operands.size == 3 and operands[0].is_a? RegisterID
                raise unless operands[1].is_a? RegisterID
                raise unless operands[2].is_a? RegisterID
                $asm.puts "adds #{armV7FlippedOperands(operands)}"
            else
                if operands[0].is_a? Immediate
                    unless Immediate.new(nil, 0) == operands[0]
                        $asm.puts "adds #{armV7FlippedOperands(operands)}"
                    end
                else
                    $asm.puts "add#{suffix} #{armV7FlippedOperands(operands)}"
                end
            end
        when "andi", "andp"
            emitArmV7Compact("ands", "and", operands)
        when "ori", "orp"
            emitArmV7Compact("orrs", "orr", operands)
        when "oris"
            emitArmV7Compact("orrs", "orrs", operands)
        when "xori", "xorp"
            emitArmV7Compact("eors", "eor", operands)
        when "lshifti"
            emitArmV7Compact("lsls", "lsls", operands)
        when "rshifti"
            emitArmV7Compact("asrs", "asrs", operands)
        when "urshifti"
            emitArmV7Compact("lsrs", "lsrs", operands)
        when "muli"
            # muls requires the destination to be one of the sources; otherwise
            # stage through the destination with a mov first.
            if operands.size == 2 or operands[0] == operands[2] or operands[1] == operands[2]
                emitArmV7("muls", operands)
            else
                $asm.puts "mov #{operands[2].armV7Operand}, #{operands[0].armV7Operand}"
                $asm.puts "muls #{operands[2].armV7Operand}, #{operands[2].armV7Operand}, #{operands[1].armV7Operand}"
            end
        when "subi", "subp", "subis"
            emitArmV7Compact("subs", "subs", operands)
        when "negi"
            $asm.puts "rsbs #{operands[0].armV7Operand}, #{operands[0].armV7Operand}, \#0"
        when "noti"
            $asm.puts "mvns #{operands[0].armV7Operand}, #{operands[0].armV7Operand}"
        when "loadi", "loadp"
            $asm.puts "ldr #{armV7FlippedOperands(operands)}"
        when "storei", "storep"
            $asm.puts "str #{armV7Operands(operands)}"
        when "loadb"
            $asm.puts "ldrb #{armV7FlippedOperands(operands)}"
        when "loadbs"
            $asm.puts "ldrsb.w #{armV7FlippedOperands(operands)}"
        when "storeb"
            $asm.puts "strb #{armV7Operands(operands)}"
        when "loadh"
            $asm.puts "ldrh #{armV7FlippedOperands(operands)}"
        when "loadhs"
            $asm.puts "ldrsh.w #{armV7FlippedOperands(operands)}"
        when "storeh"
            $asm.puts "strh #{armV7Operands(operands)}"
        when "loadd"
            $asm.puts "vldr.64 #{armV7FlippedOperands(operands)}"
        when "stored"
            $asm.puts "vstr.64 #{armV7Operands(operands)}"
        when "addd"
            emitArmV7("vadd.f64", operands)
        when "divd"
            emitArmV7("vdiv.f64", operands)
        when "subd"
            emitArmV7("vsub.f64", operands)
        when "muld"
            emitArmV7("vmul.f64", operands)
        when "sqrtd"
            $asm.puts "vsqrt.f64 #{armV7FlippedOperands(operands)}"
        when "ci2d"
            # int -> double: move the GPR into the low single half of the
            # destination, then convert in place.
            $asm.puts "vmov #{operands[1].armV7Single}, #{operands[0].armV7Operand}"
            $asm.puts "vcvt.f64.s32 #{operands[1].armV7Operand}, #{operands[1].armV7Single}"
        when "bdeq"
            emitArmV7DoubleBranch("beq", operands)
        when "bdneq"
            # "not equal and ordered": skip the branch when the compare was
            # unordered (bvs), then branch on bne.
            $asm.puts "vcmpe.f64 #{armV7Operands(operands[0..1])}"
            $asm.puts "vmrs apsr_nzcv, fpscr"
            isUnordered = LocalLabel.unique("bdneq")
            $asm.puts "bvs #{LabelReference.new(codeOrigin, isUnordered).asmLabel}"
            $asm.puts "bne #{operands[2].asmLabel}"
            isUnordered.lower("ARMv7")
        when "bdgt"
            emitArmV7DoubleBranch("bgt", operands)
        when "bdgteq"
            emitArmV7DoubleBranch("bge", operands)
        when "bdlt"
            emitArmV7DoubleBranch("bmi", operands)
        when "bdlteq"
            emitArmV7DoubleBranch("bls", operands)
        when "bdequn"
            # "equal or unordered": branch on overflow (unordered) or equality.
            $asm.puts "vcmpe.f64 #{armV7Operands(operands[0..1])}"
            $asm.puts "vmrs apsr_nzcv, fpscr"
            $asm.puts "bvs #{operands[2].asmLabel}"
            $asm.puts "beq #{operands[2].asmLabel}"
        when "bdnequn"
            emitArmV7DoubleBranch("bne", operands)
        when "bdgtun"
            emitArmV7DoubleBranch("bhi", operands)
        when "bdgtequn"
            emitArmV7DoubleBranch("bpl", operands)
        when "bdltun"
            emitArmV7DoubleBranch("blt", operands)
        when "bdltequn"
            emitArmV7DoubleBranch("ble", operands)
        when "btd2i"
            # FIXME: may be a good idea to just get rid of this instruction, since the interpreter
            # currently does not use it.
            raise "ARMv7 does not support this opcode yet, #{codeOrigin}"
        when "td2i"
            # Truncate double -> int via the scratch FPR's single half.
            $asm.puts "vcvt.s32.f64 #{ARMv7_SCRATCH_FPR.armV7Single}, #{operands[0].armV7Operand}"
            $asm.puts "vmov #{operands[1].armV7Operand}, #{ARMv7_SCRATCH_FPR.armV7Single}"
        when "bcd2i"
            # Convert double -> int, then convert back and branch to the slow
            # path if the round trip is not exact or the result is zero
            # (which would lose the distinction between +0 and -0).
            $asm.puts "vcvt.s32.f64 #{ARMv7_SCRATCH_FPR.armV7Single}, #{operands[0].armV7Operand}"
            $asm.puts "vmov #{operands[1].armV7Operand}, #{ARMv7_SCRATCH_FPR.armV7Single}"
            $asm.puts "vcvt.f64.s32 #{ARMv7_SCRATCH_FPR.armV7Operand}, #{ARMv7_SCRATCH_FPR.armV7Single}"
            emitArmV7DoubleBranch("bne", [ARMv7_SCRATCH_FPR, operands[0], operands[2]])
            $asm.puts "tst #{operands[1].armV7Operand}, #{operands[1].armV7Operand}"
            $asm.puts "beq #{operands[2].asmLabel}"
        when "movdz"
            # FIXME: either support this or remove it.
            raise "ARMv7 does not support this opcode yet, #{codeOrigin}"
        when "pop"
            $asm.puts "pop #{operands[0].armV7Operand}"
        when "push"
            $asm.puts "push #{operands[0].armV7Operand}"
        when "move", "sxi2p", "zxi2p"
            # On 32-bit ARM, int -> pointer extension is a plain register move.
            if operands[0].is_a? Immediate
                armV7MoveImmediate(operands[0].value, operands[1])
            else
                $asm.puts "mov #{armV7FlippedOperands(operands)}"
            end
        when "nop"
            $asm.puts "nop"
        when "bieq", "bpeq", "bbeq"
            # Comparison against zero uses the shorter tst-against-self form.
            if Immediate.new(nil, 0) == operands[0]
                $asm.puts "tst #{operands[1].armV7Operand}, #{operands[1].armV7Operand}"
            elsif Immediate.new(nil, 0) == operands[1]
                $asm.puts "tst #{operands[0].armV7Operand}, #{operands[0].armV7Operand}"
            else
                $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            end
            $asm.puts "beq #{operands[2].asmLabel}"
        when "bineq", "bpneq", "bbneq"
            if Immediate.new(nil, 0) == operands[0]
                $asm.puts "tst #{operands[1].armV7Operand}, #{operands[1].armV7Operand}"
            elsif Immediate.new(nil, 0) == operands[1]
                $asm.puts "tst #{operands[0].armV7Operand}, #{operands[0].armV7Operand}"
            else
                $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            end
            $asm.puts "bne #{operands[2].asmLabel}"
        when "bia", "bpa", "bba"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "bhi #{operands[2].asmLabel}"
        when "biaeq", "bpaeq", "bbaeq"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "bhs #{operands[2].asmLabel}"
        when "bib", "bpb", "bbb"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "blo #{operands[2].asmLabel}"
        when "bibeq", "bpbeq", "bbbeq"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "bls #{operands[2].asmLabel}"
        when "bigt", "bpgt", "bbgt"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "bgt #{operands[2].asmLabel}"
        when "bigteq", "bpgteq", "bbgteq"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "bge #{operands[2].asmLabel}"
        when "bilt", "bplt", "bblt"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "blt #{operands[2].asmLabel}"
        when "bilteq", "bplteq", "bblteq"
            $asm.puts "cmp #{armV7Operands(operands[0..1])}"
            $asm.puts "ble #{operands[2].asmLabel}"
        when "btiz", "btpz", "btbz"
            emitArmV7Test(operands)
            $asm.puts "beq #{operands[-1].asmLabel}"
        when "btinz", "btpnz", "btbnz"
            emitArmV7Test(operands)
            $asm.puts "bne #{operands[-1].asmLabel}"
        when "btio", "btpo", "btbo"
            emitArmV7Test(operands)
            $asm.puts "bvs #{operands[-1].asmLabel}"
        when "btis", "btps", "btbs"
            emitArmV7Test(operands)
            $asm.puts "bmi #{operands[-1].asmLabel}"
        when "jmp"
            if operands[0].label?
                $asm.puts "b #{operands[0].asmLabel}"
            else
                $asm.puts "mov pc, #{operands[0].armV7Operand}"
            end
        when "call"
            if operands[0].label?
                $asm.puts "blx #{operands[0].asmLabel}"
            else
                $asm.puts "blx #{operands[0].armV7Operand}"
            end
        when "break"
            $asm.puts "bkpt"
        when "ret"
            $asm.puts "bx lr"
        when "cieq", "cpeq"
            emitArmV7Compare(operands, "eq")
        when "cineq", "cpneq"
            emitArmV7Compare(operands, "ne")
        when "cia", "cpa"
            emitArmV7Compare(operands, "hi")
        when "ciaeq", "cpaeq"
            emitArmV7Compare(operands, "hs")
        when "cib", "cpb"
            emitArmV7Compare(operands, "lo")
        when "cibeq", "cpbeq"
            emitArmV7Compare(operands, "ls")
        when "cigt", "cpgt"
            emitArmV7Compare(operands, "gt")
        when "cigteq", "cpgteq"
            emitArmV7Compare(operands, "ge")
        when "cilt", "cplt"
            emitArmV7Compare(operands, "lt")
        when "cilteq", "cplteq"
            emitArmV7Compare(operands, "le")
        when "tio", "tbo"
            emitArmV7TestSet(operands, "vs")
        when "tis", "tbs"
            emitArmV7TestSet(operands, "mi")
        when "tiz", "tbz"
            emitArmV7TestSet(operands, "eq")
        when "tinz", "tbnz"
            emitArmV7TestSet(operands, "ne")
        when "peek"
            # peek/poke access stack slots by word index.
            $asm.puts "ldr #{operands[1].armV7Operand}, [sp, \##{operands[0].value * 4}]"
        when "poke"
            $asm.puts "str #{operands[1].armV7Operand}, [sp, \##{operands[0].value * 4}]"
        when "fii2d"
            # Assemble a double from two GPRs (low word, high word).
            $asm.puts "vmov #{operands[2].armV7Operand}, #{operands[0].armV7Operand}, #{operands[1].armV7Operand}"
        when "fd2ii"
            # Split a double into two GPRs (low word, high word).
            $asm.puts "vmov #{operands[1].armV7Operand}, #{operands[2].armV7Operand}, #{operands[0].armV7Operand}"
        when "bo"
            $asm.puts "bvs #{operands[0].asmLabel}"
        when "bs"
            $asm.puts "bmi #{operands[0].asmLabel}"
        when "bz"
            $asm.puts "beq #{operands[0].asmLabel}"
        when "bnz"
            $asm.puts "bne #{operands[0].asmLabel}"
        when "leai", "leap"
            operands[0].armV7EmitLea(operands[1])
        when "smulli"
            raise "Wrong number of arguments to smull in #{self.inspect} at #{codeOriginString}" unless operands.length == 4
            $asm.puts "smull #{operands[2].armV7Operand}, #{operands[3].armV7Operand}, #{operands[0].armV7Operand}, #{operands[1].armV7Operand}"
        else
            raise "Unhandled opcode #{opcode} at #{codeOriginString}"
        end
    end
end
+
diff --git a/Source/JavaScriptCore/offlineasm/asm.rb b/Source/JavaScriptCore/offlineasm/asm.rb
new file mode 100644
index 000000000..a93a8c5dd
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/asm.rb
@@ -0,0 +1,176 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+$: << File.dirname(__FILE__)
+
+require "backends"
+require "digest/sha1"
+require "offsets"
+require "parser"
+require "self_hash"
+require "settings"
+require "transform"
+
# Writes the generated interpreter as a C++ source file: assembly is wrapped
# in a giant `asm(...)` statement, one quoted string per instruction. Tracks
# whether output is currently inside the asm block (@state) and batches
# offlineasm source comments (@commentState/@comment) so a single pending
# comment can be appended inline after the instruction it describes.
class Assembler
    def initialize(outp)
        @outp = outp
        @state = :cpp
        @commentState = :none
        @comment = nil
    end

    # Open the asm(...) statement; subsequent puts/print must be assembly.
    def enterAsm
        @outp.puts "asm ("
        @state = :asm
    end

    # Flush any pending comment and close the asm(...) statement.
    def leaveAsm
        putsLastComment
        @outp.puts ");"
        @state = :cpp
    end

    # Run the given block with output in asm mode.
    def inAsm
        enterAsm
        yield
        leaveAsm
    end

    # Consume the pending comment (if any) and return it rendered as a C++
    # line comment, resetting the comment state.
    def lastComment
        if @comment
            result = "// #{@comment}"
        else
            result = ""
        end
        @commentState = :none
        @comment = nil
        result
    end

    def putsLastComment
        comment = lastComment
        unless comment.empty?
            @outp.puts comment
        end
    end

    # Emit one assembly instruction as a quoted, tab-indented, newline-
    # terminated C string, with the pending comment appended after it.
    def puts(*line)
        raise unless @state == :asm
        @outp.puts("\"\\t" + line.join('') + "\\n\" #{lastComment}")
    end

    def print(line)
        raise unless @state == :asm
        @outp.print("\"" + line + "\"")
    end

    # Emit a global label via the OFFLINE_ASM_GLOBAL_LABEL C macro.
    def putsLabel(labelName)
        raise unless @state == :asm
        @outp.puts("OFFLINE_ASM_GLOBAL_LABEL(#{labelName}) #{lastComment}")
    end

    # Emit an assembler-local label via the LOCAL_LABEL_STRING C macro.
    def putsLocalLabel(labelName)
        raise unless @state == :asm
        @outp.puts("LOCAL_LABEL_STRING(#{labelName}) \":\\n\" #{lastComment}")
    end

    # Render a reference to a global label for use inside an instruction string.
    def self.labelReference(labelName)
        "\" SYMBOL_STRING(#{labelName}) \""
    end

    def self.localLabelReference(labelName)
        "\" LOCAL_LABEL_STRING(#{labelName}) \""
    end

    # Record a source comment. A single comment is held back so it can be
    # printed inline after the next instruction; once more than one
    # accumulates, comments are flushed eagerly on their own lines.
    def comment(text)
        case @commentState
        when :none
            @comment = text
            @commentState = :one
        when :one
            @outp.puts "// #{@comment}"
            @outp.puts "// #{text}"
            @comment = nil
            @commentState = :many
        when :many
            @outp.puts "// #{text}"
        else
            raise
        end
    end
end
+
# Driver: asm.rb <input .asm> <offsets file> <output .cpp>
asmFile = ARGV.shift
offsetsFile = ARGV.shift
outputFlnm = ARGV.shift

$stderr.puts "offlineasm: Parsing #{asmFile} and #{offsetsFile} and creating assembly file #{outputFlnm}."

configurationList = offsetsAndConfigurationIndex(offsetsFile)
inputData = IO::read(asmFile)

# Hash of everything the output depends on: the asm source, the offsets and
# configuration indices, and the offlineasm scripts themselves (selfHash).
# Written as the first line of the output so an unchanged build can be skipped.
inputHash =
    "// offlineasm input hash: " + Digest::SHA1.hexdigest(inputData) +
    " " + Digest::SHA1.hexdigest(configurationList.map{|v| (v[0] + [v[1]]).join(' ')}.join(' ')) +
    " " + selfHash

# If the existing output was generated from identical inputs, do nothing.
if FileTest.exist? outputFlnm
    File.open(outputFlnm, "r") {
        | inp |
        firstLine = inp.gets
        if firstLine and firstLine.chomp == inputHash
            $stderr.puts "offlineasm: Nothing changed."
            exit 0
        end
    }
end

File.open(outputFlnm, "w") {
    | outp |
    $output = outp
    $output.puts inputHash

    $asm = Assembler.new($output)

    ast = parse(lex(inputData))

    # Emit one code body per (offsets, settings-combination) configuration:
    # resolve offsets into the AST, then lower it through the selected backend.
    configurationList.each {
        | configuration |
        offsetsList = configuration[0]
        configIndex = configuration[1]
        forSettings(computeSettingsCombinations(ast)[configIndex], ast) {
            | concreteSettings, lowLevelAST, backend |
            lowLevelAST = lowLevelAST.resolve(*buildOffsetsMap(lowLevelAST, offsetsList))
            emitCodeInConfiguration(concreteSettings, lowLevelAST, backend) {
                $asm.inAsm {
                    lowLevelAST.lower(backend)
                }
            }
        }
    }
}

$stderr.puts "offlineasm: Assembly file #{outputFlnm} successfully generated."
+
diff --git a/Source/JavaScriptCore/offlineasm/ast.rb b/Source/JavaScriptCore/offlineasm/ast.rb
new file mode 100644
index 000000000..f67b0fc60
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/ast.rb
@@ -0,0 +1,1039 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# Base utility types for the AST.
+#
+
+# Valid methods for Node:
+#
+# node.children -> Returns an array of immediate children.
+#
+# node.descendents -> Returns an array of all strict descendants (children
+# and children of children, transitively).
+#
+# node.flatten -> Returns an array containing the strict descendants and
+# the node itself.
+#
+# node.filter(type) -> Returns an array containing those elements in
+# node.flatten that are of the given type (is_a? type returns true).
+#
+# node.mapChildren{|v| ...} -> Returns a new node with all children
+# replaced according to the given block.
+#
+# Examples:
+#
+# node.filter(Setting).uniq -> Returns all of the settings that the AST's
+# IfThenElse blocks depend on.
+#
+# node.filter(StructOffset).uniq -> Returns all of the structure offsets
+# that the AST depends on.
+
+# Root of the AST hierarchy. Every node remembers the source line it came
+# from (codeOrigin) and supports the generic traversal protocol documented
+# above (children / descendants / flatten / filter / mapChildren).
+class Node
+    attr_reader :codeOrigin
+
+    def initialize(codeOrigin)
+        @codeOrigin = codeOrigin
+    end
+
+    def codeOriginString
+        "line number #{@codeOrigin}"
+    end
+
+    # All strict descendants (children, transitively), as a flat array.
+    def descendants
+        children.collect{|v| v.flatten}.flatten
+    end
+
+    # This node followed by all of its descendants.
+    def flatten
+        [self] + descendants
+    end
+
+    # Every node in this subtree (including self) that is an instance of type.
+    def filter(type)
+        flatten.select{|v| v.is_a? type}
+    end
+end
+
+# Convenience base class for leaf nodes: no children, and mapChildren is a
+# no-op that returns self.
+class NoChildren < Node
+    def initialize(codeOrigin)
+        super(codeOrigin)
+    end
+
+    def children
+        []
+    end
+
+    def mapChildren
+        self
+    end
+end
+
+# Hash key identifying a (struct, field) pair; used to intern StructOffset
+# nodes. Defines hash and eql?, which is the contract Ruby's Hash requires
+# of custom keys.
+class StructOffsetKey
+    attr_reader :struct, :field
+
+    def initialize(struct, field)
+        @struct = struct
+        @field = field
+    end
+
+    def hash
+        @struct.hash + @field.hash * 3
+    end
+
+    def eql?(other)
+        @struct == other.struct and @field == other.field
+    end
+end
+
+#
+# AST nodes.
+#
+
+# Reference to offsetof(struct, field). Interned: forField returns the same
+# node for a given (struct, field), so object identity stands in for value
+# equality (this is what makes filter(...).uniq and Hash-keyed offset maps
+# work without defining eql?/hash here).
+class StructOffset < NoChildren
+    attr_reader :struct, :field
+
+    def initialize(codeOrigin, struct, field)
+        super(codeOrigin)
+        @struct = struct
+        @field = field
+    end
+
+    @@mapping = {}
+
+    def self.forField(codeOrigin, struct, field)
+        key = StructOffsetKey.new(struct, field)
+
+        unless @@mapping[key]
+            @@mapping[key] = StructOffset.new(codeOrigin, struct, field)
+        end
+        @@mapping[key]
+    end
+
+    def dump
+        "#{struct}::#{field}"
+    end
+
+    # Order by struct name then field name, giving offsets lists a stable,
+    # deterministic order across runs.
+    def <=>(other)
+        if @struct != other.struct
+            return @struct <=> other.struct
+        end
+        @field <=> other.field
+    end
+
+    # Operand classification: a struct offset resolves to a compile-time
+    # constant, so it is usable wherever an immediate is.
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        true
+    end
+
+    def register?
+        false
+    end
+end
+
+# Reference to sizeof(struct). Interned per struct name, like StructOffset.
+class Sizeof < NoChildren
+    attr_reader :struct
+
+    def initialize(codeOrigin, struct)
+        super(codeOrigin)
+        @struct = struct
+    end
+
+    @@mapping = {}
+
+    def self.forName(codeOrigin, struct)
+        unless @@mapping[struct]
+            @@mapping[struct] = Sizeof.new(codeOrigin, struct)
+        end
+        @@mapping[struct]
+    end
+
+    def dump
+        "sizeof #{@struct}"
+    end
+
+    def <=>(other)
+        @struct <=> other.struct
+    end
+
+    # A size is also a compile-time constant, hence an immediate operand.
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        true
+    end
+
+    def register?
+        false
+    end
+end
+
+# Integer literal operand. Non-integer values are rejected at construction
+# time with a diagnostic that includes the source line.
+class Immediate < NoChildren
+    attr_reader :value
+
+    def initialize(codeOrigin, value)
+        super(codeOrigin)
+        @value = value
+        raise "Bad immediate value #{value.inspect} at #{codeOriginString}" unless value.is_a? Integer
+    end
+
+    def dump
+        "#{value}"
+    end
+
+    # Value equality (unlike the interned node classes, two Immediates with
+    # the same value may be distinct objects).
+    def ==(other)
+        other.is_a? Immediate and other.value == @value
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        true
+    end
+
+    def register?
+        false
+    end
+end
+
+# Arithmetic over immediate-valued expressions. These nodes classify as
+# immediates (immediate? is true): both operands are themselves immediate
+# expressions, so the whole expression denotes a constant.
+class AddImmediates < Node
+    attr_reader :left, :right
+
+    def initialize(codeOrigin, left, right)
+        super(codeOrigin)
+        @left = left
+        @right = right
+    end
+
+    def children
+        [@left, @right]
+    end
+
+    def mapChildren
+        AddImmediates.new(codeOrigin, (yield @left), (yield @right))
+    end
+
+    def dump
+        "(#{left.dump} + #{right.dump})"
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        true
+    end
+
+    def register?
+        false
+    end
+end
+
+# Immediate subtraction; see AddImmediates.
+class SubImmediates < Node
+    attr_reader :left, :right
+
+    def initialize(codeOrigin, left, right)
+        super(codeOrigin)
+        @left = left
+        @right = right
+    end
+
+    def children
+        [@left, @right]
+    end
+
+    def mapChildren
+        SubImmediates.new(codeOrigin, (yield @left), (yield @right))
+    end
+
+    def dump
+        "(#{left.dump} - #{right.dump})"
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        true
+    end
+
+    def register?
+        false
+    end
+end
+
+# Immediate multiplication; see AddImmediates.
+class MulImmediates < Node
+    attr_reader :left, :right
+
+    def initialize(codeOrigin, left, right)
+        super(codeOrigin)
+        @left = left
+        @right = right
+    end
+
+    def children
+        [@left, @right]
+    end
+
+    def mapChildren
+        MulImmediates.new(codeOrigin, (yield @left), (yield @right))
+    end
+
+    def dump
+        "(#{left.dump} * #{right.dump})"
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        true
+    end
+
+    def register?
+        false
+    end
+end
+
+# Immediate negation (unary minus); see AddImmediates.
+class NegImmediate < Node
+    attr_reader :child
+
+    def initialize(codeOrigin, child)
+        super(codeOrigin)
+        @child = child
+    end
+
+    def children
+        [@child]
+    end
+
+    def mapChildren
+        NegImmediate.new(codeOrigin, (yield @child))
+    end
+
+    def dump
+        "(-#{@child.dump})"
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        true
+    end
+
+    def register?
+        false
+    end
+end
+
+# A named register operand (register? is true). Interned: one node per name.
+class RegisterID < NoChildren
+    attr_reader :name
+
+    def initialize(codeOrigin, name)
+        super(codeOrigin)
+        @name = name
+    end
+
+    @@mapping = {}
+
+    def self.forName(codeOrigin, name)
+        unless @@mapping[name]
+            @@mapping[name] = RegisterID.new(codeOrigin, name)
+        end
+        @@mapping[name]
+    end
+
+    def dump
+        name
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        false
+    end
+
+    def register?
+        true
+    end
+end
+
+# A named floating-point register operand. Interned, like RegisterID.
+class FPRegisterID < NoChildren
+    attr_reader :name
+
+    def initialize(codeOrigin, name)
+        super(codeOrigin)
+        @name = name
+    end
+
+    @@mapping = {}
+
+    def self.forName(codeOrigin, name)
+        unless @@mapping[name]
+            @@mapping[name] = FPRegisterID.new(codeOrigin, name)
+        end
+        @@mapping[name]
+    end
+
+    def dump
+        name
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        false
+    end
+
+    def register?
+        true
+    end
+end
+
+# A named variable, interned by name. Unlike the operand classes above it
+# defines no address?/label?/immediate?/register? predicates, so querying a
+# Variable as an operand raises NoMethodError — presumably variables are
+# substituted away before any such query (confirm in transform.rb).
+class Variable < NoChildren
+    attr_reader :name
+
+    def initialize(codeOrigin, name)
+        super(codeOrigin)
+        @name = name
+    end
+
+    @@mapping = {}
+
+    def self.forName(codeOrigin, name)
+        unless @@mapping[name]
+            @@mapping[name] = Variable.new(codeOrigin, name)
+        end
+        @@mapping[name]
+    end
+
+    def dump
+        name
+    end
+end
+
+# A base-plus-offset memory operand, printed as offset[base]. The
+# constructor accepts Variables for either part (they are resolved later);
+# after resolution the base must be a register and the offset an immediate.
+class Address < Node
+    attr_reader :base, :offset
+
+    def initialize(codeOrigin, base, offset)
+        super(codeOrigin)
+        @base = base
+        @offset = offset
+        raise "Bad base for address #{base.inspect} at #{codeOriginString}" unless base.is_a? Variable or base.register?
+        raise "Bad offset for address #{offset.inspect} at #{codeOriginString}" unless offset.is_a? Variable or offset.immediate?
+    end
+
+    def children
+        [@base, @offset]
+    end
+
+    def mapChildren
+        Address.new(codeOrigin, (yield @base), (yield @offset))
+    end
+
+    def dump
+        "#{offset.dump}[#{base.dump}]"
+    end
+
+    def address?
+        true
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        false
+    end
+
+    def register?
+        false
+    end
+end
+
+# A base + index * scale + offset memory operand. Scale is restricted to
+# 1, 2, 4 or 8 at construction time.
+class BaseIndex < Node
+    attr_reader :base, :index, :scale, :offset
+
+    def initialize(codeOrigin, base, index, scale, offset)
+        super(codeOrigin)
+        @base = base
+        @index = index
+        @scale = scale
+        raise unless [1, 2, 4, 8].member? @scale
+        @offset = offset
+    end
+
+    # log2 of the scale, for backends that encode the scale as a shift amount.
+    def scaleShift
+        case scale
+        when 1
+            0
+        when 2
+            1
+        when 4
+            2
+        when 8
+            3
+        else
+            raise "Bad scale at #{codeOriginString}"
+        end
+    end
+
+    # Note: scale is deliberately not a child — it is a plain integer, not a
+    # node, and is passed through unchanged by mapChildren.
+    def children
+        [@base, @index, @offset]
+    end
+
+    def mapChildren
+        BaseIndex.new(codeOrigin, (yield @base), (yield @index), @scale, (yield @offset))
+    end
+
+    def dump
+        "#{offset.dump}[#{base.dump}, #{index.dump}, #{scale}]"
+    end
+
+    def address?
+        true
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        false
+    end
+
+    def register?
+        false
+    end
+end
+
+# A memory operand at a fixed address, printed as address[].
+# NOTE(review): this subclasses NoChildren, so @address is not visited by
+# children/mapChildren — confirm that is intentional before relying on
+# traversal to rewrite absolute addresses.
+class AbsoluteAddress < NoChildren
+    attr_reader :address
+
+    def initialize(codeOrigin, address)
+        super(codeOrigin)
+        @address = address
+    end
+
+    def dump
+        "#{address.dump}[]"
+    end
+
+    def address?
+        true
+    end
+
+    def label?
+        false
+    end
+
+    def immediate?
+        false
+    end
+
+    def register?
+        false
+    end
+end
+
+# A single assembly instruction: an opcode plus a list of operand nodes.
+class Instruction < Node
+    attr_reader :opcode, :operands
+
+    def initialize(codeOrigin, opcode, operands)
+        super(codeOrigin)
+        @opcode = opcode
+        @operands = operands
+    end
+
+    def children
+        operands
+    end
+
+    def mapChildren(&proc)
+        Instruction.new(codeOrigin, @opcode, @operands.map(&proc))
+    end
+
+    def dump
+        "\t" + opcode.to_s + " " + operands.collect{|v| v.dump}.join(", ")
+    end
+end
+
+# Marker statement for code paths that should never execute.
+class Error < NoChildren
+    def initialize(codeOrigin)
+        super(codeOrigin)
+    end
+
+    def dump
+        "\terror"
+    end
+end
+
+# A "const variable = value" declaration.
+class ConstDecl < Node
+    attr_reader :variable, :value
+
+    def initialize(codeOrigin, variable, value)
+        super(codeOrigin)
+        @variable = variable
+        @value = value
+    end
+
+    def children
+        [@variable, @value]
+    end
+
+    def mapChildren
+        ConstDecl.new(codeOrigin, (yield @variable), (yield @value))
+    end
+
+    def dump
+        "const #{@variable.dump} = #{@value.dump}"
+    end
+end
+
+# Registry shared by Label and LocalLabel: interns labels by name and lets
+# forName detect a name being used as both a global and a local label.
+$labelMapping = {}
+
+# A global label definition.
+class Label < NoChildren
+    attr_reader :name
+
+    def initialize(codeOrigin, name)
+        super(codeOrigin)
+        @name = name
+    end
+
+    # Interns by name; raises if the name is already taken by a LocalLabel.
+    def self.forName(codeOrigin, name)
+        if $labelMapping[name]
+            raise "Label name collision: #{name}" unless $labelMapping[name].is_a? Label
+        else
+            $labelMapping[name] = Label.new(codeOrigin, name)
+        end
+        $labelMapping[name]
+    end
+
+    def dump
+        "#{name}:"
+    end
+end
+
+# A local (file-scope) label definition.
+class LocalLabel < NoChildren
+    attr_reader :name
+
+    def initialize(codeOrigin, name)
+        super(codeOrigin)
+        @name = name
+    end
+
+    @@uniqueNameCounter = 0
+
+    # Interns by name; raises if the name is already taken by a global Label.
+    def self.forName(codeOrigin, name)
+        if $labelMapping[name]
+            raise "Label name collision: #{name}" unless $labelMapping[name].is_a? LocalLabel
+        else
+            $labelMapping[name] = LocalLabel.new(codeOrigin, name)
+        end
+        $labelMapping[name]
+    end
+
+    # Generates a fresh local label embedding the given comment; a counter is
+    # appended only when the plain "_comment" name is already taken.
+    def self.unique(comment)
+        newName = "_#{comment}"
+        if $labelMapping[newName]
+            while $labelMapping[newName = "_#{@@uniqueNameCounter}_#{comment}"]
+                @@uniqueNameCounter += 1
+            end
+        end
+        forName(nil, newName)
+    end
+
+    # Name with a leading "." replaced by "_" (yields an identifier-safe name).
+    def cleanName
+        if name =~ /^\./
+            "_" + name[1..-1]
+        else
+            name
+        end
+    end
+
+    def dump
+        "#{name}:"
+    end
+end
+
+# A use of a global label as an operand (label? is true).
+class LabelReference < Node
+    attr_reader :label
+
+    def children
+        [@label]
+    end
+
+    def initialize(codeOrigin, label)
+        super(codeOrigin)
+        @label = label
+    end
+
+    def mapChildren
+        LabelReference.new(codeOrigin, (yield @label))
+    end
+
+    def name
+        label.name
+    end
+
+    def dump
+        label.name
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        true
+    end
+end
+
+# A use of a local label as an operand.
+# NOTE(review): this subclasses NoChildren yet overrides children and
+# mapChildren, while LabelReference subclasses Node — the two should
+# presumably share a base; harmless as written. Neither class defines
+# immediate?/register?, so those queries raise on label references.
+class LocalLabelReference < NoChildren
+    attr_reader :label
+
+    def initialize(codeOrigin, label)
+        super(codeOrigin)
+        @label = label
+    end
+
+    def children
+        [@label]
+    end
+
+    def mapChildren
+        LocalLabelReference.new(codeOrigin, (yield @label))
+    end
+
+    def name
+        label.name
+    end
+
+    def dump
+        label.name
+    end
+
+    def address?
+        false
+    end
+
+    def label?
+        true
+    end
+end
+
+# An ordered list of statements.
+class Sequence < Node
+    attr_reader :list
+
+    def initialize(codeOrigin, list)
+        super(codeOrigin)
+        @list = list
+    end
+
+    def children
+        list
+    end
+
+    def mapChildren(&proc)
+        Sequence.new(codeOrigin, @list.map(&proc))
+    end
+
+    def dump
+        list.collect{|v| v.dump}.join("\n")
+    end
+end
+
+# Singleton AST node for the boolean literal true; the instance is created
+# once, at class-definition time.
+class True < NoChildren
+    def initialize
+        super(nil)
+    end
+
+    @@instance = True.new
+
+    def self.instance
+        @@instance
+    end
+
+    def value
+        true
+    end
+
+    def dump
+        "true"
+    end
+end
+
+# Singleton AST node for the boolean literal false.
+class False < NoChildren
+    def initialize
+        super(nil)
+    end
+
+    @@instance = False.new
+
+    def self.instance
+        @@instance
+    end
+
+    def value
+        false
+    end
+
+    def dump
+        "false"
+    end
+end
+
+# Reopen Ruby's boolean classes so a native true/false converts to its AST
+# singleton via asNode.
+class TrueClass
+    def asNode
+        True.instance
+    end
+end
+
+class FalseClass
+    def asNode
+        False.instance
+    end
+end
+
+# A named build setting referenced by if-predicates; interned by name.
+class Setting < NoChildren
+    attr_reader :name
+
+    def initialize(codeOrigin, name)
+        super(codeOrigin)
+        @name = name
+    end
+
+    @@mapping = {}
+
+    def self.forName(codeOrigin, name)
+        unless @@mapping[name]
+            @@mapping[name] = Setting.new(codeOrigin, name)
+        end
+        @@mapping[name]
+    end
+
+    def dump
+        name
+    end
+end
+
+# Boolean conjunction of two predicate expressions.
+class And < Node
+    attr_reader :left, :right
+
+    def initialize(codeOrigin, left, right)
+        super(codeOrigin)
+        @left = left
+        @right = right
+    end
+
+    def children
+        [@left, @right]
+    end
+
+    def mapChildren
+        And.new(codeOrigin, (yield @left), (yield @right))
+    end
+
+    def dump
+        "(#{left.dump} and #{right.dump})"
+    end
+end
+
+# Boolean disjunction of two predicate expressions.
+class Or < Node
+    attr_reader :left, :right
+
+    def initialize(codeOrigin, left, right)
+        super(codeOrigin)
+        @left = left
+        @right = right
+    end
+
+    def children
+        [@left, @right]
+    end
+
+    def mapChildren
+        Or.new(codeOrigin, (yield @left), (yield @right))
+    end
+
+    def dump
+        "(#{left.dump} or #{right.dump})"
+    end
+end
+
+class Not < Node
+ attr_reader :child
+
+ def initialize(codeOrigin, child)
+ super(codeOrigin)
+ @child = child
+ end
+
+ def children
+ [@left, @right]
+ end
+
+ def mapChildren
+ Not.new(codeOrigin, (yield @child))
+ end
+
+ def dump
+ "(not #{child.dump})"
+ end
+end
+
+# A no-op statement; also the default else-branch of IfThenElse.
+class Skip < NoChildren
+    def initialize(codeOrigin)
+        super(codeOrigin)
+    end
+
+    def dump
+        "\tskip"
+    end
+end
+
+class IfThenElse < Node
+ attr_reader :predicate, :thenCase
+ attr_accessor :elseCase
+
+ def initialize(codeOrigin, predicate, thenCase)
+ super(codeOrigin)
+ @predicate = predicate
+ @thenCase = thenCase
+ @elseCase = Skip.new(codeOrigin)
+ end
+
+ def children
+ if @elseCase
+ [@predicate, @thenCase, @elseCase]
+ else
+ [@predicate, @thenCase]
+ end
+ end
+
+ def mapChildren
+ IfThenElse.new(codeOrigin, (yield @predicate), (yield @thenCase), (yield @elseCase))
+ end
+
+ def dump
+ "if #{predicate.dump}\n" + thenCase.dump + "\nelse\n" + elseCase.dump + "\nend"
+ end
+end
+
+# A macro definition: name, formal variables, and a body Sequence.
+class Macro < Node
+    attr_reader :name, :variables, :body
+
+    def initialize(codeOrigin, name, variables, body)
+        super(codeOrigin)
+        @name = name
+        @variables = variables
+        @body = body
+    end
+
+    def children
+        @variables + [@body]
+    end
+
+    def mapChildren
+        Macro.new(codeOrigin, @name, @variables.map{|v| yield v}, (yield @body))
+    end
+
+    def dump
+        "macro #{name}(" + variables.collect{|v| v.dump}.join(", ") + ")\n" + body.dump + "\nend"
+    end
+end
+
+# A macro call site. The constructor insists on a non-nil operand list with
+# no nil entries, catching parser bugs early.
+class MacroCall < Node
+    attr_reader :name, :operands
+
+    def initialize(codeOrigin, name, operands)
+        super(codeOrigin)
+        @name = name
+        @operands = operands
+        raise unless @operands
+        @operands.each{|v| raise unless v}
+    end
+
+    def children
+        @operands
+    end
+
+    def mapChildren(&proc)
+        MacroCall.new(codeOrigin, @name, @operands.map(&proc))
+    end
+
+    def dump
+        "\t#{name}(" + operands.collect{|v| v.dump}.join(", ") + ")"
+    end
+end
+
diff --git a/Source/JavaScriptCore/offlineasm/backends.rb b/Source/JavaScriptCore/offlineasm/backends.rb
new file mode 100644
index 000000000..2c65b517d
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/backends.rb
@@ -0,0 +1,96 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "armv7"
+require "ast"
+require "x86"
+
+# All backend names that settings resolution recognizes.
+BACKENDS =
+    [
+     "X86",
+     "ARMv7"
+    ]
+
+# Keep the set of working backends separate from the set of backends that might be
+# supported. This is great because the BACKENDS list is almost like a reserved
+# words list, in that it causes settings resolution to treat those words specially.
+# Hence this lets us set aside the name of a backend we might want to support in
+# the future while not actually supporting the backend yet.
+WORKING_BACKENDS =
+    [
+     "X86",
+     "ARMv7"
+    ]
+
+# Anchored alternation matching exactly one backend name.
+BACKEND_PATTERN = Regexp.new('\\A(' + BACKENDS.join(')|(') + ')\\Z')
+
+# Default lowering: dispatch to the backend-specific method, e.g. lowerX86.
+class Node
+    def lower(name)
+        send("lower" + name)
+    end
+end
+
+# Overrides for lower() for those nodes that are backend-agnostic
+
+class Label
+    def lower(name)
+        # The backend name is unused here. NOTE(review): the first character
+        # of the label name is assumed to be a sigil and is stripped —
+        # confirm against the parser's label syntax.
+        $asm.putsLabel(self.name[1..-1])
+    end
+end
+
+class LocalLabel
+    def lower(name)
+        $asm.putsLocalLabel "_offlineasm_#{self.name[1..-1]}"
+    end
+end
+
+class LabelReference
+    def asmLabel
+        Assembler.labelReference(name[1..-1])
+    end
+end
+
+class LocalLabelReference
+    def asmLabel
+        Assembler.localLabelReference("_offlineasm_"+name[1..-1])
+    end
+end
+
+# Skip lowers to nothing on every backend.
+class Skip
+    def lower(name)
+    end
+end
+
+# A Sequence lowers via a backend-specific whole-sequence hook when one is
+# defined; otherwise it lowers each element in order.
+class Sequence
+    def lower(name)
+        if respond_to? "lower#{name}"
+            send("lower#{name}")
+        else
+            @list.each {
+                | node |
+                node.lower(name)
+            }
+        end
+    end
+end
+
diff --git a/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
new file mode 100644
index 000000000..8bdf4505d
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/generate_offset_extractor.rb
@@ -0,0 +1,146 @@
+#!/usr/bin/env ruby
+
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+$: << File.dirname(__FILE__)
+
+require "backends"
+require "digest/sha1"
+require "offsets"
+require "parser"
+require "self_hash"
+require "settings"
+require "transform"
+
+inputFlnm = ARGV.shift     # offlineasm source
+outputFlnm = ARGV.shift    # generated C source holding the extractor table
+
+$stderr.puts "offlineasm: Parsing #{inputFlnm} and creating offset extractor #{outputFlnm}."
+
+# Emits the OFFSET_MAGIC_NUMBERS delimiter into the generated table; these
+# are the byte patterns that offsets.rb later scans for in the compiled
+# binary to locate each record.
+def emitMagicNumber
+    OFFSET_MAGIC_NUMBERS.each {
+        | number |
+        $output.puts "#{number},"
+    }
+end
+
+inputData = IO::read(inputFlnm)
+# First output line records a hash of the input and of the offlineasm
+# scripts themselves (selfHash), so regeneration is skipped when unchanged.
+inputHash = "// offlineasm input hash: #{Digest::SHA1.hexdigest(inputData)} #{selfHash}"
+
+if FileTest.exist? outputFlnm
+    File.open(outputFlnm, "r") {
+        | inp |
+        firstLine = inp.gets
+        if firstLine and firstLine.chomp == inputHash
+            $stderr.puts "offlineasm: Nothing changed."
+            exit 0
+        end
+    }
+end
+
+originalAST = parse(lex(inputData))
+
+#
+# Optimize the AST to make configuration extraction faster. This reduces the AST to a form
+# that only contains the things that matter for our purposes: offsets, sizes, and if
+# statements.
+#
+
+# Generic pruning: by default a node contributes nothing itself and just
+# recurses into its children, accumulating survivors into a flat Sequence.
+class Node
+    def offsetsPruneTo(sequence)
+        children.each {
+            | child |
+            child.offsetsPruneTo(sequence)
+        }
+    end
+
+    def offsetsPrune
+        result = Sequence.new(codeOrigin, [])
+        offsetsPruneTo(result)
+        result
+    end
+end
+
+# If-statements survive (with pruned branches), since which offsets are used
+# depends on the settings the predicate is evaluated under.
+class IfThenElse
+    def offsetsPruneTo(sequence)
+        ifThenElse = IfThenElse.new(codeOrigin, predicate, thenCase.offsetsPrune)
+        ifThenElse.elseCase = elseCase.offsetsPrune
+        sequence.list << ifThenElse
+    end
+end
+
+# Offsets and sizes are the payload being extracted: keep them verbatim.
+class StructOffset
+    def offsetsPruneTo(sequence)
+        sequence.list << self
+    end
+end
+
+class Sizeof
+    def offsetsPruneTo(sequence)
+        sequence.list << self
+    end
+end
+
+prunedAST = originalAST.offsetsPrune
+
+File.open(outputFlnm, "w") {
+    | outp |
+    $output = outp
+    outp.puts inputHash
+    # First pass: count the entries each configuration contributes so the C
+    # array can be declared with an exact length.
+    length = 0
+    emitCodeInAllConfigurations(prunedAST) {
+        | settings, ast, backend, index |
+        offsetsList = ast.filter(StructOffset).uniq.sort
+        sizesList = ast.filter(Sizeof).uniq.sort
+        length += OFFSET_HEADER_MAGIC_NUMBERS.size + (OFFSET_MAGIC_NUMBERS.size + 1) * (1 + offsetsList.size + sizesList.size)
+    }
+    outp.puts "static const unsigned extractorTable[#{length}] = {"
+    # Second pass: emit one record per configuration — header magic, the
+    # configuration index, then a magic-delimited offsetof()/sizeof() entry
+    # for each struct offset and size that configuration's AST uses.
+    emitCodeInAllConfigurations(prunedAST) {
+        | settings, ast, backend, index |
+        OFFSET_HEADER_MAGIC_NUMBERS.each {
+            | number |
+            $output.puts "#{number},"
+        }
+
+        offsetsList = ast.filter(StructOffset).uniq.sort
+        sizesList = ast.filter(Sizeof).uniq.sort
+
+        emitMagicNumber
+        outp.puts "#{index},"
+        offsetsList.each {
+            | offset |
+            emitMagicNumber
+            outp.puts "OFFLINE_ASM_OFFSETOF(#{offset.struct}, #{offset.field}),"
+        }
+        sizesList.each {
+            | offset |
+            emitMagicNumber
+            outp.puts "sizeof(#{offset.struct}),"
+        }
+    }
+    outp.puts "};"
+}
+
+$stderr.puts "offlineasm: offset extractor #{outputFlnm} successfully generated."
+
diff --git a/Source/JavaScriptCore/offlineasm/instructions.rb b/Source/JavaScriptCore/offlineasm/instructions.rb
new file mode 100644
index 000000000..497b47371
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/instructions.rb
@@ -0,0 +1,217 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+# Interesting invariant, which we take advantage of: branching instructions
+# always begin with "b", and no non-branching instructions begin with "b".
+# Terminal instructions are "jmp" and "ret".
+
+MACRO_INSTRUCTIONS =
+ [
+ "addi",
+ "andi",
+ "lshifti",
+ "muli",
+ "negi",
+ "noti",
+ "ori",
+ "rshifti",
+ "urshifti",
+ "subi",
+ "xori",
+ "loadi",
+ "loadb",
+ "loadbs",
+ "loadh",
+ "loadhs",
+ "storei",
+ "storeb",
+ "loadd",
+ "moved",
+ "stored",
+ "addd",
+ "divd",
+ "subd",
+ "muld",
+ "sqrtd",
+ "ci2d",
+ "fii2d", # usage: fii2d <gpr with least significant bits>, <gpr with most significant bits>, <fpr>
+ "fd2ii", # usage: fd2ii <fpr>, <gpr with least significant bits>, <gpr with most significant bits>
+ "bdeq",
+ "bdneq",
+ "bdgt",
+ "bdgteq",
+ "bdlt",
+ "bdlteq",
+ "bdequn",
+ "bdnequn",
+ "bdgtun",
+ "bdgtequn",
+ "bdltun",
+ "bdltequn",
+ "btd2i",
+ "td2i",
+ "bcd2i",
+ "movdz",
+ "pop",
+ "push",
+ "move",
+ "sxi2p",
+ "zxi2p",
+ "nop",
+ "bieq",
+ "bineq",
+ "bia",
+ "biaeq",
+ "bib",
+ "bibeq",
+ "bigt",
+ "bigteq",
+ "bilt",
+ "bilteq",
+ "bbeq",
+ "bbneq",
+ "bba",
+ "bbaeq",
+ "bbb",
+ "bbbeq",
+ "bbgt",
+ "bbgteq",
+ "bblt",
+ "bblteq",
+ "btio",
+ "btis",
+ "btiz",
+ "btinz",
+ "btbo",
+ "btbs",
+ "btbz",
+ "btbnz",
+ "jmp",
+ "baddio",
+ "baddis",
+ "baddiz",
+ "baddinz",
+ "bsubio",
+ "bsubis",
+ "bsubiz",
+ "bsubinz",
+ "bmulio",
+ "bmulis",
+ "bmuliz",
+ "bmulinz",
+ "borio",
+ "boris",
+ "boriz",
+ "borinz",
+ "break",
+ "call",
+ "ret",
+ "cieq",
+ "cineq",
+ "cia",
+ "ciaeq",
+ "cib",
+ "cibeq",
+ "cigt",
+ "cigteq",
+ "cilt",
+ "cilteq",
+ "tio",
+ "tis",
+ "tiz",
+ "tinz",
+ "tbo",
+ "tbs",
+ "tbz",
+ "tbnz",
+ "peek",
+ "poke",
+ "bpeq",
+ "bpneq",
+ "bpa",
+ "bpaeq",
+ "bpb",
+ "bpbeq",
+ "bpgt",
+ "bpgteq",
+ "bplt",
+ "bplteq",
+ "addp",
+ "andp",
+ "orp",
+ "subp",
+ "xorp",
+ "loadp",
+ "cpeq",
+ "cpneq",
+ "cpa",
+ "cpaeq",
+ "cpb",
+ "cpbeq",
+ "cpgt",
+ "cpgteq",
+ "cplt",
+ "cplteq",
+ "storep",
+ "btpo",
+ "btps",
+ "btpz",
+ "btpnz",
+ "baddpo",
+ "baddps",
+ "baddpz",
+ "baddpnz",
+ "bo",
+ "bs",
+ "bz",
+ "bnz",
+ "leai",
+ "leap",
+ ]
+
+# Instructions implemented only by the named backend.
+X86_INSTRUCTIONS =
+    [
+     "cdqi",
+     "idivi"
+    ]
+
+ARMv7_INSTRUCTIONS =
+    [
+     "smulli",
+     "addis",
+     "subis",
+     "oris"
+    ]
+
+INSTRUCTIONS = MACRO_INSTRUCTIONS + X86_INSTRUCTIONS + ARMv7_INSTRUCTIONS
+
+# Anchored alternation matching exactly one known instruction name.
+INSTRUCTION_PATTERN = Regexp.new('\\A((' + INSTRUCTIONS.join(')|(') + '))\\Z')
+
+# Relies on the invariant documented at the top of this file: branching
+# instructions, and only branching instructions, begin with "b".
+def isBranch(instruction)
+    instruction =~ /^b/
+end
+
+# Every instruction can fall through to the next one except the terminals,
+# "ret" and "jmp".
+def hasFallThrough(instruction)
+    instruction != "ret" and instruction != "jmp"
+end
+
diff --git a/Source/JavaScriptCore/offlineasm/offsets.rb b/Source/JavaScriptCore/offlineasm/offsets.rb
new file mode 100644
index 000000000..21e1706d2
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/offsets.rb
@@ -0,0 +1,173 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "ast"
+
+# Magic byte sequences compiled into the offset-extractor binary; the parser
+# below locates offset records by scanning for these exact patterns.
+OFFSET_HEADER_MAGIC_NUMBERS = [ 0x9e43fd66, 0x4379bfba ]
+OFFSET_MAGIC_NUMBERS = [ 0xec577ac7, 0x0ff5e755 ]
+
+#
+# offsetsList(ast)
+# sizesList(ast)
+#
+# Returns a sorted, de-duplicated list of the offsets and sizes used by the
+# AST. Dedup relies on StructOffset/Sizeof interning; ordering on their <=>.
+#
+
+def offsetsList(ast)
+    ast.filter(StructOffset).uniq.sort
+end
+
+def sizesList(ast)
+    ast.filter(Sizeof).uniq.sort
+end
+
+#
+# offsetsAndConfigurationIndex(ast, file) ->
+# [[offsets, index], ...]
+#
+# Parses the offsets from a file and returns a list of offsets and the
+# index of the configuration that is valid in this build target.
+#
+
+def offsetsAndConfigurationIndex(file)
+    # NOTE(review): endiannessMarkerBytes appears to be unused.
+    endiannessMarkerBytes = nil
+    result = []
+
+    # NOTE(review): the nested defs below define methods on Object (Ruby
+    # nested def), not closures; they are (re)defined on every call.
+
+    # Decodes four bytes as a 32-bit integer in the given endianness.
+    def readInt(endianness, bytes)
+        if endianness == :little
+            # Little endian
+            (bytes[0] << 0 |
+             bytes[1] << 8 |
+             bytes[2] << 16 |
+             bytes[3] << 24)
+        else
+            # Big endian
+            (bytes[0] << 24 |
+             bytes[1] << 16 |
+             bytes[2] << 8 |
+             bytes[3] << 0)
+        end
+    end
+
+    # Serializes the 32-bit magic numbers into the byte sequence they would
+    # occupy in a binary of the given endianness.
+    def prepareMagic(endianness, numbers)
+        magicBytes = []
+        numbers.each {
+            | number |
+            currentBytes = []
+            4.times {
+                currentBytes << (number & 0xff)
+                number >>= 8
+            }
+            if endianness == :big
+                currentBytes.reverse!
+            end
+            magicBytes += currentBytes
+        }
+        magicBytes
+    end
+
+    # Slurp the whole binary into an array of byte values.
+    fileBytes = []
+
+    File.open(file, "r") {
+        | inp |
+        loop {
+            byte = inp.getbyte
+            break unless byte
+            fileBytes << byte
+        }
+    }
+
+    # Splits byteArray at every occurrence of pattern, returning the
+    # fragments between occurrences (including leading/trailing fragments).
+    def sliceByteArrays(byteArray, pattern)
+        result = []
+        lastSlicePoint = 0
+        (byteArray.length - pattern.length + 1).times {
+            | index |
+            foundOne = true
+            pattern.length.times {
+                | subIndex |
+                if byteArray[index + subIndex] != pattern[subIndex]
+                    foundOne = false
+                    break
+                end
+            }
+            if foundOne
+                result << byteArray[lastSlicePoint...index]
+                lastSlicePoint = index + pattern.length
+            end
+        }
+
+        result << byteArray[lastSlicePoint...(byteArray.length)]
+
+        result
+    end
+
+    # The target's endianness is not known here, so scan for the magic
+    # patterns in both byte orders; only the correct order finds records.
+    [:little, :big].each {
+        | endianness |
+        headerMagicBytes = prepareMagic(endianness, OFFSET_HEADER_MAGIC_NUMBERS)
+        magicBytes = prepareMagic(endianness, OFFSET_MAGIC_NUMBERS)
+
+        bigArray = sliceByteArrays(fileBytes, headerMagicBytes)
+        unless bigArray.size <= 1
+            bigArray[1..-1].each {
+                | configArray |
+                # Record layout (see generate_offset_extractor.rb): the first
+                # magic-delimited field is the configuration index, the rest
+                # are the offset/size values.
+                array = sliceByteArrays(configArray, magicBytes)
+                index = readInt(endianness, array[1])
+                offsets = []
+                array[2..-1].each {
+                    | data |
+                    offsets << readInt(endianness, data)
+                }
+                result << [offsets, index]
+            }
+        end
+    }
+
+    # Sanity: at least one configuration, and configuration indices unique.
+    raise unless result.length >= 1
+    raise if result.map{|v| v[1]}.uniq.size < result.map{|v| v[1]}.size
+
+    result
+end
+
+#
+# buildOffsetsMap(ast, offsetsList) -> [offsets, sizes]
+#
+# Builds a mapping between StructOffset nodes and their values.
+#
+
+def buildOffsetsMap(ast, offsetsList)
+ offsetsMap = {}
+ sizesMap = {}
+ astOffsetsList = offsetsList(ast)
+ astSizesList = sizesList(ast)
+ raise unless astOffsetsList.size + astSizesList.size == offsetsList.size
+ offsetsList(ast).each_with_index {
+ | structOffset, index |
+ offsetsMap[structOffset] = offsetsList.shift
+ }
+ sizesList(ast).each_with_index {
+ | sizeof, index |
+ sizesMap[sizeof] = offsetsList.shift
+ }
+ [offsetsMap, sizesMap]
+end
+
diff --git a/Source/JavaScriptCore/offlineasm/opt.rb b/Source/JavaScriptCore/offlineasm/opt.rb
new file mode 100644
index 000000000..3170d3ae1
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/opt.rb
@@ -0,0 +1,134 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "ast"
+
+#
+# "Optimization" passes. These are used to lower the representation for
+# backends that cannot handle some of our higher-level instructions.
+#
+
+#
+# A temporary - a variable that will be allocated to a register after we're
+# done.
+#
+
class Node
    # Default behavior for all AST nodes: a plain node holds no temporaries
    # itself, so just rebuild it around recursively rewritten children.
    def replaceTemporariesWithRegisters(kind)
        mapChildren do
            | child |
            child.replaceTemporariesWithRegisters(kind)
        end
    end
end
+
# A temporary: a leaf operand that the register allocator later replaces
# with a concrete register of the matching kind.
class Tmp < NoChildren
    attr_reader :firstMention, :lastMention
    attr_reader :kind
    attr_accessor :register

    def initialize(codeOrigin, kind)
        super(codeOrigin)
        @kind = kind
    end

    # Temporaries are anonymous; identify them by object identity when dumping.
    def dump
        "$tmp#{object_id}"
    end

    # Record a use of this temporary at the given instruction index,
    # widening its live range [firstMention, lastMention] as needed.
    def mention!(position)
        @firstMention = position if @firstMention.nil? or position < @firstMention
        @lastMention = position if @lastMention.nil? or position > @lastMention
    end

    # Substitute the allocated register for this temporary; temporaries of
    # other kinds are left untouched.
    def replaceTemporariesWithRegisters(kind)
        return self unless @kind == kind
        raise "Did not allocate register to temporary at #{codeOriginString}" unless @register
        @register
    end

    def address?
        false
    end

    def label?
        false
    end

    def immediate?
        false
    end

    # A temporary behaves as a register operand.
    def register?
        true
    end
end
+
+# Assign registers to temporaries, by finding which temporaries interfere
+# with each other. Note that this relies on temporary live ranges not crossing
+# basic block boundaries.
+
# Linear-scan register allocation over a flat instruction list. Only
# temporaries whose kind matches `kind` are considered; `registers` is the
# pool of names available for them. Returns a new list with the allocated
# registers substituted in. Assumes temporary live ranges do not cross
# basic block boundaries (see comment above).
def assignRegistersToTemporaries(list, kind, registers)
    # First pass: compute each temporary's live range by recording the
    # first and last instruction indices at which it is mentioned.
    list.each_with_index {
        | node, index |
        node.filter(Tmp).uniq.each {
            | tmp |
            if tmp.kind == kind
                tmp.mention! index
            end
        }
    }
    
    # Second pass: take a register from the free pool when a live range
    # begins, and return it when the range ends. Begin- and end-of-range
    # handling are separate loops so a range that both starts and ends at
    # this index is allocated before it is freed.
    freeRegisters = registers.dup
    list.each_with_index {
        | node, index |
        tmpList = node.filter(Tmp).uniq
        tmpList.each {
            | tmp |
            if tmp.kind == kind and tmp.firstMention == index
                raise "Could not allocate register to temporary at #{node.codeOriginString}" if freeRegisters.empty?
                tmp.register = freeRegisters.pop
            end
        }
        tmpList.each {
            | tmp |
            if tmp.kind == kind and tmp.lastMention == index
                freeRegisters.push tmp.register
                # Sanity check: we should never free more registers than exist.
                raise "Register allocation inconsistency at #{node.codeOriginString}" if freeRegisters.size > registers.size
            end
        }
    }
    
    # Finally, rewrite the list with registers substituted for temporaries.
    list.map {
        | node |
        node.replaceTemporariesWithRegisters(kind)
    }
end
+
diff --git a/Source/JavaScriptCore/offlineasm/parser.rb b/Source/JavaScriptCore/offlineasm/parser.rb
new file mode 100644
index 000000000..f0e4b0045
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/parser.rb
@@ -0,0 +1,586 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "ast"
+require "instructions"
+require "registers"
+
# A lexer token: the lexeme text plus the line number it came from.
# Tokens compare equal both to other tokens and to plain strings, and
# delegate regexp matching to their text.
class Token
    attr_reader :codeOrigin, :string

    def initialize(codeOrigin, string)
        @codeOrigin = codeOrigin
        @string = string
    end

    def ==(other)
        other.is_a?(Token) ? @string == other.string : @string == other
    end

    def =~(pattern)
        @string =~ pattern
    end

    def to_s
        "#{@string.inspect} at line #{codeOrigin}"
    end

    # Raise a parse error pinned to this token, optionally with extra context.
    def parseError(*comment)
        message = "Parse error: #{to_s}"
        message += ": #{comment[0]}" unless comment.empty?
        raise message
    end
end
+
+#
+# The lexer. Takes a string and returns an array of tokens.
+#
+
# Tokenize an offlineasm source string into an array of Tokens. Each case
# must match at the start of the string (\A); the order of the cases is
# significant (e.g. "::" before single ":", hex/octal before decimal).
# After each match the matched prefix is stripped via $~.post_match.
def lex(str)
    result = []
    lineNumber = 1
    while not str.empty?
        case str
        when /\A\#([^\n]*)/
            # comment, ignore -- the newline is deliberately not consumed,
            # so it is lexed as its own token and line counting stays right
        when /\A\n/
            # newlines are significant (they terminate instructions), so
            # they become tokens rather than being skipped
            result << Token.new(lineNumber, $&)
            lineNumber += 1
        when /\A[a-zA-Z]([a-zA-Z0-9_]*)/
            # identifier or keyword
            result << Token.new(lineNumber, $&)
        when /\A\.([a-zA-Z0-9_]*)/
            # local label, e.g. ".loop"
            result << Token.new(lineNumber, $&)
        when /\A_([a-zA-Z0-9_]*)/
            # global label, e.g. "_llint_entry"
            result << Token.new(lineNumber, $&)
        when /\A([ \t]+)/
            # whitespace, ignore
        when /\A0x([0-9a-fA-F]+)/
            # hex literal, normalized to its decimal spelling
            result << Token.new(lineNumber, $&.hex.to_s)
        when /\A0([0-7]+)/
            # octal literal, normalized to its decimal spelling
            result << Token.new(lineNumber, $&.oct.to_s)
        when /\A([0-9]+)/
            result << Token.new(lineNumber, $&)
        when /\A::/
            result << Token.new(lineNumber, $&)
        when /\A[:,\(\)\[\]=\+\-*]/
            result << Token.new(lineNumber, $&)
        else
            raise "Lexer error at line number #{lineNumber}, unexpected sequence #{str[0..20].inspect}"
        end
        str = $~.post_match
    end
    result
end
+
+#
+# Token identification.
+#
+
# True (non-nil) if the token names a GPR or FPR; REGISTER_PATTERN comes
# from registers.rb.
def isRegister(token)
    token =~ REGISTER_PATTERN
end
+
# True (non-nil) if the token names an instruction; INSTRUCTION_PATTERN
# comes from instructions.rb.
def isInstruction(token)
    token =~ INSTRUCTION_PATTERN
end
+
# Keywords are the language's reserved words plus anything that names a
# register or an instruction; identifiers must not collide with these.
def isKeyword(token)
    token =~ /\A((true)|(false)|(if)|(then)|(else)|(elsif)|(end)|(and)|(or)|(not)|(macro)|(const)|(sizeof)|(error))\Z/ or
        token =~ REGISTER_PATTERN or
        token =~ INSTRUCTION_PATTERN
end
+
# An identifier is a letter followed by word characters, excluding keywords.
def isIdentifier(token)
    token =~ /\A[a-zA-Z]([a-zA-Z0-9_]*)\Z/ and not isKeyword(token)
end
+
# Global labels begin with an underscore, e.g. "_llint_entry".
def isLabel(token)
    token =~ /\A_\w*\Z/
end
+
# Local labels begin with a dot, e.g. ".loop".
def isLocalLabel(token)
    token =~ /\A\.\w*\Z/
end
+
# Anything assignable in the language: a user identifier or a register name.
def isVariable(token)
    isIdentifier(token) or isRegister(token)
end
+
# Integer tokens start with a digit (the lexer has already normalized
# hex/octal literals to decimal text).
def isInteger(token)
    token =~ /\A\d/
end
+
+#
+# The parser. Takes an array of tokens and returns an AST. Methods
+# other than parse(tokens) are not for public consumption.
+#
+
# Recursive-descent parser over the token array produced by lex. Tracks a
# single cursor @idx; every parse method leaves @idx just past what it
# consumed. Not reentrant; use the top-level parse(tokens) entry point.
class Parser
    def initialize(tokens)
        @tokens = tokens
        @idx = 0
    end

    # Raise a parse error at the current token, or at end-of-file when the
    # token stream is exhausted.
    def parseError(*comment)
        if @tokens[@idx]
            @tokens[@idx].parseError(*comment)
        else
            if comment.empty?
                raise "Parse error at end of file"
            else
                raise "Parse error at end of file: #{comment[0]}"
            end
        end
    end

    # Require the current token to match regexp (or, when regexp is nil,
    # require end-of-input), then advance.
    def consume(regexp)
        if regexp
            parseError unless @tokens[@idx] =~ regexp
        else
            parseError unless @idx == @tokens.length
        end
        @idx += 1
    end

    def skipNewLine
        while @tokens[@idx] == "\n"
            @idx += 1
        end
    end

    # atom := "not" atom | "(" predicate ")" | "true" | "false" | setting-name
    def parsePredicateAtom
        if @tokens[@idx] == "not"
            # NOTE(review): the "not" is consumed but nothing records it in
            # the AST, so "not a" parses identically to "a". This looks like
            # a bug (a Not node seems needed) -- confirm against ast.rb.
            @idx += 1
            parsePredicateAtom
        elsif @tokens[@idx] == "("
            @idx += 1
            skipNewLine
            result = parsePredicate
            parseError unless @tokens[@idx] == ")"
            @idx += 1
            result
        elsif @tokens[@idx] == "true"
            result = True.instance
            @idx += 1
            result
        elsif @tokens[@idx] == "false"
            result = False.instance
            @idx += 1
            result
        elsif isIdentifier @tokens[@idx]
            result = Setting.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
            @idx += 1
            result
        else
            parseError
        end
    end

    # Left-associative "and" chain of atoms.
    def parsePredicateAnd
        result = parsePredicateAtom
        while @tokens[@idx] == "and"
            codeOrigin = @tokens[@idx].codeOrigin
            @idx += 1
            skipNewLine
            right = parsePredicateAtom
            result = And.new(codeOrigin, result, right)
        end
        result
    end

    # Left-associative "or" chain; "and" binds tighter than "or".
    def parsePredicate
        # some examples of precedence:
        # not a and b -> (not a) and b
        # a and b or c -> (a and b) or c
        # a or b and c -> a or (b and c)
        
        result = parsePredicateAnd
        while @tokens[@idx] == "or"
            codeOrigin = @tokens[@idx].codeOrigin
            @idx += 1
            skipNewLine
            right = parsePredicateAnd
            result = Or.new(codeOrigin, result, right)
        end
        result
    end

    # A register (FPR or GPR) or a user identifier, as an AST operand.
    def parseVariable
        if isRegister(@tokens[@idx])
            if @tokens[@idx] =~ FPR_PATTERN
                result = FPRegisterID.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
            else
                result = RegisterID.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
            end
        elsif isIdentifier(@tokens[@idx])
            result = Variable.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
        else
            parseError
        end
        @idx += 1
        result
    end

    # Parse a bracketed memory operand; `offset` is the displacement
    # expression parsed before the "[".
    def parseAddress(offset)
        parseError unless @tokens[@idx] == "["
        codeOrigin = @tokens[@idx].codeOrigin
        
        # Four possibilities:
        # [] -> AbsoluteAddress
        # [a] -> Address
        # [a,b] -> BaseIndex with scale = 1
        # [a,b,c] -> BaseIndex
        
        @idx += 1
        if @tokens[@idx] == "]"
            @idx += 1
            return AbsoluteAddress.new(codeOrigin, offset)
        end
        a = parseVariable
        if @tokens[@idx] == "]"
            result = Address.new(codeOrigin, a, offset)
        else
            parseError unless @tokens[@idx] == ","
            @idx += 1
            b = parseVariable
            if @tokens[@idx] == "]"
                result = BaseIndex.new(codeOrigin, a, b, 1, offset)
            else
                parseError unless @tokens[@idx] == ","
                @idx += 1
                # The scale must be a valid machine addressing-mode scale.
                parseError unless ["1", "2", "4", "8"].member? @tokens[@idx].string
                c = @tokens[@idx].string.to_i
                @idx += 1
                parseError unless @tokens[@idx] == "]"
                result = BaseIndex.new(codeOrigin, a, b, c, offset)
            end
        end
        @idx += 1
        result
    end

    # Parse a "::"-separated identifier path, e.g. Foo::Bar::baz.
    # Returns [codeOrigin, array-of-name-strings].
    def parseColonColon
        skipNewLine
        codeOrigin = @tokens[@idx].codeOrigin
        parseError unless isIdentifier @tokens[@idx]
        names = [@tokens[@idx].string]
        @idx += 1
        while @tokens[@idx] == "::"
            @idx += 1
            parseError unless isIdentifier @tokens[@idx]
            names << @tokens[@idx].string
            @idx += 1
        end
        raise if names.empty?
        [codeOrigin, names]
    end

    # atom := "-" atom | "(" expr ")" | integer | struct-offset or variable
    #       | register | "sizeof" path
    def parseExpressionAtom
        skipNewLine
        if @tokens[@idx] == "-"
            @idx += 1
            NegImmediate.new(@tokens[@idx - 1].codeOrigin, parseExpressionAtom)
        elsif @tokens[@idx] == "("
            @idx += 1
            result = parseExpression
            parseError unless @tokens[@idx] == ")"
            @idx += 1
            result
        elsif isInteger @tokens[@idx]
            result = Immediate.new(@tokens[@idx].codeOrigin, @tokens[@idx].string.to_i)
            @idx += 1
            result
        elsif isIdentifier @tokens[@idx]
            codeOrigin, names = parseColonColon
            if names.size > 1
                # A multi-part path is a struct-field offset reference.
                StructOffset.forField(codeOrigin, names[0..-2].join('::'), names[-1])
            else
                Variable.forName(codeOrigin, names[0])
            end
        elsif isRegister @tokens[@idx]
            parseVariable
        elsif @tokens[@idx] == "sizeof"
            @idx += 1
            codeOrigin, names = parseColonColon
            Sizeof.forName(codeOrigin, names.join('::'))
        else
            parseError
        end
    end

    # Left-associative "*" chain of atoms.
    def parseExpressionMul
        skipNewLine
        result = parseExpressionAtom
        while @tokens[@idx] == "*"
            # NOTE(review): the inner test duplicates the loop condition, so
            # the else branch is unreachable; harmless but redundant.
            if @tokens[@idx] == "*"
                @idx += 1
                result = MulImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionAtom)
            else
                raise
            end
        end
        result
    end

    # True if the current token can begin an arithmetic expression.
    def couldBeExpression
        @tokens[@idx] == "-" or @tokens[@idx] == "sizeof" or isInteger(@tokens[@idx]) or isVariable(@tokens[@idx]) or @tokens[@idx] == "("
    end

    # Left-associative "+"/"-" chain of multiplicative expressions.
    def parseExpression
        skipNewLine
        result = parseExpressionMul
        while @tokens[@idx] == "+" or @tokens[@idx] == "-"
            if @tokens[@idx] == "+"
                @idx += 1
                result = AddImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionMul)
            elsif @tokens[@idx] == "-"
                @idx += 1
                result = SubImmediates.new(@tokens[@idx - 1].codeOrigin, result, parseExpressionMul)
            else
                raise
            end
        end
        result
    end

    # An instruction operand: expression, address (with optional displacement
    # expression before the "["), or a label reference. `comment` is context
    # for error messages.
    def parseOperand(comment)
        skipNewLine
        if couldBeExpression
            expr = parseExpression
            if @tokens[@idx] == "["
                # The expression was the displacement of an address operand.
                parseAddress(expr)
            else
                expr
            end
        elsif @tokens[@idx] == "["
            parseAddress(Immediate.new(@tokens[@idx].codeOrigin, 0))
        elsif isLabel @tokens[@idx]
            result = LabelReference.new(@tokens[@idx].codeOrigin, Label.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
            @idx += 1
            result
        elsif isLocalLabel @tokens[@idx]
            result = LocalLabelReference.new(@tokens[@idx].codeOrigin, LocalLabel.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string))
            @idx += 1
            result
        else
            parseError(comment)
        end
    end

    # Parse a macro's parenthesized, comma-separated formal parameter list.
    def parseMacroVariables
        skipNewLine
        consume(/\A\(\Z/)
        variables = []
        loop {
            skipNewLine
            if @tokens[@idx] == ")"
                @idx += 1
                break
            elsif isIdentifier(@tokens[@idx])
                variables << Variable.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
                @idx += 1
                skipNewLine
                if @tokens[@idx] == ")"
                    @idx += 1
                    break
                elsif @tokens[@idx] == ","
                    @idx += 1
                else
                    parseError
                end
            else
                parseError
            end
        }
        variables
    end

    # Parse a sequence of statements until the terminator regexp `final`
    # matches (or until end-of-input when final is nil). The terminator
    # token itself is NOT consumed here; callers advance past it. `comment`
    # gives context for error messages.
    def parseSequence(final, comment)
        firstCodeOrigin = @tokens[@idx].codeOrigin
        list = []
        loop {
            if (@idx == @tokens.length and not final) or (final and @tokens[@idx] =~ final)
                break
            elsif @tokens[@idx] == "\n"
                # ignore
                @idx += 1
            elsif @tokens[@idx] == "const"
                # const <variable> = <operand>
                @idx += 1
                parseError unless isVariable @tokens[@idx]
                variable = Variable.forName(@tokens[@idx].codeOrigin, @tokens[@idx].string)
                @idx += 1
                parseError unless @tokens[@idx] == "="
                @idx += 1
                value = parseOperand("while inside of const #{variable.name}")
                # NOTE(review): codeOrigin is read from the token AFTER the
                # value; this would crash if a const decl ended the file.
                list << ConstDecl.new(@tokens[@idx].codeOrigin, variable, value)
            elsif @tokens[@idx] == "error"
                list << Error.new(@tokens[@idx].codeOrigin)
                @idx += 1
            elsif @tokens[@idx] == "if"
                # if/elsif/else/end; each elsif becomes a nested IfThenElse
                # hung off the previous one's elseCase.
                codeOrigin = @tokens[@idx].codeOrigin
                @idx += 1
                skipNewLine
                predicate = parsePredicate
                consume(/\A((then)|(\n))\Z/)
                skipNewLine
                ifThenElse = IfThenElse.new(codeOrigin, predicate, parseSequence(/\A((else)|(end)|(elsif))\Z/, "while inside of \"if #{predicate.dump}\""))
                list << ifThenElse
                while @tokens[@idx] == "elsif"
                    codeOrigin = @tokens[@idx].codeOrigin
                    @idx += 1
                    skipNewLine
                    predicate = parsePredicate
                    consume(/\A((then)|(\n))\Z/)
                    skipNewLine
                    elseCase = IfThenElse.new(codeOrigin, predicate, parseSequence(/\A((else)|(end)|(elsif))\Z/, "while inside of \"if #{predicate.dump}\""))
                    ifThenElse.elseCase = elseCase
                    ifThenElse = elseCase
                end
                if @tokens[@idx] == "else"
                    @idx += 1
                    ifThenElse.elseCase = parseSequence(/\Aend\Z/, "while inside of else case for \"if #{predicate.dump}\"")
                    @idx += 1
                else
                    parseError unless @tokens[@idx] == "end"
                    @idx += 1
                end
            elsif @tokens[@idx] == "macro"
                # macro <name>(<formals>) ... end
                codeOrigin = @tokens[@idx].codeOrigin
                @idx += 1
                skipNewLine
                parseError unless isIdentifier(@tokens[@idx])
                name = @tokens[@idx].string
                @idx += 1
                variables = parseMacroVariables
                body = parseSequence(/\Aend\Z/, "while inside of macro #{name}")
                @idx += 1
                list << Macro.new(codeOrigin, name, variables, body)
            elsif isInstruction @tokens[@idx]
                codeOrigin = @tokens[@idx].codeOrigin
                name = @tokens[@idx].string
                @idx += 1
                if (not final and @idx == @tokens.size) or (final and @tokens[@idx] =~ final)
                    # Zero operand instruction, and it's the last one.
                    list << Instruction.new(codeOrigin, name, [])
                    break
                elsif @tokens[@idx] == "\n"
                    # Zero operand instruction.
                    list << Instruction.new(codeOrigin, name, [])
                    @idx += 1
                else
                    # It's definitely an instruction, and it has at least one operand.
                    operands = []
                    endOfSequence = false
                    loop {
                        operands << parseOperand("while inside of instruction #{name}")
                        if (not final and @idx == @tokens.size) or (final and @tokens[@idx] =~ final)
                            # The end of the instruction and of the sequence.
                            endOfSequence = true
                            break
                        elsif @tokens[@idx] == ","
                            # Has another operand.
                            @idx += 1
                        elsif @tokens[@idx] == "\n"
                            # The end of the instruction.
                            @idx += 1
                            break
                        else
                            parseError("Expected a comma, newline, or #{final} after #{operands.last.dump}")
                        end
                    }
                    list << Instruction.new(codeOrigin, name, operands)
                    if endOfSequence
                        break
                    end
                end
            elsif isIdentifier @tokens[@idx]
                # A bare identifier here must be a macro invocation.
                codeOrigin = @tokens[@idx].codeOrigin
                name = @tokens[@idx].string
                @idx += 1
                if @tokens[@idx] == "("
                    # Macro invocation.
                    @idx += 1
                    operands = []
                    skipNewLine
                    if @tokens[@idx] == ")"
                        @idx += 1
                    else
                        loop {
                            skipNewLine
                            if @tokens[@idx] == "macro"
                                # It's a macro lambda!
                                codeOriginInner = @tokens[@idx].codeOrigin
                                @idx += 1
                                variables = parseMacroVariables
                                body = parseSequence(/\Aend\Z/, "while inside of anonymous macro passed as argument to #{name}")
                                @idx += 1
                                operands << Macro.new(codeOriginInner, nil, variables, body)
                            else
                                operands << parseOperand("while inside of macro call to #{name}")
                            end
                            skipNewLine
                            if @tokens[@idx] == ")"
                                @idx += 1
                                break
                            elsif @tokens[@idx] == ","
                                @idx += 1
                            else
                                parseError "Unexpected #{@tokens[@idx].string.inspect} while parsing invocation of macro #{name}"
                            end
                        }
                    end
                    list << MacroCall.new(codeOrigin, name, operands)
                else
                    parseError "Expected \"(\" after #{name}"
                end
            elsif isLabel @tokens[@idx] or isLocalLabel @tokens[@idx]
                codeOrigin = @tokens[@idx].codeOrigin
                name = @tokens[@idx].string
                @idx += 1
                parseError unless @tokens[@idx] == ":"
                # It's a label.
                if isLabel name
                    list << Label.forName(codeOrigin, name)
                else
                    list << LocalLabel.forName(codeOrigin, name)
                end
                @idx += 1
            else
                parseError "Expecting terminal #{final} #{comment}"
            end
        }
        Sequence.new(firstCodeOrigin, list)
    end
end
+
# Public entry point: parse a whole token stream as one top-level sequence
# terminated only by end-of-input.
def parse(tokens)
    Parser.new(tokens).parseSequence(nil, "")
end
+
diff --git a/Source/JavaScriptCore/offlineasm/registers.rb b/Source/JavaScriptCore/offlineasm/registers.rb
new file mode 100644
index 000000000..75fae4192
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/registers.rb
@@ -0,0 +1,60 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
# Symbolic general-purpose registers understood by the offline assembler.
GPRS = %w[t0 t1 t2 t3 t4 cfr a0 a1 r0 r1 sp lr]

# Symbolic floating-point registers.
FPRS = %w[ft0 ft1 ft2 ft3 ft4 ft5 fa0 fa1 fa2 fa3 fr]

REGISTERS = GPRS + FPRS

# Anchored alternation patterns matching exactly one register name.
GPR_PATTERN = Regexp.new('\\A((' + GPRS.join(')|(') + '))\\Z')
FPR_PATTERN = Regexp.new('\\A((' + FPRS.join(')|(') + '))\\Z')

REGISTER_PATTERN = Regexp.new('\\A((' + REGISTERS.join(')|(') + '))\\Z')
diff --git a/Source/JavaScriptCore/offlineasm/self_hash.rb b/Source/JavaScriptCore/offlineasm/self_hash.rb
new file mode 100644
index 000000000..a7b51e112
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/self_hash.rb
@@ -0,0 +1,46 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "digest/sha1"
+require "pathname"
+
+#
+# selfHash -> SHA1 hexdigest
+#
+# Returns a hash of the offlineasm source code. This allows dependency
+# tracking for not just changes in input, but also changes in the assembler
+# itself.
+#
+
def selfHash
    # Concatenate every .rb source in this script's directory and hash the
    # whole thing, so edits to the assembler itself invalidate its output.
    # (Dir.foreach order is platform-dependent but stable per machine.)
    myPath = Pathname.new(__FILE__).dirname
    contents = ""
    Dir.foreach(myPath) do
        | entry |
        contents += IO::read(myPath + entry) if entry =~ /\.rb$/
    end
    Digest::SHA1.hexdigest(contents)
end
+
diff --git a/Source/JavaScriptCore/offlineasm/settings.rb b/Source/JavaScriptCore/offlineasm/settings.rb
new file mode 100644
index 000000000..34598181c
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/settings.rb
@@ -0,0 +1,205 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "ast"
+require "backends"
+require "parser"
+require "transform"
+
+#
+# computeSettingsCombinations(ast) -> settingsCombiations
+#
+# Computes an array of settings maps, where a settings map constitutes
+# a configuration for the assembly code being generated. The map
+# contains key value pairs where keys are settings names (strings) and
+# the values are booleans (true for enabled, false for disabled).
+#
+
def computeSettingsCombinations(ast)
    settingsCombinations = []
    
    # Recursively enumerate all 2^n true/false assignments of the settings
    # in `remaining`, appending each completed map to settingsCombinations.
    # (A nested `def` in Ruby is not a closure -- it defines an ordinary
    # method -- so all state is threaded through explicitly as arguments.)
    def settingsCombinator(settingsCombinations, mapSoFar, remaining)
        if remaining.empty?
            settingsCombinations << mapSoFar
            return
        end
        
        newMap = mapSoFar.dup
        newMap[remaining[0]] = true
        settingsCombinator(settingsCombinations, newMap, remaining[1..-1])
        
        newMap = mapSoFar.dup
        newMap[remaining[0]] = false
        settingsCombinator(settingsCombinations, newMap, remaining[1..-1])
    end
    
    # The combination space covers every Setting mentioned in the AST plus
    # the backend selectors ("X86", "ARMv7"), deduplicated.
    settingsCombinator(settingsCombinations, {}, (ast.filter(Setting).uniq.collect{|v| v.name} + ["X86", "ARMv7"]).uniq)
    
    settingsCombinations
end
+
+#
+# forSettings(concreteSettings, ast) {
+# | concreteSettings, lowLevelAST, backend | ... }
+#
+# Determines if the settings combination is valid, and if so, calls
+# the block with the information you need to generate code.
+#
+
def forSettings(concreteSettings, ast)
    # Collect the backends this combination claims to support; every backend
    # key must be present (true or false) in the settings map.
    claimedBackends = []
    BACKENDS.each do
        | backend |
        isSupported = concreteSettings[backend]
        raise if isSupported == nil
        claimedBackends << backend if isSupported
    end
    
    # A combination claiming more than one backend is contradictory: skip it.
    return if claimedBackends.size > 1
    
    # Resolve the AST down to a low-level form (no macros or conditionals)
    # and hand everything to the caller. The backend is nil if none claimed.
    yield concreteSettings, ast.resolveSettings(concreteSettings), claimedBackends[-1]
end
+
+#
+# forEachValidSettingsCombination(ast) {
+# | concreteSettings, ast, backend, index | ... }
+#
+# forEachValidSettingsCombination(ast, settingsCombinations) {
+# | concreteSettings, ast, backend, index | ... }
+#
+# Executes the given block for each valid settings combination in the
+# settings map. The ast passed into the block is resolved
+# (ast.resolve) against the settings.
+#
+# The first form will call computeSettingsCombinations(ast) for you.
+#
+
# Iterates every valid settings combination, yielding the concrete settings,
# the resolved low-level AST, the selected backend, and the combination
# index. Fix: the original misspelled optionalSettingsCombinations
# ("optionalSettingsCombiations"), raising a NameError whenever a
# precomputed combinations list was passed as the second argument.
def forEachValidSettingsCombination(ast, *optionalSettingsCombinations)
    # At most one explicit combinations list may be supplied.
    raise if optionalSettingsCombinations.size > 1
    
    if optionalSettingsCombinations.empty?
        settingsCombinations = computeSettingsCombinations(ast)
    else
        settingsCombinations = optionalSettingsCombinations[0]
    end
    
    settingsCombinations.each_with_index {
        | concreteSettings, index |
        forSettings(concreteSettings, ast) {
            | concreteSettings_, lowLevelAST, backend |
            yield concreteSettings, lowLevelAST, backend, index
        }
    }
end
+
+#
+# cppSettingsTest(concreteSettings)
+#
+# Returns the C++ code used to test if we are in a configuration that
+# corresponds to the given concrete settings.
+#
+
# Renders the C++ preprocessor test selecting exactly this configuration:
# each enabled setting appears as OFFLINE_ASM_<name>, each disabled one
# negated, all joined with &&.
def cppSettingsTest(concreteSettings)
    clauses = concreteSettings.map do
        | name, value |
        prefix = value ? "" : "!"
        "#{prefix}OFFLINE_ASM_#{name}"
    end
    "#if " + clauses.join(" && ")
end
+
+#
+# isASTErroneous(ast)
+#
+# Tests to see if the AST claims that there is an error - i.e. if the
+# user's code, after settings resolution, has Error nodes.
+#
+
# An AST is erroneous if settings resolution left behind any Error nodes
# (i.e. the user's code hit an `error` statement in this configuration).
def isASTErroneous(ast)
    not ast.filter(Error).empty?
end
+
+#
+# assertConfiguration(concreteSettings)
+#
+# Emits a check that asserts that we're using the given configuration.
+#
+
def assertConfiguration(concreteSettings)
    # Emit a preprocessor guard: anything other than the expected
    # configuration trips a compile-time error.
    [cppSettingsTest(concreteSettings),
     "#else",
     "#error \"Configuration mismatch.\"",
     "#endif"].each do
        | line |
        $output.puts line
    end
end
+
+#
+# emitCodeInConfiguration(concreteSettings, ast, backend) {
+# | concreteSettings, ast, backend | ... }
+#
+# Emits all relevant guards to see if the configuration holds and
+# calls the block if the configuration is not erroneous.
+#
+
def emitCodeInConfiguration(concreteSettings, ast, backend)
    # Open the preprocessor guard for this configuration.
    $output.puts cppSettingsTest(concreteSettings)
    
    # Only invoke the generator when the configuration is sane and the
    # backend is actually implemented; otherwise emit a #error in its place.
    if isASTErroneous(ast)
        $output.puts "#error \"Invalid configuration.\""
    elsif WORKING_BACKENDS.include? backend
        yield concreteSettings, ast, backend
    else
        $output.puts "#error \"This backend is not supported yet.\""
    end
    
    $output.puts "#endif"
end
+
+#
+# emitCodeInAllConfigurations(ast) {
+# | concreteSettings, ast, backend, index | ... }
+#
+# Emits guard codes for all valid configurations, and calls the block
+# for those configurations that are valid and not erroneous.
+#
+
def emitCodeInAllConfigurations(ast)
    # Wrap each valid configuration's generated code in its own guard.
    forEachValidSettingsCombination(ast) do
        | concreteSettings, lowLevelAST, backend, index |
        $output.puts cppSettingsTest(concreteSettings)
        yield concreteSettings, lowLevelAST, backend, index
        $output.puts "#endif"
    end
end
+
+
+
diff --git a/Source/JavaScriptCore/offlineasm/transform.rb b/Source/JavaScriptCore/offlineasm/transform.rb
new file mode 100644
index 000000000..5f5024d9e
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/transform.rb
@@ -0,0 +1,342 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+require "ast"
+
+#
+# node.resolveSettings(settings)
+#
+# Construct a new AST that does not have any IfThenElse nodes by
+# substituting concrete boolean values for each Setting.
+#
+
+class Node
+ def resolveSettings(settings)
+ mapChildren {
+ | child |
+ child.resolveSettings(settings)
+ }
+ end
+end
+
+class True
+ def resolveSettings(settings)
+ self
+ end
+end
+
+class False
+ def resolveSettings(settings)
+ self
+ end
+end
+
+class Setting
+ def resolveSettings(settings)
+ settings[@name].asNode
+ end
+end
+
+class And
+ def resolveSettings(settings)
+ (@left.resolveSettings(settings).value and @right.resolveSettings(settings).value).asNode
+ end
+end
+
+class Or
+ def resolveSettings(settings)
+ (@left.resolveSettings(settings).value or @right.resolveSettings(settings).value).asNode
+ end
+end
+
+class Not
+ def resolveSettings(settings)
+ (not @child.resolveSettings(settings).value).asNode
+ end
+end
+
+class IfThenElse
+ def resolveSettings(settings)
+ if @predicate.resolveSettings(settings).value
+ @thenCase.resolveSettings(settings)
+ else
+ @elseCase.resolveSettings(settings)
+ end
+ end
+end
+
+class Sequence
+ def resolveSettings(settings)
+ newList = []
+ @list.each {
+ | item |
+ item = item.resolveSettings(settings)
+ if item.is_a? Sequence
+ newList += item.list
+ else
+ newList << item
+ end
+ }
+ Sequence.new(codeOrigin, newList)
+ end
+end
+
+#
+# node.demacroify(macros)
+# node.substitute(mapping)
+#
+# demacroify() constructs a new AST that does not have any Macro
+# nodes, while substitute() replaces Variable nodes with the given
+# nodes in the mapping.
+#
+
+class Node
+ def demacroify(macros)
+ mapChildren {
+ | child |
+ child.demacroify(macros)
+ }
+ end
+
+ def substitute(mapping)
+ mapChildren {
+ | child |
+ child.substitute(mapping)
+ }
+ end
+
+ def substituteLabels(mapping)
+ mapChildren {
+ | child |
+ child.substituteLabels(mapping)
+ }
+ end
+end
+
+class Macro
+ def substitute(mapping)
+ myMapping = {}
+ mapping.each_pair {
+ | key, value |
+ unless @variables.include? key
+ myMapping[key] = value
+ end
+ }
+ mapChildren {
+ | child |
+ child.substitute(myMapping)
+ }
+ end
+end
+
+class Variable
+ def substitute(mapping)
+ if mapping[self]
+ mapping[self]
+ else
+ self
+ end
+ end
+end
+
+class LocalLabel
+ def substituteLabels(mapping)
+ if mapping[self]
+ mapping[self]
+ else
+ self
+ end
+ end
+end
+
+class Sequence
+ def substitute(constants)
+ newList = []
+ myConstants = constants.dup
+ @list.each {
+ | item |
+ if item.is_a? ConstDecl
+ myConstants[item.variable] = item.value.substitute(myConstants)
+ else
+ newList << item.substitute(myConstants)
+ end
+ }
+ Sequence.new(codeOrigin, newList)
+ end
+
+ def renameLabels(comment)
+ mapping = {}
+
+ @list.each {
+ | item |
+ if item.is_a? LocalLabel
+ mapping[item] = LocalLabel.unique(if comment then comment + "_" else "" end + item.cleanName)
+ end
+ }
+
+ substituteLabels(mapping)
+ end
+
+ def demacroify(macros)
+ myMacros = macros.dup
+ @list.each {
+ | item |
+ if item.is_a? Macro
+ myMacros[item.name] = item
+ end
+ }
+ newList = []
+ @list.each {
+ | item |
+ if item.is_a? Macro
+ # Ignore.
+ elsif item.is_a? MacroCall
+ mapping = {}
+ myMyMacros = myMacros.dup
+ raise "Could not find macro #{item.name} at #{item.codeOriginString}" unless myMacros[item.name]
+ raise "Argument count mismatch for call to #{item.name} at #{item.codeOriginString}" unless item.operands.size == myMacros[item.name].variables.size
+ item.operands.size.times {
+ | idx |
+ if item.operands[idx].is_a? Variable and myMacros[item.operands[idx].name]
+ myMyMacros[myMacros[item.name].variables[idx].name] = myMacros[item.operands[idx].name]
+ mapping[myMacros[item.name].variables[idx].name] = nil
+ elsif item.operands[idx].is_a? Macro
+ myMyMacros[myMacros[item.name].variables[idx].name] = item.operands[idx]
+ mapping[myMacros[item.name].variables[idx].name] = nil
+ else
+ myMyMacros[myMacros[item.name].variables[idx]] = nil
+ mapping[myMacros[item.name].variables[idx]] = item.operands[idx]
+ end
+ }
+ newList += myMacros[item.name].body.substitute(mapping).demacroify(myMyMacros).renameLabels(item.name).list
+ else
+ newList << item.demacroify(myMacros)
+ end
+ }
+ Sequence.new(codeOrigin, newList).substitute({})
+ end
+end
+
+#
+# node.resolveOffsets(offsets, sizes)
+#
+# Construct a new AST that has offset values instead of symbolic
+# offsets.
+#
+
+class Node
+ def resolveOffsets(offsets, sizes)
+ mapChildren {
+ | child |
+ child.resolveOffsets(offsets, sizes)
+ }
+ end
+end
+
+class StructOffset
+ def resolveOffsets(offsets, sizes)
+ if offsets[self]
+ Immediate.new(codeOrigin, offsets[self])
+ else
+ self
+ end
+ end
+end
+
+class Sizeof
+ def resolveOffsets(offsets, sizes)
+ if sizes[self]
+ Immediate.new(codeOrigin, sizes[self])
+ else
+ puts "Could not find #{self.inspect} in #{sizes.keys.inspect}"
+ puts "sizes = #{sizes.inspect}"
+ self
+ end
+ end
+end
+
+#
+# node.fold
+#
+# Resolve constant references and compute arithmetic expressions.
+#
+
+class Node
+ def fold
+ mapChildren {
+ | child |
+ child.fold
+ }
+ end
+end
+
+class AddImmediates
+ def fold
+ @left = @left.fold
+ @right = @right.fold
+ return self unless @left.is_a? Immediate
+ return self unless @right.is_a? Immediate
+ Immediate.new(codeOrigin, @left.value + @right.value)
+ end
+end
+
+class SubImmediates
+ def fold
+ @left = @left.fold
+ @right = @right.fold
+ return self unless @left.is_a? Immediate
+ return self unless @right.is_a? Immediate
+ Immediate.new(codeOrigin, @left.value - @right.value)
+ end
+end
+
+class MulImmediates
+ def fold
+ @left = @left.fold
+ @right = @right.fold
+ return self unless @left.is_a? Immediate
+ return self unless @right.is_a? Immediate
+ Immediate.new(codeOrigin, @left.value * @right.value)
+ end
+end
+
+class NegImmediate
+ def fold
+ @child = @child.fold
+ return self unless @child.is_a? Immediate
+ Immediate.new(codeOrigin, -@child.value)
+ end
+end
+
+#
+# node.resolveAfterSettings(offsets, sizes)
+#
+# Compile assembly against a set of offsets.
+#
+
+class Node
+ def resolve(offsets, sizes)
+ demacroify({}).resolveOffsets(offsets, sizes).fold
+ end
+end
+
diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb
new file mode 100644
index 000000000..b89f2d90c
--- /dev/null
+++ b/Source/JavaScriptCore/offlineasm/x86.rb
@@ -0,0 +1,681 @@
+# Copyright (C) 2011 Apple Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+# THE POSSIBILITY OF SUCH DAMAGE.
+
+class RegisterID
+ def supports8BitOnX86
+ case name
+ when "t0", "a0", "r0", "t1", "a1", "r1", "t2", "t3"
+ true
+ when "t4", "cfr"
+ false
+ else
+ raise
+ end
+ end
+
+ def x86Operand(kind)
+ case name
+ when "t0", "a0", "r0"
+ case kind
+ when :byte
+ "%al"
+ when :half
+ "%ax"
+ when :int
+ "%eax"
+ else
+ raise
+ end
+ when "t1", "a1", "r1"
+ case kind
+ when :byte
+ "%dl"
+ when :half
+ "%dx"
+ when :int
+ "%edx"
+ else
+ raise
+ end
+ when "t2"
+ case kind
+ when :byte
+ "%cl"
+ when :half
+ "%cx"
+ when :int
+ "%ecx"
+ else
+ raise
+ end
+ when "t3"
+ case kind
+ when :byte
+ "%bl"
+ when :half
+ "%bx"
+ when :int
+ "%ebx"
+ else
+ raise
+ end
+ when "t4"
+ case kind
+ when :byte
+ "%sil"
+ when :half
+ "%si"
+ when :int
+ "%esi"
+ else
+ raise
+ end
+ when "cfr"
+ case kind
+ when :byte
+ "%dil"
+ when :half
+ "%di"
+ when :int
+ "%edi"
+ else
+ raise
+ end
+ when "sp"
+ case kind
+ when :byte
+ "%spl"
+ when :half
+ "%sp"
+ when :int
+ "%esp"
+ else
+ raise
+ end
+ else
+ raise "Bad register #{name} for X86 at #{codeOriginString}"
+ end
+ end
+ def x86CallOperand(kind)
+ "*#{x86Operand(kind)}"
+ end
+end
+
+class FPRegisterID
+ def x86Operand(kind)
+ raise unless kind == :double
+ case name
+ when "ft0", "fa0", "fr"
+ "%xmm0"
+ when "ft1", "fa1"
+ "%xmm1"
+ when "ft2", "fa2"
+ "%xmm2"
+ when "ft3", "fa3"
+ "%xmm3"
+ when "ft4"
+ "%xmm4"
+ when "ft5"
+ "%xmm5"
+ else
+ raise "Bad register #{name} for X86 at #{codeOriginString}"
+ end
+ end
+ def x86CallOperand(kind)
+ "*#{x86Operand(kind)}"
+ end
+end
+
+class Immediate
+ def x86Operand(kind)
+ "$#{value}"
+ end
+ def x86CallOperand(kind)
+ "#{value}"
+ end
+end
+
+class Address
+ def supports8BitOnX86
+ true
+ end
+
+ def x86Operand(kind)
+ "#{offset.value}(#{base.x86Operand(:int)})"
+ end
+ def x86CallOperand(kind)
+ "*#{x86Operand(kind)}"
+ end
+end
+
+class BaseIndex
+ def supports8BitOnX86
+ true
+ end
+
+ def x86Operand(kind)
+ "#{offset.value}(#{base.x86Operand(:int)}, #{index.x86Operand(:int)}, #{scale})"
+ end
+
+ def x86CallOperand(kind)
+        "*#{x86Operand(kind)}"
+ end
+end
+
+class AbsoluteAddress
+ def supports8BitOnX86
+ true
+ end
+
+ def x86Operand(kind)
+ "#{address.value}"
+ end
+
+ def x86CallOperand(kind)
+ "*#{address.value}"
+ end
+end
+
+class LabelReference
+ def x86CallOperand(kind)
+ asmLabel
+ end
+end
+
+class LocalLabelReference
+ def x86CallOperand(kind)
+ asmLabel
+ end
+end
+
+class Instruction
+ def x86Operands(*kinds)
+ raise unless kinds.size == operands.size
+ result = []
+ kinds.size.times {
+ | idx |
+ result << operands[idx].x86Operand(kinds[idx])
+ }
+ result.join(", ")
+ end
+
+ def x86Suffix(kind)
+ case kind
+ when :byte
+ "b"
+ when :half
+ "w"
+ when :int
+ "l"
+ when :double
+ "sd"
+ else
+ raise
+ end
+ end
+
+ def handleX86OpWithNumOperands(opcode, kind, numOperands)
+ if numOperands == 3
+ if operands[0] == operands[2]
+ $asm.puts "#{opcode} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ elsif operands[1] == operands[2]
+ $asm.puts "#{opcode} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ else
+ $asm.puts "mov#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ $asm.puts "#{opcode} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}"
+ end
+ else
+ $asm.puts "#{opcode} #{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}"
+ end
+ end
+
+ def handleX86Op(opcode, kind)
+ handleX86OpWithNumOperands(opcode, kind, operands.size)
+ end
+
+ def handleX86Shift(opcode, kind)
+ if operands[0].is_a? Immediate or operands[0] == RegisterID.forName(nil, "t2")
+ $asm.puts "#{opcode} #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(kind)}"
+ else
+ $asm.puts "xchgl #{operands[0].x86Operand(:int)}, %ecx"
+ $asm.puts "#{opcode} %cl, #{operands[1].x86Operand(kind)}"
+ $asm.puts "xchgl #{operands[0].x86Operand(:int)}, %ecx"
+ end
+ end
+
+ def handleX86DoubleBranch(branchOpcode, mode)
+ case mode
+ when :normal
+ $asm.puts "ucomisd #{operands[1].x86Operand(:double)}, #{operands[0].x86Operand(:double)}"
+ when :reverse
+ $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ else
+ raise mode.inspect
+ end
+ $asm.puts "#{branchOpcode} #{operands[2].asmLabel}"
+ end
+
+ def handleX86IntCompare(opcodeSuffix, kind)
+ if operands[0].is_a? Immediate and operands[0].value == 0 and operands[1].is_a? RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne")
+            $asm.puts "test#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[1].x86Operand(kind)}"
+ elsif operands[1].is_a? Immediate and operands[1].value == 0 and operands[0].is_a? RegisterID and (opcodeSuffix == "e" or opcodeSuffix == "ne")
+            $asm.puts "test#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[0].x86Operand(kind)}"
+ else
+ $asm.puts "cmp#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[0].x86Operand(kind)}"
+ end
+ end
+
+ def handleX86IntBranch(branchOpcode, kind)
+ handleX86IntCompare(branchOpcode[1..-1], kind)
+ $asm.puts "#{branchOpcode} #{operands[2].asmLabel}"
+ end
+
+ def handleX86Set(setOpcode, operand)
+ if operand.supports8BitOnX86
+ $asm.puts "#{setOpcode} #{operand.x86Operand(:byte)}"
+ $asm.puts "movzbl #{operand.x86Operand(:byte)}, #{operand.x86Operand(:int)}"
+ else
+ $asm.puts "xchgl #{operand.x86Operand(:int)}, %eax"
+ $asm.puts "#{setOpcode} %al"
+ $asm.puts "movzbl %al, %eax"
+ $asm.puts "xchgl #{operand.x86Operand(:int)}, %eax"
+ end
+ end
+
+ def handleX86IntCompareSet(setOpcode, kind)
+ handleX86IntCompare(setOpcode[3..-1], kind)
+ handleX86Set(setOpcode, operands[2])
+ end
+
+ def handleX86Test(kind)
+ value = operands[0]
+ case operands.size
+ when 2
+ mask = Immediate.new(codeOrigin, -1)
+ when 3
+ mask = operands[1]
+ else
+ raise "Expected 2 or 3 operands, but got #{operands.size} at #{codeOriginString}"
+ end
+
+ if mask.is_a? Immediate and mask.value == -1
+ if value.is_a? RegisterID
+ $asm.puts "test#{x86Suffix(kind)} #{value.x86Operand(kind)}, #{value.x86Operand(kind)}"
+ else
+ $asm.puts "cmp#{x86Suffix(kind)} $0, #{value.x86Operand(kind)}"
+ end
+ else
+ $asm.puts "test#{x86Suffix(kind)} #{mask.x86Operand(kind)}, #{value.x86Operand(kind)}"
+ end
+ end
+
+ def handleX86BranchTest(branchOpcode, kind)
+ handleX86Test(kind)
+ $asm.puts "#{branchOpcode} #{operands.last.asmLabel}"
+ end
+
+ def handleX86SetTest(setOpcode, kind)
+ handleX86Test(kind)
+ handleX86Set(setOpcode, operands.last)
+ end
+
+ def handleX86OpBranch(opcode, branchOpcode, kind)
+ handleX86OpWithNumOperands(opcode, kind, operands.size - 1)
+ case operands.size
+ when 4
+ jumpTarget = operands[3]
+ when 3
+ jumpTarget = operands[2]
+ else
+ raise self.inspect
+ end
+ $asm.puts "#{branchOpcode} #{jumpTarget.asmLabel}"
+ end
+
+ def handleX86SubBranch(branchOpcode, kind)
+ if operands.size == 4 and operands[1] == operands[2]
+ $asm.puts "negl #{operands[2].x86Operand(:int)}"
+ $asm.puts "addl #{operands[0].x86Operand(:int)}, #{operands[2].x86Operand(:int)}"
+ else
+ handleX86OpWithNumOperands("sub#{x86Suffix(kind)}", kind, operands.size - 1)
+ end
+ case operands.size
+ when 4
+ jumpTarget = operands[3]
+ when 3
+ jumpTarget = operands[2]
+ else
+ raise self.inspect
+ end
+ $asm.puts "#{branchOpcode} #{jumpTarget.asmLabel}"
+ end
+
+ def lowerX86
+ $asm.comment codeOriginString
+ case opcode
+ when "addi", "addp"
+ if operands.size == 3 and operands[0].is_a? Immediate
+ raise unless operands[1].is_a? RegisterID
+ raise unless operands[2].is_a? RegisterID
+ if operands[0].value == 0
+ unless operands[1] == operands[2]
+ $asm.puts "movl #{operands[1].x86Operand(:int)}, #{operands[2].x86Operand(:int)}"
+ end
+ else
+ $asm.puts "leal #{operands[0].value}(#{operands[1].x86Operand(:int)}), #{operands[2].x86Operand(:int)}"
+ end
+ elsif operands.size == 3 and operands[0].is_a? RegisterID
+ raise unless operands[1].is_a? RegisterID
+ raise unless operands[2].is_a? RegisterID
+ $asm.puts "leal (#{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:int)}), #{operands[2].x86Operand(:int)}"
+ else
+ unless Immediate.new(nil, 0) == operands[0]
+ $asm.puts "addl #{x86Operands(:int, :int)}"
+ end
+ end
+ when "andi", "andp"
+ handleX86Op("andl", :int)
+ when "lshifti"
+ handleX86Shift("sall", :int)
+ when "muli"
+ if operands.size == 3 and operands[0].is_a? Immediate
+ $asm.puts "imull #{x86Operands(:int, :int, :int)}"
+ else
+ # FIXME: could do some peephole in case the left operand is immediate and it's
+ # a power of two.
+ handleX86Op("imull", :int)
+ end
+ when "negi"
+ $asm.puts "negl #{x86Operands(:int)}"
+ when "noti"
+ $asm.puts "notl #{x86Operands(:int)}"
+ when "ori", "orp"
+ handleX86Op("orl", :int)
+ when "rshifti"
+ handleX86Shift("sarl", :int)
+ when "urshifti"
+ handleX86Shift("shrl", :int)
+ when "subi", "subp"
+ if operands.size == 3 and operands[1] == operands[2]
+ $asm.puts "negl #{operands[2].x86Operand(:int)}"
+ $asm.puts "addl #{operands[0].x86Operand(:int)}, #{operands[2].x86Operand(:int)}"
+ else
+ handleX86Op("subl", :int)
+ end
+ when "xori", "xorp"
+ handleX86Op("xorl", :int)
+ when "loadi", "storei", "loadp", "storep"
+ $asm.puts "movl #{x86Operands(:int, :int)}"
+ when "loadb"
+ $asm.puts "movzbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}"
+ when "loadbs"
+ $asm.puts "movsbl #{operands[0].x86Operand(:byte)}, #{operands[1].x86Operand(:int)}"
+ when "loadh"
+ $asm.puts "movzwl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}"
+ when "loadhs"
+ $asm.puts "movswl #{operands[0].x86Operand(:half)}, #{operands[1].x86Operand(:int)}"
+ when "storeb"
+ $asm.puts "movb #{x86Operands(:byte, :byte)}"
+ when "loadd", "moved", "stored"
+ $asm.puts "movsd #{x86Operands(:double, :double)}"
+ when "addd"
+ $asm.puts "addsd #{x86Operands(:double, :double)}"
+ when "divd"
+ $asm.puts "divsd #{x86Operands(:double, :double)}"
+ when "subd"
+ $asm.puts "subsd #{x86Operands(:double, :double)}"
+ when "muld"
+ $asm.puts "mulsd #{x86Operands(:double, :double)}"
+ when "sqrtd"
+ $asm.puts "sqrtsd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ when "ci2d"
+ $asm.puts "cvtsi2sd #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:double)}"
+ when "bdeq"
+ isUnordered = LocalLabel.unique("bdeq")
+ $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ $asm.puts "jp #{LabelReference.new(codeOrigin, isUnordered).asmLabel}"
+ $asm.puts "je #{LabelReference.new(codeOrigin, operands[2]).asmLabel}"
+ isUnordered.lower("X86")
+ when "bdneq"
+ handleX86DoubleBranch("jne", :normal)
+ when "bdgt"
+ handleX86DoubleBranch("ja", :normal)
+ when "bdgteq"
+ handleX86DoubleBranch("jae", :normal)
+ when "bdlt"
+ handleX86DoubleBranch("ja", :reverse)
+ when "bdlteq"
+ handleX86DoubleBranch("jae", :reverse)
+ when "bdequn"
+ handleX86DoubleBranch("je", :normal)
+ when "bdnequn"
+ isUnordered = LocalLabel.unique("bdnequn")
+ isEqual = LocalLabel.unique("bdnequn")
+ $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:double)}"
+ $asm.puts "jp #{LabelReference.new(codeOrigin, isUnordered).asmLabel}"
+ $asm.puts "je #{LabelReference.new(codeOrigin, isEqual).asmLabel}"
+ isUnordered.lower("X86")
+ $asm.puts "jmp #{operands[2].asmLabel}"
+ isEqual.lower("X86")
+ when "bdgtun"
+ handleX86DoubleBranch("jb", :reverse)
+ when "bdgtequn"
+ handleX86DoubleBranch("jbe", :reverse)
+ when "bdltun"
+ handleX86DoubleBranch("jb", :normal)
+ when "bdltequn"
+ handleX86DoubleBranch("jbe", :normal)
+ when "btd2i"
+ $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+            $asm.puts "cmpl $0x80000000, #{operands[1].x86Operand(:int)}"
+ $asm.puts "je #{operands[2].asmLabel}"
+ when "td2i"
+ $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+ when "bcd2i"
+ $asm.puts "cvttsd2si #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "testl #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "je #{operands[2].asmLabel}"
+ $asm.puts "cvtsi2sd #{operands[1].x86Operand(:int)}, %xmm7"
+ $asm.puts "ucomisd #{operands[0].x86Operand(:double)}, %xmm7"
+ $asm.puts "jp #{operands[2].asmLabel}"
+ $asm.puts "jne #{operands[2].asmLabel}"
+ when "movdz"
+ $asm.puts "xorpd #{operands[0].x86Operand(:double)}, #{operands[0].x86Operand(:double)}"
+ when "pop"
+ $asm.puts "pop #{operands[0].x86Operand(:int)}"
+ when "push"
+ $asm.puts "push #{operands[0].x86Operand(:int)}"
+ when "move", "sxi2p", "zxi2p"
+ if Immediate.new(nil, 0) == operands[0] and operands[1].is_a? RegisterID
+ $asm.puts "xorl #{operands[1].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ elsif operands[0] != operands[1]
+ $asm.puts "movl #{x86Operands(:int, :int)}"
+ end
+ when "nop"
+ $asm.puts "nop"
+ when "bieq", "bpeq"
+ handleX86IntBranch("je", :int)
+ when "bineq", "bpneq"
+ handleX86IntBranch("jne", :int)
+ when "bia", "bpa"
+ handleX86IntBranch("ja", :int)
+ when "biaeq", "bpaeq"
+ handleX86IntBranch("jae", :int)
+ when "bib", "bpb"
+ handleX86IntBranch("jb", :int)
+ when "bibeq", "bpbeq"
+ handleX86IntBranch("jbe", :int)
+ when "bigt", "bpgt"
+ handleX86IntBranch("jg", :int)
+ when "bigteq", "bpgteq"
+ handleX86IntBranch("jge", :int)
+ when "bilt", "bplt"
+ handleX86IntBranch("jl", :int)
+ when "bilteq", "bplteq"
+ handleX86IntBranch("jle", :int)
+ when "bbeq"
+ handleX86IntBranch("je", :byte)
+ when "bbneq"
+ handleX86IntBranch("jne", :byte)
+ when "bba"
+ handleX86IntBranch("ja", :byte)
+ when "bbaeq"
+ handleX86IntBranch("jae", :byte)
+ when "bbb"
+ handleX86IntBranch("jb", :byte)
+ when "bbbeq"
+ handleX86IntBranch("jbe", :byte)
+ when "bbgt"
+ handleX86IntBranch("jg", :byte)
+ when "bbgteq"
+ handleX86IntBranch("jge", :byte)
+ when "bblt"
+ handleX86IntBranch("jl", :byte)
+ when "bblteq"
+            handleX86IntBranch("jle", :byte)
+ when "btio", "btpo"
+ handleX86BranchTest("jo", :int)
+ when "btis", "btps"
+ handleX86BranchTest("js", :int)
+ when "btiz", "btpz"
+ handleX86BranchTest("jz", :int)
+ when "btinz", "btpnz"
+ handleX86BranchTest("jnz", :int)
+ when "btbo"
+ handleX86BranchTest("jo", :byte)
+ when "btbs"
+ handleX86BranchTest("js", :byte)
+ when "btbz"
+ handleX86BranchTest("jz", :byte)
+ when "btbnz"
+ handleX86BranchTest("jnz", :byte)
+ when "jmp"
+ $asm.puts "jmp #{operands[0].x86CallOperand(:int)}"
+ when "baddio", "baddpo"
+ handleX86OpBranch("addl", "jo", :int)
+ when "baddis", "baddps"
+ handleX86OpBranch("addl", "js", :int)
+ when "baddiz", "baddpz"
+ handleX86OpBranch("addl", "jz", :int)
+ when "baddinz", "baddpnz"
+ handleX86OpBranch("addl", "jnz", :int)
+ when "bsubio"
+ handleX86SubBranch("jo", :int)
+ when "bsubis"
+ handleX86SubBranch("js", :int)
+ when "bsubiz"
+ handleX86SubBranch("jz", :int)
+ when "bsubinz"
+ handleX86SubBranch("jnz", :int)
+ when "bmulio"
+ handleX86OpBranch("imull", "jo", :int)
+ when "bmulis"
+ handleX86OpBranch("imull", "js", :int)
+ when "bmuliz"
+ handleX86OpBranch("imull", "jz", :int)
+ when "bmulinz"
+ handleX86OpBranch("imull", "jnz", :int)
+ when "borio"
+ handleX86OpBranch("orl", "jo", :int)
+ when "boris"
+ handleX86OpBranch("orl", "js", :int)
+ when "boriz"
+ handleX86OpBranch("orl", "jz", :int)
+ when "borinz"
+ handleX86OpBranch("orl", "jnz", :int)
+ when "break"
+ $asm.puts "int $3"
+ when "call"
+ $asm.puts "call #{operands[0].x86CallOperand(:int)}"
+ when "ret"
+ $asm.puts "ret"
+ when "cieq", "cpeq"
+ handleX86IntCompareSet("sete", :int)
+ when "cineq", "cpneq"
+ handleX86IntCompareSet("setne", :int)
+ when "cia", "cpa"
+ handleX86IntCompareSet("seta", :int)
+ when "ciaeq", "cpaeq"
+ handleX86IntCompareSet("setae", :int)
+ when "cib", "cpb"
+ handleX86IntCompareSet("setb", :int)
+ when "cibeq", "cpbeq"
+ handleX86IntCompareSet("setbe", :int)
+ when "cigt", "cpgt"
+ handleX86IntCompareSet("setg", :int)
+ when "cigteq", "cpgteq"
+ handleX86IntCompareSet("setge", :int)
+ when "cilt", "cplt"
+ handleX86IntCompareSet("setl", :int)
+ when "cilteq", "cplteq"
+ handleX86IntCompareSet("setle", :int)
+ when "tio"
+ handleX86SetTest("seto", :int)
+ when "tis"
+ handleX86SetTest("sets", :int)
+ when "tiz"
+ handleX86SetTest("setz", :int)
+ when "tinz"
+ handleX86SetTest("setnz", :int)
+ when "tbo"
+ handleX86SetTest("seto", :byte)
+ when "tbs"
+ handleX86SetTest("sets", :byte)
+ when "tbz"
+ handleX86SetTest("setz", :byte)
+ when "tbnz"
+ handleX86SetTest("setnz", :byte)
+ when "peek"
+ $asm.puts "movl #{operands[0].value * 4}(%esp), #{operands[1].x86Operand(:int)}"
+ when "poke"
+ $asm.puts "movl #{operands[0].x86Operand(:int)}, #{operands[1].value * 4}(%esp)"
+ when "cdqi"
+ $asm.puts "cdq"
+ when "idivi"
+ $asm.puts "idivl #{operands[0].x86Operand(:int)}"
+ when "fii2d"
+ $asm.puts "movd #{operands[0].x86Operand(:int)}, #{operands[2].x86Operand(:double)}"
+ $asm.puts "movd #{operands[1].x86Operand(:int)}, %xmm7"
+ $asm.puts "psllq $32, %xmm7"
+ $asm.puts "por %xmm7, #{operands[2].x86Operand(:double)}"
+ when "fd2ii"
+ $asm.puts "movd #{operands[0].x86Operand(:double)}, #{operands[1].x86Operand(:int)}"
+ $asm.puts "movsd #{operands[0].x86Operand(:double)}, %xmm7"
+ $asm.puts "psrlq $32, %xmm7"
+            $asm.puts "movd %xmm7, #{operands[2].x86Operand(:int)}"
+ when "bo"
+ $asm.puts "jo #{operands[0].asmLabel}"
+ when "bs"
+ $asm.puts "js #{operands[0].asmLabel}"
+ when "bz"
+ $asm.puts "jz #{operands[0].asmLabel}"
+ when "bnz"
+ $asm.puts "jnz #{operands[0].asmLabel}"
+ when "leai", "leap"
+ $asm.puts "leal #{operands[0].x86Operand(:int)}, #{operands[1].x86Operand(:int)}"
+ else
+ raise "Bad opcode: #{opcode}"
+ end
+ end
+end
+
diff --git a/Source/JavaScriptCore/os-win32/inttypes.h b/Source/JavaScriptCore/os-win32/inttypes.h
index 0ed6718d8..af7390e98 100644
--- a/Source/JavaScriptCore/os-win32/inttypes.h
+++ b/Source/JavaScriptCore/os-win32/inttypes.h
@@ -38,6 +38,11 @@
#error "This inttypes.h file should only be compiled with MSVC"
#endif
+#ifdef WTF_COMPILER_MSVC7_OR_LOWER
+// https://bugs.webkit.org/show_bug.cgi?id=76210
+#error "Visual Studio 2005 or newer is required"
+#endif
+
#if _MSC_VER > 1000
#pragma once
#endif
@@ -63,22 +68,22 @@
#define PRIdFAST16 "hd"
#define PRIiFAST16 "hi"
-#define PRId32 "I32d"
-#define PRIi32 "I32i"
-#define PRIdLEAST32 "I32d"
-#define PRIiLEAST32 "I32i"
-#define PRIdFAST32 "I32d"
-#define PRIiFAST32 "I32i"
+#define PRId32 "d"
+#define PRIi32 "i"
+#define PRIdLEAST32 "d"
+#define PRIiLEAST32 "i"
+#define PRIdFAST32 "d"
+#define PRIiFAST32 "i"
-#define PRId64 "I64d"
-#define PRIi64 "I64i"
-#define PRIdLEAST64 "I64d"
-#define PRIiLEAST64 "I64i"
-#define PRIdFAST64 "I64d"
-#define PRIiFAST64 "I64i"
+#define PRId64 "lld"
+#define PRIi64 "lli"
+#define PRIdLEAST64 "lld"
+#define PRIiLEAST64 "lli"
+#define PRIdFAST64 "lld"
+#define PRIiFAST64 "lli"
-#define PRIdMAX "I64d"
-#define PRIiMAX "I64i"
+#define PRIdMAX "lld"
+#define PRIiMAX "lli"
#define PRIdPTR "Id"
#define PRIiPTR "Ii"
@@ -110,36 +115,36 @@
#define PRIxFAST16 "hx"
#define PRIXFAST16 "hX"
-#define PRIo32 "I32o"
-#define PRIu32 "I32u"
-#define PRIx32 "I32x"
-#define PRIX32 "I32X"
-#define PRIoLEAST32 "I32o"
-#define PRIuLEAST32 "I32u"
-#define PRIxLEAST32 "I32x"
-#define PRIXLEAST32 "I32X"
-#define PRIoFAST32 "I32o"
-#define PRIuFAST32 "I32u"
-#define PRIxFAST32 "I32x"
-#define PRIXFAST32 "I32X"
-
-#define PRIo64 "I64o"
-#define PRIu64 "I64u"
-#define PRIx64 "I64x"
-#define PRIX64 "I64X"
-#define PRIoLEAST64 "I64o"
-#define PRIuLEAST64 "I64u"
-#define PRIxLEAST64 "I64x"
-#define PRIXLEAST64 "I64X"
-#define PRIoFAST64 "I64o"
-#define PRIuFAST64 "I64u"
-#define PRIxFAST64 "I64x"
-#define PRIXFAST64 "I64X"
-
-#define PRIoMAX "I64o"
-#define PRIuMAX "I64u"
-#define PRIxMAX "I64x"
-#define PRIXMAX "I64X"
+#define PRIo32 "o"
+#define PRIu32 "u"
+#define PRIx32 "x"
+#define PRIX32 "X"
+#define PRIoLEAST32 "o"
+#define PRIuLEAST32 "u"
+#define PRIxLEAST32 "x"
+#define PRIXLEAST32 "X"
+#define PRIoFAST32 "o"
+#define PRIuFAST32 "u"
+#define PRIxFAST32 "x"
+#define PRIXFAST32 "X"
+
+#define PRIo64 "llo"
+#define PRIu64 "llu"
+#define PRIx64 "llx"
+#define PRIX64 "llX"
+#define PRIoLEAST64 "llo"
+#define PRIuLEAST64 "llu"
+#define PRIxLEAST64 "llx"
+#define PRIXLEAST64 "llX"
+#define PRIoFAST64 "llo"
+#define PRIuFAST64 "llu"
+#define PRIxFAST64 "llx"
+#define PRIXFAST64 "llX"
+
+#define PRIoMAX "llo"
+#define PRIuMAX "llu"
+#define PRIxMAX "llx"
+#define PRIXMAX "llX"
#define PRIoPTR "Io"
#define PRIuPTR "Iu"
@@ -168,19 +173,19 @@
#define SCNdFAST32 "ld"
#define SCNiFAST32 "li"
-#define SCNd64 "I64d"
-#define SCNi64 "I64i"
-#define SCNdLEAST64 "I64d"
-#define SCNiLEAST64 "I64i"
-#define SCNdFAST64 "I64d"
-#define SCNiFAST64 "I64i"
+#define SCNd64 "lld"
+#define SCNi64 "lli"
+#define SCNdLEAST64 "lld"
+#define SCNiLEAST64 "lli"
+#define SCNdFAST64 "lld"
+#define SCNiFAST64 "lli"
-#define SCNdMAX "I64d"
-#define SCNiMAX "I64i"
+#define SCNdMAX "lld"
+#define SCNiMAX "lli"
#ifdef _WIN64
-# define SCNdPTR "I64d"
-# define SCNiPTR "I64i"
+# define SCNdPTR "lld"
+# define SCNiPTR "lli"
#else
# define SCNdPTR "ld"
# define SCNiPTR "li"
@@ -226,29 +231,29 @@
#define SCNxFAST32 "lx"
#define SCNXFAST32 "lX"
-#define SCNo64 "I64o"
-#define SCNu64 "I64u"
-#define SCNx64 "I64x"
-#define SCNX64 "I64X"
-#define SCNoLEAST64 "I64o"
-#define SCNuLEAST64 "I64u"
-#define SCNxLEAST64 "I64x"
-#define SCNXLEAST64 "I64X"
-#define SCNoFAST64 "I64o"
-#define SCNuFAST64 "I64u"
-#define SCNxFAST64 "I64x"
-#define SCNXFAST64 "I64X"
-
-#define SCNoMAX "I64o"
-#define SCNuMAX "I64u"
-#define SCNxMAX "I64x"
-#define SCNXMAX "I64X"
+#define SCNo64 "llo"
+#define SCNu64 "llu"
+#define SCNx64 "llx"
+#define SCNX64 "llX"
+#define SCNoLEAST64 "llo"
+#define SCNuLEAST64 "llu"
+#define SCNxLEAST64 "llx"
+#define SCNXLEAST64 "llX"
+#define SCNoFAST64 "llo"
+#define SCNuFAST64 "llu"
+#define SCNxFAST64 "llx"
+#define SCNXFAST64 "llX"
+
+#define SCNoMAX "llo"
+#define SCNuMAX "llu"
+#define SCNxMAX "llx"
+#define SCNXMAX "llX"
#ifdef _WIN64
-# define SCNoPTR "I64o"
-# define SCNuPTR "I64u"
-# define SCNxPTR "I64x"
-# define SCNXPTR "I64X"
+# define SCNoPTR "llo"
+# define SCNuPTR "llu"
+# define SCNxPTR "llx"
+# define SCNXPTR "llX"
#else
# define SCNoPTR "lo"
# define SCNuPTR "lu"
diff --git a/Source/JavaScriptCore/parser/ASTBuilder.h b/Source/JavaScriptCore/parser/ASTBuilder.h
index b6ea004b5..b8718709b 100644
--- a/Source/JavaScriptCore/parser/ASTBuilder.h
+++ b/Source/JavaScriptCore/parser/ASTBuilder.h
@@ -265,7 +265,6 @@ public:
FunctionBodyNode* createFunctionBody(int lineNumber, bool inStrictContext)
{
- usesClosures();
return FunctionBodyNode::create(m_globalData, lineNumber, inStrictContext);
}
@@ -624,9 +623,7 @@ private:
void incConstants() { m_scope.m_numConstants++; }
void usesThis() { m_scope.m_features |= ThisFeature; }
void usesCatch() { m_scope.m_features |= CatchFeature; }
- void usesClosures() { m_scope.m_features |= ClosureFeature; }
void usesArguments() { m_scope.m_features |= ArgumentsFeature; }
- void usesAssignment() { m_scope.m_features |= AssignFeature; }
void usesWith() { m_scope.m_features |= WithFeature; }
void usesEval()
{
@@ -904,7 +901,6 @@ ExpressionNode* ASTBuilder::makeBinaryNode(int lineNumber, int token, pair<Expre
ExpressionNode* ASTBuilder::makeAssignNode(int lineNumber, ExpressionNode* loc, Operator op, ExpressionNode* expr, bool locHasAssignments, bool exprHasAssignments, int start, int divot, int end)
{
- usesAssignment();
if (!loc->isLocation())
return new (m_globalData) AssignErrorNode(lineNumber, loc, op, expr, divot, divot - start, end - divot);
@@ -942,7 +938,6 @@ ExpressionNode* ASTBuilder::makeAssignNode(int lineNumber, ExpressionNode* loc,
ExpressionNode* ASTBuilder::makePrefixNode(int lineNumber, ExpressionNode* expr, Operator op, int start, int divot, int end)
{
- usesAssignment();
if (!expr->isLocation())
return new (m_globalData) PrefixErrorNode(lineNumber, expr, op, divot, divot - start, end - divot);
@@ -965,7 +960,6 @@ ExpressionNode* ASTBuilder::makePrefixNode(int lineNumber, ExpressionNode* expr,
ExpressionNode* ASTBuilder::makePostfixNode(int lineNumber, ExpressionNode* expr, Operator op, int start, int divot, int end)
{
- usesAssignment();
if (!expr->isLocation())
return new (m_globalData) PostfixErrorNode(lineNumber, expr, op, divot, divot - start, end - divot);
diff --git a/Source/JavaScriptCore/parser/Keywords.table b/Source/JavaScriptCore/parser/Keywords.table
index 333b4762d..27c4e53a2 100644
--- a/Source/JavaScriptCore/parser/Keywords.table
+++ b/Source/JavaScriptCore/parser/Keywords.table
@@ -1,12 +1,12 @@
-# main keywords
+# Main keywords.
@begin mainTable 47
-# types
+# Types.
null NULLTOKEN
true TRUETOKEN
false FALSETOKEN
-# keywords
+# Keywords.
break BREAK
case CASE
catch CATCH
@@ -35,7 +35,7 @@ typeof TYPEOF
with WITH
debugger DEBUGGER
-# reserved for future use
+# Reserved for future use.
class RESERVED
enum RESERVED
export RESERVED
@@ -46,7 +46,7 @@ super RESERVED
# technically RESERVED_IF_STRICT in ES5, but may be reserved in ES6.
let RESERVED
-# reserved for future use in strict code
+# Reserved for future use in strict code.
implements RESERVED_IF_STRICT
interface RESERVED_IF_STRICT
package RESERVED_IF_STRICT
@@ -57,4 +57,3 @@ static RESERVED_IF_STRICT
yield RESERVED_IF_STRICT
@end
-
diff --git a/Source/JavaScriptCore/parser/Lexer.cpp b/Source/JavaScriptCore/parser/Lexer.cpp
index e38b52480..015c1509d 100644
--- a/Source/JavaScriptCore/parser/Lexer.cpp
+++ b/Source/JavaScriptCore/parser/Lexer.cpp
@@ -3,6 +3,7 @@
* Copyright (C) 2006, 2007, 2008, 2009 Apple Inc. All Rights Reserved.
* Copyright (C) 2007 Cameron Zwarich (cwzwarich@uwaterloo.ca)
* Copyright (C) 2010 Zoltan Herczeg (zherczeg@inf.u-szeged.hu)
+ * Copyright (C) 2012 Mathias Bynens (mathias@qiwi.be)
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -379,8 +380,8 @@ static inline bool isIdentStart(int c)
static NEVER_INLINE bool isNonASCIIIdentPart(int c)
{
- return category(c) & (Letter_Uppercase | Letter_Lowercase | Letter_Titlecase | Letter_Modifier | Letter_Other
- | Mark_NonSpacing | Mark_SpacingCombining | Number_DecimalDigit | Punctuation_Connector);
+ return (category(c) & (Letter_Uppercase | Letter_Lowercase | Letter_Titlecase | Letter_Modifier | Letter_Other
+ | Mark_NonSpacing | Mark_SpacingCombining | Number_DecimalDigit | Punctuation_Connector)) || c == 0x200C || c == 0x200D;
}
static ALWAYS_INLINE bool isIdentPart(int c)
@@ -1328,7 +1329,7 @@ inNumberAfterDecimalPoint:
}
// Null-terminate string for strtod.
m_buffer8.append('\0');
- tokenData->doubleValue = WTF::strtod(reinterpret_cast<const char*>(m_buffer8.data()), 0);
+ tokenData->doubleValue = WTF::strtod<WTF::AllowTrailingJunk>(reinterpret_cast<const char*>(m_buffer8.data()), 0);
}
token = NUMBER;
}
diff --git a/Source/JavaScriptCore/parser/Nodes.h b/Source/JavaScriptCore/parser/Nodes.h
index 278d17ef4..0373766b5 100644
--- a/Source/JavaScriptCore/parser/Nodes.h
+++ b/Source/JavaScriptCore/parser/Nodes.h
@@ -51,17 +51,14 @@ namespace JSC {
const CodeFeatures NoFeatures = 0;
const CodeFeatures EvalFeature = 1 << 0;
- const CodeFeatures ClosureFeature = 1 << 1;
- const CodeFeatures AssignFeature = 1 << 2;
- const CodeFeatures ArgumentsFeature = 1 << 3;
- const CodeFeatures WithFeature = 1 << 4;
- const CodeFeatures CatchFeature = 1 << 5;
- const CodeFeatures ThisFeature = 1 << 6;
- const CodeFeatures StrictModeFeature = 1 << 7;
- const CodeFeatures ShadowsArgumentsFeature = 1 << 8;
+ const CodeFeatures ArgumentsFeature = 1 << 1;
+ const CodeFeatures WithFeature = 1 << 2;
+ const CodeFeatures CatchFeature = 1 << 3;
+ const CodeFeatures ThisFeature = 1 << 4;
+ const CodeFeatures StrictModeFeature = 1 << 5;
+ const CodeFeatures ShadowsArgumentsFeature = 1 << 6;
-
- const CodeFeatures AllFeatures = EvalFeature | ClosureFeature | AssignFeature | ArgumentsFeature | WithFeature | CatchFeature | ThisFeature | StrictModeFeature | ShadowsArgumentsFeature;
+ const CodeFeatures AllFeatures = EvalFeature | ArgumentsFeature | WithFeature | CatchFeature | ThisFeature | StrictModeFeature | ShadowsArgumentsFeature;
enum Operator {
OpEqual,
@@ -1493,7 +1490,7 @@ namespace JSC {
void finishParsing(PassRefPtr<FunctionParameters>, const Identifier&);
const Identifier& ident() { return m_ident; }
- void setInferredName(const Identifier& inferredName) { m_inferredName = inferredName; }
+ void setInferredName(const Identifier& inferredName) { ASSERT(!inferredName.isNull()); m_inferredName = inferredName; }
const Identifier& inferredName() { return m_inferredName.isEmpty() ? m_ident : m_inferredName; }
static const bool scopeIsFunction = true;
diff --git a/Source/JavaScriptCore/parser/Parser.cpp b/Source/JavaScriptCore/parser/Parser.cpp
index 25ada5606..939d2696c 100644
--- a/Source/JavaScriptCore/parser/Parser.cpp
+++ b/Source/JavaScriptCore/parser/Parser.cpp
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2001 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003, 2006, 2007, 2008, 2009, 2010 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -774,7 +774,6 @@ template <FunctionRequirements requirements, bool nameIsInContainingScope, class
functionScope->setIsFunction();
if (match(IDENT)) {
name = m_token.m_data.ident;
- failIfTrueWithMessage(*name == m_globalData->propertyNames->underscoreProto, "Cannot name a function __proto__");
next();
if (!nameIsInContainingScope)
failIfFalseIfStrict(functionScope->declareVariable(name));
diff --git a/Source/JavaScriptCore/parser/Parser.h b/Source/JavaScriptCore/parser/Parser.h
index f3d96ff3e..9b76242d4 100644
--- a/Source/JavaScriptCore/parser/Parser.h
+++ b/Source/JavaScriptCore/parser/Parser.h
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1999-2001 Harri Porten (porten@kde.org)
* Copyright (C) 2001 Peter Kelly (pmk@post.com)
- * Copyright (C) 2003, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2003, 2006, 2007, 2008, 2009, 2010, 2011 Apple Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
@@ -37,6 +37,15 @@
#include <wtf/Noncopyable.h>
#include <wtf/OwnPtr.h>
#include <wtf/RefPtr.h>
+namespace JSC {
+struct Scope;
+}
+
+namespace WTF {
+template <> struct VectorTraits<JSC::Scope> : SimpleClassVectorTraits {
+ static const bool canInitializeWithMemset = false; // Not all Scope data members initialize to 0.
+};
+}
namespace JSC {
@@ -1016,7 +1025,7 @@ PassRefPtr<ParsedNode> Parser<LexerType>::parse(JSGlobalObject* lexicalGlobalObj
else if (isEvalNode<ParsedNode>())
*exception = createSyntaxError(lexicalGlobalObject, errMsg);
else
- *exception = addErrorInfo(&lexicalGlobalObject->globalData(), createSyntaxError(lexicalGlobalObject, errMsg), errLine, *m_source);
+ *exception = addErrorInfo(&lexicalGlobalObject->globalData(), createSyntaxError(lexicalGlobalObject, errMsg), errLine, *m_source, Vector<StackFrame>());
}
if (debugger && !ParsedNode::scopeIsFunction)
diff --git a/Source/JavaScriptCore/profiler/Profile.cpp b/Source/JavaScriptCore/profiler/Profile.cpp
index 1a84518c1..49d6de97a 100644
--- a/Source/JavaScriptCore/profiler/Profile.cpp
+++ b/Source/JavaScriptCore/profiler/Profile.cpp
@@ -28,6 +28,7 @@
#include "ProfileNode.h"
#include <stdio.h>
+#include <wtf/DataLog.h>
namespace JSC {
@@ -102,7 +103,7 @@ void Profile::restoreAll()
#ifndef NDEBUG
void Profile::debugPrintData() const
{
- printf("Call graph:\n");
+ dataLog("Call graph:\n");
m_head->debugPrintData(0);
}
@@ -118,18 +119,18 @@ void Profile::debugPrintDataSampleStyle() const
typedef Vector<NameCountPair> NameCountPairVector;
FunctionCallHashCount countedFunctions;
- printf("Call graph:\n");
+ dataLog("Call graph:\n");
m_head->debugPrintDataSampleStyle(0, countedFunctions);
- printf("\nTotal number in stack:\n");
+ dataLog("\nTotal number in stack:\n");
NameCountPairVector sortedFunctions(countedFunctions.size());
copyToVector(countedFunctions, sortedFunctions);
std::sort(sortedFunctions.begin(), sortedFunctions.end(), functionNameCountPairComparator);
for (NameCountPairVector::iterator it = sortedFunctions.begin(); it != sortedFunctions.end(); ++it)
- printf(" %-12d%s\n", (*it).second, UString((*it).first).utf8().data());
+ dataLog(" %-12d%s\n", (*it).second, UString((*it).first).utf8().data());
- printf("\nSort by top of stack, same collapsed (when >= 5):\n");
+ dataLog("\nSort by top of stack, same collapsed (when >= 5):\n");
}
#endif
diff --git a/Source/JavaScriptCore/profiler/ProfileNode.cpp b/Source/JavaScriptCore/profiler/ProfileNode.cpp
index 8f20bbeff..ab43d1511 100644
--- a/Source/JavaScriptCore/profiler/ProfileNode.cpp
+++ b/Source/JavaScriptCore/profiler/ProfileNode.cpp
@@ -32,6 +32,7 @@
#include "Profiler.h"
#include <stdio.h>
#include <wtf/DateMath.h>
+#include <wtf/DataLog.h>
#include <wtf/text/StringHash.h>
#if OS(WINDOWS)
@@ -293,9 +294,9 @@ void ProfileNode::debugPrintData(int indentLevel) const
{
// Print function names
for (int i = 0; i < indentLevel; ++i)
- printf(" ");
+ dataLog(" ");
- printf("Function Name %s %d SelfTime %.3fms/%.3f%% TotalTime %.3fms/%.3f%% VSelf %.3fms VTotal %.3fms Visible %s Next Sibling %s\n",
+ dataLog("Function Name %s %d SelfTime %.3fms/%.3f%% TotalTime %.3fms/%.3f%% VSelf %.3fms VTotal %.3fms Visible %s Next Sibling %s\n",
functionName().utf8().data(),
m_numberOfCalls, m_actualSelfTime, selfPercent(), m_actualTotalTime, totalPercent(),
m_visibleSelfTime, m_visibleTotalTime,
@@ -312,20 +313,20 @@ void ProfileNode::debugPrintData(int indentLevel) const
// print the profiled data in a format that matches the tool sample's output.
double ProfileNode::debugPrintDataSampleStyle(int indentLevel, FunctionCallHashCount& countedFunctions) const
{
- printf(" ");
+ dataLog(" ");
// Print function names
const char* name = functionName().utf8().data();
double sampleCount = m_actualTotalTime * 1000;
if (indentLevel) {
for (int i = 0; i < indentLevel; ++i)
- printf(" ");
+ dataLog(" ");
countedFunctions.add(functionName().impl());
- printf("%.0f %s\n", sampleCount ? sampleCount : 1, name);
+ dataLog("%.0f %s\n", sampleCount ? sampleCount : 1, name);
} else
- printf("%s\n", name);
+ dataLog("%s\n", name);
++indentLevel;
@@ -337,11 +338,11 @@ double ProfileNode::debugPrintDataSampleStyle(int indentLevel, FunctionCallHashC
sumOfChildrensCount *= 1000; //
// Print remainder of samples to match sample's output
if (sumOfChildrensCount < sampleCount) {
- printf(" ");
+ dataLog(" ");
while (indentLevel--)
- printf(" ");
+ dataLog(" ");
- printf("%.0f %s\n", sampleCount - sumOfChildrensCount, functionName().utf8().data());
+ dataLog("%.0f %s\n", sampleCount - sumOfChildrensCount, functionName().utf8().data());
}
return m_actualTotalTime;
diff --git a/Source/JavaScriptCore/runtime/Arguments.cpp b/Source/JavaScriptCore/runtime/Arguments.cpp
index 6a675ab84..a099adb75 100644
--- a/Source/JavaScriptCore/runtime/Arguments.cpp
+++ b/Source/JavaScriptCore/runtime/Arguments.cpp
@@ -243,6 +243,9 @@ bool Arguments::deletePropertyByIndex(JSCell* cell, ExecState* exec, unsigned i)
{
Arguments* thisObject = jsCast<Arguments*>(cell);
if (i < thisObject->d->numArguments) {
+ if (!Base::deletePropertyByIndex(cell, exec, i))
+ return false;
+
if (!thisObject->d->deletedArguments) {
thisObject->d->deletedArguments = adoptArrayPtr(new bool[thisObject->d->numArguments]);
memset(thisObject->d->deletedArguments.get(), 0, sizeof(bool) * thisObject->d->numArguments);
@@ -258,10 +261,16 @@ bool Arguments::deletePropertyByIndex(JSCell* cell, ExecState* exec, unsigned i)
bool Arguments::deleteProperty(JSCell* cell, ExecState* exec, const Identifier& propertyName)
{
+ if (exec->globalData().isInDefineOwnProperty())
+ return Base::deleteProperty(cell, exec, propertyName);
+
Arguments* thisObject = jsCast<Arguments*>(cell);
bool isArrayIndex;
unsigned i = propertyName.toArrayIndex(isArrayIndex);
if (isArrayIndex && i < thisObject->d->numArguments) {
+ if (!Base::deleteProperty(cell, exec, propertyName))
+ return false;
+
if (!thisObject->d->deletedArguments) {
thisObject->d->deletedArguments = adoptArrayPtr(new bool[thisObject->d->numArguments]);
memset(thisObject->d->deletedArguments.get(), 0, sizeof(bool) * thisObject->d->numArguments);
@@ -285,12 +294,74 @@ bool Arguments::deleteProperty(JSCell* cell, ExecState* exec, const Identifier&
thisObject->createStrictModeCalleeIfNecessary(exec);
}
- if (propertyName == exec->propertyNames().caller && !thisObject->d->isStrictMode)
+ if (propertyName == exec->propertyNames().caller && thisObject->d->isStrictMode)
thisObject->createStrictModeCallerIfNecessary(exec);
return JSObject::deleteProperty(thisObject, exec, propertyName);
}
+bool Arguments::defineOwnProperty(JSObject* object, ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor, bool shouldThrow)
+{
+ Arguments* thisObject = jsCast<Arguments*>(object);
+ bool isArrayIndex;
+ unsigned i = propertyName.toArrayIndex(isArrayIndex);
+ if (isArrayIndex && i < thisObject->d->numArguments) {
+ if (!Base::defineOwnProperty(object, exec, propertyName, descriptor, shouldThrow))
+ return false;
+
+ if (!thisObject->d->deletedArguments) {
+ thisObject->d->deletedArguments = adoptArrayPtr(new bool[thisObject->d->numArguments]);
+ memset(thisObject->d->deletedArguments.get(), 0, sizeof(bool) * thisObject->d->numArguments);
+ }
+ // From ES 5.1, 10.6 Arguments Object
+ // 5. If the value of isMapped is not undefined, then
+ if (!thisObject->d->deletedArguments[i]) {
+ // a. If IsAccessorDescriptor(Desc) is true, then
+ if (descriptor.isAccessorDescriptor()) {
+ // i. Call the [[Delete]] internal method of map passing P, and false as the arguments.
+ thisObject->d->deletedArguments[i] = true;
+ } else if (descriptor.value()) { // b. Else i. If Desc.[[Value]] is present, then
+ // 1. Call the [[Put]] internal method of map passing P, Desc.[[Value]], and Throw as the arguments.
+ // ii. If Desc.[[Writable]] is present and its value is false, then
+ thisObject->argument(i).set(exec->globalData(), thisObject, descriptor.value());
+ if (descriptor.writablePresent() && !descriptor.writable())
+ thisObject->d->deletedArguments[i] = true; // 1. Call the [[Delete]] internal method of map passing P and false as arguments.
+ }
+ }
+
+ return true;
+ }
+
+ if (propertyName == exec->propertyNames().length && !thisObject->d->overrodeLength) {
+ thisObject->d->overrodeLength = true;
+ if (!descriptor.isAccessorDescriptor()) {
+ if (!descriptor.value())
+ descriptor.setValue(jsNumber(thisObject->d->numArguments));
+ if (!descriptor.configurablePresent())
+ descriptor.setConfigurable(true);
+ }
+ if (!descriptor.configurablePresent())
+ descriptor.setConfigurable(true);
+ }
+
+ if (propertyName == exec->propertyNames().callee && !thisObject->d->overrodeCallee) {
+ thisObject->d->overrodeCallee = true;
+ if (!descriptor.isAccessorDescriptor()) {
+ if (!descriptor.value())
+ descriptor.setValue(thisObject->d->callee.get());
+ if (!descriptor.configurablePresent())
+ descriptor.setConfigurable(true);
+ }
+ if (!descriptor.configurablePresent())
+ descriptor.setConfigurable(true);
+ }
+
+ if (propertyName == exec->propertyNames().caller && thisObject->d->isStrictMode)
+ thisObject->createStrictModeCallerIfNecessary(exec);
+
+ return Base::defineOwnProperty(object, exec, propertyName, descriptor, shouldThrow);
+}
+
void Arguments::tearOff(CallFrame* callFrame)
{
if (isTornOff())
diff --git a/Source/JavaScriptCore/runtime/Arguments.h b/Source/JavaScriptCore/runtime/Arguments.h
index 3564fe447..ee54a49eb 100644
--- a/Source/JavaScriptCore/runtime/Arguments.h
+++ b/Source/JavaScriptCore/runtime/Arguments.h
@@ -117,6 +117,7 @@ namespace JSC {
static void putByIndex(JSCell*, ExecState*, unsigned propertyName, JSValue);
static bool deleteProperty(JSCell*, ExecState*, const Identifier& propertyName);
static bool deletePropertyByIndex(JSCell*, ExecState*, unsigned propertyName);
+ static bool defineOwnProperty(JSObject*, ExecState*, const Identifier& propertyName, PropertyDescriptor&, bool shouldThrow);
void createStrictModeCallerIfNecessary(ExecState*);
void createStrictModeCalleeIfNecessary(ExecState*);
diff --git a/Source/JavaScriptCore/runtime/CodeSpecializationKind.h b/Source/JavaScriptCore/runtime/CodeSpecializationKind.h
new file mode 100644
index 000000000..ba2a54f37
--- /dev/null
+++ b/Source/JavaScriptCore/runtime/CodeSpecializationKind.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CodeSpecializationKind_h
+#define CodeSpecializationKind_h
+
+namespace JSC {
+
+enum CodeSpecializationKind { CodeForCall, CodeForConstruct };
+
+} // namespace JSC
+
+#endif // CodeSpecializationKind_h
+
diff --git a/Source/JavaScriptCore/runtime/CommonIdentifiers.h b/Source/JavaScriptCore/runtime/CommonIdentifiers.h
index 08d8644b5..d79e5c783 100644
--- a/Source/JavaScriptCore/runtime/CommonIdentifiers.h
+++ b/Source/JavaScriptCore/runtime/CommonIdentifiers.h
@@ -62,6 +62,7 @@
macro(prototype) \
macro(set) \
macro(source) \
+ macro(stack) \
macro(test) \
macro(toExponential) \
macro(toFixed) \
diff --git a/Source/JavaScriptCore/runtime/CommonSlowPaths.h b/Source/JavaScriptCore/runtime/CommonSlowPaths.h
index 86c4bd5c2..345af2ebe 100644
--- a/Source/JavaScriptCore/runtime/CommonSlowPaths.h
+++ b/Source/JavaScriptCore/runtime/CommonSlowPaths.h
@@ -27,6 +27,7 @@
#define CommonSlowPaths_h
#include "CodeBlock.h"
+#include "CodeSpecializationKind.h"
#include "ExceptionHelpers.h"
#include "JSArray.h"
@@ -41,6 +42,38 @@ namespace JSC {
namespace CommonSlowPaths {
+ALWAYS_INLINE ExecState* arityCheckFor(ExecState* exec, RegisterFile* registerFile, CodeSpecializationKind kind)
+{
+ JSFunction* callee = asFunction(exec->callee());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeFor(kind);
+ int argumentCountIncludingThis = exec->argumentCountIncludingThis();
+
+ // This ensures enough space for the worst case scenario of zero arguments passed by the caller.
+ if (!registerFile->grow(exec->registers() + newCodeBlock->numParameters() + newCodeBlock->m_numCalleeRegisters))
+ return 0;
+
+ ASSERT(argumentCountIncludingThis < newCodeBlock->numParameters());
+
+ // Too few arguments -- copy call frame and arguments, then fill in missing arguments with undefined.
+ size_t delta = newCodeBlock->numParameters() - argumentCountIncludingThis;
+ Register* src = exec->registers();
+ Register* dst = exec->registers() + delta;
+
+ int i;
+ int end = -ExecState::offsetFor(argumentCountIncludingThis);
+ for (i = -1; i >= end; --i)
+ dst[i] = src[i];
+
+ end -= delta;
+ for ( ; i >= end; --i)
+ dst[i] = jsUndefined();
+
+ ExecState* newExec = ExecState::create(dst);
+ ASSERT((void*)newExec <= registerFile->end());
+ return newExec;
+}
+
ALWAYS_INLINE bool opInstanceOfSlow(ExecState* exec, JSValue value, JSValue baseVal, JSValue proto)
{
ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
diff --git a/Source/JavaScriptCore/runtime/DatePrototype.cpp b/Source/JavaScriptCore/runtime/DatePrototype.cpp
index 922fb0a86..ddea33786 100644
--- a/Source/JavaScriptCore/runtime/DatePrototype.cpp
+++ b/Source/JavaScriptCore/runtime/DatePrototype.cpp
@@ -60,7 +60,7 @@
#include <sys/timeb.h>
#endif
-#if PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(WX) || (PLATFORM(QT) && OS(DARWIN))
+#if PLATFORM(MAC) || PLATFORM(IOS) || (PLATFORM(WX) && OS(DARWIN)) || (PLATFORM(QT) && OS(DARWIN))
#include <CoreFoundation/CoreFoundation.h>
#elif USE(ICU_UNICODE)
#include <unicode/udat.h>
@@ -130,7 +130,7 @@ namespace JSC {
enum LocaleDateTimeFormat { LocaleDateAndTime, LocaleDate, LocaleTime };
-#if PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(WX) || (PLATFORM(QT) && OS(DARWIN))
+#if PLATFORM(MAC) || PLATFORM(IOS) || (PLATFORM(WX) && OS(DARWIN)) || (PLATFORM(QT) && OS(DARWIN))
// FIXME: Since this is superior to the strftime-based version, why limit this to PLATFORM(MAC)?
// Instead we should consider using this whenever USE(CF) is true.
diff --git a/Source/JavaScriptCore/runtime/Error.cpp b/Source/JavaScriptCore/runtime/Error.cpp
index 0947e3c20..243dc8856 100644
--- a/Source/JavaScriptCore/runtime/Error.cpp
+++ b/Source/JavaScriptCore/runtime/Error.cpp
@@ -26,7 +26,9 @@
#include "ConstructData.h"
#include "ErrorConstructor.h"
+#include "ExceptionHelpers.h"
#include "FunctionPrototype.h"
+#include "JSArray.h"
#include "JSFunction.h"
#include "JSGlobalObject.h"
#include "JSObject.h"
@@ -116,7 +118,7 @@ JSObject* createURIError(ExecState* exec, const UString& message)
return createURIError(exec->lexicalGlobalObject(), message);
}
-JSObject* addErrorInfo(JSGlobalData* globalData, JSObject* error, int line, const SourceCode& source)
+JSObject* addErrorInfo(JSGlobalData* globalData, JSObject* error, int line, const SourceCode& source, const Vector<StackFrame>& stackTrace)
{
const UString& sourceURL = source.provider()->url();
@@ -124,13 +126,34 @@ JSObject* addErrorInfo(JSGlobalData* globalData, JSObject* error, int line, cons
error->putDirect(*globalData, Identifier(globalData, linePropertyName), jsNumber(line), ReadOnly | DontDelete);
if (!sourceURL.isNull())
error->putDirect(*globalData, Identifier(globalData, sourceURLPropertyName), jsString(globalData, sourceURL), ReadOnly | DontDelete);
+ if (!stackTrace.isEmpty()) {
+ JSGlobalObject* globalObject = 0;
+ if (isTerminatedExecutionException(error) || isInterruptedExecutionException(error))
+ globalObject = globalData->dynamicGlobalObject;
+ else
+ globalObject = error->globalObject();
+ // We use the tryCreateUninitialized creation mechanism and related initialization
+ // functions as they're the only mechanism we currently have that will guarantee we
+ // don't call setters on the prototype. Technically it's faster than the alternative,
+ // but the numerous allocations that take place in this loop makes that last bit
+ // somewhat moot.
+ JSArray* stackTraceArray = JSArray::tryCreateUninitialized(*globalData, globalObject->arrayStructure(), stackTrace.size());
+ if (!stackTraceArray)
+ return error;
+ for (unsigned i = 0; i < stackTrace.size(); i++) {
+ UString stackLevel = stackTrace[i].toString(globalObject->globalExec());
+ stackTraceArray->initializeIndex(*globalData, i, jsString(globalData, stackLevel));
+ }
+ stackTraceArray->completeInitialization(stackTrace.size());
+ error->putDirect(*globalData, globalData->propertyNames->stack, stackTraceArray, ReadOnly | DontDelete);
+ }
return error;
}
-JSObject* addErrorInfo(ExecState* exec, JSObject* error, int line, const SourceCode& source)
+JSObject* addErrorInfo(ExecState* exec, JSObject* error, int line, const SourceCode& source, const Vector<StackFrame>& stackTrace)
{
- return addErrorInfo(&exec->globalData(), error, line, source);
+ return addErrorInfo(&exec->globalData(), error, line, source, stackTrace);
}
bool hasErrorInfo(ExecState* exec, JSObject* error)
diff --git a/Source/JavaScriptCore/runtime/Error.h b/Source/JavaScriptCore/runtime/Error.h
index 88b540a35..59b39495f 100644
--- a/Source/JavaScriptCore/runtime/Error.h
+++ b/Source/JavaScriptCore/runtime/Error.h
@@ -24,6 +24,7 @@
#define Error_h
#include "InternalFunction.h"
+#include "Interpreter.h"
#include "JSObject.h"
#include <stdint.h>
@@ -56,9 +57,9 @@ namespace JSC {
// Methods to add
bool hasErrorInfo(ExecState*, JSObject* error);
- JSObject* addErrorInfo(JSGlobalData*, JSObject* error, int line, const SourceCode&);
+ JSObject* addErrorInfo(JSGlobalData*, JSObject* error, int line, const SourceCode&, const Vector<StackFrame>&);
// ExecState wrappers.
- JSObject* addErrorInfo(ExecState*, JSObject* error, int line, const SourceCode&);
+ JSObject* addErrorInfo(ExecState*, JSObject* error, int line, const SourceCode&, const Vector<StackFrame>&);
// Methods to throw Errors.
JS_EXPORT_PRIVATE JSValue throwError(ExecState*, JSValue);
diff --git a/Source/JavaScriptCore/runtime/Executable.cpp b/Source/JavaScriptCore/runtime/Executable.cpp
index bf49767ab..25ddf764a 100644
--- a/Source/JavaScriptCore/runtime/Executable.cpp
+++ b/Source/JavaScriptCore/runtime/Executable.cpp
@@ -29,6 +29,7 @@
#include "BytecodeGenerator.h"
#include "CodeBlock.h"
#include "DFGDriver.h"
+#include "ExecutionHarness.h"
#include "JIT.h"
#include "JITDriver.h"
#include "Parser.h"
@@ -39,10 +40,12 @@ namespace JSC {
const ClassInfo ExecutableBase::s_info = { "Executable", 0, 0, 0, CREATE_METHOD_TABLE(ExecutableBase) };
+#if ENABLE(JIT)
void ExecutableBase::destroy(JSCell* cell)
{
jsCast<ExecutableBase*>(cell)->ExecutableBase::~ExecutableBase();
}
+#endif
inline void ExecutableBase::clearCode()
{
@@ -67,10 +70,12 @@ Intrinsic ExecutableBase::intrinsic() const
const ClassInfo NativeExecutable::s_info = { "NativeExecutable", &ExecutableBase::s_info, 0, 0, CREATE_METHOD_TABLE(NativeExecutable) };
+#if ENABLE(JIT)
void NativeExecutable::destroy(JSCell* cell)
{
jsCast<NativeExecutable*>(cell)->NativeExecutable::~NativeExecutable();
}
+#endif
#if ENABLE(DFG_JIT)
Intrinsic NativeExecutable::intrinsic() const
@@ -84,7 +89,7 @@ Intrinsic NativeExecutable::intrinsic() const
template<typename T>
static void jettisonCodeBlock(JSGlobalData& globalData, OwnPtr<T>& codeBlock)
{
- ASSERT(codeBlock->getJITType() != JITCode::BaselineJIT);
+ ASSERT(JITCode::isOptimizingJIT(codeBlock->getJITType()));
ASSERT(codeBlock->alternative());
OwnPtr<T> codeBlockToJettison = codeBlock.release();
codeBlock = static_pointer_cast<T>(codeBlockToJettison->releaseAlternative());
@@ -100,10 +105,12 @@ void NativeExecutable::finalize(JSCell* cell)
const ClassInfo ScriptExecutable::s_info = { "ScriptExecutable", &ExecutableBase::s_info, 0, 0, CREATE_METHOD_TABLE(ScriptExecutable) };
+#if ENABLE(JIT)
void ScriptExecutable::destroy(JSCell* cell)
{
jsCast<ScriptExecutable*>(cell)->ScriptExecutable::~ScriptExecutable();
}
+#endif
const ClassInfo EvalExecutable::s_info = { "EvalExecutable", &ScriptExecutable::s_info, 0, 0, CREATE_METHOD_TABLE(EvalExecutable) };
@@ -169,9 +176,32 @@ JSObject* EvalExecutable::compileOptimized(ExecState* exec, ScopeChainNode* scop
return error;
}
+#if ENABLE(JIT)
+void EvalExecutable::jitCompile(JSGlobalData& globalData)
+{
+ bool result = jitCompileIfAppropriate(globalData, m_evalCodeBlock, m_jitCodeForCall, JITCode::bottomTierJIT());
+ ASSERT_UNUSED(result, result);
+}
+#endif
+
+inline const char* samplingDescription(JITCode::JITType jitType)
+{
+ switch (jitType) {
+ case JITCode::InterpreterThunk:
+ return "Interpreter Compilation (TOTAL)";
+ case JITCode::BaselineJIT:
+ return "Baseline Compilation (TOTAL)";
+ case JITCode::DFGJIT:
+ return "DFG Compilation (TOTAL)";
+ default:
+ ASSERT_NOT_REACHED();
+ return 0;
+ }
+}
+
JSObject* EvalExecutable::compileInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
- SamplingRegion samplingRegion(jitType == JITCode::BaselineJIT ? "Baseline Compilation (TOTAL)" : "DFG Compilation (TOTAL)");
+ SamplingRegion samplingRegion(samplingDescription(jitType));
#if !ENABLE(JIT)
UNUSED_PARAM(jitType);
@@ -212,12 +242,12 @@ JSObject* EvalExecutable::compileInternal(ExecState* exec, ScopeChainNode* scope
}
#if ENABLE(JIT)
- if (!jitCompileIfAppropriate(*globalData, m_evalCodeBlock, m_jitCodeForCall, jitType))
+ if (!prepareForExecution(*globalData, m_evalCodeBlock, m_jitCodeForCall, jitType))
return 0;
#endif
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (!m_jitCodeForCall)
Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_evalCodeBlock));
else
@@ -297,9 +327,17 @@ JSObject* ProgramExecutable::compileOptimized(ExecState* exec, ScopeChainNode* s
return error;
}
+#if ENABLE(JIT)
+void ProgramExecutable::jitCompile(JSGlobalData& globalData)
+{
+ bool result = jitCompileIfAppropriate(globalData, m_programCodeBlock, m_jitCodeForCall, JITCode::bottomTierJIT());
+ ASSERT_UNUSED(result, result);
+}
+#endif
+
JSObject* ProgramExecutable::compileInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
- SamplingRegion samplingRegion(jitType == JITCode::BaselineJIT ? "Baseline Compilation (TOTAL)" : "DFG Compilation (TOTAL)");
+ SamplingRegion samplingRegion(samplingDescription(jitType));
#if !ENABLE(JIT)
UNUSED_PARAM(jitType);
@@ -338,12 +376,12 @@ JSObject* ProgramExecutable::compileInternal(ExecState* exec, ScopeChainNode* sc
}
#if ENABLE(JIT)
- if (!jitCompileIfAppropriate(*globalData, m_programCodeBlock, m_jitCodeForCall, jitType))
+ if (!prepareForExecution(*globalData, m_programCodeBlock, m_jitCodeForCall, jitType))
return 0;
#endif
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (!m_jitCodeForCall)
Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_programCodeBlock));
else
@@ -414,7 +452,7 @@ FunctionCodeBlock* FunctionExecutable::baselineCodeBlockFor(CodeSpecializationKi
while (result->alternative())
result = static_cast<FunctionCodeBlock*>(result->alternative());
ASSERT(result);
- ASSERT(result->getJITType() == JITCode::BaselineJIT);
+ ASSERT(JITCode::isBaselineCode(result->getJITType()));
return result;
}
@@ -440,6 +478,20 @@ JSObject* FunctionExecutable::compileOptimizedForConstruct(ExecState* exec, Scop
return error;
}
+#if ENABLE(JIT)
+void FunctionExecutable::jitCompileForCall(JSGlobalData& globalData)
+{
+ bool result = jitCompileFunctionIfAppropriate(globalData, m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, m_symbolTable, JITCode::bottomTierJIT());
+ ASSERT_UNUSED(result, result);
+}
+
+void FunctionExecutable::jitCompileForConstruct(JSGlobalData& globalData)
+{
+ bool result = jitCompileFunctionIfAppropriate(globalData, m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, m_symbolTable, JITCode::bottomTierJIT());
+ ASSERT_UNUSED(result, result);
+}
+#endif
+
FunctionCodeBlock* FunctionExecutable::codeBlockWithBytecodeFor(CodeSpecializationKind kind)
{
FunctionCodeBlock* codeBlock = baselineCodeBlockFor(kind);
@@ -484,7 +536,7 @@ PassOwnPtr<FunctionCodeBlock> FunctionExecutable::produceCodeBlockFor(ScopeChain
JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
- SamplingRegion samplingRegion(jitType == JITCode::BaselineJIT ? "Baseline Compilation (TOTAL)" : "DFG Compilation (TOTAL)");
+ SamplingRegion samplingRegion(samplingDescription(jitType));
#if !ENABLE(JIT)
UNUSED_PARAM(exec);
@@ -506,12 +558,12 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChain
m_symbolTable = m_codeBlockForCall->sharedSymbolTable();
#if ENABLE(JIT)
- if (!jitCompileFunctionIfAppropriate(exec->globalData(), m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, m_symbolTable, jitType))
+ if (!prepareFunctionForExecution(exec->globalData(), m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, m_symbolTable, jitType, CodeForCall))
return 0;
#endif
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (!m_jitCodeForCall)
Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForCall));
else
@@ -526,7 +578,7 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChain
JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
- SamplingRegion samplingRegion(jitType == JITCode::BaselineJIT ? "Baseline Compilation (TOTAL)" : "DFG Compilation (TOTAL)");
+ SamplingRegion samplingRegion(samplingDescription(jitType));
#if !ENABLE(JIT)
UNUSED_PARAM(jitType);
@@ -548,12 +600,12 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, Scope
m_symbolTable = m_codeBlockForConstruct->sharedSymbolTable();
#if ENABLE(JIT)
- if (!jitCompileFunctionIfAppropriate(exec->globalData(), m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, m_symbolTable, jitType))
+ if (!prepareFunctionForExecution(exec->globalData(), m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, m_symbolTable, jitType, CodeForConstruct))
return 0;
#endif
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (!m_jitCodeForConstruct)
Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForConstruct));
else
diff --git a/Source/JavaScriptCore/runtime/Executable.h b/Source/JavaScriptCore/runtime/Executable.h
index 6800b5a82..69e80b28e 100644
--- a/Source/JavaScriptCore/runtime/Executable.h
+++ b/Source/JavaScriptCore/runtime/Executable.h
@@ -27,6 +27,7 @@
#define Executable_h
#include "CallData.h"
+#include "CodeSpecializationKind.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "Nodes.h"
@@ -39,12 +40,12 @@ namespace JSC {
class Debugger;
class EvalCodeBlock;
class FunctionCodeBlock;
+ class LLIntOffsetsExtractor;
class ProgramCodeBlock;
class ScopeChainNode;
struct ExceptionInfo;
- enum CodeSpecializationKind { CodeForCall, CodeForConstruct };
enum CompilationKind { FirstCompilation, OptimizingCompilation };
inline bool isCall(CodeSpecializationKind kind)
@@ -77,7 +78,9 @@ namespace JSC {
public:
typedef JSCell Base;
+#if ENABLE(JIT)
static void destroy(JSCell*);
+#endif
bool isHostFunction() const
{
@@ -197,7 +200,7 @@ namespace JSC {
}
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
static NativeExecutable* create(JSGlobalData& globalData, NativeFunction function, NativeFunction constructor)
{
ASSERT(!globalData.canUseJIT());
@@ -208,7 +211,9 @@ namespace JSC {
}
#endif
+#if ENABLE(JIT)
static void destroy(JSCell*);
+#endif
NativeFunction function() { return m_function; }
NativeFunction constructor() { return m_constructor; }
@@ -233,7 +238,7 @@ namespace JSC {
}
#endif
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
void finishCreation(JSGlobalData& globalData)
{
ASSERT(!globalData.canUseJIT());
@@ -276,7 +281,9 @@ namespace JSC {
{
}
+#if ENABLE(JIT)
static void destroy(JSCell*);
+#endif
const SourceCode& source() { return m_source; }
intptr_t sourceID() const { return m_source.provider()->asID(); }
@@ -319,6 +326,7 @@ namespace JSC {
};
class EvalExecutable : public ScriptExecutable {
+ friend class LLIntOffsetsExtractor;
public:
typedef ScriptExecutable Base;
@@ -338,6 +346,7 @@ namespace JSC {
#if ENABLE(JIT)
void jettisonOptimizedCode(JSGlobalData&);
+ void jitCompile(JSGlobalData&);
#endif
EvalCodeBlock& generatedBytecode()
@@ -384,6 +393,7 @@ namespace JSC {
};
class ProgramExecutable : public ScriptExecutable {
+ friend class LLIntOffsetsExtractor;
public:
typedef ScriptExecutable Base;
@@ -411,6 +421,7 @@ namespace JSC {
#if ENABLE(JIT)
void jettisonOptimizedCode(JSGlobalData&);
+ void jitCompile(JSGlobalData&);
#endif
ProgramCodeBlock& generatedBytecode()
@@ -453,6 +464,7 @@ namespace JSC {
class FunctionExecutable : public ScriptExecutable {
friend class JIT;
+ friend class LLIntOffsetsExtractor;
public:
typedef ScriptExecutable Base;
@@ -508,6 +520,7 @@ namespace JSC {
#if ENABLE(JIT)
void jettisonOptimizedCodeForCall(JSGlobalData&);
+ void jitCompileForCall(JSGlobalData&);
#endif
bool isGeneratedForCall() const
@@ -535,6 +548,7 @@ namespace JSC {
#if ENABLE(JIT)
void jettisonOptimizedCodeForConstruct(JSGlobalData&);
+ void jitCompileForConstruct(JSGlobalData&);
#endif
bool isGeneratedForConstruct() const
@@ -582,6 +596,16 @@ namespace JSC {
jettisonOptimizedCodeForConstruct(globalData);
}
}
+
+ void jitCompileFor(JSGlobalData& globalData, CodeSpecializationKind kind)
+ {
+ if (kind == CodeForCall) {
+ jitCompileForCall(globalData);
+ return;
+ }
+ ASSERT(kind == CodeForConstruct);
+ jitCompileForConstruct(globalData);
+ }
#endif
bool isGeneratedFor(CodeSpecializationKind kind)
diff --git a/Source/JavaScriptCore/runtime/ExecutionHarness.h b/Source/JavaScriptCore/runtime/ExecutionHarness.h
new file mode 100644
index 000000000..774c5bf6b
--- /dev/null
+++ b/Source/JavaScriptCore/runtime/ExecutionHarness.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutionHarness_h
+#define ExecutionHarness_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+#include "JITDriver.h"
+#include "LLIntEntrypoints.h"
+
+namespace JSC {
+
+template<typename CodeBlockType>
+inline bool prepareForExecution(JSGlobalData& globalData, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType)
+{
+#if ENABLE(LLINT)
+ if (JITCode::isBaselineCode(jitType)) {
+ // Start off in the low level interpreter.
+ LLInt::getEntrypoint(globalData, codeBlock.get(), jitCode);
+ codeBlock->setJITCode(jitCode, MacroAssemblerCodePtr());
+ return true;
+ }
+#endif // ENABLE(LLINT)
+ return jitCompileIfAppropriate(globalData, codeBlock, jitCode, jitType);
+}
+
+inline bool prepareFunctionForExecution(JSGlobalData& globalData, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, SharedSymbolTable*& symbolTable, JITCode::JITType jitType, CodeSpecializationKind kind)
+{
+#if ENABLE(LLINT)
+ if (JITCode::isBaselineCode(jitType)) {
+ // Start off in the low level interpreter.
+ LLInt::getFunctionEntrypoint(globalData, kind, jitCode, jitCodeWithArityCheck);
+ codeBlock->setJITCode(jitCode, jitCodeWithArityCheck);
+ return true;
+ }
+#else
+ UNUSED_PARAM(kind);
+#endif // ENABLE(LLINT)
+ return jitCompileFunctionIfAppropriate(globalData, codeBlock, jitCode, jitCodeWithArityCheck, symbolTable, jitType);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // ExecutionHarness_h
+
diff --git a/Source/JavaScriptCore/runtime/JSActivation.h b/Source/JavaScriptCore/runtime/JSActivation.h
index c18492344..80c8aa8d0 100644
--- a/Source/JavaScriptCore/runtime/JSActivation.h
+++ b/Source/JavaScriptCore/runtime/JSActivation.h
@@ -75,6 +75,8 @@ namespace JSC {
static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue proto) { return Structure::create(globalData, globalObject, proto, TypeInfo(ActivationObjectType, StructureFlags), &s_info); }
+ bool isValidScopedLookup(int index) { return index < m_numCapturedVars; }
+
protected:
void finishCreation(CallFrame*);
static const unsigned StructureFlags = IsEnvironmentRecord | OverridesGetOwnPropertySlot | OverridesVisitChildren | OverridesGetPropertyNames | JSVariableObject::StructureFlags;
diff --git a/Source/JavaScriptCore/runtime/JSArray.cpp b/Source/JavaScriptCore/runtime/JSArray.cpp
index c84fb5b10..71d520018 100644
--- a/Source/JavaScriptCore/runtime/JSArray.cpp
+++ b/Source/JavaScriptCore/runtime/JSArray.cpp
@@ -24,8 +24,8 @@
#include "JSArray.h"
#include "ArrayPrototype.h"
-#include "BumpSpace.h"
-#include "BumpSpaceInlineMethods.h"
+#include "CopiedSpace.h"
+#include "CopiedSpaceInlineMethods.h"
#include "CachedCall.h"
#include "Error.h"
#include "Executable.h"
@@ -42,6 +42,7 @@ using namespace WTF;
namespace JSC {
ASSERT_CLASS_FITS_IN_CELL(JSArray);
+ASSERT_HAS_TRIVIAL_DESTRUCTOR(JSArray);
// Overview of JSArray
//
@@ -160,10 +161,6 @@ void JSArray::finishCreation(JSGlobalData& globalData, unsigned initialLength)
m_storage->m_inCompactInitialization = false;
#endif
- WriteBarrier<Unknown>* vector = m_storage->m_vector;
- for (size_t i = 0; i < initialVectorLength; ++i)
- vector[i].clear();
-
checkConsistency();
}
@@ -193,10 +190,6 @@ JSArray* JSArray::tryFinishCreationUninitialized(JSGlobalData& globalData, unsig
m_storage->m_inCompactInitialization = true;
#endif
- WriteBarrier<Unknown>* vector = m_storage->m_vector;
- for (size_t i = initialLength; i < initialVectorLength; ++i)
- vector[i].clear();
-
return this;
}
@@ -222,7 +215,17 @@ inline std::pair<SparseArrayValueMap::iterator, bool> SparseArrayValueMap::add(J
inline void SparseArrayValueMap::put(ExecState* exec, JSArray* array, unsigned i, JSValue value)
{
- SparseArrayEntry& entry = add(array, i).first->second;
+ std::pair<SparseArrayValueMap::iterator, bool> result = add(array, i);
+ SparseArrayEntry& entry = result.first->second;
+
+ // To save a separate find & add, we first always add to the sparse map.
+ // In the uncommon case that this is a new property, and the array is not
+ // extensible, this is not the right thing to have done - so remove again.
+ if (result.second && !array->isExtensible()) {
+ remove(result.first);
+ // FIXME: should throw in strict mode.
+ return;
+ }
if (!(entry.attributes & Accessor)) {
if (entry.attributes & ReadOnly) {
@@ -240,7 +243,8 @@ inline void SparseArrayValueMap::put(ExecState* exec, JSArray* array, unsigned i
JSObject* setter = asGetterSetter(accessor)->setter();
if (!setter) {
- throwTypeError(exec, "setting a property that has only a getter");
+ // FIXME: should throw if being called from strict mode.
+ // throwTypeError(exec, "setting a property that has only a getter");
return;
}
@@ -383,7 +387,7 @@ void JSArray::putDescriptor(ExecState* exec, SparseArrayEntry* entryInMap, Prope
accessor->setSetter(exec->globalData(), setter);
entryInMap->set(exec->globalData(), this, accessor);
- entryInMap->attributes = descriptor.attributesOverridingCurrent(oldDescriptor) & ~DontDelete;
+ entryInMap->attributes = descriptor.attributesOverridingCurrent(oldDescriptor) & ~ReadOnly;
return;
}
@@ -464,7 +468,7 @@ bool JSArray::defineOwnNumericProperty(ExecState* exec, unsigned index, Property
// 7. If the [[Configurable]] field of current is false then
if (!current.configurable()) {
// 7.a. Reject, if the [[Configurable]] field of Desc is true.
- if (descriptor.configurablePresent() && !descriptor.configurable())
+ if (descriptor.configurablePresent() && descriptor.configurable())
return reject(exec, throwException, "Attempting to change configurable attribute of unconfigurable property.");
// 7.b. Reject, if the [[Enumerable]] field of Desc is present and the [[Enumerable]] fields of current and Desc are the Boolean negation of each other.
if (descriptor.enumerablePresent() && current.enumerable() != descriptor.enumerable())
@@ -682,7 +686,7 @@ bool JSArray::getOwnPropertyDescriptor(JSObject* object, ExecState* exec, const
{
JSArray* thisObject = jsCast<JSArray*>(object);
if (propertyName == exec->propertyNames().length) {
- descriptor.setDescriptor(jsNumber(thisObject->length()), DontDelete | DontEnum);
+ descriptor.setDescriptor(jsNumber(thisObject->length()), thisObject->isLengthWritable() ? DontDelete | DontEnum : DontDelete | DontEnum | ReadOnly);
return true;
}
@@ -784,6 +788,9 @@ NEVER_INLINE void JSArray::putByIndexBeyondVectorLength(ExecState* exec, unsigne
// First, handle cases where we don't currently have a sparse map.
if (LIKELY(!map)) {
+ // If the array is not extensible, we should have entered dictionary mode, and created the spare map.
+ ASSERT(isExtensible());
+
// Update m_length if necessary.
if (i >= storage->m_length)
storage->m_length = i + 1;
@@ -807,7 +814,7 @@ NEVER_INLINE void JSArray::putByIndexBeyondVectorLength(ExecState* exec, unsigne
unsigned length = storage->m_length;
if (i >= length) {
// Prohibit growing the array if length is not writable.
- if (map->lengthIsReadOnly()) {
+ if (map->lengthIsReadOnly() || !isExtensible()) {
// FIXME: should throw in strict mode.
return;
}
@@ -977,10 +984,6 @@ bool JSArray::increaseVectorLength(JSGlobalData& globalData, unsigned newLength)
m_storage->m_allocBase = newStorage;
ASSERT(m_storage->m_allocBase);
- WriteBarrier<Unknown>* vector = storage->m_vector;
- for (unsigned i = vectorLength; i < newVectorLength; ++i)
- vector[i].clear();
-
m_vectorLength = newVectorLength;
return true;
@@ -1000,10 +1003,8 @@ bool JSArray::increaseVectorLength(JSGlobalData& globalData, unsigned newLength)
m_indexBias = newIndexBias;
m_storage = reinterpret_cast_ptr<ArrayStorage*>(reinterpret_cast<WriteBarrier<Unknown>*>(newAllocBase) + m_indexBias);
- // Copy the ArrayStorage header & current contents of the vector, clear the new post-capacity.
+ // Copy the ArrayStorage header & current contents of the vector.
memmove(m_storage, storage, storageSize(vectorLength));
- for (unsigned i = vectorLength; i < m_vectorLength; ++i)
- m_storage->m_vector[i].clear();
// Free the old allocation, update m_allocBase.
m_storage->m_allocBase = newAllocBase;
@@ -1086,13 +1087,6 @@ bool JSArray::unshiftCountSlowCase(JSGlobalData& globalData, unsigned count)
if (newAllocBase != m_storage->m_allocBase) {
// Free the old allocation, update m_allocBase.
m_storage->m_allocBase = newAllocBase;
-
- // We need to clear any entries in the vector beyond length. We only need to
- // do this if this was a new allocation, because if we're using an existing
- // allocation the post-capacity will already be cleared, and in an existing
- // allocation we can only beshrinking the amount of post capacity.
- for (unsigned i = requiredVectorLength; i < m_vectorLength; ++i)
- m_storage->m_vector[i].clear();
}
return true;
@@ -1169,7 +1163,6 @@ bool JSArray::setLength(ExecState* exec, unsigned newLength, bool throwException
JSValue JSArray::pop(ExecState* exec)
{
checkConsistency();
-
ArrayStorage* storage = m_storage;
unsigned length = storage->m_length;
@@ -1179,47 +1172,32 @@ JSValue JSArray::pop(ExecState* exec)
return jsUndefined();
}
- --length;
-
- JSValue result;
-
- if (length < m_vectorLength) {
- WriteBarrier<Unknown>& valueSlot = storage->m_vector[length];
+ unsigned index = length - 1;
+ if (index < m_vectorLength) {
+ WriteBarrier<Unknown>& valueSlot = storage->m_vector[index];
if (valueSlot) {
--storage->m_numValuesInVector;
- result = valueSlot.get();
+ JSValue element = valueSlot.get();
valueSlot.clear();
- } else
- result = jsUndefined();
- } else {
- result = jsUndefined();
- if (SparseArrayValueMap* map = m_sparseValueMap) {
- SparseArrayValueMap::iterator it = map->find(length);
- if (it != map->notFound()) {
- unsigned attributes = it->second.attributes;
-
- result = it->second.get(exec, this);
- if (exec->hadException())
- return jsUndefined();
-
- if (attributes & DontDelete) {
- throwError(exec, createTypeError(exec, "Unable to delete property."));
- checkConsistency();
- return result;
- }
-
- map->remove(it);
- if (map->isEmpty() && !map->sparseMode())
- deallocateSparseMap();
- }
+
+ ASSERT(isLengthWritable());
+ storage->m_length = index;
+ checkConsistency();
+ return element;
}
}
- storage->m_length = length;
-
+ // Let element be the result of calling the [[Get]] internal method of O with argument indx.
+ JSValue element = get(exec, index);
+ if (exec->hadException())
+ return jsUndefined();
+ // Call the [[Delete]] internal method of O with arguments indx and true.
+ deletePropertyByIndex(this, exec, index);
+ // Call the [[Put]] internal method of O with arguments "length", indx, and true.
+ setLength(exec, index, true);
+ // Return element.
checkConsistency();
-
- return result;
+ return element;
}
// Push & putIndex are almost identical, with two small differences.
diff --git a/Source/JavaScriptCore/runtime/JSArray.h b/Source/JavaScriptCore/runtime/JSArray.h
index a3354c602..3bb4c6320 100644
--- a/Source/JavaScriptCore/runtime/JSArray.h
+++ b/Source/JavaScriptCore/runtime/JSArray.h
@@ -28,6 +28,7 @@
namespace JSC {
class JSArray;
+ class LLIntOffsetsExtractor;
struct SparseArrayEntry : public WriteBarrier<Unknown> {
typedef WriteBarrier<Unknown> Base;
@@ -116,12 +117,15 @@ namespace JSC {
unsigned m_numValuesInVector;
void* m_allocBase; // Pointer to base address returned by malloc(). Keeping this pointer does eliminate false positives from the leak detector.
#if CHECK_ARRAY_CONSISTENCY
- bool m_inCompactInitialization;
+ uintptr_t m_inCompactInitialization; // Needs to be a uintptr_t for alignment purposes.
+#else
+ uintptr_t m_padding;
#endif
WriteBarrier<Unknown> m_vector[1];
};
class JSArray : public JSNonFinalObject {
+ friend class LLIntOffsetsExtractor;
friend class Walker;
protected:
@@ -135,23 +139,14 @@ namespace JSC {
static void finalize(JSCell*);
- static JSArray* create(JSGlobalData& globalData, Structure* structure, unsigned initialLength = 0)
- {
- JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure);
- array->finishCreation(globalData, initialLength);
- return array;
- }
+ static JSArray* create(JSGlobalData&, Structure*, unsigned initialLength = 0);
// tryCreateUninitialized is used for fast construction of arrays whose size and
// contents are known at time of creation. Clients of this interface must:
// - null-check the result (indicating out of memory, or otherwise unable to allocate vector).
// - call 'initializeIndex' for all properties in sequence, for 0 <= i < initialLength.
// - called 'completeInitialization' after all properties have been initialized.
- static JSArray* tryCreateUninitialized(JSGlobalData& globalData, Structure* structure, unsigned initialLength)
- {
- JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure);
- return array->tryFinishCreationUninitialized(globalData, initialLength);
- }
+ static JSArray* tryCreateUninitialized(JSGlobalData&, Structure*, unsigned initialLength);
JS_EXPORT_PRIVATE static bool defineOwnProperty(JSObject*, ExecState*, const Identifier&, PropertyDescriptor&, bool throwException);
@@ -253,6 +248,8 @@ namespace JSC {
JS_EXPORT_PRIVATE static void visitChildren(JSCell*, SlotVisitor&);
+ void enterDictionaryMode(JSGlobalData&);
+
protected:
static const unsigned StructureFlags = OverridesGetOwnPropertySlot | OverridesVisitChildren | OverridesGetPropertyNames | JSObject::StructureFlags;
static void put(JSCell*, ExecState*, const Identifier& propertyName, JSValue, PutPropertySlot&);
@@ -274,7 +271,6 @@ namespace JSC {
void setLengthWritable(ExecState*, bool writable);
void putDescriptor(ExecState*, SparseArrayEntry*, PropertyDescriptor&, PropertyDescriptor& old);
bool defineOwnNumericProperty(ExecState*, unsigned, PropertyDescriptor&, bool throwException);
- void enterDictionaryMode(JSGlobalData&);
void allocateSparseMap(JSGlobalData&);
void deallocateSparseMap();
@@ -299,6 +295,19 @@ namespace JSC {
void* m_subclassData; // A JSArray subclass can use this to fill the vector lazily.
};
+ inline JSArray* JSArray::create(JSGlobalData& globalData, Structure* structure, unsigned initialLength)
+ {
+ JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure);
+ array->finishCreation(globalData, initialLength);
+ return array;
+ }
+
+ inline JSArray* JSArray::tryCreateUninitialized(JSGlobalData& globalData, Structure* structure, unsigned initialLength)
+ {
+ JSArray* array = new (NotNull, allocateCell<JSArray>(globalData.heap)) JSArray(globalData, structure);
+ return array->tryFinishCreationUninitialized(globalData, initialLength);
+ }
+
JSArray* asArray(JSValue);
inline JSArray* asArray(JSCell* cell)
diff --git a/Source/JavaScriptCore/runtime/JSCell.h b/Source/JavaScriptCore/runtime/JSCell.h
index 74833d12f..78d2d0801 100644
--- a/Source/JavaScriptCore/runtime/JSCell.h
+++ b/Source/JavaScriptCore/runtime/JSCell.h
@@ -36,9 +36,10 @@
namespace JSC {
class JSGlobalObject;
- class Structure;
+ class LLIntOffsetsExtractor;
class PropertyDescriptor;
class PropertyNameArray;
+ class Structure;
enum EnumerationMode {
ExcludeDontEnumProperties,
@@ -61,6 +62,7 @@ namespace JSC {
class JSCell {
friend class JSValue;
friend class MarkedBlock;
+ template<typename T> friend void* allocateCell(Heap&);
public:
enum CreatingEarlyCellTag { CreatingEarlyCell };
@@ -162,6 +164,8 @@ namespace JSC {
static bool getOwnPropertyDescriptor(JSObject*, ExecState*, const Identifier&, PropertyDescriptor&);
private:
+ friend class LLIntOffsetsExtractor;
+
const ClassInfo* m_classInfo;
WriteBarrier<Structure> m_structure;
};
@@ -307,14 +311,34 @@ namespace JSC {
return isCell() ? asCell()->toObject(exec, globalObject) : toObjectSlowCase(exec, globalObject);
}
- template <typename T> void* allocateCell(Heap& heap)
+#if COMPILER(CLANG)
+ template<class T>
+ struct NeedsDestructor {
+ static const bool value = !__has_trivial_destructor(T);
+ };
+#else
+ // Write manual specializations for this struct template if you care about non-clang compilers.
+ template<class T>
+ struct NeedsDestructor {
+ static const bool value = true;
+ };
+#endif
+
+ template<typename T>
+ void* allocateCell(Heap& heap)
{
#if ENABLE(GC_VALIDATION)
ASSERT(sizeof(T) == T::s_info.cellSize);
ASSERT(!heap.globalData()->isInitializingObject());
heap.globalData()->setInitializingObject(true);
#endif
- JSCell* result = static_cast<JSCell*>(heap.allocate(sizeof(T)));
+ JSCell* result = 0;
+ if (NeedsDestructor<T>::value)
+ result = static_cast<JSCell*>(heap.allocateWithDestructor(sizeof(T)));
+ else {
+ ASSERT(T::s_info.methodTable.destroy == JSCell::destroy);
+ result = static_cast<JSCell*>(heap.allocateWithoutDestructor(sizeof(T)));
+ }
result->clearStructure();
return result;
}
diff --git a/Source/JavaScriptCore/runtime/JSFunction.cpp b/Source/JavaScriptCore/runtime/JSFunction.cpp
index 72e1ce14f..253128279 100644
--- a/Source/JavaScriptCore/runtime/JSFunction.cpp
+++ b/Source/JavaScriptCore/runtime/JSFunction.cpp
@@ -50,6 +50,7 @@ EncodedJSValue JSC_HOST_CALL callHostFunctionAsConstructor(ExecState* exec)
}
ASSERT_CLASS_FITS_IN_CELL(JSFunction);
+ASSERT_HAS_TRIVIAL_DESTRUCTOR(JSFunction);
const ClassInfo JSFunction::s_info = { "Function", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(JSFunction) };
@@ -108,13 +109,6 @@ void JSFunction::finishCreation(ExecState* exec, FunctionExecutable* executable,
putDirectOffset(exec->globalData(), scopeChainNode->globalObject->functionNameOffset(), executable->nameValue());
}
-void JSFunction::destroy(JSCell* cell)
-{
- JSFunction* thisObject = jsCast<JSFunction*>(cell);
- ASSERT(thisObject->classInfo()->isSubClassOf(&JSFunction::s_info));
- thisObject->JSFunction::~JSFunction();
-}
-
const UString& JSFunction::name(ExecState* exec)
{
return asString(getDirect(exec->globalData(), exec->globalData().propertyNames->name))->tryGetValue();
diff --git a/Source/JavaScriptCore/runtime/JSFunction.h b/Source/JavaScriptCore/runtime/JSFunction.h
index a12b079d7..6e8557f59 100644
--- a/Source/JavaScriptCore/runtime/JSFunction.h
+++ b/Source/JavaScriptCore/runtime/JSFunction.h
@@ -33,6 +33,7 @@ namespace JSC {
class FunctionPrototype;
class JSActivation;
class JSGlobalObject;
+ class LLIntOffsetsExtractor;
class NativeExecutable;
class SourceCode;
namespace DFG {
@@ -64,8 +65,6 @@ namespace JSC {
return function;
}
- static void destroy(JSCell*);
-
JS_EXPORT_PRIVATE const UString& name(ExecState*);
JS_EXPORT_PRIVATE const UString displayName(ExecState*);
const UString calculatedDisplayName(ExecState*);
@@ -142,6 +141,8 @@ namespace JSC {
static void visitChildren(JSCell*, SlotVisitor&);
private:
+ friend class LLIntOffsetsExtractor;
+
JS_EXPORT_PRIVATE bool isHostFunctionNonInline() const;
static JSValue argumentsGetter(ExecState*, JSValue, const Identifier&);
diff --git a/Source/JavaScriptCore/runtime/JSGlobalData.cpp b/Source/JavaScriptCore/runtime/JSGlobalData.cpp
index bbe520a1e..2bdc28ab7 100644
--- a/Source/JavaScriptCore/runtime/JSGlobalData.cpp
+++ b/Source/JavaScriptCore/runtime/JSGlobalData.cpp
@@ -35,6 +35,7 @@
#include "DebuggerActivation.h"
#include "FunctionConstructor.h"
#include "GetterSetter.h"
+#include "HostCallReturnValue.h"
#include "Interpreter.h"
#include "JSActivation.h"
#include "JSAPIValueWrapper.h"
@@ -141,6 +142,8 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread
, keywords(adoptPtr(new Keywords(this)))
, interpreter(0)
, heap(this, heapSize)
+ , jsArrayClassInfo(&JSArray::s_info)
+ , jsFinalObjectClassInfo(&JSFinalObject::s_info)
#if ENABLE(DFG_JIT)
, sizeOfLastScratchBuffer(0)
#endif
@@ -160,6 +163,7 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread
#if ENABLE(GC_VALIDATION)
, m_isInitializingObject(false)
#endif
+ , m_inDefineOwnProperty(false)
{
interpreter = new Interpreter;
@@ -189,7 +193,7 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread
wtfThreadData().setCurrentIdentifierTable(existingEntryIdentifierTable);
-#if ENABLE(JIT) && ENABLE(INTERPRETER)
+#if ENABLE(JIT) && ENABLE(CLASSIC_INTERPRETER)
#if USE(CF)
CFStringRef canUseJITKey = CFStringCreateWithCString(0 , "JavaScriptCoreUseJIT", kCFStringEncodingMacRoman);
CFBooleanRef canUseJIT = (CFBooleanRef)CFPreferencesCopyAppValue(canUseJITKey, kCFPreferencesCurrentApplication);
@@ -209,16 +213,20 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread
#endif
#endif
#if ENABLE(JIT)
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (m_canUseJIT)
m_canUseJIT = executableAllocator.isValid();
#endif
jitStubs = adoptPtr(new JITThunks(this));
#endif
- interpreter->initialize(this->canUseJIT());
+ interpreter->initialize(&llintData, this->canUseJIT());
+
+ initializeHostCallReturnValue(); // This is needed to convince the linker not to drop host call return support.
heap.notifyIsSafeToCollect();
+
+ llintData.performAssertions(*this);
}
void JSGlobalData::clearBuiltinStructures()
@@ -383,7 +391,7 @@ static ThunkGenerator thunkGeneratorForIntrinsic(Intrinsic intrinsic)
NativeExecutable* JSGlobalData::getHostFunction(NativeFunction function, NativeFunction constructor)
{
-#if ENABLE(INTERPRETER)
+#if ENABLE(CLASSIC_INTERPRETER)
if (!canUseJIT())
return NativeExecutable::create(*this, function, constructor);
#endif
@@ -502,17 +510,17 @@ void JSGlobalData::dumpRegExpTrace()
RTTraceList::iterator iter = ++m_rtTraceList->begin();
if (iter != m_rtTraceList->end()) {
- printf("\nRegExp Tracing\n");
- printf(" match() matches\n");
- printf("Regular Expression JIT Address calls found\n");
- printf("----------------------------------------+----------------+----------+----------\n");
+ dataLog("\nRegExp Tracing\n");
+ dataLog(" match() matches\n");
+ dataLog("Regular Expression JIT Address calls found\n");
+ dataLog("----------------------------------------+----------------+----------+----------\n");
unsigned reCount = 0;
for (; iter != m_rtTraceList->end(); ++iter, ++reCount)
(*iter)->printTraceData();
- printf("%d Regular Expressions\n", reCount);
+ dataLog("%d Regular Expressions\n", reCount);
}
m_rtTraceList->clear();
diff --git a/Source/JavaScriptCore/runtime/JSGlobalData.h b/Source/JavaScriptCore/runtime/JSGlobalData.h
index 92817f2a2..7e54c00db 100644
--- a/Source/JavaScriptCore/runtime/JSGlobalData.h
+++ b/Source/JavaScriptCore/runtime/JSGlobalData.h
@@ -30,15 +30,16 @@
#define JSGlobalData_h
#include "CachedTranscendentalFunction.h"
-#include "Intrinsic.h"
#include "DateInstanceCache.h"
#include "ExecutableAllocator.h"
#include "Heap.h"
-#include "Strong.h"
+#include "Intrinsic.h"
#include "JITStubs.h"
#include "JSValue.h"
+#include "LLIntData.h"
#include "NumericStrings.h"
#include "SmallStrings.h"
+#include "Strong.h"
#include "Terminator.h"
#include "TimeoutChecker.h"
#include "WeakRandom.h"
@@ -65,6 +66,7 @@ namespace JSC {
class JSGlobalObject;
class JSObject;
class Keywords;
+ class LLIntOffsetsExtractor;
class NativeExecutable;
class ParserArena;
class RegExpCache;
@@ -211,13 +213,23 @@ namespace JSC {
codeBlocksBeingCompiled.removeLast();
}
+ void setInDefineOwnProperty(bool inDefineOwnProperty)
+ {
+ m_inDefineOwnProperty = inDefineOwnProperty;
+ }
+
+ bool isInDefineOwnProperty()
+ {
+ return m_inDefineOwnProperty;
+ }
+
#if ENABLE(ASSEMBLER)
ExecutableAllocator executableAllocator;
#endif
#if !ENABLE(JIT)
bool canUseJIT() { return false; } // interpreter only
-#elif !ENABLE(INTERPRETER)
+#elif !ENABLE(CLASSIC_INTERPRETER)
bool canUseJIT() { return true; } // jit only
#else
bool canUseJIT() { return m_canUseJIT; }
@@ -241,7 +253,12 @@ namespace JSC {
Heap heap;
JSValue exception;
-#if ENABLE(JIT)
+
+ const ClassInfo* const jsArrayClassInfo;
+ const ClassInfo* const jsFinalObjectClassInfo;
+
+ LLInt::Data llintData;
+
ReturnAddressPtr exceptionLocation;
JSValue hostCallReturnValue;
CallFrame* callFrameForThrow;
@@ -271,7 +288,6 @@ namespace JSC {
return scratchBuffers.last();
}
#endif
-#endif
HashMap<OpaqueJSClass*, OwnPtr<OpaqueJSClassContextData> > opaqueJSClassData;
@@ -332,7 +348,7 @@ namespace JSC {
ASSERT(!m_##type##ArrayDescriptor.m_classInfo || m_##type##ArrayDescriptor.m_classInfo == descriptor.m_classInfo); \
m_##type##ArrayDescriptor = descriptor; \
} \
- const TypedArrayDescriptor& type##ArrayDescriptor() const { return m_##type##ArrayDescriptor; }
+ const TypedArrayDescriptor& type##ArrayDescriptor() const { ASSERT(m_##type##ArrayDescriptor.m_classInfo); return m_##type##ArrayDescriptor; }
registerTypedArrayFunction(int8, Int8);
registerTypedArrayFunction(int16, Int16);
@@ -346,15 +362,19 @@ namespace JSC {
#undef registerTypedArrayFunction
private:
+ friend class LLIntOffsetsExtractor;
+
JSGlobalData(GlobalDataType, ThreadStackType, HeapSize);
static JSGlobalData*& sharedInstanceInternal();
void createNativeThunk();
-#if ENABLE(JIT) && ENABLE(INTERPRETER)
+#if ENABLE(JIT) && ENABLE(CLASSIC_INTERPRETER)
bool m_canUseJIT;
#endif
#if ENABLE(GC_VALIDATION)
bool m_isInitializingObject;
#endif
+ bool m_inDefineOwnProperty;
+
TypedArrayDescriptor m_int8ArrayDescriptor;
TypedArrayDescriptor m_int16ArrayDescriptor;
TypedArrayDescriptor m_int32ArrayDescriptor;
diff --git a/Source/JavaScriptCore/runtime/JSGlobalObject.cpp b/Source/JavaScriptCore/runtime/JSGlobalObject.cpp
index e648fbe21..8d3975848 100644
--- a/Source/JavaScriptCore/runtime/JSGlobalObject.cpp
+++ b/Source/JavaScriptCore/runtime/JSGlobalObject.cpp
@@ -78,7 +78,7 @@ namespace JSC {
const ClassInfo JSGlobalObject::s_info = { "GlobalObject", &JSVariableObject::s_info, 0, ExecState::globalObjectTable, CREATE_METHOD_TABLE(JSGlobalObject) };
-const GlobalObjectMethodTable JSGlobalObject::s_globalObjectMethodTable = { &supportsProfiling, &supportsRichSourceInfo, &shouldInterruptScript };
+const GlobalObjectMethodTable JSGlobalObject::s_globalObjectMethodTable = { &allowsAccessFrom, &supportsProfiling, &supportsRichSourceInfo, &shouldInterruptScript };
/* Source for JSGlobalObject.lut.h
@begin globalObjectTable
@@ -205,6 +205,10 @@ void JSGlobalObject::reset(JSValue prototype)
m_callFunction.set(exec->globalData(), this, callFunction);
m_applyFunction.set(exec->globalData(), this, applyFunction);
m_objectPrototype.set(exec->globalData(), this, ObjectPrototype::create(exec, this, ObjectPrototype::createStructure(exec->globalData(), this, jsNull())));
+ GetterSetter* protoAccessor = GetterSetter::create(exec);
+ protoAccessor->setGetter(exec->globalData(), JSFunction::create(exec, this, 0, Identifier(), globalFuncProtoGetter));
+ protoAccessor->setSetter(exec->globalData(), JSFunction::create(exec, this, 0, Identifier(), globalFuncProtoSetter));
+ m_objectPrototype->putDirectAccessor(exec->globalData(), exec->propertyNames().underscoreProto, protoAccessor, Accessor | DontEnum);
m_functionPrototype->structure()->setPrototypeWithoutTransition(exec->globalData(), m_objectPrototype.get());
m_emptyObjectStructure.set(exec->globalData(), this, m_objectPrototype->inheritorID(exec->globalData()));
diff --git a/Source/JavaScriptCore/runtime/JSGlobalObject.h b/Source/JavaScriptCore/runtime/JSGlobalObject.h
index b67ccb764..cbc436e1a 100644
--- a/Source/JavaScriptCore/runtime/JSGlobalObject.h
+++ b/Source/JavaScriptCore/runtime/JSGlobalObject.h
@@ -44,6 +44,7 @@ namespace JSC {
class FunctionPrototype;
class GetterSetter;
class GlobalCodeBlock;
+ class LLIntOffsetsExtractor;
class NativeErrorConstructor;
class ProgramCodeBlock;
class RegExpConstructor;
@@ -56,6 +57,9 @@ namespace JSC {
typedef Vector<ExecState*, 16> ExecStateStack;
struct GlobalObjectMethodTable {
+ typedef bool (*AllowsAccessFromFunctionPtr)(const JSGlobalObject*, ExecState*);
+ AllowsAccessFromFunctionPtr allowsAccessFrom;
+
typedef bool (*SupportsProfilingFunctionPtr)(const JSGlobalObject*);
SupportsProfilingFunctionPtr supportsProfiling;
@@ -279,6 +283,7 @@ namespace JSC {
const GlobalObjectMethodTable* globalObjectMethodTable() const { return m_globalObjectMethodTable; }
+ static bool allowsAccessFrom(const JSGlobalObject*, ExecState*) { return true; }
static bool supportsProfiling(const JSGlobalObject*) { return false; }
static bool supportsRichSourceInfo(const JSGlobalObject*) { return true; }
@@ -336,6 +341,8 @@ namespace JSC {
JS_EXPORT_PRIVATE void addStaticGlobals(GlobalPropertyInfo*, int count);
private:
+ friend class LLIntOffsetsExtractor;
+
// FIXME: Fold reset into init.
JS_EXPORT_PRIVATE void init(JSObject* thisValue);
void reset(JSValue prototype);
diff --git a/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp b/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp
index b82ab62ab..db8ee1d85 100644
--- a/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp
+++ b/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.cpp
@@ -297,7 +297,7 @@ static double parseInt(const UString& s, const CharType* data, int radix)
}
if (number >= mantissaOverflowLowerBound) {
if (radix == 10)
- number = WTF::strtod(s.substringSharingImpl(firstDigitPosition, p - firstDigitPosition).utf8().data(), 0);
+ number = WTF::strtod<WTF::AllowTrailingJunk>(s.substringSharingImpl(firstDigitPosition, p - firstDigitPosition).utf8().data(), 0);
else if (radix == 2 || radix == 4 || radix == 8 || radix == 16 || radix == 32)
number = parseIntOverflow(s.substringSharingImpl(firstDigitPosition, p - firstDigitPosition).utf8().data(), p - firstDigitPosition, radix);
}
@@ -369,7 +369,7 @@ static double jsStrDecimalLiteral(const CharType*& data, const CharType* end)
}
byteBuffer.append(0);
char* endOfNumber;
- double number = WTF::strtod(byteBuffer.data(), &endOfNumber);
+ double number = WTF::strtod<WTF::AllowTrailingJunk>(byteBuffer.data(), &endOfNumber);
// Check if strtod found a number; if so return it.
ptrdiff_t consumed = endOfNumber - byteBuffer.data();
@@ -714,4 +714,40 @@ EncodedJSValue JSC_HOST_CALL globalFuncThrowTypeError(ExecState* exec)
return throwVMTypeError(exec);
}
+EncodedJSValue JSC_HOST_CALL globalFuncProtoGetter(ExecState* exec)
+{
+ if (!exec->thisValue().isObject())
+ return JSValue::encode(exec->thisValue().synthesizePrototype(exec));
+
+ JSObject* thisObject = asObject(exec->thisValue());
+ if (!thisObject->allowsAccessFrom(exec->trueCallerFrame()))
+ return JSValue::encode(jsUndefined());
+
+ return JSValue::encode(thisObject->prototype());
+}
+
+EncodedJSValue JSC_HOST_CALL globalFuncProtoSetter(ExecState* exec)
+{
+ JSValue value = exec->argument(0);
+
+ // Setting __proto__ of a primitive should have no effect.
+ if (!exec->thisValue().isObject())
+ return JSValue::encode(jsUndefined());
+
+ JSObject* thisObject = asObject(exec->thisValue());
+ if (!thisObject->allowsAccessFrom(exec->trueCallerFrame()))
+ return JSValue::encode(jsUndefined());
+
+ // Setting __proto__ to a non-object, non-null value is silently ignored to match Mozilla.
+ if (!value.isObject() && !value.isNull())
+ return JSValue::encode(jsUndefined());
+
+ if (!thisObject->isExtensible())
+ return throwVMError(exec, createTypeError(exec, StrictModeReadonlyPropertyWriteError));
+
+ if (!thisObject->setPrototypeWithCycleCheck(exec->globalData(), value))
+ throwError(exec, createError(exec, "cyclic __proto__ value"));
+ return JSValue::encode(jsUndefined());
+}
+
} // namespace JSC
diff --git a/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.h b/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.h
index 1183dfac5..8833bf6d0 100644
--- a/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.h
+++ b/Source/JavaScriptCore/runtime/JSGlobalObjectFunctions.h
@@ -48,6 +48,8 @@ namespace JSC {
EncodedJSValue JSC_HOST_CALL globalFuncEscape(ExecState*);
EncodedJSValue JSC_HOST_CALL globalFuncUnescape(ExecState*);
EncodedJSValue JSC_HOST_CALL globalFuncThrowTypeError(ExecState*);
+ EncodedJSValue JSC_HOST_CALL globalFuncProtoGetter(ExecState*);
+ EncodedJSValue JSC_HOST_CALL globalFuncProtoSetter(ExecState*);
static const double mantissaOverflowLowerBound = 9007199254740992.0;
double parseIntOverflow(const LChar*, int length, int radix);
diff --git a/Source/JavaScriptCore/runtime/JSObject.cpp b/Source/JavaScriptCore/runtime/JSObject.cpp
index ba2d2a52a..acc4a181e 100644
--- a/Source/JavaScriptCore/runtime/JSObject.cpp
+++ b/Source/JavaScriptCore/runtime/JSObject.cpp
@@ -24,7 +24,7 @@
#include "config.h"
#include "JSObject.h"
-#include "BumpSpaceInlineMethods.h"
+#include "CopiedSpaceInlineMethods.h"
#include "DatePrototype.h"
#include "ErrorConstructor.h"
#include "GetterSetter.h"
@@ -48,6 +48,7 @@ ASSERT_CLASS_FITS_IN_CELL(JSNonFinalObject);
ASSERT_CLASS_FITS_IN_CELL(JSFinalObject);
ASSERT_HAS_TRIVIAL_DESTRUCTOR(JSObject);
+ASSERT_HAS_TRIVIAL_DESTRUCTOR(JSFinalObject);
const char* StrictModeReadonlyPropertyWriteError = "Attempted to assign to readonly property.";
@@ -55,16 +56,6 @@ const ClassInfo JSObject::s_info = { "Object", 0, 0, 0, CREATE_METHOD_TABLE(JSOb
const ClassInfo JSFinalObject::s_info = { "Object", &Base::s_info, 0, 0, CREATE_METHOD_TABLE(JSFinalObject) };
-void JSFinalObject::destroy(JSCell* cell)
-{
- jsCast<JSFinalObject*>(cell)->JSFinalObject::~JSFinalObject();
-}
-
-void JSNonFinalObject::destroy(JSCell* cell)
-{
- jsCast<JSNonFinalObject*>(cell)->JSNonFinalObject::~JSNonFinalObject();
-}
-
static inline void getClassPropertyNames(ExecState* exec, const ClassInfo* classInfo, PropertyNameArray& propertyNames, EnumerationMode mode)
{
// Add properties from the static hashtables of properties
@@ -84,11 +75,6 @@ static inline void getClassPropertyNames(ExecState* exec, const ClassInfo* class
}
}
-void JSObject::destroy(JSCell* cell)
-{
- jsCast<JSObject*>(cell)->JSObject::~JSObject();
-}
-
void JSObject::visitChildren(JSCell* cell, SlotVisitor& visitor)
{
JSObject* thisObject = jsCast<JSObject*>(cell);
@@ -146,47 +132,36 @@ void JSObject::put(JSCell* cell, ExecState* exec, const Identifier& propertyName
ASSERT(!Heap::heap(value) || Heap::heap(value) == Heap::heap(thisObject));
JSGlobalData& globalData = exec->globalData();
- if (propertyName == exec->propertyNames().underscoreProto) {
- // Setting __proto__ to a non-object, non-null value is silently ignored to match Mozilla.
- if (!value.isObject() && !value.isNull())
- return;
-
- if (!thisObject->isExtensible()) {
- if (slot.isStrictMode())
- throwTypeError(exec, StrictModeReadonlyPropertyWriteError);
- return;
- }
-
- if (!thisObject->setPrototypeWithCycleCheck(globalData, value))
- throwError(exec, createError(exec, "cyclic __proto__ value"));
- return;
- }
-
// Check if there are any setters or getters in the prototype chain
JSValue prototype;
- for (JSObject* obj = thisObject; !obj->structure()->hasGetterSetterProperties(); obj = asObject(prototype)) {
- prototype = obj->prototype();
- if (prototype.isNull()) {
- if (!thisObject->putDirectInternal<PutModePut>(globalData, propertyName, value, 0, slot, getJSFunction(value)) && slot.isStrictMode())
- throwTypeError(exec, StrictModeReadonlyPropertyWriteError);
- return;
+ if (propertyName != exec->propertyNames().underscoreProto) {
+ for (JSObject* obj = thisObject; !obj->structure()->hasReadOnlyOrGetterSetterPropertiesExcludingProto(); obj = asObject(prototype)) {
+ prototype = obj->prototype();
+ if (prototype.isNull()) {
+ if (!thisObject->putDirectInternal<PutModePut>(globalData, propertyName, value, 0, slot, getJSFunction(value)) && slot.isStrictMode())
+ throwTypeError(exec, StrictModeReadonlyPropertyWriteError);
+ return;
+ }
}
}
-
- unsigned attributes;
- JSCell* specificValue;
- if ((thisObject->structure()->get(globalData, propertyName, attributes, specificValue) != WTF::notFound) && attributes & ReadOnly) {
- if (slot.isStrictMode())
- throwError(exec, createTypeError(exec, StrictModeReadonlyPropertyWriteError));
- return;
- }
for (JSObject* obj = thisObject; ; obj = asObject(prototype)) {
- if (JSValue gs = obj->getDirect(globalData, propertyName)) {
+ unsigned attributes;
+ JSCell* specificValue;
+ size_t offset = obj->structure()->get(globalData, propertyName, attributes, specificValue);
+ if (offset != WTF::notFound) {
+ if (attributes & ReadOnly) {
+ if (slot.isStrictMode())
+ throwError(exec, createTypeError(exec, StrictModeReadonlyPropertyWriteError));
+ return;
+ }
+
+ JSValue gs = obj->getDirectOffset(offset);
if (gs.isGetterSetter()) {
JSObject* setterFunc = asGetterSetter(gs)->setter();
if (!setterFunc) {
- throwSetterError(exec);
+ if (slot.isStrictMode())
+ throwSetterError(exec);
return;
}
@@ -229,10 +204,31 @@ void JSObject::putDirectVirtual(JSObject* object, ExecState* exec, const Identif
object->putDirectInternal<PutModeDefineOwnProperty>(exec->globalData(), propertyName, value, attributes, slot, getJSFunction(value));
}
+bool JSObject::setPrototypeWithCycleCheck(JSGlobalData& globalData, JSValue prototype)
+{
+ JSValue checkFor = this;
+ if (this->isGlobalObject())
+ checkFor = static_cast<JSGlobalObject*>(this)->globalExec()->thisValue();
+
+ JSValue nextPrototype = prototype;
+ while (nextPrototype && nextPrototype.isObject()) {
+ if (nextPrototype == checkFor)
+ return false;
+ nextPrototype = asObject(nextPrototype)->prototype();
+ }
+ setPrototype(globalData, prototype);
+ return true;
+}
+
+bool JSObject::allowsAccessFrom(ExecState* exec)
+{
+ JSGlobalObject* globalObject = isGlobalThis() ? static_cast<JSGlobalThis*>(this)->unwrappedObject() : this->globalObject();
+ return globalObject->globalObjectMethodTable()->allowsAccessFrom(globalObject, exec);
+}
+
void JSObject::putDirectAccessor(JSGlobalData& globalData, const Identifier& propertyName, JSValue value, unsigned attributes)
{
ASSERT(value.isGetterSetter() && (attributes & Accessor));
- ASSERT(propertyName != globalData.propertyNames->underscoreProto);
PutPropertySlot slot;
putDirectInternal<PutModeDefineOwnProperty>(globalData, propertyName, value, attributes, slot, getJSFunction(value));
@@ -243,7 +239,10 @@ void JSObject::putDirectAccessor(JSGlobalData& globalData, const Identifier& pro
if (slot.type() != PutPropertySlot::NewProperty)
setStructure(globalData, Structure::attributeChangeTransition(globalData, structure(), propertyName, attributes));
- structure()->setHasGetterSetterProperties(true);
+ if (attributes & ReadOnly)
+ structure()->setContainsReadOnlyProperties();
+
+ structure()->setHasGetterSetterProperties(propertyName == globalData.propertyNames->underscoreProto);
}
bool JSObject::hasProperty(ExecState* exec, const Identifier& propertyName) const
@@ -269,7 +268,7 @@ bool JSObject::deleteProperty(JSCell* cell, ExecState* exec, const Identifier& p
unsigned attributes;
JSCell* specificValue;
if (thisObject->structure()->get(exec->globalData(), propertyName, attributes, specificValue) != WTF::notFound) {
- if ((attributes & DontDelete))
+ if (attributes & DontDelete && !exec->globalData().isInDefineOwnProperty())
return false;
thisObject->removeDirect(exec->globalData(), propertyName);
return true;
@@ -277,7 +276,7 @@ bool JSObject::deleteProperty(JSCell* cell, ExecState* exec, const Identifier& p
// Look in the static hashtable of properties
const HashEntry* entry = thisObject->findPropertyHashEntry(exec, propertyName);
- if (entry && entry->attributes() & DontDelete)
+ if (entry && entry->attributes() & DontDelete && !exec->globalData().isInDefineOwnProperty())
return false; // this builtin property can't be deleted
// FIXME: Should the code here actually do some deletion?
@@ -479,6 +478,8 @@ void JSObject::freeze(JSGlobalData& globalData)
void JSObject::preventExtensions(JSGlobalData& globalData)
{
+ if (isJSArray(this))
+ asArray(this)->enterDictionaryMode(globalData);
if (isExtensible())
setStructure(globalData, Structure::preventExtensionsTransition(globalData, structure()));
}
@@ -623,6 +624,8 @@ static bool putDescriptor(ExecState* exec, JSObject* target, const Identifier& p
else if (oldDescriptor.value())
newValue = oldDescriptor.value();
target->putDirect(exec->globalData(), propertyName, newValue, attributes & ~Accessor);
+ if (attributes & ReadOnly)
+ target->structure()->setContainsReadOnlyProperties();
return true;
}
attributes &= ~ReadOnly;
@@ -641,12 +644,30 @@ static bool putDescriptor(ExecState* exec, JSObject* target, const Identifier& p
return true;
}
+class DefineOwnPropertyScope {
+public:
+ DefineOwnPropertyScope(ExecState* exec)
+ : m_globalData(exec->globalData())
+ {
+ m_globalData.setInDefineOwnProperty(true);
+ }
+
+ ~DefineOwnPropertyScope()
+ {
+ m_globalData.setInDefineOwnProperty(false);
+ }
+
+private:
+ JSGlobalData& m_globalData;
+};
+
bool JSObject::defineOwnProperty(JSObject* object, ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor, bool throwException)
{
- // __proto__ is magic; we don't currently support setting it as a regular property.
- // Silent filter out calls to set __proto__ at an early stage; pretend all is okay.
- if (propertyName == exec->propertyNames().underscoreProto)
- return true;
+ // Track on the globaldata that we're in define property.
+ // Currently DefineOwnProperty uses delete to remove properties when they are being replaced
+ // (particularly when changing attributes), however delete won't allow non-configurable (i.e.
+ // DontDelete) properties to be deleted. For now, we can use this flag to make this work.
+ DefineOwnPropertyScope scope(exec);
// If we have a new property we can just put it on normally
PropertyDescriptor current;
@@ -711,21 +732,15 @@ bool JSObject::defineOwnProperty(JSObject* object, ExecState* exec, const Identi
return false;
}
if (!current.writable()) {
- if (descriptor.value() || !sameValue(exec, current.value(), descriptor.value())) {
+ if (descriptor.value() && !sameValue(exec, current.value(), descriptor.value())) {
if (throwException)
throwError(exec, createTypeError(exec, "Attempting to change value of a readonly property."));
return false;
}
}
- } else if (current.attributesEqual(descriptor)) {
- if (!descriptor.value())
- return true;
- PutPropertySlot slot;
- object->methodTable()->put(object, exec, propertyName, descriptor.value(), slot);
- if (exec->hadException())
- return false;
- return true;
}
+ if (current.attributesEqual(descriptor) && !descriptor.value())
+ return true;
object->methodTable()->deleteProperty(object, exec, propertyName);
return putDescriptor(exec, object, propertyName, descriptor, current.attributesWithOverride(descriptor), current);
}
@@ -748,15 +763,14 @@ bool JSObject::defineOwnProperty(JSObject* object, ExecState* exec, const Identi
if (!accessor)
return false;
GetterSetter* getterSetter = asGetterSetter(accessor);
- if (current.attributesEqual(descriptor)) {
- if (descriptor.setterPresent())
- getterSetter->setSetter(exec->globalData(), descriptor.setterObject());
- if (descriptor.getterPresent())
- getterSetter->setGetter(exec->globalData(), descriptor.getterObject());
+ if (descriptor.setterPresent())
+ getterSetter->setSetter(exec->globalData(), descriptor.setterObject());
+ if (descriptor.getterPresent())
+ getterSetter->setGetter(exec->globalData(), descriptor.getterObject());
+ if (current.attributesEqual(descriptor))
return true;
- }
object->methodTable()->deleteProperty(object, exec, propertyName);
- unsigned attrs = current.attributesWithOverride(descriptor);
+ unsigned attrs = descriptor.attributesOverridingCurrent(current);
object->putDirectAccessor(exec->globalData(), propertyName, getterSetter, attrs | Accessor);
return true;
}
diff --git a/Source/JavaScriptCore/runtime/JSObject.h b/Source/JavaScriptCore/runtime/JSObject.h
index e9194fa01..c117cffaf 100644
--- a/Source/JavaScriptCore/runtime/JSObject.h
+++ b/Source/JavaScriptCore/runtime/JSObject.h
@@ -49,6 +49,7 @@ namespace JSC {
class GetterSetter;
class HashEntry;
class InternalFunction;
+ class LLIntOffsetsExtractor;
class MarkedBlock;
class PropertyDescriptor;
class PropertyNameArray;
@@ -84,8 +85,6 @@ namespace JSC {
public:
typedef JSCell Base;
- JS_EXPORT_PRIVATE static void destroy(JSCell*);
-
JS_EXPORT_PRIVATE static void visitChildren(JSCell*, SlotVisitor&);
JS_EXPORT_PRIVATE static UString className(const JSObject*);
@@ -107,6 +106,8 @@ namespace JSC {
JS_EXPORT_PRIVATE static bool getOwnPropertySlotByIndex(JSCell*, ExecState*, unsigned propertyName, PropertySlot&);
JS_EXPORT_PRIVATE static bool getOwnPropertyDescriptor(JSObject*, ExecState*, const Identifier&, PropertyDescriptor&);
+ bool allowsAccessFrom(ExecState*);
+
JS_EXPORT_PRIVATE static void put(JSCell*, ExecState*, const Identifier& propertyName, JSValue, PutPropertySlot&);
JS_EXPORT_PRIVATE static void putByIndex(JSCell*, ExecState*, unsigned propertyName, JSValue);
@@ -264,6 +265,8 @@ namespace JSC {
JSObject(JSGlobalData&, Structure*, PropertyStorage inlineStorage);
private:
+ friend class LLIntOffsetsExtractor;
+
// Nobody should ever ask any of these questions on something already known to be a JSObject.
using JSCell::isAPIValueWrapper;
using JSCell::isGetterSetter;
@@ -323,8 +326,6 @@ COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt
return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info);
}
- JS_EXPORT_PRIVATE static void destroy(JSCell*);
-
protected:
explicit JSNonFinalObject(JSGlobalData& globalData, Structure* structure)
: JSObject(globalData, structure, m_inlineStorage)
@@ -343,6 +344,8 @@ COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt
WriteBarrier<Unknown> m_inlineStorage[JSNonFinalObject_inlineStorageCapacity];
};
+ class JSFinalObject;
+
// JSFinalObject is a type of JSObject that contains sufficent internal
// storage to fully make use of the colloctor cell containing it.
class JSFinalObject : public JSObject {
@@ -351,13 +354,7 @@ COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt
public:
typedef JSObject Base;
- static JSFinalObject* create(ExecState* exec, Structure* structure)
- {
- JSFinalObject* finalObject = new (NotNull, allocateCell<JSFinalObject>(*exec->heap())) JSFinalObject(exec->globalData(), structure);
- finalObject->finishCreation(exec->globalData());
- return finalObject;
- }
-
+ static JSFinalObject* create(ExecState*, Structure*);
static Structure* createStructure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSValue prototype)
{
return Structure::create(globalData, globalObject, prototype, TypeInfo(FinalObjectType, StructureFlags), &s_info);
@@ -374,9 +371,9 @@ COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt
ASSERT(classInfo());
}
- static void destroy(JSCell*);
-
private:
+ friend class LLIntOffsetsExtractor;
+
explicit JSFinalObject(JSGlobalData& globalData, Structure* structure)
: JSObject(globalData, structure, m_inlineStorage)
{
@@ -387,6 +384,13 @@ COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt
WriteBarrierBase<Unknown> m_inlineStorage[JSFinalObject_inlineStorageCapacity];
};
+inline JSFinalObject* JSFinalObject::create(ExecState* exec, Structure* structure)
+{
+ JSFinalObject* finalObject = new (NotNull, allocateCell<JSFinalObject>(*exec->heap())) JSFinalObject(exec->globalData(), structure);
+ finalObject->finishCreation(exec->globalData());
+ return finalObject;
+}
+
inline bool isJSFinalObject(JSCell* cell)
{
return cell->classInfo() == &JSFinalObject::s_info;
@@ -489,19 +493,6 @@ inline JSValue JSObject::prototype() const
return structure()->storedPrototype();
}
-inline bool JSObject::setPrototypeWithCycleCheck(JSGlobalData& globalData, JSValue prototype)
-{
- JSValue nextPrototypeValue = prototype;
- while (nextPrototypeValue && nextPrototypeValue.isObject()) {
- JSObject* nextPrototype = asObject(nextPrototypeValue)->unwrappedObject();
- if (nextPrototype == this)
- return false;
- nextPrototypeValue = nextPrototype->prototype();
- }
- setPrototype(globalData, prototype);
- return true;
-}
-
inline void JSObject::setPrototype(JSGlobalData& globalData, JSValue prototype)
{
ASSERT(prototype);
@@ -553,12 +544,6 @@ ALWAYS_INLINE bool JSObject::inlineGetOwnPropertySlot(ExecState* exec, const Ide
return true;
}
- // non-standard Netscape extension
- if (propertyName == exec->propertyNames().underscoreProto) {
- slot.setValue(prototype());
- return true;
- }
-
return false;
}
@@ -806,8 +791,6 @@ inline JSValue JSValue::get(ExecState* exec, const Identifier& propertyName, Pro
{
if (UNLIKELY(!isCell())) {
JSObject* prototype = synthesizePrototype(exec);
- if (propertyName == exec->propertyNames().underscoreProto)
- return prototype;
if (!prototype->getPropertySlot(exec, propertyName, slot))
return jsUndefined();
return slot.getValue(exec, propertyName);
diff --git a/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h b/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h
index d52e3ea61..7530d7532 100644
--- a/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h
+++ b/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h
@@ -38,6 +38,7 @@ namespace JSC {
class Identifier;
class JSObject;
+ class LLIntOffsetsExtractor;
class JSPropertyNameIterator : public JSCell {
friend class JIT;
@@ -96,6 +97,8 @@ namespace JSC {
}
private:
+ friend class LLIntOffsetsExtractor;
+
JSPropertyNameIterator(ExecState*, PropertyNameArrayData* propertyNameArrayData, size_t numCacheableSlot);
WriteBarrier<Structure> m_cachedStructure;
diff --git a/Source/JavaScriptCore/runtime/JSString.cpp b/Source/JavaScriptCore/runtime/JSString.cpp
index 4e98f9d18..cfa7d03b4 100644
--- a/Source/JavaScriptCore/runtime/JSString.cpp
+++ b/Source/JavaScriptCore/runtime/JSString.cpp
@@ -189,7 +189,7 @@ void JSString::outOfMemory(ExecState* exec) const
{
for (size_t i = 0; i < s_maxInternalRopeLength && m_fibers[i]; ++i)
m_fibers[i].clear();
- ASSERT(!isRope());
+ ASSERT(isRope());
ASSERT(m_value == UString());
if (exec)
throwOutOfMemoryError(exec);
@@ -253,10 +253,6 @@ bool JSString::getOwnPropertySlot(JSCell* cell, ExecState* exec, const Identifie
// This function should only be called by JSValue::get.
if (thisObject->getStringPropertySlot(exec, propertyName, slot))
return true;
- if (propertyName == exec->propertyNames().underscoreProto) {
- slot.setValue(exec->lexicalGlobalObject()->stringPrototype());
- return true;
- }
slot.setBase(thisObject);
JSObject* object;
for (JSValue prototype = exec->lexicalGlobalObject()->stringPrototype(); !prototype.isNull(); prototype = object->prototype()) {
diff --git a/Source/JavaScriptCore/runtime/JSString.h b/Source/JavaScriptCore/runtime/JSString.h
index c0637a6e0..32a32788a 100644
--- a/Source/JavaScriptCore/runtime/JSString.h
+++ b/Source/JavaScriptCore/runtime/JSString.h
@@ -32,6 +32,7 @@
namespace JSC {
class JSString;
+ class LLIntOffsetsExtractor;
JSString* jsEmptyString(JSGlobalData*);
JSString* jsEmptyString(ExecState*);
@@ -240,6 +241,8 @@ namespace JSC {
static void visitChildren(JSCell*, SlotVisitor&);
private:
+ friend class LLIntOffsetsExtractor;
+
JS_EXPORT_PRIVATE void resolveRope(ExecState*) const;
void resolveRopeSlowCase8(LChar*) const;
void resolveRopeSlowCase(UChar*) const;
diff --git a/Source/JavaScriptCore/runtime/JSTypeInfo.h b/Source/JavaScriptCore/runtime/JSTypeInfo.h
index 3e23aa253..83a3594db 100644
--- a/Source/JavaScriptCore/runtime/JSTypeInfo.h
+++ b/Source/JavaScriptCore/runtime/JSTypeInfo.h
@@ -34,6 +34,8 @@
namespace JSC {
+ class LLIntOffsetsExtractor;
+
static const unsigned MasqueradesAsUndefined = 1; // WebCore uses MasqueradesAsUndefined to make document.all undetectable.
static const unsigned ImplementsHasInstance = 1 << 1;
static const unsigned OverridesHasInstance = 1 << 2;
@@ -87,6 +89,8 @@ namespace JSC {
}
private:
+ friend class LLIntOffsetsExtractor;
+
bool isSetOnFlags1(unsigned flag) const { ASSERT(flag <= (1 << 7)); return m_flags & flag; }
bool isSetOnFlags2(unsigned flag) const { ASSERT(flag >= (1 << 8)); return m_flags2 & (flag >> 8); }
diff --git a/Source/JavaScriptCore/runtime/JSValue.cpp b/Source/JavaScriptCore/runtime/JSValue.cpp
index 72cf5a8d5..e3843f02b 100644
--- a/Source/JavaScriptCore/runtime/JSValue.cpp
+++ b/Source/JavaScriptCore/runtime/JSValue.cpp
@@ -118,7 +118,7 @@ JSObject* JSValue::synthesizePrototype(ExecState* exec) const
char* JSValue::description()
{
- static const size_t size = 64;
+ static const size_t size = 128;
static char description[size];
if (!*this)
@@ -127,14 +127,14 @@ char* JSValue::description()
snprintf(description, size, "Int32: %d", asInt32());
else if (isDouble()) {
#if USE(JSVALUE64)
- snprintf(description, size, "Double: %lf, %lx", asDouble(), reinterpretDoubleToIntptr(asDouble()));
+ snprintf(description, size, "Double: %lx, %lf", reinterpretDoubleToIntptr(asDouble()), asDouble());
#else
union {
double asDouble;
uint32_t asTwoInt32s[2];
} u;
u.asDouble = asDouble();
- snprintf(description, size, "Double: %lf, %08x:%08x", asDouble(), u.asTwoInt32s[1], u.asTwoInt32s[0]);
+ snprintf(description, size, "Double: %08x:%08x, %lf", u.asTwoInt32s[1], u.asTwoInt32s[0], asDouble());
#endif
} else if (isCell())
snprintf(description, size, "Cell: %p", asCell());
diff --git a/Source/JavaScriptCore/runtime/JSValue.h b/Source/JavaScriptCore/runtime/JSValue.h
index b18c181f5..9f797e05d 100644
--- a/Source/JavaScriptCore/runtime/JSValue.h
+++ b/Source/JavaScriptCore/runtime/JSValue.h
@@ -55,6 +55,9 @@ namespace JSC {
class SpeculativeJIT;
}
#endif
+ namespace LLInt {
+ class Data;
+ }
struct ClassInfo;
struct Instruction;
@@ -118,6 +121,7 @@ namespace JSC {
friend class DFG::OSRExitCompiler;
friend class DFG::SpeculativeJIT;
#endif
+ friend class LLInt::Data;
public:
static EncodedJSValue encode(JSValue);
@@ -234,6 +238,8 @@ namespace JSC {
char* description();
+ JS_EXPORT_PRIVATE JSObject* synthesizePrototype(ExecState*) const;
+
private:
template <class T> JSValue(WriteBarrierBase<T>);
@@ -246,7 +252,6 @@ namespace JSC {
JS_EXPORT_PRIVATE JSObject* toObjectSlowCase(ExecState*, JSGlobalObject*) const;
JS_EXPORT_PRIVATE JSObject* toThisObjectSlowCase(ExecState*) const;
- JS_EXPORT_PRIVATE JSObject* synthesizePrototype(ExecState*) const;
JSObject* synthesizeObject(ExecState*) const;
#if USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/runtime/JSVariableObject.h b/Source/JavaScriptCore/runtime/JSVariableObject.h
index c1d05ff74..8d058f1fc 100644
--- a/Source/JavaScriptCore/runtime/JSVariableObject.h
+++ b/Source/JavaScriptCore/runtime/JSVariableObject.h
@@ -38,10 +38,12 @@
namespace JSC {
+ class LLIntOffsetsExtractor;
class Register;
class JSVariableObject : public JSNonFinalObject {
friend class JIT;
+ friend class LLIntOffsetsExtractor;
public:
typedef JSNonFinalObject Base;
diff --git a/Source/JavaScriptCore/runtime/LiteralParser.cpp b/Source/JavaScriptCore/runtime/LiteralParser.cpp
index b22b81503..3bde3ff08 100644
--- a/Source/JavaScriptCore/runtime/LiteralParser.cpp
+++ b/Source/JavaScriptCore/runtime/LiteralParser.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Mathias Bynens (mathias@qiwi.be)
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -294,7 +295,7 @@ ALWAYS_INLINE TokenType LiteralParser<LChar>::Lexer::lexIdentifier(LiteralParser
template <>
ALWAYS_INLINE TokenType LiteralParser<UChar>::Lexer::lexIdentifier(LiteralParserToken<UChar>& token)
{
- while (m_ptr < m_end && (isASCIIAlphanumeric(*m_ptr) || *m_ptr == '_' || *m_ptr == '$'))
+ while (m_ptr < m_end && (isASCIIAlphanumeric(*m_ptr) || *m_ptr == '_' || *m_ptr == '$' || *m_ptr == 0x200C || *m_ptr == 0x200D))
m_ptr++;
token.stringIs8Bit = 0;
token.stringToken16 = token.start;
@@ -536,7 +537,7 @@ TokenType LiteralParser<CharType>::Lexer::lexNumber(LiteralParserToken<CharType>
}
buffer[i] = 0;
char* end;
- token.numberToken = WTF::strtod(buffer.data(), &end);
+ token.numberToken = WTF::strtod<WTF::AllowTrailingJunk>(buffer.data(), &end);
ASSERT(buffer.data() + (token.end - token.start) == end);
return TokNumber;
}
diff --git a/Source/JavaScriptCore/runtime/ObjectConstructor.cpp b/Source/JavaScriptCore/runtime/ObjectConstructor.cpp
index d96c1de7f..b7dd71655 100644
--- a/Source/JavaScriptCore/runtime/ObjectConstructor.cpp
+++ b/Source/JavaScriptCore/runtime/ObjectConstructor.cpp
@@ -138,11 +138,10 @@ EncodedJSValue JSC_HOST_CALL objectConstructorGetPrototypeOf(ExecState* exec)
{
if (!exec->argument(0).isObject())
return throwVMError(exec, createTypeError(exec, "Requested prototype of a value that is not an object."));
-
- // This uses JSValue::get() instead of directly accessing the prototype from the object
- // (using JSObject::prototype()) in order to allow objects to override the behavior, such
- // as returning jsUndefined() for cross-origin access.
- return JSValue::encode(exec->argument(0).get(exec, exec->propertyNames().underscoreProto));
+ JSObject* object = asObject(exec->argument(0));
+ if (!object->allowsAccessFrom(exec->trueCallerFrame()))
+ return JSValue::encode(jsUndefined());
+ return JSValue::encode(object->prototype());
}
EncodedJSValue JSC_HOST_CALL objectConstructorGetOwnPropertyDescriptor(ExecState* exec)
@@ -342,9 +341,7 @@ EncodedJSValue JSC_HOST_CALL objectConstructorDefineProperties(ExecState* exec)
{
if (!exec->argument(0).isObject())
return throwVMError(exec, createTypeError(exec, "Properties can only be defined on Objects."));
- if (!exec->argument(1).isObject())
- return throwVMError(exec, createTypeError(exec, "Property descriptor list must be an Object."));
- return JSValue::encode(defineProperties(exec, asObject(exec->argument(0)), asObject(exec->argument(1))));
+ return JSValue::encode(defineProperties(exec, asObject(exec->argument(0)), exec->argument(1).toObject(exec)));
}
EncodedJSValue JSC_HOST_CALL objectConstructorCreate(ExecState* exec)
@@ -362,19 +359,79 @@ EncodedJSValue JSC_HOST_CALL objectConstructorCreate(ExecState* exec)
EncodedJSValue JSC_HOST_CALL objectConstructorSeal(ExecState* exec)
{
+ // 1. If Type(O) is not Object throw a TypeError exception.
JSValue obj = exec->argument(0);
if (!obj.isObject())
return throwVMError(exec, createTypeError(exec, "Object.seal can only be called on Objects."));
- asObject(obj)->seal(exec->globalData());
+ JSObject* object = asObject(obj);
+
+ if (isJSFinalObject(object)) {
+ object->seal(exec->globalData());
+ return JSValue::encode(obj);
+ }
+
+ // 2. For each named own property name P of O,
+ PropertyNameArray properties(exec);
+ object->methodTable()->getOwnPropertyNames(object, exec, properties, IncludeDontEnumProperties);
+ PropertyNameArray::const_iterator end = properties.end();
+ for (PropertyNameArray::const_iterator iter = properties.begin(); iter != end; ++iter) {
+ // a. Let desc be the result of calling the [[GetOwnProperty]] internal method of O with P.
+ PropertyDescriptor desc;
+ if (!object->methodTable()->getOwnPropertyDescriptor(object, exec, *iter, desc))
+ continue;
+ // b. If desc.[[Configurable]] is true, set desc.[[Configurable]] to false.
+ desc.setConfigurable(false);
+ // c. Call the [[DefineOwnProperty]] internal method of O with P, desc, and true as arguments.
+ object->methodTable()->defineOwnProperty(object, exec, *iter, desc, true);
+ if (exec->hadException())
+ return JSValue::encode(obj);
+ }
+
+ // 3. Set the [[Extensible]] internal property of O to false.
+ object->preventExtensions(exec->globalData());
+
+ // 4. Return O.
return JSValue::encode(obj);
}
EncodedJSValue JSC_HOST_CALL objectConstructorFreeze(ExecState* exec)
{
+ // 1. If Type(O) is not Object throw a TypeError exception.
JSValue obj = exec->argument(0);
if (!obj.isObject())
return throwVMError(exec, createTypeError(exec, "Object.freeze can only be called on Objects."));
- asObject(obj)->freeze(exec->globalData());
+ JSObject* object = asObject(obj);
+
+ if (isJSFinalObject(object)) {
+ object->freeze(exec->globalData());
+ return JSValue::encode(obj);
+ }
+
+ // 2. For each named own property name P of O,
+ PropertyNameArray properties(exec);
+ object->methodTable()->getOwnPropertyNames(object, exec, properties, IncludeDontEnumProperties);
+ PropertyNameArray::const_iterator end = properties.end();
+ for (PropertyNameArray::const_iterator iter = properties.begin(); iter != end; ++iter) {
+ // a. Let desc be the result of calling the [[GetOwnProperty]] internal method of O with P.
+ PropertyDescriptor desc;
+ if (!object->methodTable()->getOwnPropertyDescriptor(object, exec, *iter, desc))
+ continue;
+ // b. If IsDataDescriptor(desc) is true, then
+ // i. If desc.[[Writable]] is true, set desc.[[Writable]] to false.
+ if (desc.isDataDescriptor())
+ desc.setWritable(false);
+ // c. If desc.[[Configurable]] is true, set desc.[[Configurable]] to false.
+ desc.setConfigurable(false);
+ // d. Call the [[DefineOwnProperty]] internal method of O with P, desc, and true as arguments.
+ object->methodTable()->defineOwnProperty(object, exec, *iter, desc, true);
+ if (exec->hadException())
+ return JSValue::encode(obj);
+ }
+
+ // 3. Set the [[Extensible]] internal property of O to false.
+ object->preventExtensions(exec->globalData());
+
+ // 4. Return O.
return JSValue::encode(obj);
}
@@ -389,18 +446,63 @@ EncodedJSValue JSC_HOST_CALL objectConstructorPreventExtensions(ExecState* exec)
EncodedJSValue JSC_HOST_CALL objectConstructorIsSealed(ExecState* exec)
{
+ // 1. If Type(O) is not Object throw a TypeError exception.
JSValue obj = exec->argument(0);
if (!obj.isObject())
return throwVMError(exec, createTypeError(exec, "Object.isSealed can only be called on Objects."));
- return JSValue::encode(jsBoolean(asObject(obj)->isSealed(exec->globalData())));
+ JSObject* object = asObject(obj);
+
+ if (isJSFinalObject(object))
+ return JSValue::encode(jsBoolean(object->isSealed(exec->globalData())));
+
+ // 2. For each named own property name P of O,
+ PropertyNameArray properties(exec);
+ object->methodTable()->getOwnPropertyNames(object, exec, properties, IncludeDontEnumProperties);
+ PropertyNameArray::const_iterator end = properties.end();
+ for (PropertyNameArray::const_iterator iter = properties.begin(); iter != end; ++iter) {
+ // a. Let desc be the result of calling the [[GetOwnProperty]] internal method of O with P.
+ PropertyDescriptor desc;
+ if (!object->methodTable()->getOwnPropertyDescriptor(object, exec, *iter, desc))
+ continue;
+ // b. If desc.[[Configurable]] is true, then return false.
+ if (desc.configurable())
+ return JSValue::encode(jsBoolean(false));
+ }
+
+ // 3. If the [[Extensible]] internal property of O is false, then return true.
+ // 4. Otherwise, return false.
+ return JSValue::encode(jsBoolean(!object->isExtensible()));
}
EncodedJSValue JSC_HOST_CALL objectConstructorIsFrozen(ExecState* exec)
{
+ // 1. If Type(O) is not Object throw a TypeError exception.
JSValue obj = exec->argument(0);
if (!obj.isObject())
return throwVMError(exec, createTypeError(exec, "Object.isFrozen can only be called on Objects."));
- return JSValue::encode(jsBoolean(asObject(obj)->isFrozen(exec->globalData())));
+ JSObject* object = asObject(obj);
+
+ if (isJSFinalObject(object))
+ return JSValue::encode(jsBoolean(object->isFrozen(exec->globalData())));
+
+ // 2. For each named own property name P of O,
+ PropertyNameArray properties(exec);
+ object->methodTable()->getOwnPropertyNames(object, exec, properties, IncludeDontEnumProperties);
+ PropertyNameArray::const_iterator end = properties.end();
+ for (PropertyNameArray::const_iterator iter = properties.begin(); iter != end; ++iter) {
+ // a. Let desc be the result of calling the [[GetOwnProperty]] internal method of O with P.
+ PropertyDescriptor desc;
+ if (!object->methodTable()->getOwnPropertyDescriptor(object, exec, *iter, desc))
+ continue;
+ // b. If IsDataDescriptor(desc) is true then
+        //    i. If desc.[[Writable]] is true, return false.  c. If desc.[[Configurable]] is true, then return false.
+ if ((desc.isDataDescriptor() && desc.writable()) || desc.configurable())
+ return JSValue::encode(jsBoolean(false));
+ }
+
+ // 3. If the [[Extensible]] internal property of O is false, then return true.
+ // 4. Otherwise, return false.
+ return JSValue::encode(jsBoolean(!object->isExtensible()));
}
EncodedJSValue JSC_HOST_CALL objectConstructorIsExtensible(ExecState* exec)
diff --git a/Source/JavaScriptCore/runtime/ObjectPrototype.cpp b/Source/JavaScriptCore/runtime/ObjectPrototype.cpp
index 674bd7b7d..6ad312c7c 100644
--- a/Source/JavaScriptCore/runtime/ObjectPrototype.cpp
+++ b/Source/JavaScriptCore/runtime/ObjectPrototype.cpp
@@ -80,7 +80,7 @@ void ObjectPrototype::finishCreation(JSGlobalData& globalData, JSGlobalObject*)
void ObjectPrototype::put(JSCell* cell, ExecState* exec, const Identifier& propertyName, JSValue value, PutPropertySlot& slot)
{
ObjectPrototype* thisObject = jsCast<ObjectPrototype*>(cell);
- JSNonFinalObject::put(cell, exec, propertyName, value, slot);
+ Base::put(cell, exec, propertyName, value, slot);
if (thisObject->m_hasNoPropertiesWithUInt32Names) {
bool isUInt32;
@@ -89,12 +89,26 @@ void ObjectPrototype::put(JSCell* cell, ExecState* exec, const Identifier& prope
}
}
+bool ObjectPrototype::defineOwnProperty(JSObject* object, ExecState* exec, const Identifier& propertyName, PropertyDescriptor& descriptor, bool shouldThrow)
+{
+ ObjectPrototype* thisObject = jsCast<ObjectPrototype*>(object);
+ bool result = Base::defineOwnProperty(object, exec, propertyName, descriptor, shouldThrow);
+
+ if (thisObject->m_hasNoPropertiesWithUInt32Names) {
+ bool isUInt32;
+ propertyName.toUInt32(isUInt32);
+ thisObject->m_hasNoPropertiesWithUInt32Names = !isUInt32;
+ }
+
+ return result;
+}
+
bool ObjectPrototype::getOwnPropertySlotByIndex(JSCell* cell, ExecState* exec, unsigned propertyName, PropertySlot& slot)
{
ObjectPrototype* thisObject = jsCast<ObjectPrototype*>(cell);
if (thisObject->m_hasNoPropertiesWithUInt32Names)
return false;
- return JSNonFinalObject::getOwnPropertySlotByIndex(thisObject, exec, propertyName, slot);
+ return Base::getOwnPropertySlotByIndex(thisObject, exec, propertyName, slot);
}
bool ObjectPrototype::getOwnPropertySlot(JSCell* cell, ExecState* exec, const Identifier& propertyName, PropertySlot &slot)
diff --git a/Source/JavaScriptCore/runtime/ObjectPrototype.h b/Source/JavaScriptCore/runtime/ObjectPrototype.h
index 4c49e97a7..b9b8a30d4 100644
--- a/Source/JavaScriptCore/runtime/ObjectPrototype.h
+++ b/Source/JavaScriptCore/runtime/ObjectPrototype.h
@@ -51,6 +51,7 @@ namespace JSC {
private:
ObjectPrototype(ExecState*, Structure*);
static void put(JSCell*, ExecState*, const Identifier&, JSValue, PutPropertySlot&);
+ static bool defineOwnProperty(JSObject*, ExecState*, const Identifier& propertyName, PropertyDescriptor&, bool shouldThrow);
static bool getOwnPropertySlot(JSCell*, ExecState*, const Identifier&, PropertySlot&);
static bool getOwnPropertySlotByIndex(JSCell*, ExecState*, unsigned propertyName, PropertySlot&);
diff --git a/Source/JavaScriptCore/runtime/Options.cpp b/Source/JavaScriptCore/runtime/Options.cpp
index ddfba6e7c..5500508cf 100644
--- a/Source/JavaScriptCore/runtime/Options.cpp
+++ b/Source/JavaScriptCore/runtime/Options.cpp
@@ -52,6 +52,10 @@ unsigned maximumFunctionForConstructInlineCandidateInstructionCount;
unsigned maximumInliningDepth;
+int32_t executionCounterValueForJITAfterWarmUp;
+int32_t executionCounterValueForDontJITAnytimeSoon;
+int32_t executionCounterValueForJITSoon;
+
int32_t executionCounterValueForOptimizeAfterWarmUp;
int32_t executionCounterValueForOptimizeAfterLongWarmUp;
int32_t executionCounterValueForDontOptimizeAnytimeSoon;
@@ -137,6 +141,10 @@ void initializeOptions()
SET(maximumInliningDepth, 5);
+ SET(executionCounterValueForJITAfterWarmUp, -100);
+ SET(executionCounterValueForDontJITAnytimeSoon, std::numeric_limits<int32_t>::min());
+ SET(executionCounterValueForJITSoon, -100);
+
SET(executionCounterValueForOptimizeAfterWarmUp, -1000);
SET(executionCounterValueForOptimizeAfterLongWarmUp, -5000);
SET(executionCounterValueForDontOptimizeAnytimeSoon, std::numeric_limits<int32_t>::min());
@@ -185,6 +193,8 @@ void initializeOptions()
if (cpusToUse < 1)
cpusToUse = 1;
+ cpusToUse = 1;
+
SET(numberOfGCMarkers, cpusToUse);
ASSERT(executionCounterValueForDontOptimizeAnytimeSoon <= executionCounterValueForOptimizeAfterLongWarmUp);
diff --git a/Source/JavaScriptCore/runtime/Options.h b/Source/JavaScriptCore/runtime/Options.h
index feebd37bb..b9e68f90c 100644
--- a/Source/JavaScriptCore/runtime/Options.h
+++ b/Source/JavaScriptCore/runtime/Options.h
@@ -37,6 +37,10 @@ extern unsigned maximumFunctionForConstructInlineCandidateInstructionCount;
extern unsigned maximumInliningDepth; // Depth of inline stack, so 1 = no inlining, 2 = one level, etc.
+extern int32_t executionCounterValueForJITAfterWarmUp;
+extern int32_t executionCounterValueForDontJITAnytimeSoon;
+extern int32_t executionCounterValueForJITSoon;
+
extern int32_t executionCounterValueForOptimizeAfterWarmUp;
extern int32_t executionCounterValueForOptimizeAfterLongWarmUp;
extern int32_t executionCounterValueForDontOptimizeAnytimeSoon;
diff --git a/Source/JavaScriptCore/runtime/PropertyDescriptor.cpp b/Source/JavaScriptCore/runtime/PropertyDescriptor.cpp
index e3458e4b9..0cb629584 100644
--- a/Source/JavaScriptCore/runtime/PropertyDescriptor.cpp
+++ b/Source/JavaScriptCore/runtime/PropertyDescriptor.cpp
@@ -217,12 +217,16 @@ unsigned PropertyDescriptor::attributesWithOverride(const PropertyDescriptor& ot
newAttributes ^= DontDelete;
if (sharedSeen & EnumerablePresent && mismatch & DontEnum)
newAttributes ^= DontEnum;
+ if (isAccessorDescriptor() && other.isDataDescriptor())
+ newAttributes |= ReadOnly;
return newAttributes;
}
unsigned PropertyDescriptor::attributesOverridingCurrent(const PropertyDescriptor& current) const
{
unsigned currentAttributes = current.m_attributes;
+ if (isDataDescriptor() && current.isAccessorDescriptor())
+ currentAttributes |= ReadOnly;
unsigned overrideMask = 0;
if (writablePresent())
overrideMask |= ReadOnly;
diff --git a/Source/JavaScriptCore/runtime/RegExp.cpp b/Source/JavaScriptCore/runtime/RegExp.cpp
index 69bca5df0..2b7feb4b5 100644
--- a/Source/JavaScriptCore/runtime/RegExp.cpp
+++ b/Source/JavaScriptCore/runtime/RegExp.cpp
@@ -413,24 +413,24 @@ void RegExp::matchCompareWithInterpreter(const UString& s, int startOffset, int*
differences++;
if (differences) {
- fprintf(stderr, "RegExp Discrepency for /%s/\n string input ", pattern().utf8().data());
+ dataLog("RegExp Discrepency for /%s/\n string input ", pattern().utf8().data());
unsigned segmentLen = s.length() - static_cast<unsigned>(startOffset);
- fprintf(stderr, (segmentLen < 150) ? "\"%s\"\n" : "\"%148s...\"\n", s.utf8().data() + startOffset);
+ dataLog((segmentLen < 150) ? "\"%s\"\n" : "\"%148s...\"\n", s.utf8().data() + startOffset);
if (jitResult != interpreterResult) {
- fprintf(stderr, " JIT result = %d, blah interpreted result = %d\n", jitResult, interpreterResult);
+ dataLog(" JIT result = %d, blah interpreted result = %d\n", jitResult, interpreterResult);
differences--;
} else {
- fprintf(stderr, " Correct result = %d\n", jitResult);
+ dataLog(" Correct result = %d\n", jitResult);
}
if (differences) {
for (unsigned j = 2, i = 0; i < m_numSubpatterns; j +=2, i++) {
if (offsetVector[j] != interpreterOffsetVector[j])
- fprintf(stderr, " JIT offset[%d] = %d, interpreted offset[%d] = %d\n", j, offsetVector[j], j, interpreterOffsetVector[j]);
+ dataLog(" JIT offset[%d] = %d, interpreted offset[%d] = %d\n", j, offsetVector[j], j, interpreterOffsetVector[j]);
if ((offsetVector[j] >= 0) && (offsetVector[j+1] != interpreterOffsetVector[j+1]))
- fprintf(stderr, " JIT offset[%d] = %d, interpreted offset[%d] = %d\n", j+1, offsetVector[j+1], j+1, interpreterOffsetVector[j+1]);
+ dataLog(" JIT offset[%d] = %d, interpreted offset[%d] = %d\n", j+1, offsetVector[j+1], j+1, interpreterOffsetVector[j+1]);
}
}
}
diff --git a/Source/JavaScriptCore/runtime/RegExpCache.cpp b/Source/JavaScriptCore/runtime/RegExpCache.cpp
index cd96301db..d5edbbc7f 100644
--- a/Source/JavaScriptCore/runtime/RegExpCache.cpp
+++ b/Source/JavaScriptCore/runtime/RegExpCache.cpp
@@ -46,7 +46,7 @@ RegExp* RegExpCache::lookupOrCreate(const UString& patternString, RegExpFlags fl
// We need to do a second lookup to add the RegExp as
// allocating it may have caused a gc cycle, which in
// turn may have removed items from the cache.
- m_weakCache.add(key, Weak<RegExp>(*m_globalData, regExp, this));
+ m_weakCache.add(key, PassWeak<RegExp>(*m_globalData, regExp, this));
return regExp;
}
diff --git a/Source/JavaScriptCore/runtime/SamplingCounter.cpp b/Source/JavaScriptCore/runtime/SamplingCounter.cpp
index e5fb25a93..abed763ca 100644
--- a/Source/JavaScriptCore/runtime/SamplingCounter.cpp
+++ b/Source/JavaScriptCore/runtime/SamplingCounter.cpp
@@ -35,10 +35,10 @@ void AbstractSamplingCounter::dump()
{
#if ENABLE(SAMPLING_COUNTERS)
if (s_abstractSamplingCounterChain != &s_abstractSamplingCounterChainEnd) {
- printf("\nSampling Counter Values:\n");
+ dataLog("\nSampling Counter Values:\n");
for (AbstractSamplingCounter* currCounter = s_abstractSamplingCounterChain; (currCounter != &s_abstractSamplingCounterChainEnd); currCounter = currCounter->m_next)
- printf("\t%s\t: %lld\n", currCounter->m_name, currCounter->m_counter);
- printf("\n\n");
+ dataLog("\t%s\t: %lld\n", currCounter->m_name, currCounter->m_counter);
+ dataLog("\n\n");
}
s_completed = true;
#endif
diff --git a/Source/JavaScriptCore/runtime/SamplingCounter.h b/Source/JavaScriptCore/runtime/SamplingCounter.h
index 329a5cfd3..8413b5458 100644
--- a/Source/JavaScriptCore/runtime/SamplingCounter.h
+++ b/Source/JavaScriptCore/runtime/SamplingCounter.h
@@ -159,7 +159,7 @@ public:
~DeletableSamplingCounter()
{
if (!s_completed)
- fprintf(stderr, "DeletableSamplingCounter \"%s\" deleted early (with count %lld)\n", m_name, m_counter);
+            dataLog("DeletableSamplingCounter \"%s\" deleted early (with count %lld)\n", m_name, m_counter);
// Our m_referer pointer should know where the pointer to this node is,
// and m_next should know that this node is the previous node in the list.
ASSERT(*m_referer == this);
diff --git a/Source/JavaScriptCore/runtime/ScopeChain.cpp b/Source/JavaScriptCore/runtime/ScopeChain.cpp
index 099f7fde6..e7ea07508 100644
--- a/Source/JavaScriptCore/runtime/ScopeChain.cpp
+++ b/Source/JavaScriptCore/runtime/ScopeChain.cpp
@@ -42,12 +42,12 @@ void ScopeChainNode::print()
o->methodTable()->getPropertyNames(o, globalObject->globalExec(), propertyNames, ExcludeDontEnumProperties);
PropertyNameArray::const_iterator propEnd = propertyNames.end();
- fprintf(stderr, "----- [scope %p] -----\n", o);
+ dataLog("----- [scope %p] -----\n", o);
for (PropertyNameArray::const_iterator propIter = propertyNames.begin(); propIter != propEnd; propIter++) {
Identifier name = *propIter;
- fprintf(stderr, "%s, ", name.ustring().utf8().data());
+ dataLog("%s, ", name.ustring().utf8().data());
}
- fprintf(stderr, "\n");
+ dataLog("\n");
}
}
diff --git a/Source/JavaScriptCore/runtime/ScopeChain.h b/Source/JavaScriptCore/runtime/ScopeChain.h
index 6e358d779..c382008f1 100644
--- a/Source/JavaScriptCore/runtime/ScopeChain.h
+++ b/Source/JavaScriptCore/runtime/ScopeChain.h
@@ -30,6 +30,7 @@ namespace JSC {
class JSGlobalData;
class JSGlobalObject;
class JSObject;
+ class LLIntOffsetsExtractor;
class ScopeChainIterator;
class SlotVisitor;
@@ -91,6 +92,8 @@ namespace JSC {
static JS_EXPORTDATA const ClassInfo s_info;
private:
+ friend class LLIntOffsetsExtractor;
+
static const unsigned StructureFlags = OverridesVisitChildren;
};
diff --git a/Source/JavaScriptCore/runtime/Structure.cpp b/Source/JavaScriptCore/runtime/Structure.cpp
index f387cf283..6ee419da6 100644
--- a/Source/JavaScriptCore/runtime/Structure.cpp
+++ b/Source/JavaScriptCore/runtime/Structure.cpp
@@ -142,17 +142,17 @@ void Structure::dumpStatistics()
}
}
- printf("Number of live Structures: %d\n", liveStructureSet.size());
- printf("Number of Structures using the single item optimization for transition map: %d\n", numberUsingSingleSlot);
- printf("Number of Structures that are leaf nodes: %d\n", numberLeaf);
- printf("Number of Structures that singletons: %d\n", numberSingletons);
- printf("Number of Structures with PropertyMaps: %d\n", numberWithPropertyMaps);
-
- printf("Size of a single Structures: %d\n", static_cast<unsigned>(sizeof(Structure)));
- printf("Size of sum of all property maps: %d\n", totalPropertyMapsSize);
- printf("Size of average of all property maps: %f\n", static_cast<double>(totalPropertyMapsSize) / static_cast<double>(liveStructureSet.size()));
+ dataLog("Number of live Structures: %d\n", liveStructureSet.size());
+ dataLog("Number of Structures using the single item optimization for transition map: %d\n", numberUsingSingleSlot);
+ dataLog("Number of Structures that are leaf nodes: %d\n", numberLeaf);
+ dataLog("Number of Structures that singletons: %d\n", numberSingletons);
+ dataLog("Number of Structures with PropertyMaps: %d\n", numberWithPropertyMaps);
+
+ dataLog("Size of a single Structures: %d\n", static_cast<unsigned>(sizeof(Structure)));
+ dataLog("Size of sum of all property maps: %d\n", totalPropertyMapsSize);
+ dataLog("Size of average of all property maps: %f\n", static_cast<double>(totalPropertyMapsSize) / static_cast<double>(liveStructureSet.size()));
#else
- printf("Dumping Structure statistics is not enabled.\n");
+ dataLog("Dumping Structure statistics is not enabled.\n");
#endif
}
@@ -167,6 +167,7 @@ Structure::Structure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSV
, m_dictionaryKind(NoneDictionaryKind)
, m_isPinnedPropertyTable(false)
, m_hasGetterSetterProperties(false)
+ , m_hasReadOnlyOrGetterSetterPropertiesExcludingProto(false)
, m_hasNonEnumerableProperties(false)
, m_attributesInPrevious(0)
, m_specificFunctionThrashCount(0)
@@ -188,6 +189,7 @@ Structure::Structure(JSGlobalData& globalData)
, m_dictionaryKind(NoneDictionaryKind)
, m_isPinnedPropertyTable(false)
, m_hasGetterSetterProperties(false)
+ , m_hasReadOnlyOrGetterSetterPropertiesExcludingProto(false)
, m_hasNonEnumerableProperties(false)
, m_attributesInPrevious(0)
, m_specificFunctionThrashCount(0)
@@ -207,6 +209,7 @@ Structure::Structure(JSGlobalData& globalData, const Structure* previous)
, m_dictionaryKind(previous->m_dictionaryKind)
, m_isPinnedPropertyTable(false)
, m_hasGetterSetterProperties(previous->m_hasGetterSetterProperties)
+ , m_hasReadOnlyOrGetterSetterPropertiesExcludingProto(previous->m_hasReadOnlyOrGetterSetterPropertiesExcludingProto)
, m_hasNonEnumerableProperties(previous->m_hasNonEnumerableProperties)
, m_attributesInPrevious(0)
, m_specificFunctionThrashCount(previous->m_specificFunctionThrashCount)
@@ -322,7 +325,7 @@ Structure* Structure::addPropertyTransition(JSGlobalData& globalData, Structure*
transition->growPropertyStorageCapacity();
return transition;
}
-
+
Structure* transition = create(globalData, structure);
transition->m_cachedPrototypeChain.setMayBeNull(globalData, transition, structure->m_cachedPrototypeChain.get());
@@ -467,9 +470,12 @@ Structure* Structure::freezeTransition(JSGlobalData& globalData, Structure* stru
Structure* transition = preventExtensionsTransition(globalData, structure);
if (transition->m_propertyTable) {
+ PropertyTable::iterator iter = transition->m_propertyTable->begin();
PropertyTable::iterator end = transition->m_propertyTable->end();
- for (PropertyTable::iterator iter = transition->m_propertyTable->begin(); iter != end; ++iter)
- iter->attributes |= (DontDelete | ReadOnly);
+ if (iter != end)
+ transition->m_hasReadOnlyOrGetterSetterPropertiesExcludingProto = true;
+ for (; iter != end; ++iter)
+ iter->attributes |= iter->attributes & Accessor ? DontDelete : (DontDelete | ReadOnly);
}
return transition;
@@ -520,7 +526,9 @@ bool Structure::isFrozen(JSGlobalData& globalData)
PropertyTable::iterator end = m_propertyTable->end();
for (PropertyTable::iterator iter = m_propertyTable->begin(); iter != end; ++iter) {
- if ((iter->attributes & (DontDelete | ReadOnly)) != (DontDelete | ReadOnly))
+ if (!(iter->attributes & DontDelete))
+ return false;
+ if (!(iter->attributes & (ReadOnly | Accessor)))
return false;
}
return true;
@@ -601,11 +609,11 @@ static PropertyMapStatisticsExitLogger logger;
PropertyMapStatisticsExitLogger::~PropertyMapStatisticsExitLogger()
{
- printf("\nJSC::PropertyMap statistics\n\n");
- printf("%d probes\n", numProbes);
- printf("%d collisions (%.1f%%)\n", numCollisions, 100.0 * numCollisions / numProbes);
- printf("%d rehashes\n", numRehashes);
- printf("%d removes\n", numRemoves);
+ dataLog("\nJSC::PropertyMap statistics\n\n");
+ dataLog("%d probes\n", numProbes);
+ dataLog("%d collisions (%.1f%%)\n", numCollisions, 100.0 * numCollisions / numProbes);
+ dataLog("%d rehashes\n", numRehashes);
+ dataLog("%d removes\n", numRemoves);
}
#endif
diff --git a/Source/JavaScriptCore/runtime/Structure.h b/Source/JavaScriptCore/runtime/Structure.h
index ced296856..46cf732e1 100644
--- a/Source/JavaScriptCore/runtime/Structure.h
+++ b/Source/JavaScriptCore/runtime/Structure.h
@@ -45,6 +45,7 @@
namespace JSC {
+ class LLIntOffsetsExtractor;
class PropertyNameArray;
class PropertyNameArrayData;
class StructureChain;
@@ -145,7 +146,17 @@ namespace JSC {
}
bool hasGetterSetterProperties() const { return m_hasGetterSetterProperties; }
- void setHasGetterSetterProperties(bool hasGetterSetterProperties) { m_hasGetterSetterProperties = hasGetterSetterProperties; }
+ bool hasReadOnlyOrGetterSetterPropertiesExcludingProto() const { return m_hasReadOnlyOrGetterSetterPropertiesExcludingProto; }
+ void setHasGetterSetterProperties(bool is__proto__)
+ {
+ m_hasGetterSetterProperties = true;
+ if (!is__proto__)
+ m_hasReadOnlyOrGetterSetterPropertiesExcludingProto = true;
+ }
+ void setContainsReadOnlyProperties()
+ {
+ m_hasReadOnlyOrGetterSetterPropertiesExcludingProto = true;
+ }
bool hasNonEnumerableProperties() const { return m_hasNonEnumerableProperties; }
@@ -196,6 +207,8 @@ namespace JSC {
static JS_EXPORTDATA const ClassInfo s_info;
private:
+ friend class LLIntOffsetsExtractor;
+
JS_EXPORT_PRIVATE Structure(JSGlobalData&, JSGlobalObject*, JSValue prototype, const TypeInfo&, const ClassInfo*);
Structure(JSGlobalData&);
Structure(JSGlobalData&, const Structure*);
@@ -282,6 +295,7 @@ namespace JSC {
unsigned m_dictionaryKind : 2;
bool m_isPinnedPropertyTable : 1;
bool m_hasGetterSetterProperties : 1;
+ bool m_hasReadOnlyOrGetterSetterPropertiesExcludingProto : 1;
bool m_hasNonEnumerableProperties : 1;
unsigned m_attributesInPrevious : 7;
unsigned m_specificFunctionThrashCount : 2;
diff --git a/Source/JavaScriptCore/runtime/StructureChain.h b/Source/JavaScriptCore/runtime/StructureChain.h
index df7a37fa7..3b19d4cf1 100644
--- a/Source/JavaScriptCore/runtime/StructureChain.h
+++ b/Source/JavaScriptCore/runtime/StructureChain.h
@@ -37,6 +37,7 @@
namespace JSC {
+ class LLIntOffsetsExtractor;
class Structure;
class StructureChain : public JSCell {
@@ -74,6 +75,8 @@ namespace JSC {
}
private:
+ friend class LLIntOffsetsExtractor;
+
StructureChain(JSGlobalData&, Structure*);
static void destroy(JSCell*);
OwnArrayPtr<WriteBarrier<Structure> > m_vector;
diff --git a/Source/JavaScriptCore/runtime/StructureTransitionTable.h b/Source/JavaScriptCore/runtime/StructureTransitionTable.h
index 536237a33..517992470 100644
--- a/Source/JavaScriptCore/runtime/StructureTransitionTable.h
+++ b/Source/JavaScriptCore/runtime/StructureTransitionTable.h
@@ -29,7 +29,6 @@
#include "UString.h"
#include "WeakGCMap.h"
#include <wtf/HashFunctions.h>
-#include <wtf/HashTraits.h>
#include <wtf/OwnPtr.h>
#include <wtf/RefPtr.h>
@@ -55,22 +54,6 @@ class StructureTransitionTable {
static const bool safeToCompareToEmptyOrDeleted = true;
};
- struct HashTraits {
- typedef WTF::HashTraits<RefPtr<StringImpl> > FirstTraits;
- typedef WTF::GenericHashTraits<unsigned> SecondTraits;
- typedef std::pair<FirstTraits::TraitType, SecondTraits::TraitType > TraitType;
-
- static const bool emptyValueIsZero = FirstTraits::emptyValueIsZero && SecondTraits::emptyValueIsZero;
- static TraitType emptyValue() { return std::make_pair(FirstTraits::emptyValue(), SecondTraits::emptyValue()); }
-
- static const bool needsDestruction = FirstTraits::needsDestruction || SecondTraits::needsDestruction;
-
- static const int minimumTableSize = FirstTraits::minimumTableSize;
-
- static void constructDeletedValue(TraitType& slot) { FirstTraits::constructDeletedValue(slot.first); }
- static bool isDeletedValue(const TraitType& value) { return FirstTraits::isDeletedValue(value.first); }
- };
-
struct WeakGCMapFinalizerCallback {
static void* finalizerContextFor(Hash::Key)
{
@@ -83,7 +66,7 @@ class StructureTransitionTable {
}
};
- typedef WeakGCMap<Hash::Key, Structure, WeakGCMapFinalizerCallback, Hash, HashTraits> TransitionMap;
+ typedef WeakGCMap<Hash::Key, Structure, WeakGCMapFinalizerCallback, Hash> TransitionMap;
static Hash::Key keyForWeakGCMapFinalizer(void* context, Structure*);
diff --git a/Source/JavaScriptCore/runtime/WriteBarrier.h b/Source/JavaScriptCore/runtime/WriteBarrier.h
index 6ac52b7c7..7e9db12fb 100644
--- a/Source/JavaScriptCore/runtime/WriteBarrier.h
+++ b/Source/JavaScriptCore/runtime/WriteBarrier.h
@@ -26,6 +26,7 @@
#ifndef WriteBarrier_h
#define WriteBarrier_h
+#include "GCAssertions.h"
#include "HandleTypes.h"
#include "Heap.h"
#include "SamplingCounter.h"
@@ -73,6 +74,13 @@ public:
validateCell(value);
setEarlyValue(globalData, owner, value);
}
+
+ // This is meant to be used like operator=, but is called copyFrom instead, in
+ // order to kindly inform the C++ compiler that its advice is not appreciated.
+ void copyFrom(const WriteBarrierBase<T>& other)
+ {
+ m_cell = other.m_cell;
+ }
void setMayBeNull(JSGlobalData& globalData, const JSCell* owner, T* value)
{
diff --git a/Source/JavaScriptCore/shell/CMakeLists.txt b/Source/JavaScriptCore/shell/CMakeLists.txt
index 44f02b93c..b9d64dbae 100644
--- a/Source/JavaScriptCore/shell/CMakeLists.txt
+++ b/Source/JavaScriptCore/shell/CMakeLists.txt
@@ -1,6 +1,3 @@
-SET(JSC_HEADERS
-)
-
SET(JSC_SOURCES
../jsc.cpp
)
@@ -9,11 +6,11 @@ SET(JSC_LIBRARIES
${JavaScriptCore_LIBRARY_NAME}
)
-INCLUDE_IF_EXISTS(${JAVASCRIPTCORE_DIR}/shell/Platform${PORT}.cmake)
+WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS()
WEBKIT_WRAP_SOURCELIST(${JSC_SOURCES})
INCLUDE_DIRECTORIES(./ ${JavaScriptCore_INCLUDE_DIRECTORIES})
-ADD_EXECUTABLE(${JSC_EXECUTABLE_NAME} ${JSC_HEADERS} ${JSC_SOURCES})
+ADD_EXECUTABLE(${JSC_EXECUTABLE_NAME} ${JSC_SOURCES})
TARGET_LINK_LIBRARIES(${JSC_EXECUTABLE_NAME} ${JSC_LIBRARIES})
IF (JSC_LINK_FLAGS)
diff --git a/Source/JavaScriptCore/tests/mozilla/ecma/String/15.5.4.11-2.js b/Source/JavaScriptCore/tests/mozilla/ecma/String/15.5.4.11-2.js
index b75f3e8d6..b224ffa83 100644
--- a/Source/JavaScriptCore/tests/mozilla/ecma/String/15.5.4.11-2.js
+++ b/Source/JavaScriptCore/tests/mozilla/ecma/String/15.5.4.11-2.js
@@ -84,7 +84,7 @@ function getTestCases() {
// Georgian
// Range: U+10A0 to U+10FF
for ( var i = 0x10A0; i <= 0x10FF; i++ ) {
- var U = new Array(new Unicode( i, 4 ), new Unicode( i, 5 ));
+ var U = new Array(new Unicode( i, 4 ), new Unicode( i, 5 ), new Unicode( i, 6.1));
/*
array[item++] = new TestCase( SECTION,
@@ -92,7 +92,7 @@ function getTestCases() {
String.fromCharCode(U.lower),
eval("var s = new String( String.fromCharCode("+i+") ); s.toLowerCase()") );
*/
- array[item++] = new TestCaseDualExpected( SECTION,
+ array[item++] = new TestCaseMultiExpected( SECTION,
"var s = new String( String.fromCharCode("+i+") ); s.toLowerCase().charCodeAt(0)",
U,
eval("var s = new String( String.fromCharCode(i) ); s.toLowerCase().charCodeAt(0)") );
@@ -106,7 +106,7 @@ function getTestCases() {
*
*/
-function TestCaseDualExpected( n, d, e, a ) {
+function TestCaseMultiExpected( n, d, e, a ) {
this.name = n;
this.description = d;
this.expect = e;
@@ -115,26 +115,27 @@ function TestCaseDualExpected( n, d, e, a ) {
this.reason = "";
this.bugnumber = BUGNUMBER;
- this.passed = getTestCaseResultDualExpected( this.expect, this.actual );
+ this.passed = getTestCaseResultMultiExpected( this.expect, this.actual );
if ( DEBUG ) {
writeLineToLog( "added " + this.description );
}
}
// Added so that either Unicode 4.0 or 5.0 results will be considered correct.
-function writeTestCaseResultDualExpected( expect, actual, string ) {
- var passed = getTestCaseResultDualExpected( expect, actual );
+function writeTestCaseResultMultiExpected( expect, actual, string ) {
+ var passed = getTestCaseResultMultiExpected( expect, actual );
writeFormattedResult( expect[1].lower, actual, string, passed );
return passed;
}
/*
- * Added so that either Unicode 4.0 or 5.0 results will be considered correct.
+ * Added so that either Unicode 4.0, 5.0 or 6.1 results will be considered correct.
* Compare expected result to the actual result and figure out whether
* the test case passed.
*/
-function getTestCaseResultDualExpected( expect, actual ) {
+function getTestCaseResultMultiExpected( expect, actual ) {
expectedU4 = expect[0].lower;
expectedU5 = expect[1].lower;
+ expectedU6_1 = expect[2].lower;
// because ( NaN == NaN ) always returns false, need to do
// a special compare to see if we got the right result.
if ( actual != actual ) {
@@ -159,22 +160,30 @@ function getTestCaseResultDualExpected( expect, actual ) {
expectedU5 = "NaN number";
}
}
+ if ( expectedU6_1 != expectedU6_1 ) {
+ if ( typeof expectedU6_1 == "object" ) {
+ expectedU6_1 = "NaN object";
+ } else {
+ expectedU6_1 = "NaN number";
+ }
+ }
- var passed = ( expectedU4 == actual || expectedU5 == actual ) ? true : false;
+ var passed = ( expectedU4 == actual || expectedU5 == actual || expectedU6_1 == actual ) ? true : false;
// if both objects are numbers
// need to replace w/ IEEE standard for rounding
if ( !passed &&
typeof(actual) == "number" &&
(typeof(expectedU4) == "number" ||
- typeof(expectedU5) == "number")) {
- if (( Math.abs(actual-expectedU4) < 0.0000001 ) || ( Math.abs(actual-expectedU5) < 0.0000001 )) {
+ typeof(expectedU5) == "number" ||
+ typeof(expectedU6_1) == "number")) {
+ if (( Math.abs(actual-expectedU4) < 0.0000001 ) || ( Math.abs(actual-expectedU5) < 0.0000001 ) || ( Math.abs(actual-expectedU6_1) < 0.0000001 )) {
passed = true;
}
}
// verify type is the same
- if ( typeof(expectedU4) != typeof(actual) && typeof(expectedU5) != typeof(actual) ) {
+ if ( typeof(expectedU4) != typeof(actual) && typeof(expectedU5) != typeof(actual) && typeof(expectedU6_1) != typeof(actual) ) {
passed = false;
}
@@ -183,7 +192,7 @@ function getTestCaseResultDualExpected( expect, actual ) {
function test() {
for ( tc=0; tc < testcases.length; tc++ ) {
- testcases[tc].passed = writeTestCaseResultDualExpected(
+ testcases[tc].passed = writeTestCaseResultMultiExpected(
testcases[tc].expect,
testcases[tc].actual,
testcases[tc].description +" = "+ testcases[tc].actual );
@@ -438,7 +447,12 @@ function GetUnicodeValues( c, version ) {
// Georgian
// Range: U+10A0 to U+10F0
- if ( version == 5 ) {
+ if ( version >= 5 ) {
+ if ( version >= 6.1 && ( c == 0x10c7 || c == 0x10cd ) ) {
+ u[0] = c;
+ u[1] = c + 7264; //48;
+ return u;
+ }
if ( c >= 0x10A0 && c <= 0x10C5 ) {
u[0] = c;
u[1] = c + 7264; //48;
@@ -630,4 +644,4 @@ function DecimalToHexString( n ) {
}
return h;
-} \ No newline at end of file
+}
diff --git a/Source/JavaScriptCore/tools/CodeProfile.cpp b/Source/JavaScriptCore/tools/CodeProfile.cpp
index 7794f58d3..349353eb2 100644
--- a/Source/JavaScriptCore/tools/CodeProfile.cpp
+++ b/Source/JavaScriptCore/tools/CodeProfile.cpp
@@ -123,10 +123,14 @@ void CodeProfile::sample(void* pc, void** framePointer)
if (type != EngineFrame)
return;
- // Walk up the stack.
#if PLATFORM(MAC) && CPU(X86_64)
+ // Walk up the stack.
pc = framePointer[1];
framePointer = reinterpret_cast<void**>(*framePointer);
+#elif OS(LINUX) && CPU(X86)
+ // Don't unwind the stack as some dependent third party libraries
+ // may be compiled with -fomit-frame-pointer.
+ framePointer = 0;
#else
// This platform is not yet supported!
ASSERT_NOT_REACHED();
@@ -139,7 +143,7 @@ void CodeProfile::sample(void* pc, void** framePointer)
void CodeProfile::report()
{
- fprintf(stdout, "<CodeProfiling %s:%d>\n", m_file.data(), m_lineNo);
+ dataLog("<CodeProfiling %s:%d>\n", m_file.data(), m_lineNo);
// How many frames of C-code to print - 0, if not verbose, 1 if verbose, up to 1024 if very verbose.
unsigned recursionLimit = CodeProfiling::beVeryVerbose() ? 1024 : CodeProfiling::beVerbose();
@@ -176,13 +180,13 @@ void CodeProfile::report()
}
// Output the profile tree.
- fprintf(stdout, "Total samples: %lld\n", static_cast<long long>(profile.childCount()));
+ dataLog("Total samples: %lld\n", static_cast<long long>(profile.childCount()));
profile.dump();
for (size_t i = 0 ; i < m_children.size(); ++i)
m_children[i]->report();
- fprintf(stdout, "</CodeProfiling %s:%d>\n", m_file.data(), m_lineNo);
+ dataLog("</CodeProfiling %s:%d>\n", m_file.data(), m_lineNo);
}
}
diff --git a/Source/JavaScriptCore/tools/CodeProfiling.cpp b/Source/JavaScriptCore/tools/CodeProfiling.cpp
index d927a49f6..f11603854 100644
--- a/Source/JavaScriptCore/tools/CodeProfiling.cpp
+++ b/Source/JavaScriptCore/tools/CodeProfiling.cpp
@@ -33,6 +33,10 @@
#include <signal.h>
#endif
+#if OS(LINUX)
+#include <sys/time.h>
+#endif
+
namespace JSC {
volatile CodeProfile* CodeProfiling::s_profileStack = 0;
@@ -44,7 +48,7 @@ WTF::MetaAllocatorTracker* CodeProfiling::s_tracker = 0;
#pragma clang diagnostic ignored "-Wmissing-noreturn"
#endif
-#if PLATFORM(MAC) && CPU(X86_64)
+#if (PLATFORM(MAC) && CPU(X86_64)) || (OS(LINUX) && CPU(X86))
// Helper function to start & stop the timer.
// Presently we're using the wall-clock timer, since this seems to give the best results.
static void setProfileTimer(unsigned usec)
@@ -69,6 +73,13 @@ static void profilingTimer(int, siginfo_t*, void* uap)
CodeProfiling::sample(reinterpret_cast<void*>(context->__ss.__rip),
reinterpret_cast<void**>(context->__ss.__rbp));
}
+#elif OS(LINUX) && CPU(X86)
+static void profilingTimer(int, siginfo_t*, void* uap)
+{
+ mcontext_t context = static_cast<ucontext_t*>(uap)->uc_mcontext;
+ CodeProfiling::sample(reinterpret_cast<void*>(context.gregs[REG_EIP]),
+ reinterpret_cast<void**>(context.gregs[REG_EBP]));
+}
#endif
// Callback triggered when the timer is fired.
@@ -132,10 +143,10 @@ void CodeProfiling::begin(const SourceCode& source)
if (alreadyProfiling)
return;
-#if PLATFORM(MAC) && CPU(X86_64)
+#if (PLATFORM(MAC) && CPU(X86_64)) || (OS(LINUX) && CPU(X86))
// Regsiter a signal handler & itimer.
struct sigaction action;
- action.sa_sigaction = reinterpret_cast<void (*)(int, struct __siginfo *, void *)>(profilingTimer);
+ action.sa_sigaction = reinterpret_cast<void (*)(int, siginfo_t *, void *)>(profilingTimer);
sigfillset(&action.sa_mask);
action.sa_flags = SA_SIGINFO;
sigaction(SIGALRM, &action, 0);
@@ -156,7 +167,7 @@ void CodeProfiling::end()
if (s_profileStack)
return;
-#if PLATFORM(MAC) && CPU(X86_64)
+#if (PLATFORM(MAC) && CPU(X86_64)) || (OS(LINUX) && CPU(X86))
// Stop profiling
setProfileTimer(0);
#endif
diff --git a/Source/JavaScriptCore/tools/ProfileTreeNode.h b/Source/JavaScriptCore/tools/ProfileTreeNode.h
index 6c5fdc185..60d59928a 100644
--- a/Source/JavaScriptCore/tools/ProfileTreeNode.h
+++ b/Source/JavaScriptCore/tools/ProfileTreeNode.h
@@ -95,8 +95,8 @@ private:
// Print the number of samples, the name of this node, and the number of samples that are stack-top
// in this node (samples directly within this node, excluding samples in children.
for (unsigned i = 0; i < indent; ++i)
- fprintf(stdout, " ");
- fprintf(stdout, "% 8lld: %s (%lld stack top)\n",
+ dataLog(" ");
+ dataLog("% 8lld: %s (%lld stack top)\n",
static_cast<long long>(entry->second.count()),
entry->first.utf8().data(),
static_cast<long long>(entry->second.count() - entry->second.childCount()));
diff --git a/Source/JavaScriptCore/wtf/Assertions.h b/Source/JavaScriptCore/wtf/Assertions.h
index 42dc0bd35..e7358dccb 100644
--- a/Source/JavaScriptCore/wtf/Assertions.h
+++ b/Source/JavaScriptCore/wtf/Assertions.h
@@ -322,8 +322,12 @@ while (0)
/* COMPILE_ASSERT */
#ifndef COMPILE_ASSERT
+#if COMPILER_SUPPORTS(C_STATIC_ASSERT)
+#define COMPILE_ASSERT(exp, name) _Static_assert((exp), #name)
+#else
#define COMPILE_ASSERT(exp, name) typedef int dummy##name [(exp) ? 1 : -1]
#endif
+#endif
/* FATAL */
@@ -370,29 +374,4 @@ while (0)
#define LOG_VERBOSE(channel, ...) WTFLogVerbose(__FILE__, __LINE__, WTF_PRETTY_FUNCTION, &JOIN_LOG_CHANNEL_WITH_PREFIX(LOG_CHANNEL_PREFIX, channel), __VA_ARGS__)
#endif
-#if ENABLE(GC_VALIDATION)
-#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { \
- if (!(cell))\
- CRASH();\
- if (cell->unvalidatedStructure()->unvalidatedStructure() != cell->unvalidatedStructure()->unvalidatedStructure()->unvalidatedStructure())\
- CRASH();\
-} while (0)
-
-#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do {\
- ASSERT_GC_OBJECT_LOOKS_VALID(object); \
- if (!object->inherits(classInfo)) \
- CRASH();\
-} while (0)
-
-#else
-#define ASSERT_GC_OBJECT_LOOKS_VALID(cell) do { (void)cell; } while (0)
-#define ASSERT_GC_OBJECT_INHERITS(object, classInfo) do { (void)object; (void)classInfo; } while (0)
-#endif
-
-#if COMPILER(CLANG)
-#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass) COMPILE_ASSERT(__has_trivial_destructor(klass), klass##_has_trivial_destructor_check)
-#else
-#define ASSERT_HAS_TRIVIAL_DESTRUCTOR(klass)
-#endif
-
#endif /* WTF_Assertions_h */
diff --git a/Source/JavaScriptCore/wtf/CMakeLists.txt b/Source/JavaScriptCore/wtf/CMakeLists.txt
index c22ae5185..63fc59c73 100644
--- a/Source/JavaScriptCore/wtf/CMakeLists.txt
+++ b/Source/JavaScriptCore/wtf/CMakeLists.txt
@@ -3,8 +3,6 @@ SET(WTF_HEADERS
AVLTree.h
Alignment.h
AlwaysInline.h
- ArrayBuffer.cpp
- ArrayBufferView.cpp
Assertions.h
Atomics.h
BitVector.h
@@ -17,6 +15,7 @@ SET(WTF_HEADERS
CryptographicallyRandomNumber.h
CurrentTime.h
DateMath.h
+ DataLog.h
DecimalNumber.h
Decoder.h
Deque.h
@@ -45,7 +44,6 @@ SET(WTF_HEADERS
MallocZoneSupport.h
MathExtras.h
MessageQueue.h
- MetaAllocator.cpp
MetaAllocator.h
MetaAllocatorHandle.h
NonCopyingSort.h
@@ -134,18 +132,22 @@ SET(WTF_HEADERS
)
SET(WTF_SOURCES
+ ArrayBuffer.cpp
+ ArrayBufferView.cpp
Assertions.cpp
BitVector.cpp
ByteArray.cpp
CryptographicallyRandomNumber.cpp
CurrentTime.cpp
DateMath.cpp
+ DataLog.cpp
DecimalNumber.cpp
DynamicAnnotations.cpp
FastMalloc.cpp
HashTable.cpp
- MainThread.cpp
MD5.cpp
+ MainThread.cpp
+ MetaAllocator.cpp
OSRandomSource.cpp
NumberOfCores.cpp
PageAllocationAligned.cpp
@@ -191,9 +193,6 @@ SET(WTF_INCLUDE_DIRECTORIES
"${THIRDPARTY_DIR}"
)
-SET(WTF_LIBRARIES
-)
-
IF (ENABLE_FAST_MALLOC)
LIST(APPEND WTF_SOURCES
TCSystemAlloc.cpp
@@ -202,7 +201,7 @@ ELSE ()
ADD_DEFINITIONS(-DUSE_SYSTEM_MALLOC=1)
ENDIF()
-INCLUDE_IF_EXISTS(${JAVASCRIPTCORE_DIR}/wtf/Platform${PORT}.cmake)
+WEBKIT_INCLUDE_CONFIG_FILES_IF_EXISTS()
WEBKIT_WRAP_SOURCELIST(${WTF_SOURCES})
INCLUDE_DIRECTORIES(${WTF_INCLUDE_DIRECTORIES})
diff --git a/Source/JavaScriptCore/wtf/Compiler.h b/Source/JavaScriptCore/wtf/Compiler.h
index 96ad6e40e..b8a019299 100644
--- a/Source/JavaScriptCore/wtf/Compiler.h
+++ b/Source/JavaScriptCore/wtf/Compiler.h
@@ -48,6 +48,9 @@
#define WTF_COMPILER_SUPPORTS_CXX_DELETED_FUNCTIONS __has_feature(cxx_deleted_functions)
#define WTF_COMPILER_SUPPORTS_CXX_NULLPTR __has_feature(cxx_nullptr)
#define WTF_COMPILER_SUPPORTS_BLOCKS __has_feature(blocks)
+#define WTF_COMPILER_SUPPORTS_C_STATIC_ASSERT __has_extension(c_static_assert)
+
+#define WTF_COMPILER_SUPPORTS_HAS_TRIVIAL_DESTRUCTOR __has_extension(has_trivial_destructor)
#endif
diff --git a/Source/JavaScriptCore/wtf/DataLog.cpp b/Source/JavaScriptCore/wtf/DataLog.cpp
new file mode 100644
index 000000000..5a290e45a
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/DataLog.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "DataLog.h"
+#include <stdarg.h>
+#include <wtf/Threading.h>
+
+#define DATA_LOG_TO_FILE 0
+
+// Uncomment to force logging to the given file regardless of what the environment variable says.
+// #define DATA_LOG_FILENAME "/tmp/WTFLog.txt"
+
+namespace WTF {
+
+#if DATA_LOG_TO_FILE
+static FILE* file;
+
+static void initializeLogFileOnce()
+{
+#ifdef DATA_LOG_FILENAME
+ const char* filename = DATA_LOG_FILENAME
+#else
+ const char* filename = getenv("WTF_DATA_LOG_FILENAME");
+#endif
+ if (filename) {
+ file = fopen(filename, "w");
+ if (!file)
+ fprintf(stderr, "Warning: Could not open log file %s for writing.\n", filename);
+ }
+ if (!file)
+ file = stderr;
+
+ setvbuf(file, 0, _IONBF, 0); // Prefer unbuffered output, so that we get a full log upon crash or deadlock.
+}
+
+#if OS(DARWIN)
+static pthread_once_t initializeLogFileOnceKey = PTHREAD_ONCE_INIT;
+#endif
+
+static void initializeLogFile()
+{
+#if OS(DARWIN)
+ pthread_once(&initializeLogFileOnceKey, initializeLogFileOnce);
+#else
+ if (!file)
+ initializeLogFileOnce();
+#endif
+}
+
+FILE* dataFile()
+{
+ initializeLogFile();
+ return file;
+}
+#else // DATA_LOG_TO_FILE
+FILE* dataFile()
+{
+ return stderr;
+}
+#endif // DATA_LOG_TO_FILE
+
+void dataLogV(const char* format, va_list argList)
+{
+ vfprintf(dataFile(), format, argList);
+}
+
+void dataLog(const char* format, ...)
+{
+ va_list argList;
+ va_start(argList, format);
+ dataLogV(format, argList);
+ va_end(argList);
+}
+
+} // namespace WTF
+
diff --git a/Source/JavaScriptCore/wtf/DataLog.h b/Source/JavaScriptCore/wtf/DataLog.h
new file mode 100644
index 000000000..bcbebb9e2
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/DataLog.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DataLog_h
+#define DataLog_h
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <wtf/Platform.h>
+#include <wtf/StdLibExtras.h>
+
+namespace WTF {
+
+FILE* dataFile();
+
+void dataLogV(const char* format, va_list) WTF_ATTRIBUTE_PRINTF(1, 0);
+void dataLog(const char* format, ...) WTF_ATTRIBUTE_PRINTF(1, 2);
+
+} // namespace WTF
+
+using WTF::dataLog;
+
+#endif // DataLog_h
+
diff --git a/Source/JavaScriptCore/wtf/DoublyLinkedList.h b/Source/JavaScriptCore/wtf/DoublyLinkedList.h
index 361d71d7b..cd067ef0a 100644
--- a/Source/JavaScriptCore/wtf/DoublyLinkedList.h
+++ b/Source/JavaScriptCore/wtf/DoublyLinkedList.h
@@ -77,6 +77,8 @@ public:
T* head() const;
T* removeHead();
+ T* tail() const;
+
void push(T*);
void append(T*);
void remove(T*);
@@ -116,6 +118,11 @@ template<typename T> inline T* DoublyLinkedList<T>::head() const
return m_head;
}
+template<typename T> inline T* DoublyLinkedList<T>::tail() const
+{
+ return m_tail;
+}
+
template<typename T> inline void DoublyLinkedList<T>::push(T* node)
{
if (!m_head) {
diff --git a/Source/JavaScriptCore/wtf/HashTable.cpp b/Source/JavaScriptCore/wtf/HashTable.cpp
index 71d3f86ce..94bba9b32 100644
--- a/Source/JavaScriptCore/wtf/HashTable.cpp
+++ b/Source/JavaScriptCore/wtf/HashTable.cpp
@@ -44,15 +44,15 @@ HashTableStats::~HashTableStats()
{
// Don't lock hashTableStatsMutex here because it can cause deadlocks at shutdown
// if any thread was killed while holding the mutex.
- printf("\nWTF::HashTable statistics\n\n");
- printf("%d accesses\n", numAccesses);
- printf("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
- printf("longest collision chain: %d\n", maxCollisions);
+ dataLog("\nWTF::HashTable statistics\n\n");
+ dataLog("%d accesses\n", numAccesses);
+ dataLog("%d total collisions, average %.2f probes per access\n", numCollisions, 1.0 * (numAccesses + numCollisions) / numAccesses);
+ dataLog("longest collision chain: %d\n", maxCollisions);
for (int i = 1; i <= maxCollisions; i++) {
- printf(" %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
+ dataLog(" %d lookups with exactly %d collisions (%.2f%% , %.2f%% with this many or more)\n", collisionGraph[i], i, 100.0 * (collisionGraph[i] - collisionGraph[i+1]) / numAccesses, 100.0 * collisionGraph[i] / numAccesses);
}
- printf("%d rehashes\n", numRehashes);
- printf("%d reinserts\n", numReinserts);
+ dataLog("%d rehashes\n", numRehashes);
+ dataLog("%d reinserts\n", numReinserts);
}
void HashTableStats::recordCollisionAtCount(int count)
diff --git a/Source/JavaScriptCore/wtf/HashTraits.h b/Source/JavaScriptCore/wtf/HashTraits.h
index 12e6b0699..e3783ed42 100644
--- a/Source/JavaScriptCore/wtf/HashTraits.h
+++ b/Source/JavaScriptCore/wtf/HashTraits.h
@@ -57,6 +57,7 @@ namespace WTF {
template<typename T> struct GenericHashTraits : GenericHashTraitsBase<IsInteger<T>::value, T> {
typedef T TraitType;
+ typedef T EmptyValueType;
static T emptyValue() { return T(); }
@@ -112,7 +113,9 @@ namespace WTF {
};
template<typename P> struct HashTraits<OwnPtr<P> > : SimpleClassHashTraits<OwnPtr<P> > {
- static std::nullptr_t emptyValue() { return nullptr; }
+ typedef std::nullptr_t EmptyValueType;
+
+ static EmptyValueType emptyValue() { return nullptr; }
typedef PassOwnPtr<P> PassInType;
static void store(PassOwnPtr<P> value, OwnPtr<P>& storage) { storage = value; }
@@ -144,9 +147,10 @@ namespace WTF {
typedef FirstTraitsArg FirstTraits;
typedef SecondTraitsArg SecondTraits;
typedef pair<typename FirstTraits::TraitType, typename SecondTraits::TraitType> TraitType;
+ typedef pair<typename FirstTraits::EmptyValueType, typename SecondTraits::EmptyValueType> EmptyValueType;
static const bool emptyValueIsZero = FirstTraits::emptyValueIsZero && SecondTraits::emptyValueIsZero;
- static TraitType emptyValue() { return make_pair(FirstTraits::emptyValue(), SecondTraits::emptyValue()); }
+ static EmptyValueType emptyValue() { return make_pair(FirstTraits::emptyValue(), SecondTraits::emptyValue()); }
static const bool needsDestruction = FirstTraits::needsDestruction || SecondTraits::needsDestruction;
diff --git a/Source/JavaScriptCore/wtf/InlineASM.h b/Source/JavaScriptCore/wtf/InlineASM.h
index 379ebd364..1c99e65a1 100644
--- a/Source/JavaScriptCore/wtf/InlineASM.h
+++ b/Source/JavaScriptCore/wtf/InlineASM.h
@@ -70,4 +70,11 @@
#define HIDE_SYMBOL(name)
#endif
+// FIXME: figure out how this works on all the platforms. I know that
+// on Linux, the preferred form is ".Lstuff" as opposed to "Lstuff".
+// Don't know about any of the others.
+#if PLATFORM(MAC)
+#define LOCAL_LABEL_STRING(name) "L" #name
+#endif
+
#endif // InlineASM_h
diff --git a/Source/JavaScriptCore/wtf/MainThread.cpp b/Source/JavaScriptCore/wtf/MainThread.cpp
index 915126589..f8686aa31 100644
--- a/Source/JavaScriptCore/wtf/MainThread.cpp
+++ b/Source/JavaScriptCore/wtf/MainThread.cpp
@@ -34,6 +34,7 @@
#include "Functional.h"
#include "StdLibExtras.h"
#include "Threading.h"
+#include <wtf/ThreadSpecific.h>
#if PLATFORM(CHROMIUM)
#error Chromium uses a different main thread implementation
@@ -101,6 +102,7 @@ void initializeMainThread()
mainThreadFunctionQueueMutex();
initializeMainThreadPlatform();
+ initializeGCThreads();
}
#else
@@ -249,4 +251,42 @@ bool isMainThread()
}
#endif
+#if ENABLE(PARALLEL_GC)
+static ThreadSpecific<bool>* isGCThread;
+#endif
+
+void initializeGCThreads()
+{
+#if ENABLE(PARALLEL_GC)
+ isGCThread = new ThreadSpecific<bool>();
+#endif
+}
+
+#if ENABLE(PARALLEL_GC)
+void registerGCThread()
+{
+ if (!isGCThread) {
+ // This happens if we're running in a process that doesn't care about
+ // MainThread.
+ return;
+ }
+
+ **isGCThread = true;
+}
+
+bool isMainThreadOrGCThread()
+{
+ if (isGCThread->isSet() && **isGCThread)
+ return true;
+
+ return isMainThread();
+}
+#elif PLATFORM(MAC)
+// This is necessary because JavaScriptCore.exp doesn't support preprocessor macros.
+bool isMainThreadOrGCThread()
+{
+ return isMainThread();
+}
+#endif
+
} // namespace WTF
diff --git a/Source/JavaScriptCore/wtf/MainThread.h b/Source/JavaScriptCore/wtf/MainThread.h
index ff76a5fd3..4839c8117 100644
--- a/Source/JavaScriptCore/wtf/MainThread.h
+++ b/Source/JavaScriptCore/wtf/MainThread.h
@@ -52,11 +52,14 @@ WTF_EXPORT_PRIVATE void callOnMainThread(const Function<void ()>&);
WTF_EXPORT_PRIVATE void setMainThreadCallbacksPaused(bool paused);
WTF_EXPORT_PRIVATE bool isMainThread();
+
+void initializeGCThreads();
+
#if ENABLE(PARALLEL_GC)
void registerGCThread();
WTF_EXPORT_PRIVATE bool isMainThreadOrGCThread();
#elif PLATFORM(MAC)
-bool isMainThreadOrGCThread();
+WTF_EXPORT_PRIVATE bool isMainThreadOrGCThread();
#else
inline bool isMainThreadOrGCThread() { return isMainThread(); }
#endif
diff --git a/Source/JavaScriptCore/wtf/MetaAllocator.cpp b/Source/JavaScriptCore/wtf/MetaAllocator.cpp
index d153faab7..ac6cba8d1 100644
--- a/Source/JavaScriptCore/wtf/MetaAllocator.cpp
+++ b/Source/JavaScriptCore/wtf/MetaAllocator.cpp
@@ -428,7 +428,7 @@ void MetaAllocator::freeFreeSpaceNode(FreeSpaceNode* node)
#if ENABLE(META_ALLOCATOR_PROFILE)
void MetaAllocator::dumpProfile()
{
- printf("num allocations = %u, num frees = %u\n", m_numAllocations, m_numFrees);
+ dataLog("num allocations = %u, num frees = %u\n", m_numAllocations, m_numFrees);
}
#endif
diff --git a/Source/JavaScriptCore/wtf/NullPtr.cpp b/Source/JavaScriptCore/wtf/NullPtr.cpp
index fb75cf6d5..d6b0429b1 100644
--- a/Source/JavaScriptCore/wtf/NullPtr.cpp
+++ b/Source/JavaScriptCore/wtf/NullPtr.cpp
@@ -27,7 +27,7 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "config.h"
#include "NullPtr.h"
-#if !COMPILER_SUPPORTS(CXX_NULLPTR)
+#if !(COMPILER_SUPPORTS(CXX_NULLPTR) || defined(_LIBCPP_VERSION))
std::nullptr_t nullptr;
diff --git a/Source/JavaScriptCore/wtf/NullPtr.h b/Source/JavaScriptCore/wtf/NullPtr.h
index b65f8fab5..2d0919ca6 100644
--- a/Source/JavaScriptCore/wtf/NullPtr.h
+++ b/Source/JavaScriptCore/wtf/NullPtr.h
@@ -31,7 +31,9 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// nullptr_t type and nullptr object. They are defined in the same namespaces they
// would be in compiler and library that had the support.
-#if COMPILER_SUPPORTS(CXX_NULLPTR)
+#include <ciso646>
+
+#if COMPILER_SUPPORTS(CXX_NULLPTR) || defined(_LIBCPP_VERSION)
#include <cstddef>
diff --git a/Source/JavaScriptCore/wtf/OSAllocatorPosix.cpp b/Source/JavaScriptCore/wtf/OSAllocatorPosix.cpp
index 56c6089ff..5dbddc83e 100644
--- a/Source/JavaScriptCore/wtf/OSAllocatorPosix.cpp
+++ b/Source/JavaScriptCore/wtf/OSAllocatorPosix.cpp
@@ -102,7 +102,7 @@ void* OSAllocator::reserveAndCommit(size_t bytes, Usage usage, bool writable, bo
result = mmap(result, bytes, protection, flags, fd, 0);
if (result == MAP_FAILED) {
- #if ENABLE(INTERPRETER)
+ #if ENABLE(CLASSIC_INTERPRETER)
if (executable)
result = 0;
else
diff --git a/Source/JavaScriptCore/wtf/ParallelJobsGeneric.cpp b/Source/JavaScriptCore/wtf/ParallelJobsGeneric.cpp
index fd5b1f847..2cc0bc643 100644
--- a/Source/JavaScriptCore/wtf/ParallelJobsGeneric.cpp
+++ b/Source/JavaScriptCore/wtf/ParallelJobsGeneric.cpp
@@ -121,7 +121,7 @@ void ParallelEnvironment::ThreadPrivate::waitForFinish()
m_threadCondition.wait(m_mutex);
}
-void* ParallelEnvironment::ThreadPrivate::workerThread(void* threadData)
+void ParallelEnvironment::ThreadPrivate::workerThread(void* threadData)
{
ThreadPrivate* sharedThread = reinterpret_cast<ThreadPrivate*>(threadData);
MutexLocker lock(sharedThread->m_mutex);
@@ -136,7 +136,6 @@ void* ParallelEnvironment::ThreadPrivate::workerThread(void* threadData)
sharedThread->m_threadCondition.wait(sharedThread->m_mutex);
}
- return 0;
}
} // namespace WTF
diff --git a/Source/JavaScriptCore/wtf/ParallelJobsGeneric.h b/Source/JavaScriptCore/wtf/ParallelJobsGeneric.h
index 1c7d63c16..6de71067f 100644
--- a/Source/JavaScriptCore/wtf/ParallelJobsGeneric.h
+++ b/Source/JavaScriptCore/wtf/ParallelJobsGeneric.h
@@ -69,7 +69,7 @@ public:
return adoptRef(new ThreadPrivate());
}
- static void* workerThread(void*);
+ static void workerThread(void*);
private:
ThreadIdentifier m_threadID;
diff --git a/Source/JavaScriptCore/wtf/Platform.h b/Source/JavaScriptCore/wtf/Platform.h
index e6d43dac3..76b11fe88 100644
--- a/Source/JavaScriptCore/wtf/Platform.h
+++ b/Source/JavaScriptCore/wtf/Platform.h
@@ -351,6 +351,11 @@
#define WTF_OS_FREEBSD 1
#endif
+/* OS(HURD) - GNU/Hurd */
+#ifdef __GNU__
+#define WTF_OS_HURD 1
+#endif
+
/* OS(LINUX) - Linux */
#ifdef __linux__
#define WTF_OS_LINUX 1
@@ -394,6 +399,7 @@
|| OS(ANDROID) \
|| OS(DARWIN) \
|| OS(FREEBSD) \
+ || OS(HURD) \
|| OS(LINUX) \
|| OS(NETBSD) \
|| OS(OPENBSD) \
@@ -459,7 +465,7 @@
#define WTF_USE_CA 1
#endif
-/* USE(SKIA) for Win/Linux, CG for Mac, unless enabled */
+/* USE(SKIA) for Win/Linux/Mac/Android */
#if PLATFORM(CHROMIUM)
#if OS(DARWIN)
#if USE(SKIA_ON_MAC_CHROMIUM)
@@ -470,6 +476,9 @@
#define WTF_USE_ATSUI 1
#define WTF_USE_CORE_TEXT 1
#define WTF_USE_ICCJPEG 1
+#elif OS(ANDROID)
+#define WTF_USE_SKIA 1
+#define WTF_USE_GLES2_RENDERING 0
#else
#define WTF_USE_SKIA 1
#define WTF_USE_CHROMIUM_NET 1
@@ -594,11 +603,11 @@
#define WTF_USE_PTHREADS 1
#if PLATFORM(IOS_SIMULATOR)
- #define ENABLE_INTERPRETER 1
+ #define ENABLE_CLASSIC_INTERPRETER 1
#define ENABLE_JIT 0
#define ENABLE_YARR_JIT 0
#else
- #define ENABLE_INTERPRETER 1
+ #define ENABLE_CLASSIC_INTERPRETER 1
#define ENABLE_JIT 1
#define ENABLE_YARR_JIT 1
#endif
@@ -633,6 +642,7 @@
#define ENABLE_JIT 1
#endif
#define ENABLE_GLOBAL_FASTMALLOC_NEW 0
+#define ENABLE_LLINT 0
#if OS(DARWIN)
#define WTF_USE_CF 1
#define WTF_USE_CORE_TEXT 1
@@ -919,6 +929,12 @@
#define ENABLE_JIT 1
#endif
+/* On some of the platforms where we have a JIT, we want to also have the
+ low-level interpreter. */
+#if !defined(ENABLE_LLINT) && ENABLE(JIT) && OS(DARWIN) && (CPU(X86) || CPU(ARM_THUMB2)) && USE(JSVALUE32_64)
+#define ENABLE_LLINT 1
+#endif
+
#if !defined(ENABLE_DFG_JIT) && ENABLE(JIT)
/* Enable the DFG JIT on X86 and X86_64. Only tested on Mac and GNU/Linux. */
#if (CPU(X86) || CPU(X86_64)) && (PLATFORM(MAC) || OS(LINUX))
@@ -953,10 +969,10 @@
#endif
/* Ensure that either the JIT or the interpreter has been enabled. */
-#if !defined(ENABLE_INTERPRETER) && !ENABLE(JIT)
-#define ENABLE_INTERPRETER 1
+#if !defined(ENABLE_CLASSIC_INTERPRETER) && !ENABLE(JIT)
+#define ENABLE_CLASSIC_INTERPRETER 1
#endif
-#if !(ENABLE(JIT) || ENABLE(INTERPRETER))
+#if !(ENABLE(JIT) || ENABLE(CLASSIC_INTERPRETER))
#error You have to have at least one execution model enabled to build JSC
#endif
@@ -989,8 +1005,8 @@
#if COMPILER(GCC) || (RVCT_VERSION_AT_LEAST(4, 0, 0, 0) && defined(__GNUC__))
#define HAVE_COMPUTED_GOTO 1
#endif
-#if HAVE(COMPUTED_GOTO) && ENABLE(INTERPRETER)
-#define ENABLE_COMPUTED_GOTO_INTERPRETER 1
+#if HAVE(COMPUTED_GOTO) && ENABLE(CLASSIC_INTERPRETER)
+#define ENABLE_COMPUTED_GOTO_CLASSIC_INTERPRETER 1
#endif
/* Regular Expression Tracing - Set to 1 to trace RegExp's in jsc. Results dumped at exit */
@@ -1123,7 +1139,7 @@
#define ENABLE_COMPARE_AND_SWAP 1
#endif
-#if !defined(ENABLE_PARALLEL_GC) && (PLATFORM(MAC) || PLATFORM(IOS)) && ENABLE(COMPARE_AND_SWAP)
+#if !defined(ENABLE_PARALLEL_GC) && (PLATFORM(MAC) || PLATFORM(IOS) || PLATFORM(QT)) && ENABLE(COMPARE_AND_SWAP)
#define ENABLE_PARALLEL_GC 1
#endif
@@ -1141,7 +1157,7 @@
#define WTF_USE_COREMEDIA 1
#endif
-#if PLATFORM(MAC) || PLATFORM(GTK) || PLATFORM(EFL) || (PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)) || PLATFORM(QT)
+#if PLATFORM(MAC) || PLATFORM(GTK) || PLATFORM(EFL) || (PLATFORM(WIN) && !OS(WINCE) && !PLATFORM(WIN_CAIRO)) || PLATFORM(QT) || PLATFORM(BLACKBERRY)
#define WTF_USE_REQUEST_ANIMATION_FRAME_TIMER 1
#endif
@@ -1174,4 +1190,8 @@
#define ENABLE_TEXT_NOTIFICATIONS_ONLY 1
#endif
+#if !defined(WTF_USE_WTFURL)
+#define WTF_USE_WTFURL 0
+#endif
+
#endif /* WTF_Platform_h */
diff --git a/Source/JavaScriptCore/wtf/PlatformEfl.cmake b/Source/JavaScriptCore/wtf/PlatformEfl.cmake
index 3887ead8c..1a13dbba3 100644
--- a/Source/JavaScriptCore/wtf/PlatformEfl.cmake
+++ b/Source/JavaScriptCore/wtf/PlatformEfl.cmake
@@ -1,6 +1,8 @@
LIST(APPEND WTF_SOURCES
efl/MainThreadEfl.cpp
efl/OwnPtrEfl.cpp
+ gobject/GOwnPtr.cpp
+ gobject/GRefPtr.cpp
OSAllocatorPosix.cpp
ThreadIdentifierDataPthreads.cpp
@@ -9,24 +11,9 @@ LIST(APPEND WTF_SOURCES
unicode/icu/CollatorICU.cpp
)
-IF (ENABLE_GLIB_SUPPORT)
- LIST(APPEND WTF_SOURCES
- gobject/GOwnPtr.cpp
- gobject/GRefPtr.cpp
- )
-
- LIST(APPEND WTF_INCLUDE_DIRECTORIES
- ${Glib_INCLUDE_DIRS}
- ${JAVASCRIPTCORE_DIR}/wtf/gobject
- )
-
- LIST(APPEND WTF_LIBRARIES
- ${Glib_LIBRARIES}
- )
-ENDIF ()
-
LIST(APPEND WTF_LIBRARIES
pthread
+ ${Glib_LIBRARIES}
${ICU_LIBRARIES}
${ICU_I18N_LIBRARIES}
${ECORE_LIBRARIES}
@@ -46,6 +33,8 @@ LIST(APPEND WTF_INCLUDE_DIRECTORIES
${ECORE_INCLUDE_DIRS}
${ECORE_EVAS_INCLUDE_DIRS}
${EVAS_INCLUDE_DIRS}
+ ${Glib_INCLUDE_DIRS}
${ICU_INCLUDE_DIRS}
+ ${JAVASCRIPTCORE_DIR}/wtf/gobject
${JAVASCRIPTCORE_DIR}/wtf/unicode/
)
diff --git a/Source/JavaScriptCore/wtf/SentinelLinkedList.h b/Source/JavaScriptCore/wtf/SentinelLinkedList.h
index ecd602452..3943aa5de 100644
--- a/Source/JavaScriptCore/wtf/SentinelLinkedList.h
+++ b/Source/JavaScriptCore/wtf/SentinelLinkedList.h
@@ -86,6 +86,8 @@ public:
iterator begin();
iterator end();
+
+ bool isEmpty() { return begin() == end(); }
private:
RawNode m_headSentinel;
diff --git a/Source/JavaScriptCore/wtf/StdLibExtras.h b/Source/JavaScriptCore/wtf/StdLibExtras.h
index e4d7c8fc0..2a0a9f950 100644
--- a/Source/JavaScriptCore/wtf/StdLibExtras.h
+++ b/Source/JavaScriptCore/wtf/StdLibExtras.h
@@ -114,6 +114,11 @@ inline bool isPointerAligned(void* p)
return !((intptr_t)(p) & (sizeof(char*) - 1));
}
+inline bool is8ByteAligned(void* p)
+{
+ return !((uintptr_t)(p) & (sizeof(double) - 1));
+}
+
/*
* C++'s idea of a reinterpret_cast lacks sufficient cojones.
*/
@@ -292,6 +297,7 @@ inline void* operator new(size_t, NotNullTag, void* location)
using WTF::KB;
using WTF::isPointerAligned;
+using WTF::is8ByteAligned;
using WTF::binarySearch;
using WTF::bitwise_cast;
using WTF::safeCast;
diff --git a/Source/JavaScriptCore/wtf/ThreadFunctionInvocation.h b/Source/JavaScriptCore/wtf/ThreadFunctionInvocation.h
index f1e147268..2d8599eb9 100644
--- a/Source/JavaScriptCore/wtf/ThreadFunctionInvocation.h
+++ b/Source/JavaScriptCore/wtf/ThreadFunctionInvocation.h
@@ -31,7 +31,7 @@
namespace WTF {
-typedef void* (*ThreadFunction)(void* argument);
+typedef void (*ThreadFunction)(void* argument);
struct ThreadFunctionInvocation {
ThreadFunctionInvocation(ThreadFunction function, void* data)
diff --git a/Source/JavaScriptCore/wtf/ThreadIdentifierDataPthreads.cpp b/Source/JavaScriptCore/wtf/ThreadIdentifierDataPthreads.cpp
index b3b690f70..0badf939a 100644
--- a/Source/JavaScriptCore/wtf/ThreadIdentifierDataPthreads.cpp
+++ b/Source/JavaScriptCore/wtf/ThreadIdentifierDataPthreads.cpp
@@ -36,8 +36,8 @@
#include "Threading.h"
-#if OS(ANDROID)
-// PTHREAD_KEYS_MAX is not defined in bionic, so explicitly define it here.
+#if OS(ANDROID) || OS(HURD)
+// PTHREAD_KEYS_MAX is not defined in bionic nor in Hurd, so explicitly define it here.
#define PTHREAD_KEYS_MAX 1024
#else
#include <limits.h>
diff --git a/Source/JavaScriptCore/wtf/ThreadSpecific.h b/Source/JavaScriptCore/wtf/ThreadSpecific.h
index 242acc0d3..f20a3f3df 100644
--- a/Source/JavaScriptCore/wtf/ThreadSpecific.h
+++ b/Source/JavaScriptCore/wtf/ThreadSpecific.h
@@ -77,12 +77,11 @@ private:
// have exited). It's unlikely that any user of this call will be in that situation - and having
// a destructor defined can be confusing, given that it has such strong pre-requisites to work correctly.
~ThreadSpecific();
-
+
T* get();
void set(T*);
void static destroy(void* ptr);
-#if USE(PTHREADS) || PLATFORM(QT) || PLATFORM(GTK) || OS(WINDOWS)
struct Data {
WTF_MAKE_NONCOPYABLE(Data);
public:
@@ -94,7 +93,6 @@ private:
void (*destructor)(void*);
#endif
};
-#endif
#if USE(PTHREADS)
pthread_key_t m_key;
@@ -239,6 +237,6 @@ inline T& ThreadSpecific<T>::operator*()
return *operator T*();
}
-}
+} // namespace WTF
-#endif
+#endif // WTF_ThreadSpecific_h
diff --git a/Source/JavaScriptCore/wtf/Threading.cpp b/Source/JavaScriptCore/wtf/Threading.cpp
index d8dbbae4f..8d658e934 100644
--- a/Source/JavaScriptCore/wtf/Threading.cpp
+++ b/Source/JavaScriptCore/wtf/Threading.cpp
@@ -25,6 +25,8 @@
#include "config.h"
#include "Threading.h"
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
#include <string.h>
@@ -47,7 +49,7 @@ public:
Mutex creationMutex;
};
-static void* threadEntryPoint(void* contextData)
+static void threadEntryPoint(void* contextData)
{
NewThreadContext* context = reinterpret_cast<NewThreadContext*>(contextData);
@@ -64,7 +66,7 @@ static void* threadEntryPoint(void* contextData)
void* data = context->data;
delete context;
- return entryPoint(data);
+ entryPoint(data);
}
ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char* name)
@@ -86,14 +88,58 @@ ThreadIdentifier createThread(ThreadFunction entryPoint, void* data, const char*
#if PLATFORM(MAC) || PLATFORM(WIN)
+// For ABI compatibility with Safari on Mac / Windows: Safari uses the private
+// createThread() and waitForThreadCompletion() functions directly and we need
+// to keep the old ABI compatibility until it's been rebuilt.
+
+typedef void* (*ThreadFunctionWithReturnValue)(void* argument);
+
+WTF_EXPORT_PRIVATE ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data, const char* name);
+
+struct ThreadFunctionWithReturnValueInvocation {
+ ThreadFunctionWithReturnValueInvocation(ThreadFunctionWithReturnValue function, void* data)
+ : function(function)
+ , data(data)
+ {
+ }
+
+ ThreadFunctionWithReturnValue function;
+ void* data;
+};
+
+static void compatEntryPoint(void* param)
+{
+ // Balanced by .leakPtr() in createThread.
+ OwnPtr<ThreadFunctionWithReturnValueInvocation> invocation = adoptPtr(static_cast<ThreadFunctionWithReturnValueInvocation*>(param));
+ invocation->function(invocation->data);
+}
+
+ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data, const char* name)
+{
+ OwnPtr<ThreadFunctionWithReturnValueInvocation> invocation = adoptPtr(new ThreadFunctionWithReturnValueInvocation(entryPoint, data));
+
+ // Balanced by adoptPtr() in compatEntryPoint.
+ return createThread(compatEntryPoint, invocation.leakPtr(), name);
+}
+
+WTF_EXPORT_PRIVATE int waitForThreadCompletion(ThreadIdentifier, void**);
+
+int waitForThreadCompletion(ThreadIdentifier threadID, void**)
+{
+ return waitForThreadCompletion(threadID);
+}
+
// This function is deprecated but needs to be kept around for backward
// compatibility. Use the 3-argument version of createThread above.
-WTF_EXPORT_PRIVATE ThreadIdentifier createThread(ThreadFunction entryPoint, void* data);
+WTF_EXPORT_PRIVATE ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data);
-ThreadIdentifier createThread(ThreadFunction entryPoint, void* data)
+ThreadIdentifier createThread(ThreadFunctionWithReturnValue entryPoint, void* data)
{
- return createThread(entryPoint, data, 0);
+ OwnPtr<ThreadFunctionWithReturnValueInvocation> invocation = adoptPtr(new ThreadFunctionWithReturnValueInvocation(entryPoint, data));
+
+ // Balanced by adoptPtr() in compatEntryPoint.
+ return createThread(compatEntryPoint, invocation.leakPtr(), 0);
}
#endif
diff --git a/Source/JavaScriptCore/wtf/Threading.h b/Source/JavaScriptCore/wtf/Threading.h
index 1dee5da1c..b5d432681 100644
--- a/Source/JavaScriptCore/wtf/Threading.h
+++ b/Source/JavaScriptCore/wtf/Threading.h
@@ -78,7 +78,7 @@
namespace WTF {
typedef uint32_t ThreadIdentifier;
-typedef void* (*ThreadFunction)(void* argument);
+typedef void (*ThreadFunction)(void* argument);
// This function must be called from the main thread. It is safe to call it repeatedly.
// Darwin is an exception to this rule: it is OK to call it from any thread, the only
@@ -97,7 +97,7 @@ ThreadIdentifier createThreadInternal(ThreadFunction, void*, const char* threadN
void initializeCurrentThreadInternal(const char* threadName);
WTF_EXPORT_PRIVATE ThreadIdentifier currentThread();
-WTF_EXPORT_PRIVATE int waitForThreadCompletion(ThreadIdentifier, void**);
+WTF_EXPORT_PRIVATE int waitForThreadCompletion(ThreadIdentifier);
WTF_EXPORT_PRIVATE void detachThread(ThreadIdentifier);
WTF_EXPORT_PRIVATE void yield();
diff --git a/Source/JavaScriptCore/wtf/ThreadingPthreads.cpp b/Source/JavaScriptCore/wtf/ThreadingPthreads.cpp
index 763ec2bbb..abd350dbb 100644
--- a/Source/JavaScriptCore/wtf/ThreadingPthreads.cpp
+++ b/Source/JavaScriptCore/wtf/ThreadingPthreads.cpp
@@ -40,9 +40,12 @@
#include "HashMap.h"
#include "RandomNumberSeed.h"
#include "StdLibExtras.h"
+#include "ThreadFunctionInvocation.h"
#include "ThreadIdentifierDataPthreads.h"
#include "ThreadSpecific.h"
#include "UnusedParam.h"
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
#include <wtf/WTFThreadData.h>
#include <errno.h>
@@ -152,6 +155,15 @@ void clearPthreadHandleForIdentifier(ThreadIdentifier id)
threadMap().remove(id);
}
+static void* wtfThreadEntryPoint(void* param)
+{
+ // Balanced by .leakPtr() in createThreadInternal.
+ OwnPtr<ThreadFunctionInvocation> invocation = adoptPtr(static_cast<ThreadFunctionInvocation*>(param));
+ invocation->function(invocation->data);
+
+ return 0;
+}
+
#if PLATFORM(BLACKBERRY)
ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char* threadName)
{
@@ -171,8 +183,9 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
LOG_ERROR("pthread_attr_getstack() failed: %d", errno);
}
+ OwnPtr<ThreadFunctionInvocation> invocation = adoptPtr(new ThreadFunctionInvocation(entryPoint, data));
pthread_t threadHandle;
- if (pthread_create(&threadHandle, &attr, entryPoint, data)) {
+ if (pthread_create(&threadHandle, &attr, wtfThreadEntryPoint, invocation.get())) {
LOG_ERROR("pthread_create() failed: %d", errno);
threadHandle = 0;
}
@@ -183,17 +196,26 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
if (!threadHandle)
return 0;
+ // Balanced by adoptPtr() in wtfThreadEntryPoint.
+ ThreadFunctionInvocation* leakedInvocation = invocation.leakPtr();
+ UNUSED_PARAM(leakedInvocation);
+
return establishIdentifierForPthreadHandle(threadHandle);
}
#else
ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char*)
{
+ OwnPtr<ThreadFunctionInvocation> invocation = adoptPtr(new ThreadFunctionInvocation(entryPoint, data));
pthread_t threadHandle;
- if (pthread_create(&threadHandle, 0, entryPoint, data)) {
- LOG_ERROR("Failed to create pthread at entry point %p with data %p", entryPoint, data);
+ if (pthread_create(&threadHandle, 0, wtfThreadEntryPoint, invocation.get())) {
+ LOG_ERROR("Failed to create pthread at entry point %p with data %p", wtfThreadEntryPoint, invocation.get());
return 0;
}
+ // Balanced by adoptPtr() in wtfThreadEntryPoint.
+ ThreadFunctionInvocation* leakedInvocation = invocation.leakPtr();
+ UNUSED_PARAM(leakedInvocation);
+
return establishIdentifierForPthreadHandle(threadHandle);
}
#endif
@@ -217,7 +239,7 @@ void initializeCurrentThreadInternal(const char* threadName)
ThreadIdentifierData::initialize(id);
}
-int waitForThreadCompletion(ThreadIdentifier threadID, void** result)
+int waitForThreadCompletion(ThreadIdentifier threadID)
{
ASSERT(threadID);
@@ -225,7 +247,7 @@ int waitForThreadCompletion(ThreadIdentifier threadID, void** result)
if (!pthreadHandle)
return 0;
- int joinResult = pthread_join(pthreadHandle, result);
+ int joinResult = pthread_join(pthreadHandle, 0);
if (joinResult == EDEADLK)
LOG_ERROR("ThreadIdentifier %u was found to be deadlocked trying to quit", threadID);
diff --git a/Source/JavaScriptCore/wtf/ThreadingWin.cpp b/Source/JavaScriptCore/wtf/ThreadingWin.cpp
index ac0f73f19..bc32262ce 100644
--- a/Source/JavaScriptCore/wtf/ThreadingWin.cpp
+++ b/Source/JavaScriptCore/wtf/ThreadingWin.cpp
@@ -210,14 +210,14 @@ static void clearThreadHandleForIdentifier(ThreadIdentifier id)
static unsigned __stdcall wtfThreadEntryPoint(void* param)
{
OwnPtr<ThreadFunctionInvocation> invocation = adoptPtr(static_cast<ThreadFunctionInvocation*>(param));
- void* result = invocation->function(invocation->data);
+ invocation->function(invocation->data);
#if !USE(PTHREADS) && OS(WINDOWS)
// Do the TLS cleanup.
ThreadSpecificThreadExit();
#endif
- return reinterpret_cast<unsigned>(result);
+ return 0;
}
ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, const char* threadName)
@@ -252,7 +252,7 @@ ThreadIdentifier createThreadInternal(ThreadFunction entryPoint, void* data, con
return threadID;
}
-int waitForThreadCompletion(ThreadIdentifier threadID, void** result)
+int waitForThreadCompletion(ThreadIdentifier threadID)
{
ASSERT(threadID);
diff --git a/Source/JavaScriptCore/wtf/Vector.h b/Source/JavaScriptCore/wtf/Vector.h
index 175f1a582..29bbd37d9 100644
--- a/Source/JavaScriptCore/wtf/Vector.h
+++ b/Source/JavaScriptCore/wtf/Vector.h
@@ -181,7 +181,10 @@ namespace WTF {
static void uninitializedFill(T* dst, T* dstEnd, const T& val)
{
ASSERT(sizeof(T) == sizeof(char));
- memset(dst, val, dstEnd - dst);
+#if COMPILER(GCC) && defined(_FORTIFY_SOURCE)
+ if (!__builtin_constant_p(dstEnd - dst) || (!(dstEnd - dst)))
+#endif
+ memset(dst, val, dstEnd - dst);
}
};
diff --git a/Source/JavaScriptCore/wtf/dtoa.cpp b/Source/JavaScriptCore/wtf/dtoa.cpp
index 3732fe614..4c4041e1c 100644
--- a/Source/JavaScriptCore/wtf/dtoa.cpp
+++ b/Source/JavaScriptCore/wtf/dtoa.cpp
@@ -228,25 +228,6 @@ static void multadd(BigInt& b, int m, int a) /* multiply by m and add a */
b.append((uint32_t)carry);
}
-static void s2b(BigInt& b, const char* s, int nd0, int nd, uint32_t y9)
-{
- b.sign = 0;
- b.resize(1);
- b.words()[0] = y9;
-
- int i = 9;
- if (9 < nd0) {
- s += 9;
- do {
- multadd(b, 10, *s++ - '0');
- } while (++i < nd0);
- s++;
- } else
- s += 10;
- for (; i < nd; i++)
- multadd(b, 10, *s++ - '0');
-}
-
static int hi0bits(uint32_t x)
{
int k = 0;
@@ -603,57 +584,6 @@ static ALWAYS_INLINE void diff(BigInt& c, const BigInt& aRef, const BigInt& bRef
c.resize(wa);
}
-static double ulp(U *x)
-{
- register int32_t L;
- U u;
-
- L = (word0(x) & Exp_mask) - (P - 1) * Exp_msk1;
- word0(&u) = L;
- word1(&u) = 0;
- return dval(&u);
-}
-
-static double b2d(const BigInt& a, int* e)
-{
- const uint32_t* xa;
- const uint32_t* xa0;
- uint32_t w;
- uint32_t y;
- uint32_t z;
- int k;
- U d;
-
-#define d0 word0(&d)
-#define d1 word1(&d)
-
- xa0 = a.words();
- xa = xa0 + a.size();
- y = *--xa;
- ASSERT(y);
- k = hi0bits(y);
- *e = 32 - k;
- if (k < Ebits) {
- d0 = Exp_1 | (y >> (Ebits - k));
- w = xa > xa0 ? *--xa : 0;
- d1 = (y << (32 - Ebits + k)) | (w >> (Ebits - k));
- goto returnD;
- }
- z = xa > xa0 ? *--xa : 0;
- if (k -= Ebits) {
- d0 = Exp_1 | (y << k) | (z >> (32 - k));
- y = xa > xa0 ? *--xa : 0;
- d1 = (z << k) | (y >> (32 - k));
- } else {
- d0 = Exp_1 | y;
- d1 = z;
- }
-returnD:
-#undef d0
-#undef d1
- return dval(&d);
-}
-
static ALWAYS_INLINE void d2b(BigInt& b, U* d, int* e, int* bits)
{
int de, k;
@@ -701,23 +631,6 @@ static ALWAYS_INLINE void d2b(BigInt& b, U* d, int* e, int* bits)
#undef d0
#undef d1
-static double ratio(const BigInt& a, const BigInt& b)
-{
- U da, db;
- int k, ka, kb;
-
- dval(&da) = b2d(a, &ka);
- dval(&db) = b2d(b, &kb);
- k = ka - kb + 32 * (a.size() - b.size());
- if (k > 0)
- word0(&da) += k * Exp_msk1;
- else {
- k = -k;
- word0(&db) += k * Exp_msk1;
- }
- return dval(&da) / dval(&db);
-}
-
static const double tens[] = {
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9,
1e10, 1e11, 1e12, 1e13, 1e14, 1e15, 1e16, 1e17, 1e18, 1e19,
@@ -735,443 +648,26 @@ static const double tinytens[] = { 1e-16, 1e-32, 1e-64, 1e-128,
#define Scale_Bit 0x10
#define n_bigtens 5
+template<AllowTrailingJunkTag allowTrailingJunk>
double strtod(const char* s00, char** se)
{
- int scale;
- int bb2, bb5, bbe, bd2, bd5, bbbits, bs2, c, dsign,
- e, e1, esign, i, j, k, nd, nd0, nf, nz, nz0, sign;
- const char *s, *s0, *s1;
- double aadj, aadj1;
- U aadj2, adj, rv, rv0;
- int32_t L;
- uint32_t y, z;
- BigInt bb, bb1, bd, bd0, bs, delta;
-
- sign = nz0 = nz = 0;
- dval(&rv) = 0;
- for (s = s00; ; s++) {
- switch (*s) {
- case '-':
- sign = 1;
- /* no break */
- case '+':
- if (*++s)
- goto break2;
- /* no break */
- case 0:
- goto ret0;
- case '\t':
- case '\n':
- case '\v':
- case '\f':
- case '\r':
- case ' ':
- continue;
- default:
- goto break2;
- }
- }
-break2:
- if (*s == '0') {
- nz0 = 1;
- while (*++s == '0') { }
- if (!*s)
- goto ret;
- }
- s0 = s;
- y = z = 0;
- for (nd = nf = 0; (c = *s) >= '0' && c <= '9'; nd++, s++)
- if (nd < 9)
- y = (10 * y) + c - '0';
- else if (nd < 16)
- z = (10 * z) + c - '0';
- nd0 = nd;
- if (c == '.') {
- c = *++s;
- if (!nd) {
- for (; c == '0'; c = *++s)
- nz++;
- if (c > '0' && c <= '9') {
- s0 = s;
- nf += nz;
- nz = 0;
- goto haveDig;
- }
- goto digDone;
- }
- for (; c >= '0' && c <= '9'; c = *++s) {
-haveDig:
- nz++;
- if (c -= '0') {
- nf += nz;
- for (i = 1; i < nz; i++)
- if (nd++ < 9)
- y *= 10;
- else if (nd <= DBL_DIG + 1)
- z *= 10;
- if (nd++ < 9)
- y = (10 * y) + c;
- else if (nd <= DBL_DIG + 1)
- z = (10 * z) + c;
- nz = 0;
- }
- }
- }
-digDone:
- e = 0;
- if (c == 'e' || c == 'E') {
- if (!nd && !nz && !nz0)
- goto ret0;
- s00 = s;
- esign = 0;
- switch (c = *++s) {
- case '-':
- esign = 1;
- case '+':
- c = *++s;
- }
- if (c >= '0' && c <= '9') {
- while (c == '0')
- c = *++s;
- if (c > '0' && c <= '9') {
- L = c - '0';
- s1 = s;
- while ((c = *++s) >= '0' && c <= '9')
- L = (10 * L) + c - '0';
- if (s - s1 > 8 || L > 19999)
- /* Avoid confusion from exponents
- * so large that e might overflow.
- */
- e = 19999; /* safe for 16 bit ints */
- else
- e = (int)L;
- if (esign)
- e = -e;
- } else
- e = 0;
- } else
- s = s00;
- }
- if (!nd) {
- if (!nz && !nz0) {
-ret0:
- s = s00;
- sign = 0;
- }
- goto ret;
- }
- e1 = e -= nf;
-
- /* Now we have nd0 digits, starting at s0, followed by a
- * decimal point, followed by nd-nd0 digits. The number we're
- * after is the integer represented by those digits times
- * 10**e */
-
- if (!nd0)
- nd0 = nd;
- k = nd < DBL_DIG + 1 ? nd : DBL_DIG + 1;
- dval(&rv) = y;
- if (k > 9)
- dval(&rv) = tens[k - 9] * dval(&rv) + z;
- if (nd <= DBL_DIG) {
- if (!e)
- goto ret;
- if (e > 0) {
- if (e <= Ten_pmax) {
- /* rv = */ rounded_product(dval(&rv), tens[e]);
- goto ret;
- }
- i = DBL_DIG - nd;
- if (e <= Ten_pmax + i) {
- /* A fancier test would sometimes let us do
- * this for larger i values.
- */
- e -= i;
- dval(&rv) *= tens[i];
- /* rv = */ rounded_product(dval(&rv), tens[e]);
- goto ret;
- }
- } else if (e >= -Ten_pmax) {
- /* rv = */ rounded_quotient(dval(&rv), tens[-e]);
- goto ret;
- }
- }
- e1 += nd - k;
-
- scale = 0;
-
- /* Get starting approximation = rv * 10**e1 */
-
- if (e1 > 0) {
- if ((i = e1 & 15))
- dval(&rv) *= tens[i];
- if (e1 &= ~15) {
- if (e1 > DBL_MAX_10_EXP) {
-ovfl:
-#if HAVE(ERRNO_H)
- errno = ERANGE;
-#endif
- /* Can't trust HUGE_VAL */
- word0(&rv) = Exp_mask;
- word1(&rv) = 0;
- goto ret;
- }
- e1 >>= 4;
- for (j = 0; e1 > 1; j++, e1 >>= 1)
- if (e1 & 1)
- dval(&rv) *= bigtens[j];
- /* The last multiplication could overflow. */
- word0(&rv) -= P * Exp_msk1;
- dval(&rv) *= bigtens[j];
- if ((z = word0(&rv) & Exp_mask) > Exp_msk1 * (DBL_MAX_EXP + Bias - P))
- goto ovfl;
- if (z > Exp_msk1 * (DBL_MAX_EXP + Bias - 1 - P)) {
- /* set to largest number */
- /* (Can't trust DBL_MAX) */
- word0(&rv) = Big0;
- word1(&rv) = Big1;
- } else
- word0(&rv) += P * Exp_msk1;
- }
- } else if (e1 < 0) {
- e1 = -e1;
- if ((i = e1 & 15))
- dval(&rv) /= tens[i];
- if (e1 >>= 4) {
- if (e1 >= 1 << n_bigtens)
- goto undfl;
- if (e1 & Scale_Bit)
- scale = 2 * P;
- for (j = 0; e1 > 0; j++, e1 >>= 1)
- if (e1 & 1)
- dval(&rv) *= tinytens[j];
- if (scale && (j = (2 * P) + 1 - ((word0(&rv) & Exp_mask) >> Exp_shift)) > 0) {
- /* scaled rv is denormal; clear j low bits */
- if (j >= 32) {
- word1(&rv) = 0;
- if (j >= 53)
- word0(&rv) = (P + 2) * Exp_msk1;
- else
- word0(&rv) &= 0xffffffff << (j - 32);
- } else
- word1(&rv) &= 0xffffffff << j;
- }
- if (!dval(&rv)) {
-undfl:
- dval(&rv) = 0.;
-#if HAVE(ERRNO_H)
- errno = ERANGE;
-#endif
- goto ret;
- }
- }
- }
-
- /* Now the hard part -- adjusting rv to the correct value.*/
-
- /* Put digits into bd: true value = bd * 10^e */
-
- s2b(bd0, s0, nd0, nd, y);
-
- for (;;) {
- bd = bd0;
- d2b(bb, &rv, &bbe, &bbbits); /* rv = bb * 2^bbe */
- i2b(bs, 1);
-
- if (e >= 0) {
- bb2 = bb5 = 0;
- bd2 = bd5 = e;
- } else {
- bb2 = bb5 = -e;
- bd2 = bd5 = 0;
- }
- if (bbe >= 0)
- bb2 += bbe;
- else
- bd2 -= bbe;
- bs2 = bb2;
- j = bbe - scale;
- i = j + bbbits - 1; /* logb(rv) */
- if (i < Emin) /* denormal */
- j += P - Emin;
- else
- j = P + 1 - bbbits;
- bb2 += j;
- bd2 += j;
- bd2 += scale;
- i = bb2 < bd2 ? bb2 : bd2;
- if (i > bs2)
- i = bs2;
- if (i > 0) {
- bb2 -= i;
- bd2 -= i;
- bs2 -= i;
- }
- if (bb5 > 0) {
- pow5mult(bs, bb5);
- mult(bb, bs);
- }
- if (bb2 > 0)
- lshift(bb, bb2);
- if (bd5 > 0)
- pow5mult(bd, bd5);
- if (bd2 > 0)
- lshift(bd, bd2);
- if (bs2 > 0)
- lshift(bs, bs2);
- diff(delta, bb, bd);
- dsign = delta.sign;
- delta.sign = 0;
- i = cmp(delta, bs);
-
- if (i < 0) {
- /* Error is less than half an ulp -- check for
- * special case of mantissa a power of two.
- */
- if (dsign || word1(&rv) || word0(&rv) & Bndry_mask
- || (word0(&rv) & Exp_mask) <= (2 * P + 1) * Exp_msk1
- ) {
- break;
- }
- if (!delta.words()[0] && delta.size() <= 1) {
- /* exact result */
- break;
- }
- lshift(delta, Log2P);
- if (cmp(delta, bs) > 0)
- goto dropDown;
- break;
- }
- if (!i) {
- /* exactly half-way between */
- if (dsign) {
- if ((word0(&rv) & Bndry_mask1) == Bndry_mask1
- && word1(&rv) == (
- (scale && (y = word0(&rv) & Exp_mask) <= 2 * P * Exp_msk1)
- ? (0xffffffff & (0xffffffff << (2 * P + 1 - (y >> Exp_shift)))) :
- 0xffffffff)) {
- /*boundary case -- increment exponent*/
- word0(&rv) = (word0(&rv) & Exp_mask) + Exp_msk1;
- word1(&rv) = 0;
- dsign = 0;
- break;
- }
- } else if (!(word0(&rv) & Bndry_mask) && !word1(&rv)) {
-dropDown:
- /* boundary case -- decrement exponent */
- if (scale) {
- L = word0(&rv) & Exp_mask;
- if (L <= (2 * P + 1) * Exp_msk1) {
- if (L > (P + 2) * Exp_msk1)
- /* round even ==> */
- /* accept rv */
- break;
- /* rv = smallest denormal */
- goto undfl;
- }
- }
- L = (word0(&rv) & Exp_mask) - Exp_msk1;
- word0(&rv) = L | Bndry_mask1;
- word1(&rv) = 0xffffffff;
- break;
- }
- if (!(word1(&rv) & LSB))
- break;
- if (dsign)
- dval(&rv) += ulp(&rv);
- else {
- dval(&rv) -= ulp(&rv);
- if (!dval(&rv))
- goto undfl;
- }
- dsign = 1 - dsign;
- break;
- }
- if ((aadj = ratio(delta, bs)) <= 2.) {
- if (dsign)
- aadj = aadj1 = 1.;
- else if (word1(&rv) || word0(&rv) & Bndry_mask) {
- if (word1(&rv) == Tiny1 && !word0(&rv))
- goto undfl;
- aadj = 1.;
- aadj1 = -1.;
- } else {
- /* special case -- power of FLT_RADIX to be */
- /* rounded down... */
-
- if (aadj < 2. / FLT_RADIX)
- aadj = 1. / FLT_RADIX;
- else
- aadj *= 0.5;
- aadj1 = -aadj;
- }
- } else {
- aadj *= 0.5;
- aadj1 = dsign ? aadj : -aadj;
- }
- y = word0(&rv) & Exp_mask;
-
- /* Check for overflow */
-
- if (y == Exp_msk1 * (DBL_MAX_EXP + Bias - 1)) {
- dval(&rv0) = dval(&rv);
- word0(&rv) -= P * Exp_msk1;
- adj.d = aadj1 * ulp(&rv);
- dval(&rv) += adj.d;
- if ((word0(&rv) & Exp_mask) >= Exp_msk1 * (DBL_MAX_EXP + Bias - P)) {
- if (word0(&rv0) == Big0 && word1(&rv0) == Big1)
- goto ovfl;
- word0(&rv) = Big0;
- word1(&rv) = Big1;
- goto cont;
- }
- word0(&rv) += P * Exp_msk1;
- } else {
- if (scale && y <= 2 * P * Exp_msk1) {
- if (aadj <= 0x7fffffff) {
- if ((z = (uint32_t)aadj) <= 0)
- z = 1;
- aadj = z;
- aadj1 = dsign ? aadj : -aadj;
- }
- dval(&aadj2) = aadj1;
- word0(&aadj2) += (2 * P + 1) * Exp_msk1 - y;
- aadj1 = dval(&aadj2);
- }
- adj.d = aadj1 * ulp(&rv);
- dval(&rv) += adj.d;
- }
- z = word0(&rv) & Exp_mask;
- if (!scale && y == z) {
- /* Can we stop now? */
- L = (int32_t)aadj;
- aadj -= L;
- /* The tolerances below are conservative. */
- if (dsign || word1(&rv) || word0(&rv) & Bndry_mask) {
- if (aadj < .4999999 || aadj > .5000001)
- break;
- } else if (aadj < .4999999 / FLT_RADIX)
- break;
- }
-cont:
- {}
- }
- if (scale) {
- word0(&rv0) = Exp_1 - 2 * P * Exp_msk1;
- word1(&rv0) = 0;
- dval(&rv) *= dval(&rv0);
-#if HAVE(ERRNO_H)
- /* try to avoid the bug of testing an 8087 register value */
- if (!word0(&rv) && !word1(&rv))
- errno = ERANGE;
-#endif
- }
-ret:
+ int length = strlen(s00);
+ double_conversion::StringToDoubleConverter converter(
+ (allowTrailingJunk ? double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK : 0) |
+ double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES,
+ 0.0,
+ (allowTrailingJunk ? std::numeric_limits<double>::quiet_NaN() : 0.0),
+ "Infinity", "NaN");
+ int processedCharacterCount = 0;
+ double result = converter.StringToDouble(s00, length, &processedCharacterCount);
if (se)
- *se = const_cast<char*>(s);
- return sign ? -dval(&rv) : dval(&rv);
+ *se = const_cast<char*>(s00 + processedCharacterCount);
+ return result;
}
+template double strtod<AllowTrailingJunk>(const char*, char**);
+template double strtod<DisallowTrailingJunk>(const char*, char**);
+
static ALWAYS_INLINE int quorem(BigInt& b, BigInt& S)
{
size_t n;
diff --git a/Source/JavaScriptCore/wtf/dtoa.h b/Source/JavaScriptCore/wtf/dtoa.h
index d27c59206..a4672c07a 100644
--- a/Source/JavaScriptCore/wtf/dtoa.h
+++ b/Source/JavaScriptCore/wtf/dtoa.h
@@ -35,9 +35,12 @@ WTF_EXPORT_PRIVATE void dtoa(DtoaBuffer result, double dd, bool& sign, int& expo
WTF_EXPORT_PRIVATE void dtoaRoundSF(DtoaBuffer result, double dd, int ndigits, bool& sign, int& exponent, unsigned& precision);
WTF_EXPORT_PRIVATE void dtoaRoundDP(DtoaBuffer result, double dd, int ndigits, bool& sign, int& exponent, unsigned& precision);
+enum AllowTrailingJunkTag { DisallowTrailingJunk = 0, AllowTrailingJunk };
+
// s00: input string. Must not be 0 and must be terminated by 0.
// se: *se will have the last consumed character position + 1.
-WTF_EXPORT_PRIVATE double strtod(const char* s00, char** se);
+template<AllowTrailingJunkTag allowTrailingJunk>
+double strtod(const char* s00, char** se);
// Size = 80 for sizeof(DtoaBuffer) + some sign bits, decimal point, 'e', exponent digits.
const unsigned NumberToStringBufferLength = 96;
diff --git a/Source/JavaScriptCore/wtf/dtoa/utils.h b/Source/JavaScriptCore/wtf/dtoa/utils.h
index d5cfe9c29..64e5ad42f 100644
--- a/Source/JavaScriptCore/wtf/dtoa/utils.h
+++ b/Source/JavaScriptCore/wtf/dtoa/utils.h
@@ -49,7 +49,7 @@
defined(__ARMEL__) || \
defined(_MIPS_ARCH_MIPS32R2)
#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
-#elif CPU(MIPS) || CPU(PPC) || CPU(PPC64) || OS(WINCE) || CPU(SH4) || CPU(S390) || CPU(S390X)
+#elif CPU(MIPS) || CPU(PPC) || CPU(PPC64) || OS(WINCE) || CPU(SH4) || CPU(S390) || CPU(S390X) || CPU(IA64) || CPU(SPARC) || CPU(ALPHA)
#define DOUBLE_CONVERSION_CORRECT_DOUBLE_OPERATIONS 1
#elif defined(_M_IX86) || defined(__i386__)
#if defined(_WIN32)
diff --git a/Source/JavaScriptCore/wtf/gobject/GOwnPtr.cpp b/Source/JavaScriptCore/wtf/gobject/GOwnPtr.cpp
index c727956ce..dfe187d78 100644
--- a/Source/JavaScriptCore/wtf/gobject/GOwnPtr.cpp
+++ b/Source/JavaScriptCore/wtf/gobject/GOwnPtr.cpp
@@ -37,6 +37,11 @@ template <> void freeOwnedGPtr<GList>(GList* ptr)
g_list_free(ptr);
}
+template <> void freeOwnedGPtr<GSList>(GSList* ptr)
+{
+ g_slist_free(ptr);
+}
+
template <> void freeOwnedGPtr<GPatternSpec>(GPatternSpec* ptr)
{
if (ptr)
diff --git a/Source/JavaScriptCore/wtf/gobject/GOwnPtr.h b/Source/JavaScriptCore/wtf/gobject/GOwnPtr.h
index 221971f97..4b2dcb77b 100644
--- a/Source/JavaScriptCore/wtf/gobject/GOwnPtr.h
+++ b/Source/JavaScriptCore/wtf/gobject/GOwnPtr.h
@@ -35,6 +35,7 @@ namespace WTF {
template <typename T> inline void freeOwnedGPtr(T* ptr);
template<> void freeOwnedGPtr<GError>(GError*);
template<> void freeOwnedGPtr<GList>(GList*);
+template<> void freeOwnedGPtr<GSList>(GSList*);
template<> void freeOwnedGPtr<GPatternSpec>(GPatternSpec*);
template<> void freeOwnedGPtr<GDir>(GDir*);
template<> void freeOwnedGPtr<GTimer>(GTimer*);
diff --git a/Source/JavaScriptCore/wtf/gobject/GTypedefs.h b/Source/JavaScriptCore/wtf/gobject/GTypedefs.h
index e2b2ba6b8..cb3842b78 100644
--- a/Source/JavaScriptCore/wtf/gobject/GTypedefs.h
+++ b/Source/JavaScriptCore/wtf/gobject/GTypedefs.h
@@ -53,6 +53,7 @@ typedef struct _GInputStream GInputStream;
typedef struct _GList GList;
typedef struct _GPatternSpec GPatternSpec;
typedef struct _GPollableOutputStream GPollableOutputStream;
+typedef struct _GSList GSList;
typedef struct _GSocketClient GSocketClient;
typedef struct _GSocketConnection GSocketConnection;
typedef struct _GSource GSource;
diff --git a/Source/JavaScriptCore/wtf/mac/MainThreadMac.mm b/Source/JavaScriptCore/wtf/mac/MainThreadMac.mm
index fbc625032..5a82f40a6 100644
--- a/Source/JavaScriptCore/wtf/mac/MainThreadMac.mm
+++ b/Source/JavaScriptCore/wtf/mac/MainThreadMac.mm
@@ -35,7 +35,6 @@
#import <wtf/Assertions.h>
#import <wtf/HashSet.h>
#import <wtf/Threading.h>
-#import <wtf/ThreadSpecific.h>
@interface JSWTFMainThreadCaller : NSObject {
}
@@ -59,17 +58,6 @@ static bool mainThreadEstablishedAsPthreadMain;
static pthread_t mainThreadPthread;
static NSThread* mainThreadNSThread;
-#if ENABLE(PARALLEL_GC)
-static ThreadSpecific<bool>* isGCThread;
-
-static void initializeGCThreads()
-{
- isGCThread = new ThreadSpecific<bool>();
-}
-#else
-static void initializeGCThreads() { }
-#endif
-
void initializeMainThreadPlatform()
{
ASSERT(!staticMainThreadCaller);
@@ -145,31 +133,4 @@ bool isMainThread()
return pthread_equal(pthread_self(), mainThreadPthread);
}
-#if ENABLE(PARALLEL_GC)
-void registerGCThread()
-{
- if (!isGCThread) {
- // This happens if we're running in a process that doesn't care about
- // MainThread.
- return;
- }
-
- **isGCThread = true;
-}
-
-bool isMainThreadOrGCThread()
-{
- if (isGCThread->isSet() && **isGCThread)
- return true;
-
- return isMainThread();
-}
-#else
-// This is necessary because JavaScriptCore.exp doesn't support preprocessor macros.
-bool isMainThreadOrGCThread()
-{
- return isMainThread();
-}
-#endif
-
} // namespace WTF
diff --git a/Source/JavaScriptCore/wtf/text/StringImpl.h b/Source/JavaScriptCore/wtf/text/StringImpl.h
index 3862effb6..667335b86 100644
--- a/Source/JavaScriptCore/wtf/text/StringImpl.h
+++ b/Source/JavaScriptCore/wtf/text/StringImpl.h
@@ -43,6 +43,8 @@ typedef const struct __CFString * CFStringRef;
// Landing the file moves in one patch, will follow on with patches to change the namespaces.
namespace JSC {
struct IdentifierCStringTranslator;
+namespace LLInt { class Data; }
+class LLIntOffsetsExtractor;
template <typename T> struct IdentifierCharBufferTranslator;
struct IdentifierLCharFromUCharTranslator;
}
@@ -72,7 +74,9 @@ class StringImpl {
friend struct WTF::SubstringTranslator;
friend struct WTF::UCharBufferTranslator;
friend class AtomicStringImpl;
-
+ friend class JSC::LLInt::Data;
+ friend class JSC::LLIntOffsetsExtractor;
+
private:
enum BufferOwnership {
BufferInternal,
@@ -735,7 +739,7 @@ bool equalIgnoringNullity(const Vector<UChar, inlineCapacity>& a, StringImpl* b)
return !a.size();
if (a.size() != b->length())
return false;
- return !memcmp(a.data(), b->characters(), b->length());
+ return !memcmp(a.data(), b->characters(), b->length() * sizeof(UChar));
}
WTF_EXPORT_PRIVATE int codePointCompare(const StringImpl*, const StringImpl*);
diff --git a/Source/JavaScriptCore/wtf/text/WTFString.cpp b/Source/JavaScriptCore/wtf/text/WTFString.cpp
index df74c65af..04c970a7c 100644
--- a/Source/JavaScriptCore/wtf/text/WTFString.cpp
+++ b/Source/JavaScriptCore/wtf/text/WTFString.cpp
@@ -24,6 +24,8 @@
#include <stdarg.h>
#include <wtf/ASCIICType.h>
+#include <wtf/DataLog.h>
+#include <wtf/MathExtras.h>
#include <wtf/text/CString.h>
#include <wtf/StringExtras.h>
#include <wtf/Vector.h>
@@ -1034,7 +1036,7 @@ intptr_t charactersToIntPtr(const UChar* data, size_t length, bool* ok)
return toIntegralType<intptr_t, UChar>(data, lengthOfCharactersAsInteger<UChar>(data, length), ok, 10);
}
-template <typename CharType>
+template <typename CharType, WTF::AllowTrailingJunkTag allowTrailingJunk>
static inline double toDoubleType(const CharType* data, size_t length, bool* ok, bool* didReadNumber)
{
if (!length) {
@@ -1051,9 +1053,9 @@ static inline double toDoubleType(const CharType* data, size_t length, bool* ok,
bytes[length] = '\0';
char* start = bytes.data();
char* end;
- double val = WTF::strtod(start, &end);
+ double val = WTF::strtod<allowTrailingJunk>(start, &end);
if (ok)
- *ok = (end == 0 || *end == '\0');
+ *ok = (end == 0 || *end == '\0') && !isnan(val);
if (didReadNumber)
*didReadNumber = end - start;
return val;
@@ -1061,24 +1063,36 @@ static inline double toDoubleType(const CharType* data, size_t length, bool* ok,
double charactersToDouble(const LChar* data, size_t length, bool* ok, bool* didReadNumber)
{
- return toDoubleType<LChar>(data, length, ok, didReadNumber);
+ return toDoubleType<LChar, WTF::DisallowTrailingJunk>(data, length, ok, didReadNumber);
}
double charactersToDouble(const UChar* data, size_t length, bool* ok, bool* didReadNumber)
{
- return toDoubleType<UChar>(data, length, ok, didReadNumber);
+ return toDoubleType<UChar, WTF::DisallowTrailingJunk>(data, length, ok, didReadNumber);
}
float charactersToFloat(const LChar* data, size_t length, bool* ok, bool* didReadNumber)
{
// FIXME: This will return ok even when the string fits into a double but not a float.
- return static_cast<float>(toDoubleType<LChar>(data, length, ok, didReadNumber));
+ return static_cast<float>(toDoubleType<LChar, WTF::DisallowTrailingJunk>(data, length, ok, didReadNumber));
}
float charactersToFloat(const UChar* data, size_t length, bool* ok, bool* didReadNumber)
{
// FIXME: This will return ok even when the string fits into a double but not a float.
- return static_cast<float>(toDoubleType<UChar>(data, length, ok, didReadNumber));
+ return static_cast<float>(toDoubleType<UChar, WTF::DisallowTrailingJunk>(data, length, ok, didReadNumber));
+}
+
+float charactersToFloatIgnoringJunk(const LChar* data, size_t length, bool* ok, bool* didReadNumber)
+{
+ // FIXME: This will return ok even when the string fits into a double but not a float.
+ return static_cast<float>(toDoubleType<LChar, WTF::AllowTrailingJunk>(data, length, ok, didReadNumber));
+}
+
+float charactersToFloatIgnoringJunk(const UChar* data, size_t length, bool* ok, bool* didReadNumber)
+{
+ // FIXME: This will return ok even when the string fits into a double but not a float.
+ return static_cast<float>(toDoubleType<UChar, WTF::AllowTrailingJunk>(data, length, ok, didReadNumber));
}
const String& emptyString()
@@ -1097,7 +1111,7 @@ Vector<char> asciiDebug(String& string);
void String::show() const
{
- fprintf(stderr, "%s\n", asciiDebug(impl()).data());
+ dataLog("%s\n", asciiDebug(impl()).data());
}
String* string(const char* s)
diff --git a/Source/JavaScriptCore/wtf/text/WTFString.h b/Source/JavaScriptCore/wtf/text/WTFString.h
index 2d32e6916..edc842154 100644
--- a/Source/JavaScriptCore/wtf/text/WTFString.h
+++ b/Source/JavaScriptCore/wtf/text/WTFString.h
@@ -87,7 +87,9 @@ intptr_t charactersToIntPtr(const UChar*, size_t, bool* ok = 0); // ignores trai
WTF_EXPORT_PRIVATE double charactersToDouble(const LChar*, size_t, bool* ok = 0, bool* didReadNumber = 0);
WTF_EXPORT_PRIVATE double charactersToDouble(const UChar*, size_t, bool* ok = 0, bool* didReadNumber = 0);
float charactersToFloat(const LChar*, size_t, bool* ok = 0, bool* didReadNumber = 0);
+WTF_EXPORT_PRIVATE float charactersToFloatIgnoringJunk(const LChar*, size_t, bool* ok = 0, bool* didReadNumber = 0);
WTF_EXPORT_PRIVATE float charactersToFloat(const UChar*, size_t, bool* ok = 0, bool* didReadNumber = 0);
+WTF_EXPORT_PRIVATE float charactersToFloatIgnoringJunk(const UChar*, size_t, bool* ok = 0, bool* didReadNumber = 0);
enum FloatConversionFlags {
ShouldRoundSignificantFigures = 1 << 0,
diff --git a/Source/JavaScriptCore/wtf/url/api/ParsedURL.cpp b/Source/JavaScriptCore/wtf/url/api/ParsedURL.cpp
new file mode 100644
index 000000000..3c1519136
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/api/ParsedURL.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2010 Google, Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ParsedURL.h"
+
+#if USE(WTFURL)
+
+#include "URLComponent.h"
+#include "URLParser.h"
+
+namespace WTF {
+
+ParsedURL::ParsedURL(const URLString& spec)
+ : m_spec(spec)
+{
+ // FIXME: Handle non-standard URLs.
+ if (spec.string().isEmpty())
+ return;
+ URLParser<UChar>::parseStandardURL(spec.string().characters(), spec.string().length(), m_segments);
+}
+
+String ParsedURL::scheme() const
+{
+ return segment(m_segments.scheme);
+}
+
+String ParsedURL::username() const
+{
+ return segment(m_segments.username);
+}
+
+String ParsedURL::password() const
+{
+ return segment(m_segments.password);
+}
+
+String ParsedURL::host() const
+{
+ return segment(m_segments.host);
+}
+
+String ParsedURL::port() const
+{
+ return segment(m_segments.port);
+}
+
+String ParsedURL::path() const
+{
+ return segment(m_segments.path);
+}
+
+String ParsedURL::query() const
+{
+ return segment(m_segments.query);
+}
+
+String ParsedURL::fragment() const
+{
+ return segment(m_segments.fragment);
+}
+
+String ParsedURL::segment(const URLComponent& component) const
+{
+ if (!component.isValid())
+ return String();
+ return m_spec.string().substring(component.begin(), component.length());
+}
+
+}
+
+#endif // USE(WTFURL)
diff --git a/Source/JavaScriptCore/wtf/url/api/ParsedURL.h b/Source/JavaScriptCore/wtf/url/api/ParsedURL.h
new file mode 100644
index 000000000..023589564
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/api/ParsedURL.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2010 Google, Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ParsedURL_h
+#define ParsedURL_h
+
+#if USE(WTFURL)
+
+#include "URLSegments.h"
+#include "URLString.h"
+
+namespace WTF {
+
+class URLComponent;
+
+class ParsedURL {
+public:
+ explicit ParsedURL(const URLString&);
+
+ // FIXME: Add a method for parsing non-canonicalized URLs.
+
+ String scheme() const;
+ String username() const;
+ String password() const;
+ String host() const;
+ String port() const;
+ String path() const;
+ String query() const;
+ String fragment() const;
+
+ URLString spec() { return m_spec; }
+
+private:
+ inline String segment(const URLComponent&) const;
+
+ URLString m_spec;
+ URLSegments m_segments;
+};
+
+}
+
+#endif // USE(WTFURL)
+
+#endif
diff --git a/Source/JavaScriptCore/wtf/url/api/URLString.h b/Source/JavaScriptCore/wtf/url/api/URLString.h
new file mode 100644
index 000000000..329f51e2f
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/api/URLString.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2010 Google, Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef URLString_h
+#define URLString_h
+
+#if USE(WTFURL)
+
+#include "WTFString.h"
+
+namespace WTF {
+
+// URLString represents a string that's a canonicalized URL.
+class URLString {
+public:
+ URLString() { }
+
+ const String& string() const { return m_string;}
+
+private:
+ friend class ParsedURL;
+
+ // URLString can only be constructed by a ParsedURL.
+ explicit URLString(const String& string)
+ : m_string(string)
+ {
+ }
+
+ String m_string;
+};
+
+}
+
+#endif // USE(WTFURL)
+
+#endif
+
diff --git a/Source/JavaScriptCore/wtf/url/src/RawURLBuffer.h b/Source/JavaScriptCore/wtf/url/src/RawURLBuffer.h
new file mode 100644
index 000000000..59a7f18af
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/RawURLBuffer.h
@@ -0,0 +1,74 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef RawURLBuffer_h
+#define RawURLBuffer_h
+
+#if USE(WTFURL)
+
+#include "URLBuffer.h"
+
+namespace WTF {
+
+// Simple implementation of the URLBuffer using new[]. This class
+// also supports a static buffer so if it is allocated on the stack, most
+// URLs can be canonicalized with no heap allocations.
+template<typename CharacterType, int inlineCapacity = 1024>
+class RawURLBuffer : public URLBuffer<CharacterType> {
+public:
+ RawURLBuffer() : URLBuffer<CharacterType>()
+ {
+ this->m_buffer = m_inlineBuffer;
+ this->m_capacity = inlineCapacity;
+ }
+
+ virtual ~RawURLBuffer()
+ {
+ if (this->m_buffer != m_inlineBuffer)
+ delete[] this->m_buffer;
+ }
+
+ virtual void resize(int size)
+ {
+ CharacterType* newBuffer = new CharacterType[size];
+ memcpy(newBuffer, this->m_buffer, sizeof(CharacterType) * (this->m_length < size ? this->m_length : size));
+ if (this->m_buffer != m_inlineBuffer)
+ delete[] this->m_buffer;
+ this->m_buffer = newBuffer;
+ this->m_capacity = size;
+ }
+
+protected:
+ CharacterType m_inlineBuffer[inlineCapacity];
+};
+
+} // namespace WTF
+
+#endif // USE(WTFURL)
+
+#endif // RawURLBuffer_h
diff --git a/Source/JavaScriptCore/wtf/url/src/URLBuffer.h b/Source/JavaScriptCore/wtf/url/src/URLBuffer.h
new file mode 100644
index 000000000..84a4f85c2
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLBuffer.h
@@ -0,0 +1,140 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef URLBuffer_h
+#define URLBuffer_h
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+// Base class for the canonicalizer output, this maintains a buffer and
+// supports simple resizing and append operations on it.
+//
+// It is VERY IMPORTANT that no virtual function calls be made on the common
+// code path. We only have two virtual function calls, the destructor and a
+// resize function that is called when the existing buffer is not big enough.
+// The derived class is then in charge of setting up our buffer which we will
+// manage.
+template<typename CharacterType>
+class URLBuffer {
+public:
+ URLBuffer() : m_buffer(0), m_capacity(0), m_length(0) { }
+ virtual ~URLBuffer() { }
+
+ // Implemented to resize the buffer. This function should update the buffer
+ // pointer to point to the new buffer, and any old data up to |m_length| in
+ // the buffer must be copied over.
+ //
+ // The new size must be larger than m_capacity.
+ virtual void resize(int) = 0;
+
+ inline char at(int offset) const { return m_buffer[offset]; }
+ inline void set(int offset, CharacterType ch)
+ {
+ // FIXME: Add ASSERT(offset < length());
+ m_buffer[offset] = ch;
+ }
+
+ // Returns the current capacity of the buffer. The length() is the number of
+ // characters that have been declared to be written, but the capacity() is
+ // the number that can be written without reallocation. If the caller must
+ // write many characters at once, it can make sure there is enough capacity,
+ // write the data, then use setLength() to declare the new length().
+ int capacity() const { return m_capacity; }
+ int length() const { return m_length; }
+
+ // The output will NOT be 0-terminated. Call length() to get the length.
+ const CharacterType* data() const { return m_buffer; }
+ CharacterType* data() { return m_buffer; }
+
+ // Shortens the URL to the new length. Used for "backing up" when processing
+ // relative paths. This can also be used if an external function writes a lot
+ // of data to the buffer (when using the "Raw" version below) beyond the end,
+ // to declare the new length.
+ void setLength(int length)
+ {
+ // FIXME: Add ASSERT(length < capacity());
+ m_length = length;
+ }
+
+ // This is the most performance critical function, since it is called for
+ // every character.
+ void append(CharacterType ch)
+ {
+ // In VC2005, putting this common case first speeds up execution
+ // dramatically because this branch is predicted as taken.
+ if (m_length < m_capacity) {
+ m_buffer[m_length] = ch;
+ ++m_length;
+ return;
+ }
+
+ if (!grow(1))
+ return;
+
+ m_buffer[m_length] = ch;
+ ++m_length;
+ }
+
+ void append(const CharacterType* str, int strLength)
+ {
+ if (m_length + strLength > m_capacity) {
+ if (!grow(m_length + strLength - m_capacity))
+ return;
+ }
+ for (int i = 0; i < strLength; i++)
+ m_buffer[m_length + i] = str[i];
+ m_length += strLength;
+ }
+
+protected:
+ // Returns true if the buffer could be resized, false on OOM.
+ bool grow(int minimumAdditionalCapacity)
+ {
+ static const int minimumCapacity = 16;
+ int newCapacity = m_capacity ? m_capacity : minimumCapacity;
+ do {
+ if (newCapacity >= (1 << 30)) // Prevent overflow below.
+ return false;
+ newCapacity *= 2;
+ } while (newCapacity < m_capacity + minimumAdditionalCapacity);
+ resize(newCapacity);
+ return true;
+ }
+
+ CharacterType* m_buffer;
+ int m_capacity;
+ int m_length; // Used characters in the buffer.
+};
+
+} // namespace WTF
+
+#endif // USE(WTFURL)
+
+#endif // URLBuffer_h
diff --git a/Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.cpp b/Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.cpp
new file mode 100644
index 000000000..f56e7207c
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.cpp
@@ -0,0 +1,177 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "config.h"
+#include "URLCharacterTypes.h"
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+const unsigned char URLCharacterTypes::characterTypeTable[0x100] = {
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0x00 - 0x0f
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0x10 - 0x1f
+ InvalidCharacter, // 0x20 ' ' (escape spaces in queries)
+ QueryCharacter | UserInfoCharacter, // 0x21 !
+ InvalidCharacter, // 0x22 "
+ InvalidCharacter, // 0x23 # (invalid in query since it marks the ref)
+ QueryCharacter | UserInfoCharacter, // 0x24 $
+ QueryCharacter | UserInfoCharacter, // 0x25 %
+ QueryCharacter | UserInfoCharacter, // 0x26 &
+ QueryCharacter | UserInfoCharacter, // 0x27 '
+ QueryCharacter | UserInfoCharacter, // 0x28 (
+ QueryCharacter | UserInfoCharacter, // 0x29 )
+ QueryCharacter | UserInfoCharacter, // 0x2a *
+ QueryCharacter | UserInfoCharacter, // 0x2b +
+ QueryCharacter | UserInfoCharacter, // 0x2c ,
+ QueryCharacter | UserInfoCharacter, // 0x2d -
+ QueryCharacter | UserInfoCharacter | IPv4Character, // 0x2e .
+ QueryCharacter, // 0x2f /
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x30 0
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x31 1
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x32 2
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x33 3
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x34 4
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x35 5
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x36 6
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter | OctalCharacter, // 0x37 7
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter, // 0x38 8
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter | DecimalCharacter, // 0x39 9
+ QueryCharacter, // 0x3a :
+ QueryCharacter, // 0x3b ;
+ InvalidCharacter, // 0x3c <
+ QueryCharacter, // 0x3d =
+ InvalidCharacter, // 0x3e >
+ QueryCharacter, // 0x3f ?
+ QueryCharacter, // 0x40 @
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x41 A
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x42 B
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x43 C
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x44 D
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x45 E
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x46 F
+ QueryCharacter | UserInfoCharacter, // 0x47 G
+ QueryCharacter | UserInfoCharacter, // 0x48 H
+ QueryCharacter | UserInfoCharacter, // 0x49 I
+ QueryCharacter | UserInfoCharacter, // 0x4a J
+ QueryCharacter | UserInfoCharacter, // 0x4b K
+ QueryCharacter | UserInfoCharacter, // 0x4c L
+ QueryCharacter | UserInfoCharacter, // 0x4d M
+ QueryCharacter | UserInfoCharacter, // 0x4e N
+ QueryCharacter | UserInfoCharacter, // 0x4f O
+ QueryCharacter | UserInfoCharacter, // 0x50 P
+ QueryCharacter | UserInfoCharacter, // 0x51 Q
+ QueryCharacter | UserInfoCharacter, // 0x52 R
+ QueryCharacter | UserInfoCharacter, // 0x53 S
+ QueryCharacter | UserInfoCharacter, // 0x54 T
+ QueryCharacter | UserInfoCharacter, // 0x55 U
+ QueryCharacter | UserInfoCharacter, // 0x56 V
+ QueryCharacter | UserInfoCharacter, // 0x57 W
+ QueryCharacter | UserInfoCharacter | IPv4Character, // 0x58 X
+ QueryCharacter | UserInfoCharacter, // 0x59 Y
+ QueryCharacter | UserInfoCharacter, // 0x5a Z
+ QueryCharacter, // 0x5b [
+ QueryCharacter, // 0x5c '\'
+ QueryCharacter, // 0x5d ]
+ QueryCharacter, // 0x5e ^
+ QueryCharacter | UserInfoCharacter, // 0x5f _
+ QueryCharacter, // 0x60 `
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x61 a
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x62 b
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x63 c
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x64 d
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x65 e
+ QueryCharacter | UserInfoCharacter | IPv4Character | HexCharacter, // 0x66 f
+ QueryCharacter | UserInfoCharacter, // 0x67 g
+ QueryCharacter | UserInfoCharacter, // 0x68 h
+ QueryCharacter | UserInfoCharacter, // 0x69 i
+ QueryCharacter | UserInfoCharacter, // 0x6a j
+ QueryCharacter | UserInfoCharacter, // 0x6b k
+ QueryCharacter | UserInfoCharacter, // 0x6c l
+ QueryCharacter | UserInfoCharacter, // 0x6d m
+ QueryCharacter | UserInfoCharacter, // 0x6e n
+ QueryCharacter | UserInfoCharacter, // 0x6f o
+ QueryCharacter | UserInfoCharacter, // 0x70 p
+ QueryCharacter | UserInfoCharacter, // 0x71 q
+ QueryCharacter | UserInfoCharacter, // 0x72 r
+ QueryCharacter | UserInfoCharacter, // 0x73 s
+ QueryCharacter | UserInfoCharacter, // 0x74 t
+ QueryCharacter | UserInfoCharacter, // 0x75 u
+ QueryCharacter | UserInfoCharacter, // 0x76 v
+ QueryCharacter | UserInfoCharacter, // 0x77 w
+ QueryCharacter | UserInfoCharacter | IPv4Character, // 0x78 x
+ QueryCharacter | UserInfoCharacter, // 0x79 y
+ QueryCharacter | UserInfoCharacter, // 0x7a z
+ QueryCharacter, // 0x7b {
+ QueryCharacter, // 0x7c |
+ QueryCharacter, // 0x7d }
+ QueryCharacter | UserInfoCharacter, // 0x7e ~
+ InvalidCharacter, // 0x7f
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0x80 - 0x8f
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0x90 - 0x9f
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0xa0 - 0xaf
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0xb0 - 0xbf
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0xc0 - 0xcf
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0xd0 - 0xdf
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0xe0 - 0xef
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter,
+ InvalidCharacter, InvalidCharacter, InvalidCharacter, InvalidCharacter, // 0xf0 - 0xff
+};
+
+}
+
+#endif // USE(WTFURL)
diff --git a/Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.h b/Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.h
new file mode 100644
index 000000000..6edb98ca2
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLCharacterTypes.h
@@ -0,0 +1,65 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef URLCharacterTypes_h
+#define URLCharacterTypes_h
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+class URLCharacterTypes {
+public:
+ static inline bool isQueryChar(unsigned char c) { return isCharOfType(c, QueryCharacter); }
+ static inline bool isIPv4Char(unsigned char c) { return isCharOfType(c, IPv4Character); }
+ static inline bool isHexChar(unsigned char c) { return isCharOfType(c, HexCharacter); }
+
+private:
+ enum CharTypes {
+ InvalidCharacter = 0,
+ QueryCharacter = 1 << 0,
+ UserInfoCharacter = 1 << 1,
+ IPv4Character = 1 << 2,
+ HexCharacter = 1 << 3,
+ DecimalCharacter = 1 << 4,
+ OctalCharacter = 1 << 5,
+ };
+
+ static const unsigned char characterTypeTable[0x100];
+
+ static inline bool isCharOfType(unsigned char c, CharTypes type)
+ {
+ return !!(characterTypeTable[c] & type);
+ }
+};
+
+}
+
+#endif // USE(WTFURL)
+
+#endif // URLCharacterTypes_h
diff --git a/Source/JavaScriptCore/wtf/url/src/URLComponent.h b/Source/JavaScriptCore/wtf/url/src/URLComponent.h
new file mode 100644
index 000000000..747a80b80
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLComponent.h
@@ -0,0 +1,81 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef URLComponent_h
+#define URLComponent_h
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+// Represents a substring (byte offset + length) for URL parsing.
+class URLComponent {
+public:
+    URLComponent() : m_begin(0), m_length(-1) { } // Default: invalid (unspecified) component.
+    URLComponent(int begin, int length) : m_begin(begin), m_length(length) { }
+
+    // Helper that returns a component created with the given begin and ending
+    // points. The ending point is non-inclusive.
+    static inline URLComponent fromRange(int begin, int end)
+    {
+        return URLComponent(begin, end - begin);
+    }
+
+    // Returns true if this component is valid, meaning the length is given. Even
+    // valid components may be empty to record the fact that they exist.
+    bool isValid() const { return m_length != -1; }
+
+    bool isNonEmpty() const { return m_length > 0; } // Valid and at least one character long.
+    bool isEmptyOrInvalid() const { return m_length <= 0; } // Either zero-length or unspecified (-1).
+
+    void reset() // Marks the component as unspecified.
+    {
+        m_begin = 0;
+        m_length = -1;
+    }
+
+    bool operator==(const URLComponent& other) const { return m_begin == other.m_begin && m_length == other.m_length; }
+
+    int begin() const { return m_begin; }
+    void setBegin(int begin) { m_begin = begin; }
+
+    int length() const { return m_length; }
+    void setLength(int length) { m_length = length; }
+
+    int end() const { return m_begin + m_length; } // One past the last character; only meaningful when isValid().
+
+private:
+    int m_begin; // Byte offset in the string of this component.
+    int m_length; // Will be -1 if the component is unspecified.
+};
+
+} // namespace WTF
+
+#endif // USE(WTFURL)
+
+#endif // URLComponent_h
diff --git a/Source/JavaScriptCore/wtf/url/src/URLEscape.cpp b/Source/JavaScriptCore/wtf/url/src/URLEscape.cpp
new file mode 100644
index 000000000..5acdcde24
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLEscape.cpp
@@ -0,0 +1,43 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "config.h"
+#include "URLEscape.h"
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+const char hexCharacterTable[16] = { // Uppercase hex digits, indexed by nibble value; used by appendURLEscapedCharacter() in URLEscape.h.
+    '0', '1', '2', '3', '4', '5', '6', '7',
+    '8', '9', 'A', 'B', 'C', 'D', 'E', 'F',
+};
+
+}
+
+#endif // USE(WTFURL)
diff --git a/Source/JavaScriptCore/wtf/url/src/URLEscape.h b/Source/JavaScriptCore/wtf/url/src/URLEscape.h
new file mode 100644
index 000000000..e010012a3
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLEscape.h
@@ -0,0 +1,53 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef URLEscape_h
+#define URLEscape_h
+
+#if USE(WTFURL)
+
+#include "URLBuffer.h"
+
+namespace WTF {
+
+extern const char hexCharacterTable[16];
+
+template<typename InChar, typename OutChar>
+inline void appendURLEscapedCharacter(InChar ch, URLBuffer<OutChar>& buffer) // Writes ch to buffer as a %XX escape (uppercase hex).
+{
+    buffer.append('%');
+    buffer.append(hexCharacterTable[ch >> 4]); // NOTE(review): assumes 0 <= ch <= 0xFF; larger values would index past hexCharacterTable -- confirm callers pass octets.
+    buffer.append(hexCharacterTable[ch & 0xf]);
+}
+
+}
+
+#endif // USE(WTFURL)
+
+#endif
diff --git a/Source/JavaScriptCore/wtf/url/src/URLParser.h b/Source/JavaScriptCore/wtf/url/src/URLParser.h
new file mode 100644
index 000000000..01f738cf3
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLParser.h
@@ -0,0 +1,579 @@
+/* Based on nsURLParsers.cc from Mozilla
+ * -------------------------------------
+ * Copyright (C) 1998 Netscape Communications Corporation.
+ *
+ * Other contributors:
+ * Darin Fisher (original author)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of either the Mozilla Public License Version 1.1, found at
+ * http://www.mozilla.org/MPL/ (the "MPL") or the GNU General Public
+ * License Version 2.0, found at http://www.fsf.org/copyleft/gpl.html
+ * (the "GPL"), in which case the provisions of the MPL or the GPL are
+ * applicable instead of those above. If you wish to allow use of your
+ * version of this file only under the terms of one of those two
+ * licenses (the MPL or the GPL) and not to allow others to use your
+ * version of this file under the LGPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the MPL or the GPL, as the case may be.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under any of the LGPL, the MPL or the GPL.
+ */
+
+#ifndef URLParser_h
+#define URLParser_h
+
+#include "URLComponent.h"
+#include "URLSegments.h"
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+template<typename CharacterType>
+class URLParser { // Static helpers that split a URL spec into URLComponent ranges; never allocates or copies.
+public:
+    enum SpecialPort {
+        UnspecifiedPort = -1,
+        InvalidPort = -2,
+    };
+
+    // This handles everything that may be an authority terminator, including
+    // backslash. For special backslash handling see parseAfterScheme.
+    static bool isPossibleAuthorityTerminator(CharacterType ch)
+    {
+        return isURLSlash(ch) || ch == '?' || ch == '#' || ch == ';';
+    }
+
+    // Given an already-identified auth section, breaks it into its constituent
+    // parts. The port number will be parsed and the resulting integer will be
+    // filled into the given *port variable, or -1 if there is no port number
+    // or it is invalid.
+    static void parseAuthority(const CharacterType* spec, const URLComponent& auth, URLComponent& username, URLComponent& password, URLComponent& host, URLComponent& port)
+    {
+        // FIXME: add ASSERT(auth.isValid()); // We should always get an authority.
+        if (!auth.length()) {
+            username.reset();
+            password.reset();
+            host.reset();
+            port.reset();
+            return;
+        }
+
+        // Search backwards for @, which is the separator between the user info
+        // and the server info. RFC 3986 forbids @ from occurring in auth, but
+        // someone might include it in a password unescaped.
+        int i = auth.begin() + auth.length() - 1;
+        while (i > auth.begin() && spec[i] != '@')
+            --i;
+
+        if (spec[i] == '@') {
+            // Found user info: <user-info>@<server-info>
+            parseUserInfo(spec, URLComponent(auth.begin(), i - auth.begin()), username, password);
+            parseServerInfo(spec, URLComponent::fromRange(i + 1, auth.begin() + auth.length()), host, port);
+        } else {
+            // No user info, everything is server info.
+            username.reset();
+            password.reset();
+            parseServerInfo(spec, auth, host, port);
+        }
+    }
+
+    static bool extractScheme(const CharacterType* spec, int specLength, URLComponent& scheme) // Locates the scheme (text before the first ':'); returns false when absent.
+    {
+        // Skip leading whitespace and control characters.
+        int begin = 0;
+        while (begin < specLength && shouldTrimFromURL(spec[begin]))
+            begin++;
+        if (begin == specLength)
+            return false; // Input is empty or all whitespace.
+
+        // Find the first colon character.
+        for (int i = begin; i < specLength; i++) {
+            if (spec[i] == ':') {
+                scheme = URLComponent::fromRange(begin, i);
+                return true;
+            }
+        }
+        return false; // No colon found: no scheme
+    }
+
+    // Fills in all members of the URLSegments structure (except for the
+    // scheme) for standard URLs.
+    //
+    // |spec| is the full spec being parsed, of length |specLength|.
+    // |afterScheme| is the character immediately following the scheme (after
+    // the colon) where we'll begin parsing.
+    static void parseAfterScheme(const CharacterType* spec, int specLength, int afterScheme, URLSegments& parsed)
+    {
+        int numberOfSlashes = consecutiveSlashes(spec, afterScheme, specLength);
+        int afterSlashes = afterScheme + numberOfSlashes;
+
+        // First split into two main parts, the authority (username, password,
+        // host, and port) and the full path (path, query, and reference).
+        URLComponent authority;
+        URLComponent fullPath;
+
+        // Found "//<some data>", looks like an authority section. Treat
+        // everything from there to the next slash (or end of spec) to be the
+        // authority. Note that we ignore the number of slashes and treat it as
+        // the authority.
+        int authEnd = nextAuthorityTerminator(spec, afterSlashes, specLength);
+        authority = URLComponent(afterSlashes, authEnd - afterSlashes);
+
+        if (authEnd == specLength) // No beginning of path found.
+            fullPath = URLComponent();
+        else // Everything starting from the slash to the end is the path.
+            fullPath = URLComponent(authEnd, specLength - authEnd);
+
+        // Now parse those two sub-parts.
+        parseAuthority(spec, authority, parsed.username, parsed.password, parsed.host, parsed.port);
+        parsePath(spec, fullPath, parsed.path, parsed.query, parsed.fragment);
+    }
+
+    // The main parsing function for standard URLs. Standard URLs have a scheme,
+    // host, path, etc.
+    static void parseStandardURL(const CharacterType* spec, int specLength, URLSegments& parsed)
+    {
+        // FIXME: add ASSERT(specLength >= 0);
+
+        // Strip leading & trailing spaces and control characters.
+        int begin = 0;
+        trimURL(spec, begin, specLength);
+
+        int afterScheme;
+        if (extractScheme(spec, specLength, parsed.scheme))
+            afterScheme = parsed.scheme.end() + 1; // Skip past the colon.
+        else {
+            // Say there's no scheme when there is a colon. We could also say
+            // that everything is the scheme. Both would produce an invalid
+            // URL, but this way seems less wrong in more cases.
+            parsed.scheme.reset();
+            afterScheme = begin;
+        }
+        parseAfterScheme(spec, specLength, afterScheme, parsed);
+    }
+
+    static void parsePath(const CharacterType* spec, const URLComponent& path, URLComponent& filepath, URLComponent& query, URLComponent& fragment) // Splits a full path into file path, query, and fragment pieces.
+    {
+        // path = [/]<segment1>/<segment2>/<...>/<segmentN>;<param>?<query>#<fragment>
+
+        // Special case when there is no path.
+        if (!path.isValid()) {
+            filepath.reset();
+            query.reset();
+            fragment.reset();
+            return;
+        }
+        // FIXME: add ASSERT(path.length() > 0); // We should never have 0 length paths.
+
+        // Search for first occurrence of either ? or #.
+        int pathEnd = path.begin() + path.length();
+
+        int querySeparator = -1; // Index of the '?'
+        int refSeparator = -1; // Index of the '#'
+        for (int i = path.begin(); i < pathEnd; i++) {
+            switch (spec[i]) {
+            case '?':
+                if (querySeparator < 0)
+                    querySeparator = i;
+                break;
+            case '#':
+                refSeparator = i;
+                i = pathEnd; // Break out of the loop.
+                break;
+            default:
+                break;
+            }
+        }
+
+        // Markers pointing to the character after each of these corresponding
+        // components. The code below works from the end back to the beginning,
+        // and will update these indices as it finds components that exist.
+        int fileEnd, queryEnd;
+
+        // Fragment: from the # to the end of the path.
+        if (refSeparator >= 0) {
+            fileEnd = refSeparator;
+            queryEnd = refSeparator;
+            fragment = URLComponent::fromRange(refSeparator + 1, pathEnd);
+        } else {
+            fileEnd = pathEnd;
+            queryEnd = pathEnd;
+            fragment.reset();
+        }
+
+        // Query fragment: everything from the ? to the next boundary (either
+        // the end of the path or the fragment fragment).
+        if (querySeparator >= 0) {
+            fileEnd = querySeparator;
+            query = URLComponent::fromRange(querySeparator + 1, queryEnd);
+        } else
+            query.reset();
+
+        // File path: treat an empty file path as no file path.
+        if (fileEnd != path.begin())
+            filepath = URLComponent::fromRange(path.begin(), fileEnd);
+        else
+            filepath.reset();
+    }
+
+    // Initializes a path URL which is merely a scheme followed by a path.
+    // Examples include "about:foo" and "javascript:alert('bar');"
+    static void parsePathURL(const CharacterType* spec, int specLength, URLSegments& parsed)
+    {
+        // Get the non-path and non-scheme parts of the URL out of the way, we
+        // never use them.
+        parsed.username.reset();
+        parsed.password.reset();
+        parsed.host.reset();
+        parsed.port.reset();
+        parsed.query.reset();
+        parsed.fragment.reset();
+
+        // Strip leading & trailing spaces and control characters.
+        // FIXME: Perhaps this is unnecessary?
+        int begin = 0;
+        trimURL(spec, begin, specLength);
+
+        // Handle empty specs or ones that contain only whitespace or control
+        // chars.
+        if (begin == specLength) {
+            parsed.scheme.reset();
+            parsed.path.reset();
+            return;
+        }
+
+        // Extract the scheme, with the path being everything following. We also
+        // handle the case where there is no scheme.
+        if (extractScheme(&spec[begin], specLength - begin, parsed.scheme)) {
+            // Offset the results since we gave extractScheme a substring.
+            parsed.scheme.setBegin(parsed.scheme.begin() + begin);
+
+            // For compatibility with the standard URL parser, we treat no path
+            // as -1, rather than having a length of 0 (we normally wouldn't
+            // care so much for these non-standard URLs).
+            if (parsed.scheme.end() == specLength - 1)
+                parsed.path.reset();
+            else
+                parsed.path = URLComponent::fromRange(parsed.scheme.end() + 1, specLength);
+        } else {
+            // No scheme found, just path.
+            parsed.scheme.reset();
+            parsed.path = URLComponent::fromRange(begin, specLength);
+        }
+    }
+
+    static void parseMailtoURL(const CharacterType* spec, int specLength, URLSegments& parsed) // mailto: is scheme + path, with an optional ?query.
+    {
+        // FIXME: add ASSERT(specLength >= 0);
+
+        // Get the non-path and non-scheme parts of the URL out of the way, we
+        // never use them.
+        parsed.username.reset();
+        parsed.password.reset();
+        parsed.host.reset();
+        parsed.port.reset();
+        parsed.fragment.reset();
+        parsed.query.reset(); // May use this; reset for convenience.
+
+        // Strip leading & trailing spaces and control characters.
+        int begin = 0;
+        trimURL(spec, begin, specLength);
+
+        // Handle empty specs or ones that contain only whitespace or control
+        // chars.
+        if (begin == specLength) {
+            parsed.scheme.reset();
+            parsed.path.reset();
+            return;
+        }
+
+        int pathBegin = -1;
+        int pathEnd = -1;
+
+        // Extract the scheme, with the path being everything following. We also
+        // handle the case where there is no scheme.
+        if (extractScheme(&spec[begin], specLength - begin, parsed.scheme)) {
+            // Offset the results since we gave extractScheme a substring.
+            parsed.scheme.setBegin(parsed.scheme.begin() + begin);
+
+            if (parsed.scheme.end() != specLength - 1) {
+                pathBegin = parsed.scheme.end() + 1;
+                pathEnd = specLength;
+            }
+        } else {
+            // No scheme found, just path.
+            parsed.scheme.reset();
+            pathBegin = begin;
+            pathEnd = specLength;
+        }
+
+        // Split [pathBegin, pathEnd) into a path + query.
+        for (int i = pathBegin; i < pathEnd; ++i) {
+            if (spec[i] == '?') {
+                parsed.query = URLComponent::fromRange(i + 1, pathEnd);
+                pathEnd = i;
+                break;
+            }
+        }
+
+        // For compatibility with the standard URL parser, treat no path as
+        // -1, rather than having a length of 0
+        if (pathBegin == pathEnd)
+            parsed.path.reset();
+        else
+            parsed.path = URLComponent::fromRange(pathBegin, pathEnd);
+    }
+
+    static int parsePort(const CharacterType* spec, const URLComponent& component) // Returns the port value, UnspecifiedPort when absent, or InvalidPort on bad digits/overflow.
+    {
+        // Easy success case when there is no port.
+        const int maxDigits = 5;
+        if (component.isEmptyOrInvalid())
+            return UnspecifiedPort;
+
+        URLComponent nonZeroDigits(component.end(), 0); // Skip leading zeros so "00080" still parses as 80.
+        for (int i = 0; i < component.length(); ++i) {
+            if (spec[component.begin() + i] != '0') {
+                nonZeroDigits = URLComponent::fromRange(component.begin() + i, component.end());
+                break;
+            }
+        }
+        if (!nonZeroDigits.length())
+            return 0; // All digits were 0.
+
+        if (nonZeroDigits.length() > maxDigits)
+            return InvalidPort;
+
+        int port = 0;
+        for (int i = 0; i < nonZeroDigits.length(); ++i) {
+            CharacterType ch = spec[nonZeroDigits.begin() + i];
+            if (!isPortDigit(ch))
+                return InvalidPort;
+            port *= 10;
+            port += static_cast<char>(ch) - '0';
+        }
+        if (port > 65535)
+            return InvalidPort;
+        return port;
+    }
+
+    static void extractFileName(const CharacterType* spec, const URLComponent& path, URLComponent& fileName) // File name = text after the last slash, excluding a trailing ';' parameter.
+    {
+        // Handle empty paths: they have no file names.
+        if (path.isEmptyOrInvalid()) {
+            fileName.reset();
+            return;
+        }
+
+        // Search backwards for a parameter, which is a normally unused field
+        // in a URL delimited by a semicolon. We parse the parameter as part of
+        // the path, but here, we don't want to count it. The last semicolon is
+        // the parameter.
+        int fileEnd = path.end();
+        for (int i = path.end() - 1; i > path.begin(); --i) {
+            if (spec[i] == ';') {
+                fileEnd = i;
+                break;
+            }
+        }
+
+        // Now search backwards from the filename end to the previous slash
+        // to find the beginning of the filename.
+        for (int i = fileEnd - 1; i >= path.begin(); --i) {
+            if (isURLSlash(spec[i])) {
+                // File name is everything following this character to the end
+                fileName = URLComponent::fromRange(i + 1, fileEnd);
+                return;
+            }
+        }
+
+        // No slash found, this means the input was degenerate (generally paths
+        // will start with a slash). Let's call everything the file name.
+        fileName = URLComponent::fromRange(path.begin(), fileEnd);
+    }
+
+    static bool extractQueryKeyValue(const CharacterType* spec, URLComponent& query, URLComponent& key, URLComponent& value) // Pulls the next key/value pair off the front of query (advancing it); returns false when query is empty or invalid.
+    {
+        if (query.isEmptyOrInvalid())
+            return false;
+
+        int start = query.begin();
+        int current = start;
+        int end = query.end();
+
+        // We assume the beginning of the input is the beginning of the "key"
+        // and we skip to the end of it.
+        key.setBegin(current);
+        while (current < end && spec[current] != '&' && spec[current] != '=')
+            ++current;
+        key.setLength(current - key.begin());
+
+        // Skip the separator after the key (if any).
+        if (current < end && spec[current] == '=')
+            ++current;
+
+        // Find the value part.
+        value.setBegin(current);
+        while (current < end && spec[current] != '&')
+            ++current;
+        value.setLength(current - value.begin());
+
+        // Finally skip the next separator if any
+        if (current < end && spec[current] == '&')
+            ++current;
+
+        // Save the new query
+        query = URLComponent::fromRange(current, end);
+        return true;
+    }
+
+// FIXME: This should be protected or private.
+public:
+    // We treat slashes and backslashes the same for IE compatibility.
+    static inline bool isURLSlash(CharacterType ch)
+    {
+        return ch == '/' || ch == '\\';
+    }
+
+    // Returns true if we should trim this character from the URL because it is
+    // a space or a control character.
+    static inline bool shouldTrimFromURL(CharacterType ch)
+    {
+        return ch <= ' ';
+    }
+
+    // Given an already-initialized begin index and end index (the index after
+    // the last CharacterType in spec), this shrinks the range to eliminate
+    // "should-be-trimmed" characters.
+    static inline void trimURL(const CharacterType* spec, int& begin, int& end)
+    {
+        // Strip leading whitespace and control characters.
+        while (begin < end && shouldTrimFromURL(spec[begin]))
+            ++begin;
+
+        // Strip trailing whitespace and control characters. We need the >i
+        // test for when the input string is all blanks; we don't want to back
+        // past the input.
+        while (end > begin && shouldTrimFromURL(spec[end - 1]))
+            --end;
+    }
+
+    // Counts the number of consecutive slashes starting at the given offset
+    // in the given string of the given length.
+    static inline int consecutiveSlashes(const CharacterType *string, int beginOffset, int stringLength)
+    {
+        int count = 0;
+        while (beginOffset + count < stringLength && isURLSlash(string[beginOffset + count]))
+            ++count;
+        return count;
+    }
+
+private:
+    // URLParser cannot be constructed.
+    URLParser();
+
+    // Returns true if the given character is a valid digit to use in a port.
+    static inline bool isPortDigit(CharacterType ch)
+    {
+        return ch >= '0' && ch <= '9';
+    }
+
+    // Returns the offset of the next authority terminator in the input starting
+    // from startOffset. If no terminator is found, the return value will be equal
+    // to specLength.
+    static int nextAuthorityTerminator(const CharacterType* spec, int startOffset, int specLength)
+    {
+        for (int i = startOffset; i < specLength; i++) {
+            if (isPossibleAuthorityTerminator(spec[i]))
+                return i;
+        }
+        return specLength; // Not found.
+    }
+
+    static void parseUserInfo(const CharacterType* spec, const URLComponent& user, URLComponent& username, URLComponent& password)
+    {
+        // Find the first colon in the user section, which separates the
+        // username and password.
+        int colonOffset = 0;
+        while (colonOffset < user.length() && spec[user.begin() + colonOffset] != ':')
+            ++colonOffset;
+
+        if (colonOffset < user.length()) {
+            // Found separator: <username>:<password>
+            username = URLComponent(user.begin(), colonOffset);
+            password = URLComponent::fromRange(user.begin() + colonOffset + 1, user.begin() + user.length());
+        } else {
+            // No separator, treat everything as the username
+            username = user;
+            password = URLComponent();
+        }
+    }
+
+    static void parseServerInfo(const CharacterType* spec, const URLComponent& serverInfo, URLComponent& host, URLComponent& port)
+    {
+        if (!serverInfo.length()) {
+            // No server info, host name is empty.
+            host.reset();
+            port.reset();
+            return;
+        }
+
+        // If the host starts with a left-bracket, assume the entire host is an
+        // IPv6 literal. Otherwise, assume none of the host is an IPv6 literal.
+        // This assumption will be overridden if we find a right-bracket.
+        //
+        // Our IPv6 address canonicalization code requires both brackets to
+        // exist, but the ability to locate an incomplete address can still be
+        // useful.
+        int ipv6Terminator = spec[serverInfo.begin()] == '[' ? serverInfo.end() : -1;
+        int colon = -1;
+
+        // Find the last right-bracket, and the last colon.
+        for (int i = serverInfo.begin(); i < serverInfo.end(); i++) {
+            switch (spec[i]) {
+            case ']':
+                ipv6Terminator = i;
+                break;
+            case ':':
+                colon = i;
+                break;
+            default:
+                break;
+            }
+        }
+
+        if (colon > ipv6Terminator) {
+            // Found a port number: <hostname>:<port>
+            host = URLComponent::fromRange(serverInfo.begin(), colon);
+            if (!host.length())
+                host.reset();
+            port = URLComponent::fromRange(colon + 1, serverInfo.end());
+        } else {
+            // No port: <hostname>
+            host = serverInfo;
+            port.reset();
+        }
+    }
+};
+
+} // namespace WTF
+
+#endif // USE(WTFURL)
+
+#endif // URLParser_h
diff --git a/Source/JavaScriptCore/wtf/url/src/URLQueryCanonicalizer.h b/Source/JavaScriptCore/wtf/url/src/URLQueryCanonicalizer.h
new file mode 100644
index 000000000..467c497fd
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLQueryCanonicalizer.h
@@ -0,0 +1,109 @@
+// Copyright 2010, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef URLQueryCanonicalizer_h
+#define URLQueryCanonicalizer_h
+
+#if USE(WTFURL)
+
+#include "RawURLBuffer.h"
+#include "URLBuffer.h"
+#include "URLCharacterTypes.h"
+#include "URLComponent.h"
+#include "URLEscape.h"
+
+namespace WTF {
+
+template<typename InChar, typename OutChar, void convertCharset(const InChar*, int length, URLBuffer<char>&)>
+class URLQueryCanonicalizer { // Appends a canonicalized "?query" to buffer, %XX-escaping characters not allowed raw in a query.
+public:
+    static void canonicalize(const InChar* spec, const URLComponent& query, URLBuffer<OutChar>& buffer, URLComponent& resultQuery)
+    {
+        if (query.length() < 0) { // Invalid component: report an unspecified query and write nothing.
+            resultQuery = URLComponent();
+            return;
+        }
+
+        buffer.append('?'); // buffer is a reference, not a pointer.
+        resultQuery.setBegin(buffer.length());
+        convertToQueryEncoding(spec, query, buffer);
+        resultQuery.setLength(buffer.length() - resultQuery.begin());
+    }
+
+private:
+    static bool isAllASCII(const InChar* spec, const URLComponent& query) // True if every unit of the query is < 0x80.
+    {
+        int end = query.end();
+        for (int i = query.begin(); i < end; ++i) {
+            if (static_cast<unsigned>(spec[i]) >= 0x80)
+                return false;
+        }
+        return true;
+    }
+
+#ifndef NDEBUG
+    static bool isRaw8Bit(const InChar* source, int length) // Debug-only check that every unit fits in 8 bits.
+    {
+        for (int i = 0; i < length; ++i) { // Was "int i = source": a pointer is not a valid start index.
+            if ((source[i] & 0xFF) != source[i]) // Parenthesized: '!=' binds tighter than '&'.
+                return false;
+        }
+        return true;
+    }
+#endif
+
+    static void appendRaw8BitQueryString(const InChar* source, int length, URLBuffer<OutChar>& buffer) // Reference parameter to match both call sites below.
+    {
+        ASSERT(isRaw8Bit(source, length));
+        for (int i = 0; i < length; ++i) {
+            if (!URLCharacterTypes::isQueryChar(source[i]))
+                appendURLEscapedCharacter(static_cast<unsigned char>(source[i]), buffer); // appendURLEscapedCharacter takes the buffer by reference.
+            else
+                buffer.append(static_cast<char>(source[i]));
+        }
+    }
+
+    static void convertToQueryEncoding(const InChar* spec, const URLComponent& query, URLBuffer<OutChar>& buffer)
+    {
+        if (isAllASCII(spec, query)) {
+            appendRaw8BitQueryString(&spec[query.begin()], query.length(), buffer);
+            return;
+        }
+
+        RawURLBuffer<char, 1024> convertedQuery;
+        convertCharset(&spec[query.begin()], query.length(), convertedQuery); // convertCharset takes (pointer, length, out-buffer), not a URLComponent.
+        appendRaw8BitQueryString(convertedQuery.data(), convertedQuery.length(), buffer);
+    }
+};
+
+}
+
+#endif // USE(WTFURL)
+
+#endif
diff --git a/Source/JavaScriptCore/wtf/url/src/URLSegments.cpp b/Source/JavaScriptCore/wtf/url/src/URLSegments.cpp
new file mode 100644
index 000000000..182b0d45b
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLSegments.cpp
@@ -0,0 +1,114 @@
+/* Based on nsURLParsers.cc from Mozilla
+ * -------------------------------------
+ * Copyright (C) 1998 Netscape Communications Corporation.
+ *
+ * Other contributors:
+ * Darin Fisher (original author)
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ * Alternatively, the contents of this file may be used under the terms
+ * of either the Mozilla Public License Version 1.1, found at
+ * http://www.mozilla.org/MPL/ (the "MPL") or the GNU General Public
+ * License Version 2.0, found at http://www.fsf.org/copyleft/gpl.html
+ * (the "GPL"), in which case the provisions of the MPL or the GPL are
+ * applicable instead of those above. If you wish to allow use of your
+ * version of this file only under the terms of one of those two
+ * licenses (the MPL or the GPL) and not to allow others to use your
+ * version of this file under the LGPL, indicate your decision by
+ * deleting the provisions above and replace them with the notice and
+ * other provisions required by the MPL or the GPL, as the case may be.
+ * If you do not delete the provisions above, a recipient may use your
+ * version of this file under any of the LGPL, the MPL or the GPL.
+ */
+
+#include "config.h"
+#include "URLSegments.h"
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+// The URL ends at the end of the fragment when one exists; otherwise it
+// ends just past the last component that is present.
+int URLSegments::length() const
+{
+    return fragment.isValid() ? fragment.end() : charactersBefore(Fragment, false);
+}
+
+int URLSegments::charactersBefore(ComponentType type, bool includeDelimiter) const
+{
+    if (type == Scheme)
+        return scheme.begin();
+
+    int current = 0;
+    if (scheme.isValid())
+        current = scheme.end() + 1; // Advance over the ':' at the end of the scheme.
+
+    if (username.isValid()) {
+        if (type <= Username)
+            return username.begin();
+        current = username.end() + 1; // Advance over the '@' or ':' at the end.
+    }
+
+    if (password.isValid()) {
+        if (type <= Password)
+            return password.begin();
+        current = password.end() + 1; // Advance over the '@' at the end.
+    }
+
+    if (host.isValid()) {
+        if (type <= Host)
+            return host.begin();
+        current = host.end(); // The host has no trailing delimiter of its own.
+    }
+
+    if (port.isValid()) {
+        if (type < Port || (type == Port && includeDelimiter))
+            return port.begin() - 1; // Back over the ':' delimiter.
+        if (type == Port)
+            return port.begin(); // Don't want the delimiter counted.
+        current = port.end();
+    }
+
+    if (path.isValid()) {
+        if (type <= Path)
+            return path.begin();
+        current = path.end();
+    }
+
+    if (query.isValid()) {
+        if (type < Query || (type == Query && includeDelimiter))
+            return query.begin() - 1; // Back over the '?' delimiter.
+        if (type == Query)
+            return query.begin(); // Don't want the delimiter counted.
+        current = query.end();
+    }
+
+    if (fragment.isValid()) {
+        if (type == Fragment && !includeDelimiter)
+            return fragment.begin(); // Don't want the delimiter counted.
+
+        // When there is a fragment and we get here, the component we wanted was before
+        // this and not found, so we always know the beginning of the fragment is right.
+        return fragment.begin() - 1; // Back over the '#' delimiter.
+    }
+
+    return current;
+}
+
+} // namespace WTF
+
+#endif // USE(WTFURL)
diff --git a/Source/JavaScriptCore/wtf/url/src/URLSegments.h b/Source/JavaScriptCore/wtf/url/src/URLSegments.h
new file mode 100644
index 000000000..64d0619b8
--- /dev/null
+++ b/Source/JavaScriptCore/wtf/url/src/URLSegments.h
@@ -0,0 +1,109 @@
+// Copyright 2007, Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef URLSegments_h
+#define URLSegments_h
+
+#include "URLComponent.h"
+
+#if USE(WTFURL)
+
+namespace WTF {
+
+// A structure that holds the identified parts of an input URL. This structure
+// does NOT store the URL itself. The caller will have to store the URL text
+// and its corresponding URLSegments structure separately.
+class URLSegments {
+public:
+    // Identifies the different components, ordered as they appear in a URL.
+    enum ComponentType {
+        Scheme,
+        Username,
+        Password,
+        Host,
+        Port,
+        Path,
+        Query,
+        Fragment,
+    };
+
+    URLSegments() { }
+
+    // Returns the length of the URL (the end of the last component).
+    //
+    // Note that for some invalid, non-canonical URLs, this may not be the length
+    // of the string. For example "http://": the parsed structure will only
+    // contain an entry for the four-character scheme, and it doesn't know about
+    // the "://". For all other last-components, it will return the real length.
+    int length() const;
+
+    // Returns the number of characters before the given component if it exists,
+    // or where the component would be if it did exist. This will return the
+    // string length if the component would be appended to the end.
+    //
+    // Note that this can get a little funny for the port, query, and fragment
+    // components which have a delimiter that is not counted as part of the
+    // component. The |includeDelimiter| flag controls if you want this counted
+    // as part of the component or not when the component exists.
+    //
+    // This example shows the difference between the two flags for two of these
+    // delimited components that are present (the port and query) and one that
+    // isn't (the fragment). The components that this flag affects are marked
+    // with a *.
+    //                           0         1         2
+    //                           012345678901234567890
+    // Example input:            http://foo:80/?query
+    //              includeDelimiter=true, ...=false  ("<-" indicates different)
+    //      Scheme:                    0                  0
+    //      Username:                  5                  5
+    //      Password:                  5                  5
+    //      Host:                      7                  7
+    //      *Port:                     10                 11 <-
+    //      Path:                      13                 13
+    //      *Query:                    14                 15 <-
+    //      *Fragment:                 20                 20
+    //
+    int charactersBefore(ComponentType, bool includeDelimiter) const;
+
+    // Each component excludes the related delimiters and has a length of -1
+    // if that component is absent but 0 if the component exists but is empty.
+    URLComponent scheme;
+    URLComponent username;
+    URLComponent password;
+    URLComponent host;
+    URLComponent port;
+    URLComponent path;
+    URLComponent query;
+    URLComponent fragment;
+};
+
+} // namespace WTF
+
+#endif // USE(WTFURL)
+
+#endif // URLSegments_h
diff --git a/Source/JavaScriptCore/yarr/YarrInterpreter.cpp b/Source/JavaScriptCore/yarr/YarrInterpreter.cpp
index a452bb7f2..4472538e9 100644
--- a/Source/JavaScriptCore/yarr/YarrInterpreter.cpp
+++ b/Source/JavaScriptCore/yarr/YarrInterpreter.cpp
@@ -30,6 +30,7 @@
#include "UString.h"
#include "Yarr.h"
#include <wtf/BumpPointerAllocator.h>
+#include <wtf/DataLog.h>
#include <wtf/text/CString.h>
#ifndef NDEBUG
@@ -1682,10 +1683,10 @@ public:
#ifndef NDEBUG
void dumpDisjunction(ByteDisjunction* disjunction)
{
- printf("ByteDisjunction(%p):\n\t", disjunction);
+ dataLog("ByteDisjunction(%p):\n\t", disjunction);
for (unsigned i = 0; i < disjunction->terms.size(); ++i)
- printf("{ %d } ", disjunction->terms[i].type);
- printf("\n");
+ dataLog("{ %d } ", disjunction->terms[i].type);
+ dataLog("\n");
}
#endif
diff --git a/Source/JavaScriptCore/yarr/YarrJIT.cpp b/Source/JavaScriptCore/yarr/YarrJIT.cpp
index 06faeaa1a..cd861ecf3 100644
--- a/Source/JavaScriptCore/yarr/YarrJIT.cpp
+++ b/Source/JavaScriptCore/yarr/YarrJIT.cpp
@@ -379,6 +379,10 @@ class YarrGenerator : private MacroAssembler {
Label m_reentry;
JumpList m_jumps;
+ // Used for backtracking when the prior alternative did not consume any
+ // characters but matched.
+ Jump m_zeroLengthMatch;
+
// This flag is used to null out the second pattern character, when
// two are fused to match a pair together.
bool m_isDeadCode;
@@ -728,10 +732,13 @@ class YarrGenerator : private MacroAssembler {
break;
}
case 3: {
- BaseIndex address(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
- load32WithUnalignedHalfWords(address, character);
- and32(Imm32(0xffffff), character);
- break;
+ BaseIndex highAddress(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
+ load16(highAddress, character);
+ if (ignoreCaseMask)
+ or32(Imm32(ignoreCaseMask), character);
+ op.m_jumps.append(branch32(NotEqual, character, Imm32((allCharacters & 0xffff) | ignoreCaseMask)));
+ op.m_jumps.append(jumpIfCharNotEquals(allCharacters >> 16, startTermPosition + 2 - m_checked, character));
+ return;
}
case 4: {
BaseIndex address(input, index, TimesOne, (startTermPosition - m_checked) * sizeof(LChar));
@@ -809,10 +816,8 @@ class YarrGenerator : private MacroAssembler {
move(TrustedImm32(0), countRegister);
- if ((ch > 0xff) && (m_charSize == Char8)) {
- // Have a 16 bit pattern character and an 8 bit string - short circuit
- op.m_jumps.append(jump());
- } else {
+ // Unless have a 16 bit pattern character and an 8 bit string - short circuit
+ if (!((ch > 0xff) && (m_charSize == Char8))) {
JumpList failures;
Label loop(this);
failures.append(atEndOfInput());
@@ -830,7 +835,6 @@ class YarrGenerator : private MacroAssembler {
op.m_reentry = label();
storeToFrame(countRegister, term->frameLocation);
-
}
void backtrackPatternCharacterGreedy(size_t opIndex)
{
@@ -868,16 +872,13 @@ class YarrGenerator : private MacroAssembler {
const RegisterID character = regT0;
const RegisterID countRegister = regT1;
- JumpList nonGreedyFailures;
-
m_backtrackingState.link(this);
loadFromFrame(term->frameLocation, countRegister);
- if ((ch > 0xff) && (m_charSize == Char8)) {
- // Have a 16 bit pattern character and an 8 bit string - short circuit
- nonGreedyFailures.append(jump());
- } else {
+ // Unless have a 16 bit pattern character and an 8 bit string - short circuit
+ if (!((ch > 0xff) && (m_charSize == Char8))) {
+ JumpList nonGreedyFailures;
nonGreedyFailures.append(atEndOfInput());
if (term->quantityCount != quantifyInfinite)
nonGreedyFailures.append(branch32(Equal, countRegister, Imm32(term->quantityCount.unsafeGet())));
@@ -887,8 +888,8 @@ class YarrGenerator : private MacroAssembler {
add32(TrustedImm32(1), index);
jump(op.m_reentry);
+ nonGreedyFailures.link(this);
}
- nonGreedyFailures.link(this);
sub32(countRegister, index);
m_backtrackingState.fallthrough();
@@ -1385,7 +1386,7 @@ class YarrGenerator : private MacroAssembler {
op.m_checkAdjust -= disjunction->m_minimumSize;
if (op.m_checkAdjust)
op.m_jumps.append(jumpIfNoAvailableInput(op.m_checkAdjust));
-
+
m_checked += op.m_checkAdjust;
break;
}
@@ -1404,6 +1405,12 @@ class YarrGenerator : private MacroAssembler {
op.m_returnAddress = storeToFrameWithPatch(alternativeFrameLocation);
}
+ if (term->quantityType != QuantifierFixedCount && !m_ops[op.m_previousOp].m_alternative->m_minimumSize) {
+ // If the previous alternative matched without consuming characters then
+ // backtrack to try to match while consuming some input.
+ op.m_zeroLengthMatch = branch32(Equal, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ }
+
// If we reach here then the last alternative has matched - jump to the
// End node, to skip over any further alternatives.
//
@@ -1448,6 +1455,12 @@ class YarrGenerator : private MacroAssembler {
op.m_returnAddress = storeToFrameWithPatch(alternativeFrameLocation);
}
+ if (term->quantityType != QuantifierFixedCount && !m_ops[op.m_previousOp].m_alternative->m_minimumSize) {
+ // If the previous alternative matched without consuming characters then
+ // backtrack to try to match while consuming some input.
+ op.m_zeroLengthMatch = branch32(Equal, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ }
+
// If this set of alternatives contains more than one alternative,
// then the Next nodes will have planted jumps to the End, and added
// them to this node's m_jumps list.
@@ -1514,14 +1527,19 @@ class YarrGenerator : private MacroAssembler {
}
case OpParenthesesSubpatternOnceEnd: {
PatternTerm* term = op.m_term;
- unsigned parenthesesFrameLocation = term->frameLocation;
const RegisterID indexTemporary = regT0;
ASSERT(term->quantityCount == 1);
- // For Greedy/NonGreedy quantified parentheses, we must reject zero length
- // matches. If the minimum size is know to be non-zero we need not check.
- if (term->quantityType != QuantifierFixedCount && !term->parentheses.disjunction->m_minimumSize)
- op.m_jumps.append(branch32(Equal, index, Address(stackPointerRegister, parenthesesFrameLocation * sizeof(void*))));
+#ifndef NDEBUG
+ // Runtime ASSERT to make sure that the nested alternative handled the
+ // "no input consumed" check.
+ if (term->quantityType != QuantifierFixedCount && !term->parentheses.disjunction->m_minimumSize) {
+ Jump pastBreakpoint;
+ pastBreakpoint = branch32(NotEqual, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ breakpoint();
+ pastBreakpoint.link(this);
+ }
+#endif
// If the parenthese are capturing, store the ending index value to the
// captures array, offsetting as necessary.
@@ -1568,15 +1586,21 @@ class YarrGenerator : private MacroAssembler {
break;
}
case OpParenthesesSubpatternTerminalEnd: {
+ YarrOp& beginOp = m_ops[op.m_previousOp];
+#ifndef NDEBUG
PatternTerm* term = op.m_term;
- // Check for zero length matches - if the match is non-zero, then we
- // can accept it & loop back up to the head of the subpattern.
- YarrOp& beginOp = m_ops[op.m_previousOp];
- branch32(NotEqual, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)), beginOp.m_reentry);
+ // Runtime ASSERT to make sure that the nested alternative handled the
+ // "no input consumed" check.
+ Jump pastBreakpoint;
+ pastBreakpoint = branch32(NotEqual, index, Address(stackPointerRegister, term->frameLocation * sizeof(void*)));
+ breakpoint();
+ pastBreakpoint.link(this);
+#endif
- // Reject the match - backtrack back into the subpattern.
- op.m_jumps.append(jump());
+ // We know that the match is non-zero, we can accept it and
+ // loop back up to the head of the subpattern.
+ jump(beginOp.m_reentry);
// This is the entry point to jump to when we stop matching - we will
// do so once the subpattern cannot match any more.
@@ -1928,7 +1952,7 @@ class YarrGenerator : private MacroAssembler {
// An alternative that is not the last should jump to its successor.
jump(nextOp.m_reentry);
} else if (!isBegin) {
- // The last of more than one alternatives must jump back to the begnning.
+ // The last of more than one alternatives must jump back to the beginning.
nextOp.m_jumps.append(jump());
} else {
// A single alternative on its own can fall through.
@@ -1940,12 +1964,16 @@ class YarrGenerator : private MacroAssembler {
// An alternative that is not the last should jump to its successor.
m_backtrackingState.linkTo(nextOp.m_reentry, this);
} else if (!isBegin) {
- // The last of more than one alternatives must jump back to the begnning.
+ // The last of more than one alternatives must jump back to the beginning.
m_backtrackingState.takeBacktracksToJumpList(nextOp.m_jumps, this);
}
// In the case of a single alternative on its own do nothing - it can fall through.
}
+ // If there is a backtrack jump from a zero length match link it here.
+ if (op.m_zeroLengthMatch.isSet())
+ m_backtrackingState.append(op.m_zeroLengthMatch);
+
// At this point we've handled the backtracking back into this node.
// Now link any backtracks that need to jump to here.
@@ -1978,6 +2006,10 @@ class YarrGenerator : private MacroAssembler {
case OpNestedAlternativeEnd: {
PatternTerm* term = op.m_term;
+ // If there is a backtrack jump from a zero length match link it here.
+ if (op.m_zeroLengthMatch.isSet())
+ m_backtrackingState.append(op.m_zeroLengthMatch);
+
// If we backtrack into the end of a simple subpattern do nothing;
// just continue through into the last alternative. If we backtrack
// into the end of a non-simple set of alterntives we need to jump