Diffstat (limited to 'Source/JavaScriptCore/jit')
 Source/JavaScriptCore/jit/ExecutableAllocator.cpp            | 117
 Source/JavaScriptCore/jit/ExecutableAllocator.h              |  16
 Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp |  19
 Source/JavaScriptCore/jit/HostCallReturnValue.cpp            |   2
 Source/JavaScriptCore/jit/JIT.cpp                            |  32
 Source/JavaScriptCore/jit/JIT.h                              |  25
 Source/JavaScriptCore/jit/JITArithmetic.cpp                  |  60
 Source/JavaScriptCore/jit/JITArithmetic32_64.cpp             |  48
 Source/JavaScriptCore/jit/JITCall.cpp                        |   7
 Source/JavaScriptCore/jit/JITCall32_64.cpp                   |   4
 Source/JavaScriptCore/jit/JITCompilationEffort.h             |  39
 Source/JavaScriptCore/jit/JITDriver.h                        |  25
 Source/JavaScriptCore/jit/JITExceptions.cpp                  |   3
 Source/JavaScriptCore/jit/JITInlineMethods.h                 |  87
 Source/JavaScriptCore/jit/JITOpcodes.cpp                     | 110
 Source/JavaScriptCore/jit/JITOpcodes32_64.cpp                |  28
 Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp         |   2
 Source/JavaScriptCore/jit/JITStubCall.h                      |  12
 Source/JavaScriptCore/jit/JITStubs.cpp                       |  38
 Source/JavaScriptCore/jit/JITStubs.h                         |  24
 Source/JavaScriptCore/jit/ThunkGenerators.cpp                |   2
 21 files changed, 510 insertions(+), 190 deletions(-)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index 75137279e..5912f8652 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -29,11 +29,22 @@
#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#include "CodeProfiling.h"
+#include <wtf/DataLog.h>
+#include <wtf/HashSet.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#include <wtf/PassOwnPtr.h>
+#endif
+#include <wtf/ThreadingPrimitives.h>
#include <wtf/VMTags.h>
#endif
+// Uncomment to create an artificial executable memory usage limit. This limit
+// is imperfect and is primarily useful for testing the VM's ability to handle
+// out-of-executable-memory situations.
+// #define EXECUTABLE_MEMORY_LIMIT 1000000
+
#if ENABLE(ASSEMBLER)
using namespace WTF;
@@ -47,15 +58,48 @@ public:
DemandExecutableAllocator()
: MetaAllocator(32) // round up all allocations to 32 bytes
{
+ MutexLocker lock(allocatorsMutex());
+ allocators().add(this);
// Don't preallocate any memory here.
}
virtual ~DemandExecutableAllocator()
{
+ {
+ MutexLocker lock(allocatorsMutex());
+ allocators().remove(this);
+ }
for (unsigned i = 0; i < reservations.size(); ++i)
reservations.at(i).deallocate();
}
+ static size_t bytesAllocatedByAllAllocators()
+ {
+ size_t total = 0;
+ MutexLocker lock(allocatorsMutex());
+ for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+ total += (*allocator)->bytesAllocated();
+ return total;
+ }
+
+    static size_t bytesCommittedByAllAllocators()
+ {
+ size_t total = 0;
+ MutexLocker lock(allocatorsMutex());
+ for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+ total += (*allocator)->bytesCommitted();
+ return total;
+ }
+
+#if ENABLE(META_ALLOCATOR_PROFILE)
+ static void dumpProfileFromAllAllocators()
+ {
+ MutexLocker lock(allocatorsMutex());
+ for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
+ (*allocator)->dumpProfile();
+ }
+#endif
+
protected:
virtual void* allocateNewSpace(size_t& numPages)
{
@@ -65,6 +109,11 @@ protected:
numPages = newNumPages;
+#ifdef EXECUTABLE_MEMORY_LIMIT
+ if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
+ return 0;
+#endif
+
PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
if (!reservation)
CRASH();
@@ -86,20 +135,50 @@ protected:
private:
Vector<PageReservation, 16> reservations;
+ static HashSet<DemandExecutableAllocator*>& allocators()
+ {
+ DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
+ return sAllocators;
+ }
+ static Mutex& allocatorsMutex()
+ {
+ DEFINE_STATIC_LOCAL(Mutex, mutex, ());
+ return mutex;
+ }
};
-static DemandExecutableAllocator* allocator;
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+void ExecutableAllocator::initializeAllocator()
+{
+}
+#else
+static DemandExecutableAllocator* gAllocator;
+
+namespace {
+static inline DemandExecutableAllocator* allocator()
+{
+ return gAllocator;
+}
+}
void ExecutableAllocator::initializeAllocator()
{
- ASSERT(!allocator);
- allocator = new DemandExecutableAllocator();
- CodeProfiling::notifyAllocator(allocator);
+ ASSERT(!gAllocator);
+ gAllocator = new DemandExecutableAllocator();
+ CodeProfiling::notifyAllocator(gAllocator);
}
+#endif
ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ : m_allocator(adoptPtr(new DemandExecutableAllocator()))
+#endif
+{
+ ASSERT(allocator());
+}
+
+ExecutableAllocator::~ExecutableAllocator()
{
- ASSERT(allocator);
}
bool ExecutableAllocator::isValid() const
@@ -109,26 +188,44 @@ bool ExecutableAllocator::isValid() const
bool ExecutableAllocator::underMemoryPressure()
{
+#ifdef EXECUTABLE_MEMORY_LIMIT
+ return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
+#else
return false;
+#endif
+}
+
+double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
+{
+#ifdef EXECUTABLE_MEMORY_LIMIT
+ size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
+ if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
+ bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
+ return static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
+ (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
+#else
+ UNUSED_PARAM(addedMemoryUsage);
+ return 1.0;
+#endif
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID)
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
- RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
- if (!result)
+ RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
+ if (!result && effort == JITCompilationMustSucceed)
CRASH();
return result.release();
}
size_t ExecutableAllocator::committedByteCount()
{
- return allocator->bytesCommitted();
+    return DemandExecutableAllocator::bytesCommittedByAllAllocators();
}
#if ENABLE(META_ALLOCATOR_PROFILE)
void ExecutableAllocator::dumpProfile()
{
- allocator->dumpProfile();
+ DemandExecutableAllocator::dumpProfileFromAllAllocators();
}
#endif
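
The memoryPressureMultiplier() added above scales hyperbolically: it is 1.0 when no executable memory is in use and grows without bound as allocation approaches the limit, so callers can scale optimization decisions by scarcity. A minimal standalone sketch of the same arithmetic, using the 1000000-byte figure from the commented-out EXECUTABLE_MEMORY_LIMIT above (kLimit and pressureMultiplier are illustrative names, not JSC API):

    #include <algorithm>
    #include <cstddef>
    #include <cstdio>

    // Illustrative limit matching the EXECUTABLE_MEMORY_LIMIT example above.
    static const size_t kLimit = 1000000;

    // Same formula as ExecutableAllocator::memoryPressureMultiplier(): the
    // closer the allocator is to the limit, the larger the multiplier.
    static double pressureMultiplier(size_t bytesAllocated)
    {
        bytesAllocated = std::min(bytesAllocated, kLimit);
        return static_cast<double>(kLimit) / (kLimit - bytesAllocated);
    }

    int main()
    {
        printf("%.2f\n", pressureMultiplier(0));      // 1.00: no pressure
        printf("%.2f\n", pressureMultiplier(500000)); // 2.00: half full
        printf("%.2f\n", pressureMultiplier(900000)); // 10.00: nearly full
        return 0;
    }
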
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index 7520913d0..8a14ac67e 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -25,6 +25,7 @@
#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h
+#include "JITCompilationEffort.h"
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
@@ -95,11 +96,16 @@ typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
#if ENABLE(JIT) && ENABLE(ASSEMBLER)
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+class DemandExecutableAllocator;
+#endif
+
class ExecutableAllocator {
enum ProtectionSetting { Writable, Executable };
public:
ExecutableAllocator(JSGlobalData&);
+ ~ExecutableAllocator();
static void initializeAllocator();
@@ -107,13 +113,15 @@ public:
static bool underMemoryPressure();
+ static double memoryPressureMultiplier(size_t addedMemoryUsage);
+
#if ENABLE(META_ALLOCATOR_PROFILE)
static void dumpProfile();
#else
static void dumpProfile() { }
#endif
- PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID);
+ PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
static void makeWritable(void* start, size_t size)
@@ -232,7 +240,13 @@ private:
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
static void reprotectRegion(void*, size_t, ProtectionSetting);
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+ // We create a MetaAllocator for each JS global object.
+ OwnPtr<DemandExecutableAllocator> m_allocator;
+ DemandExecutableAllocator* allocator() { return m_allocator.get(); }
#endif
+#endif
+
};
#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 37a57e8b7..959ea744b 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -104,6 +104,10 @@ ExecutableAllocator::ExecutableAllocator(JSGlobalData&)
ASSERT(allocator);
}
+ExecutableAllocator::~ExecutableAllocator()
+{
+}
+
bool ExecutableAllocator::isValid() const
{
return !!allocator->bytesReserved();
@@ -115,10 +119,23 @@ bool ExecutableAllocator::underMemoryPressure()
return statistics.bytesAllocated > statistics.bytesReserved / 2;
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes, void* ownerUID)
+double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
+{
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
+ size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
+ if (bytesAllocated >= statistics.bytesReserved)
+ bytesAllocated = statistics.bytesReserved;
+ return static_cast<double>(statistics.bytesReserved) /
+ (statistics.bytesReserved - bytesAllocated);
+}
+
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
if (!result) {
+ if (effort == JITCompilationCanFail)
+ return result;
releaseExecutableMemory(globalData);
result = allocator->allocate(sizeInBytes, ownerUID);
if (!result)
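
The fixed-pool allocate() above now consults the caller's effort before resorting to drastic measures: JITCompilationCanFail returns null immediately, while JITCompilationMustSucceed releases existing executable memory, retries once, and crashes only if that also fails. A hedged sketch of that control flow; tryAllocate() and releaseMemory() are hypothetical stand-ins for allocator->allocate() and releaseExecutableMemory():

    #include <cstdlib>

    enum JITCompilationEffort { JITCompilationCanFail, JITCompilationMustSucceed };

    // Hypothetical stand-ins for the allocator and the eviction hook.
    static void* tryAllocate(size_t bytes) { return malloc(bytes); }
    static void releaseMemory() { /* evict existing JIT code in the real VM */ }

    static void* allocateExecutable(size_t bytes, JITCompilationEffort effort)
    {
        if (void* result = tryAllocate(bytes)) // common case: first try succeeds
            return result;
        if (effort == JITCompilationCanFail)
            return nullptr;                    // caller promised it can recover
        releaseMemory();                       // make room, then retry once
        if (void* result = tryAllocate(bytes))
            return result;
        abort();                               // the CRASH() path in the patch
    }
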
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
index 924bc7671..9d449c374 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
@@ -27,7 +27,7 @@
#include "HostCallReturnValue.h"
#include "CallFrame.h"
-#include "InlineASM.h"
+#include <wtf/InlineASM.h>
#include "JSObject.h"
#include "JSValueInlineMethods.h"
#include "ScopeChain.h"
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 2adc596ce..541cc896a 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -35,7 +35,7 @@ JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse
#endif
#include "CodeBlock.h"
-#include "CryptographicallyRandomNumber.h"
+#include <wtf/CryptographicallyRandomNumber.h>
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
#include "JITInlineMethods.h"
@@ -102,7 +102,7 @@ void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
JITStubCall stubCall(this, kind == LoopOptimizationCheck ? cti_optimize_from_loop : cti_optimize_from_ret);
if (kind == LoopOptimizationCheck)
- stubCall.addArgument(Imm32(m_bytecodeOffset));
+ stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
stubCall.call();
skipOptimize.link(this);
}
@@ -235,14 +235,10 @@ void JIT::privateCompileMainPass()
DEFINE_UNARY_OP(op_is_object)
DEFINE_UNARY_OP(op_is_string)
DEFINE_UNARY_OP(op_is_undefined)
-#if USE(JSVALUE64)
- DEFINE_UNARY_OP(op_negate)
-#endif
DEFINE_UNARY_OP(op_typeof)
DEFINE_OP(op_add)
DEFINE_OP(op_bitand)
- DEFINE_OP(op_bitnot)
DEFINE_OP(op_bitor)
DEFINE_OP(op_bitxor)
DEFINE_OP(op_call)
@@ -302,9 +298,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_mod)
DEFINE_OP(op_mov)
DEFINE_OP(op_mul)
-#if USE(JSVALUE32_64)
DEFINE_OP(op_negate)
-#endif
DEFINE_OP(op_neq)
DEFINE_OP(op_neq_null)
DEFINE_OP(op_new_array)
@@ -437,7 +431,6 @@ void JIT::privateCompileSlowCases()
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_SLOWCASE_OP(op_add)
DEFINE_SLOWCASE_OP(op_bitand)
- DEFINE_SLOWCASE_OP(op_bitnot)
DEFINE_SLOWCASE_OP(op_bitor)
DEFINE_SLOWCASE_OP(op_bitxor)
DEFINE_SLOWCASE_OP(op_call)
@@ -475,10 +468,9 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_method_check)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
-#if USE(JSVALUE32_64)
DEFINE_SLOWCASE_OP(op_negate)
-#endif
DEFINE_SLOWCASE_OP(op_neq)
+ DEFINE_SLOWCASE_OP(op_new_array)
DEFINE_SLOWCASE_OP(op_new_object)
DEFINE_SLOWCASE_OP(op_new_func)
DEFINE_SLOWCASE_OP(op_new_func_exp)
@@ -509,7 +501,7 @@ void JIT::privateCompileSlowCases()
#if ENABLE(VALUE_PROFILER)
if (m_canBeOptimized)
- add32(Imm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
+ add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif
emitJumpSlowToHot(jump(), 0);
@@ -527,7 +519,7 @@ void JIT::privateCompileSlowCases()
#endif
}
-JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
+JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
{
#if ENABLE(JIT_VERBOSE_OSR)
printf("Compiling JIT code!\n");
@@ -581,7 +573,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
}
#endif
- addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
+ addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
}
@@ -589,7 +581,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
#if ENABLE(VALUE_PROFILER)
if (m_canBeOptimized)
- add32(Imm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->m_executionEntryCount));
#endif
privateCompileMainPass();
@@ -625,7 +617,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
ASSERT(m_jmpTable.isEmpty());
- LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock, effort);
+ if (patchBuffer.didFailToAllocate())
+ return JITCode();
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
@@ -702,7 +696,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
}
#if ENABLE(DFG_JIT) || ENABLE(LLINT)
- if (m_canBeOptimized
+ if (canBeOptimized()
#if ENABLE(LLINT)
|| true
#endif
@@ -721,6 +715,10 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
CodeRef result = patchBuffer.finalizeCode();
+ m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
+ static_cast<double>(result.size()) /
+ static_cast<double>(m_codeBlock->instructions().size()));
+
#if ENABLE(JIT_VERBOSE)
dataLog("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif
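
Most of the Imm32 -> TrustedImm32 and ImmPtr -> TrustedImmPtr conversions in this file (and throughout the patch) serve JIT constant blinding, visible in the ENABLE(JIT_CONSTANT_BLINDING) branch in JITArithmetic32_64.cpp below: plain Imm32 values may be blinded before being embedded in generated code, while Trusted* values are VM-internal constants an attacker cannot choose and are emitted verbatim. A simplified sketch of the blinding idea, not WebKit's MacroAssembler implementation:

    #include <cstdint>
    #include <cstdlib>

    // Simplified model of constant blinding: an attacker-influenced 32-bit
    // constant is never embedded directly in the instruction stream (where it
    // could double as sprayed machine code); it is emitted XORed with a random
    // key and recovered with one extra XOR at runtime. Trusted constants skip
    // this and cost nothing.
    struct BlindedImm32 {
        uint32_t blinded; // value ^ key: what lands in the generated code
        uint32_t key;     // per-use random key

        static BlindedImm32 blind(uint32_t value)
        {
            uint32_t key = static_cast<uint32_t>(rand());
            return { value ^ key, key };
        }
        uint32_t unblind() const { return blinded ^ key; }
    };
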
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index a2bc4272a..2d2841baf 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -199,9 +199,9 @@ namespace JSC {
static const int patchPutByIdDefaultOffset = 256;
public:
- static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0)
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, JITCompilationEffort effort, CodePtr* functionEntryArityCheck = 0)
{
- return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck);
+ return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck, effort);
}
static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
@@ -254,8 +254,13 @@ namespace JSC {
static CodeRef compileCTINativeCall(JSGlobalData* globalData, NativeFunction func)
{
- if (!globalData->canUseJIT())
+ if (!globalData->canUseJIT()) {
+#if ENABLE(LLINT)
+ return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
+#else
return CodeRef();
+#endif
+ }
JIT jit(globalData, 0);
return jit.privateCompileCTINativeCall(globalData, func);
}
@@ -291,7 +296,7 @@ namespace JSC {
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
- JITCode privateCompile(CodePtr* functionEntryArityCheck);
+ JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort);
void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
@@ -336,8 +341,10 @@ namespace JSC {
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
template<typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
+ void emitAllocateBasicStorage(size_t, RegisterID result, RegisterID storagePtr);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
void emitAllocateJSFunction(FunctionExecutable*, RegisterID scopeChain, RegisterID result, RegisterID storagePtr);
+ void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr);
#if ENABLE(VALUE_PROFILER)
// This assumes that the value to profile is in regT0 and that regT3 is available for
@@ -464,7 +471,7 @@ namespace JSC {
#if ENABLE(OPCODE_SAMPLING)
#error "OPCODE_SAMPLING is not yet supported"
#else
- static const int patchOffsetGetByIdSlowCaseCall = 48;
+ static const int patchOffsetGetByIdSlowCaseCall = 52;
#endif
static const int patchOffsetOpCallCompareToJump = 16;
@@ -500,7 +507,7 @@ namespace JSC {
#if ENABLE(OPCODE_SAMPLING)
#error "OPCODE_SAMPLING is not yet supported"
#else
- static const int patchOffsetGetByIdSlowCaseCall = 64;
+ static const int patchOffsetGetByIdSlowCaseCall = 68;
#endif
static const int patchOffsetOpCallCompareToJump = 32;
static const int patchOffsetMethodCheckProtoObj = 32;
@@ -518,7 +525,7 @@ namespace JSC {
#if ENABLE(OPCODE_SAMPLING)
#error "OPCODE_SAMPLING is not yet supported"
#else
- static const int patchOffsetGetByIdSlowCaseCall = 64;
+ static const int patchOffsetGetByIdSlowCaseCall = 68;
#endif
static const int patchOffsetOpCallCompareToJump = 32;
static const int patchOffsetMethodCheckProtoObj = 32;
@@ -778,7 +785,6 @@ namespace JSC {
void emit_op_add(Instruction*);
void emit_op_bitand(Instruction*);
- void emit_op_bitnot(Instruction*);
void emit_op_bitor(Instruction*);
void emit_op_bitxor(Instruction*);
void emit_op_call(Instruction*);
@@ -898,7 +904,6 @@ namespace JSC {
void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -957,7 +962,7 @@ namespace JSC {
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_new_func(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_new_func_exp(Instruction*, Vector<SlowCaseEntry>::iterator&);
-
+ void emitSlow_op_new_array(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitRightShift(Instruction*, bool isUnsigned);
void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index 362cc6241..1b32e3bcf 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -192,6 +192,42 @@ void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCa
#if USE(JSVALUE64)
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+
+ Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
+ addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
+ neg32(regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ emitJumpSlowCaseIfNotImmediateNumber(regT0);
+
+ move(TrustedImmPtr(reinterpret_cast<void*>(0x8000000000000000ull)), regT1);
+ xorPtr(regT1, regT0);
+
+ end.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // 0x7fffffff check
+ linkSlowCase(iter); // double check
+
+ JITStubCall stubCall(this, cti_op_negate);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
@@ -797,13 +833,13 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsign
addSlowCase(branchMul32(Overflow, regT1, regT2));
JumpList done;
done.append(branchTest32(NonZero, regT2));
- Jump negativeZero = branch32(LessThan, regT0, Imm32(0));
- done.append(branch32(GreaterThanOrEqual, regT1, Imm32(0)));
+ Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
+ done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
negativeZero.link(this);
// We only get here if we have a genuine negative zero. Record this,
// so that the speculative JIT knows that we failed speculation
// because of a negative zero.
- add32(Imm32(1), AbsoluteAddress(&profile->m_counter));
+ add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
addSlowCase(jump());
done.link(this);
move(regT2, regT0);
@@ -927,13 +963,13 @@ void JIT::emit_op_add(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
+ addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT0);
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
+ addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT0);
} else
compileBinaryArithOp(op_add, result, op1, op2, types);
@@ -973,8 +1009,8 @@ void JIT::emit_op_mul(Instruction* currentInstruction)
#endif
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
- emitFastArithReTagImmediate(regT0, regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
+ emitFastArithReTagImmediate(regT1, regT0);
} else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
#if ENABLE(VALUE_PROFILER)
// Add a special fast case profile because the DFG JIT will expect one.
@@ -982,8 +1018,8 @@ void JIT::emit_op_mul(Instruction* currentInstruction)
#endif
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
- emitFastArithReTagImmediate(regT0, regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
+ emitFastArithReTagImmediate(regT1, regT0);
} else
compileBinaryArithOp(op_mul, result, op1, op2, types);
@@ -1069,7 +1105,7 @@ void JIT::emit_op_div(Instruction* currentInstruction)
emitFastArithReTagImmediate(regT0, regT0);
Jump isInteger = jump();
notInteger.link(this);
- add32(Imm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
moveDoubleToPtr(fpRegT0, regT0);
subPtr(tagTypeNumberRegister, regT0);
isInteger.link(this);
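
The new JSVALUE64 op_negate fast path above handles integers and doubles separately. The integer guard branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)) bails out for exactly two operands: 0 (whose negation is the double -0.0) and INT32_MIN (whose negation overflows int32); both have all low 31 bits clear. Doubles are negated by XORing in the 0x8000000000000000 sign bit. The same two checks in plain C++, with value boxing omitted:

    #include <cstdint>
    #include <cstring>

    // Integer fast path: negation stays in int32 range unless the operand is
    // 0 or INT32_MIN -- precisely the values where (v & 0x7fffffff) == 0.
    static bool canNegateAsInt32(int32_t v)
    {
        return (v & 0x7fffffff) != 0;
    }

    // Double fast path: negation is flipping the IEEE sign bit, which the JIT
    // does with xorPtr against 0x8000000000000000.
    static double negateDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        bits ^= 0x8000000000000000ull;
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }
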
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 4916261fe..11a758103 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -448,31 +448,6 @@ void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.call(dst);
}
-// BitNot (~)
-
-void JIT::emit_op_bitnot(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
-
- not32(regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == src, OPCODE_LENGTH(op_bitnot));
-}
-
-void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitnot);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
// PostInc (i++)
void JIT::emit_op_post_inc(Instruction* currentInstruction)
@@ -504,7 +479,7 @@ void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseE
JITStubCall stubCall(this, cti_op_post_inc);
stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
+ stubCall.addArgument(TrustedImm32(srcDst));
stubCall.call(dst);
}
@@ -644,9 +619,9 @@ void JIT::emit_op_add(Instruction* currentInstruction)
void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
// Int32 case.
- emitLoad(op, regT1, regT0);
+ emitLoad(op, regT1, regT2);
Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
+ addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
emitStoreInt32(dst, regT0, (op == dst));
// Double case.
@@ -757,8 +732,13 @@ void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
// Int32 case.
emitLoad(op, regT1, regT0);
Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
+#else
+ addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
+#endif
+
+ emitStoreInt32(dst, regT2, (op == dst));
// Double case.
if (!supportsFloatingPoint()) {
@@ -886,7 +866,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
emitStoreInt32(dst, regT2);
Jump isInteger = jump();
notInteger.link(this);
- add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT1);
isInteger.link(this);
#else
@@ -990,7 +970,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
emitStoreInt32(dst, regT2);
Jump isInteger = jump();
notInteger.link(this);
- add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT0);
isInteger.link(this);
#else
@@ -1096,7 +1076,7 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
// We only get here if we have a genuine negative zero. Record this,
// so that the speculative JIT knows that we failed speculation
// because of a negative zero.
- add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
overflow.link(this);
@@ -1175,7 +1155,7 @@ void JIT::emit_op_div(Instruction* currentInstruction)
emitStoreInt32(dst, regT2);
end.append(jump());
notInteger.link(this);
- add32(Imm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT0);
#else
emitStoreDouble(dst, fpRegT0);
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 69dc9540e..73d017d05 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -83,8 +83,7 @@ void JIT::compileLoadVarargs(Instruction* instruction)
slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1));
// Initialize ArgumentCount.
- emitFastArithReTagImmediate(regT0, regT2);
- storePtr(regT2, Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+ store32(regT0, Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
// Initialize 'this'.
emitGetVirtualRegister(thisValue, regT2);
@@ -93,13 +92,13 @@ void JIT::compileLoadVarargs(Instruction* instruction)
// Copy arguments.
neg32(regT0);
signExtend32ToPtr(regT0, regT0);
- end.append(branchAddPtr(Zero, Imm32(1), regT0));
+ end.append(branchAddPtr(Zero, TrustedImm32(1), regT0));
// regT0: -argumentCount
Label copyLoop = label();
loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
- branchAddPtr(NonZero, Imm32(1), regT0).linkTo(copyLoop, this);
+ branchAddPtr(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
end.append(jump());
}
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index b84ad1a49..7fb6c78b9 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -171,7 +171,7 @@ void JIT::compileLoadVarargs(Instruction* instruction)
// Copy arguments.
neg32(regT2);
- end.append(branchAdd32(Zero, Imm32(1), regT2));
+ end.append(branchAdd32(Zero, TrustedImm32(1), regT2));
// regT2: -argumentCount;
Label copyLoop = label();
@@ -179,7 +179,7 @@ void JIT::compileLoadVarargs(Instruction* instruction)
load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- branchAdd32(NonZero, Imm32(1), regT2).linkTo(copyLoop, this);
+ branchAdd32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);
end.append(jump());
}
diff --git a/Source/JavaScriptCore/jit/JITCompilationEffort.h b/Source/JavaScriptCore/jit/JITCompilationEffort.h
new file mode 100644
index 000000000..5eb680178
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITCompilationEffort.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITCompilationEffort_h
+#define JITCompilationEffort_h
+
+namespace JSC {
+
+enum JITCompilationEffort {
+ JITCompilationCanFail,
+ JITCompilationMustSucceed
+};
+
+} // namespace JSC
+
+#endif // JITCompilationEffort_h
+
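
From the caller's side, the new enum turns out-of-executable-memory from a fatal error into a recoverable one. A hedged sketch of the intended usage pattern; the compile() stub below only mirrors the shape of JIT::compile(), it is not the JSC entry point:

    #include <cstdio>
    #include <cstdlib>

    enum JITCompilationEffort { JITCompilationCanFail, JITCompilationMustSucceed };

    // Shape of JIT::compile(): may return null only when the caller passed
    // JITCompilationCanFail; otherwise failure is a CRASH().
    static void* compile(JITCompilationEffort effort)
    {
        void* code = nullptr; // pretend executable memory is exhausted
        if (!code && effort == JITCompilationMustSucceed)
            abort();
        return code;
    }

    int main()
    {
        if (!compile(JITCompilationCanFail))
            printf("JIT failed; keep interpreting this code block\n");
        return 0;
    }
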
diff --git a/Source/JavaScriptCore/jit/JITDriver.h b/Source/JavaScriptCore/jit/JITDriver.h
index b204c7737..66cf51925 100644
--- a/Source/JavaScriptCore/jit/JITDriver.h
+++ b/Source/JavaScriptCore/jit/JITDriver.h
@@ -38,7 +38,7 @@
namespace JSC {
template<typename CodeBlockType>
-inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType)
+inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType, JITCompilationEffort effort)
{
if (jitType == codeBlock->getJITType())
return true;
@@ -48,6 +48,8 @@ inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockTy
codeBlock->unlinkIncomingCalls();
+ JITCode oldJITCode = jitCode;
+
bool dfgCompiled = false;
if (jitType == JITCode::DFGJIT)
dfgCompiled = DFG::tryCompile(globalData, codeBlock.get(), jitCode);
@@ -57,16 +59,21 @@ inline bool jitCompileIfAppropriate(JSGlobalData& globalData, OwnPtr<CodeBlockTy
} else {
if (codeBlock->alternative()) {
codeBlock = static_pointer_cast<CodeBlockType>(codeBlock->releaseAlternative());
+ jitCode = oldJITCode;
+ return false;
+ }
+ jitCode = JIT::compile(&globalData, codeBlock.get(), effort);
+ if (!jitCode) {
+ jitCode = oldJITCode;
return false;
}
- jitCode = JIT::compile(&globalData, codeBlock.get());
}
codeBlock->setJITCode(jitCode, MacroAssemblerCodePtr());
return true;
}
-inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, SharedSymbolTable*& symbolTable, JITCode::JITType jitType)
+inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, SharedSymbolTable*& symbolTable, JITCode::JITType jitType, JITCompilationEffort effort)
{
if (jitType == codeBlock->getJITType())
return true;
@@ -76,6 +83,9 @@ inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<Fun
codeBlock->unlinkIncomingCalls();
+ JITCode oldJITCode = jitCode;
+ MacroAssemblerCodePtr oldJITCodeWithArityCheck = jitCodeWithArityCheck;
+
bool dfgCompiled = false;
if (jitType == JITCode::DFGJIT)
dfgCompiled = DFG::tryCompileFunction(globalData, codeBlock.get(), jitCode, jitCodeWithArityCheck);
@@ -86,9 +96,16 @@ inline bool jitCompileFunctionIfAppropriate(JSGlobalData& globalData, OwnPtr<Fun
if (codeBlock->alternative()) {
codeBlock = static_pointer_cast<FunctionCodeBlock>(codeBlock->releaseAlternative());
symbolTable = codeBlock->sharedSymbolTable();
+ jitCode = oldJITCode;
+ jitCodeWithArityCheck = oldJITCodeWithArityCheck;
+ return false;
+ }
+ jitCode = JIT::compile(&globalData, codeBlock.get(), effort, &jitCodeWithArityCheck);
+ if (!jitCode) {
+ jitCode = oldJITCode;
+ jitCodeWithArityCheck = oldJITCodeWithArityCheck;
return false;
}
- jitCode = JIT::compile(&globalData, codeBlock.get(), &jitCodeWithArityCheck);
}
codeBlock->setJITCode(jitCode, jitCodeWithArityCheck);
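
The JITDriver changes above make a failed compile transactional: jitCode (and, for functions, jitCodeWithArityCheck) is snapshotted before the attempt and restored on every failure path, so the caller never observes a half-updated code block. The same rollback pattern in miniature; JITCode here is any cheap-to-copy handle with a null state, standing in for JSC's:

    #include <string>

    using JITCode = std::string;               // stand-in handle; empty == null

    static JITCode tryCompile() { return {}; } // pretend compilation failed

    // Returns true only if jitCode was replaced. On failure the caller's
    // handle is rolled back, mirroring the oldJITCode dance in JITDriver.h.
    static bool recompile(JITCode& jitCode)
    {
        JITCode oldJITCode = jitCode;          // snapshot before mutating
        jitCode = tryCompile();
        if (jitCode.empty()) {
            jitCode = oldJITCode;              // never publish a null handle
            return false;
        }
        return true;
    }
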
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index 2edd3408f..ab1180716 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -48,8 +48,7 @@ ExceptionHandler genericThrow(JSGlobalData* globalData, ExecState* callFrame, JS
Instruction* catchPCForInterpreter = 0;
if (handler) {
catchRoutine = handler->nativeCode.executableAddress();
- if (callFrame->codeBlock()->hasInstructions())
- catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
+ catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
} else
catchRoutine = FunctionPtr(ctiOpThrowNotCaught).value();
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index e0310569d..998d5ac18 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -269,7 +269,7 @@ ALWAYS_INLINE void JIT::updateTopCallFrame()
#if USE(JSVALUE32_64)
storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#else
- store32(Imm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
+ store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(RegisterFile::ArgumentCount));
#endif
}
storePtr(callFrameRegister, &m_globalData->topCallFrame);
@@ -459,6 +459,78 @@ inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, Register
#endif
}
+inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
+{
+ CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
+
+ // FIXME: We need to check for wrap-around.
+ // Check to make sure that the allocation will fit in the current block.
+ loadPtr(&allocator->m_currentOffset, result);
+ addPtr(TrustedImm32(size), result);
+ loadPtr(&allocator->m_currentBlock, storagePtr);
+ addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
+ addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));
+
+ // Load the original offset.
+ loadPtr(&allocator->m_currentOffset, result);
+
+ // Bump the pointer forward.
+ move(result, storagePtr);
+ addPtr(TrustedImm32(size), storagePtr);
+ storePtr(storagePtr, &allocator->m_currentOffset);
+}
+
+inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
+{
+ unsigned initialLength = std::max(length, 4U);
+ size_t initialStorage = JSArray::storageSize(initialLength);
+
+ // Allocate the cell for the array.
+ emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);
+
+ // Allocate the backing store for the array.
+ emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);
+
+ // Store all the necessary info in the ArrayStorage.
+ storePtr(storageResult, Address(storageResult, ArrayStorage::allocBaseOffset()));
+ store32(Imm32(length), Address(storageResult, ArrayStorage::lengthOffset()));
+ store32(Imm32(length), Address(storageResult, ArrayStorage::numValuesInVectorOffset()));
+
+ // Store the newly allocated ArrayStorage.
+ storePtr(storageResult, Address(cellResult, JSArray::storageOffset()));
+
+ // Store the vector length and index bias.
+ store32(Imm32(initialLength), Address(cellResult, JSArray::vectorLengthOffset()));
+ store32(TrustedImm32(0), Address(cellResult, JSArray::indexBiasOffset()));
+
+ // Initialize the subclass data and the sparse value map.
+ storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::subclassDataOffset()));
+ storePtr(TrustedImmPtr(0), Address(cellResult, JSArray::sparseValueMapOffset()));
+
+ // Store the values we have.
+ for (unsigned i = 0; i < length; i++) {
+#if USE(JSVALUE64)
+ loadPtr(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
+ storePtr(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+#else
+ load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register)), storagePtr);
+ store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+ load32(Address(callFrameRegister, (valuesRegister + i) * sizeof(Register) + sizeof(uint32_t)), storagePtr);
+ store32(storagePtr, Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + sizeof(uint32_t)));
+#endif
+ }
+
+ // Zero out the remaining slots.
+ for (unsigned i = length; i < initialLength; i++) {
+#if USE(JSVALUE64)
+ storePtr(TrustedImmPtr(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i));
+#else
+ store32(TrustedImm32(static_cast<int>(JSValue::EmptyValueTag)), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ store32(TrustedImm32(0), Address(storageResult, ArrayStorage::vectorOffset() + sizeof(WriteBarrier<Unknown>) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+#endif
+ }
+}
+
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
@@ -485,11 +557,11 @@ inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
}
if (m_randomGenerator.getUint32() & 1)
- add32(Imm32(1), bucketCounterRegister);
+ add32(TrustedImm32(1), bucketCounterRegister);
else
- add32(Imm32(3), bucketCounterRegister);
- and32(Imm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
- move(ImmPtr(valueProfile->m_buckets), scratch);
+ add32(TrustedImm32(3), bucketCounterRegister);
+ and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
+ move(TrustedImmPtr(valueProfile->m_buckets), scratch);
#if USE(JSVALUE64)
storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
#elif USE(JSVALUE32_64)
@@ -796,7 +868,10 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
JSValue value = m_codeBlock->getConstant(src);
- move(ImmPtr(JSValue::encode(value)), dst);
+ if (!value.isNumber())
+ move(TrustedImmPtr(JSValue::encode(value)), dst);
+ else
+ move(ImmPtr(JSValue::encode(value)), dst);
killLastResultRegister();
return;
}
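
emitAllocateBasicStorage() above compiles a classic bump-pointer fast path against the CopiedSpace allocator: add the request to the current offset, take the slow case if that crosses the block boundary, otherwise publish the bumped offset; emitAllocateJSArray() then layers the JSArray header and ArrayStorage initialization on top. The emitted fast path corresponds to the following C++, where the field names follow the CopiedAllocator accesses in the code and blockSize stands in for HeapBlock::s_blockSize:

    #include <cstddef>

    struct BumpAllocator {
        char* currentBlock;   // start of the active block
        size_t currentOffset; // bytes already handed out in that block
        size_t blockSize;     // HeapBlock::s_blockSize in the patch

        // Returns null on the "slow case" the JIT branches to, where the real
        // runtime would fetch a fresh block. Like the patch (see its FIXME),
        // this does not guard against arithmetic wrap-around.
        void* tryAllocate(size_t size)
        {
            if (currentOffset + size >= blockSize)
                return nullptr; // addSlowCase(branchPtr(AboveOrEqual, ...))
            void* result = currentBlock + currentOffset;
            currentOffset += size; // bump the pointer forward
            return result;
        }
    };
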
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index bc53d2cd8..2db82bf4a 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -29,6 +29,7 @@
#include "JIT.h"
#include "Arguments.h"
+#include "CopiedSpaceInlineMethods.h"
#include "Heap.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
@@ -357,7 +358,10 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
} else {
if (m_codeBlock->isConstantRegisterIndex(src)) {
- storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ if (!getConstantOperand(src).isNumber())
+ storePtr(TrustedImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ else
+ storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
if (dst == m_lastResultBytecodeRegister)
killLastResultRegister();
} else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
@@ -389,7 +393,7 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
- emitAllocateJSFinalObject(ImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
+ emitAllocateJSFinalObject(TrustedImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -594,8 +598,8 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -609,7 +613,7 @@ void JIT::emit_op_resolve_base(Instruction* currentInstruction)
void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -618,7 +622,7 @@ void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
}
@@ -654,7 +658,7 @@ void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<Slo
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(TrustedImm32(currentIndex));
stubCall.addArgument(regT0);
stubCall.callWithValueProfiling(dst);
}
@@ -763,20 +767,11 @@ void JIT::emit_op_eq(Instruction* currentInstruction)
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_bitnot(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- not32(regT0);
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_with_base);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
}
@@ -784,7 +779,7 @@ void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_with_this);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
}
@@ -971,17 +966,25 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
unsigned src2 = currentInstruction[3].u.operand;
emitGetVirtualRegisters(src1, regT0, src2, regT1);
-
- // Jump to a slow case if either operand is a number, or if both are JSCell*s.
+
+ // Jump slow if both are cells (to cover strings).
move(regT0, regT2);
orPtr(regT1, regT2);
addSlowCase(emitJumpIfJSCell(regT2));
- addSlowCase(emitJumpIfImmediateNumber(regT2));
+
+ // Jump slow if either is a double. First test if it's an integer, which is fine, and then test
+ // if it's a double.
+ Jump leftOK = emitJumpIfImmediateInteger(regT0);
+ addSlowCase(emitJumpIfImmediateNumber(regT0));
+ leftOK.link(this);
+ Jump rightOK = emitJumpIfImmediateInteger(regT1);
+ addSlowCase(emitJumpIfImmediateNumber(regT1));
+ rightOK.link(this);
if (type == OpStrictEq)
- compare32(Equal, regT1, regT0, regT0);
+ comparePtr(Equal, regT1, regT0, regT0);
else
- compare32(NotEqual, regT1, regT0, regT0);
+ comparePtr(NotEqual, regT1, regT0, regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
@@ -1034,7 +1037,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.call();
addJump(jump(), currentInstruction[2].u.operand);
}
@@ -1052,7 +1055,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_switch_imm);
stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
+ stubCall.addArgument(TrustedImm32(tableIndex));
stubCall.call();
jump(regT0);
}
@@ -1070,7 +1073,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_switch_char);
stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
+ stubCall.addArgument(TrustedImm32(tableIndex));
stubCall.call();
jump(regT0);
}
@@ -1087,7 +1090,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_switch_string);
stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
+ stubCall.addArgument(TrustedImm32(tableIndex));
stubCall.call();
jump(regT0);
}
@@ -1095,7 +1098,10 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_throw_reference_error);
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber())
+ stubCall.addArgument(TrustedImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ else
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
stubCall.call();
}
@@ -1106,9 +1112,9 @@ void JIT::emit_op_debug(Instruction* currentInstruction)
breakpoint();
#else
JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.call();
#endif
}
@@ -1318,14 +1324,6 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
}
-void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitnot);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
@@ -1380,6 +1378,7 @@ void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseE
{
linkSlowCase(iter);
linkSlowCase(iter);
+ linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_stricteq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
@@ -1390,6 +1389,7 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
{
linkSlowCase(iter);
linkSlowCase(iter);
+ linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_nstricteq);
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
@@ -1565,7 +1565,7 @@ void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Ve
linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
JITStubCall stubCall(this, cti_op_resolve_global);
stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(TrustedImm32(currentIndex));
stubCall.addArgument(regT0);
stubCall.callWithValueProfiling(dst);
}
@@ -1645,17 +1645,39 @@ void JIT::emitSlow_op_new_func_exp(Instruction* currentInstruction, Vector<SlowC
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
+ int length = currentInstruction[3].u.operand;
+ if (CopiedSpace::isOversize(JSArray::storageSize(length))) {
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+ return;
+ }
+ int dst = currentInstruction[1].u.operand;
+ int values = currentInstruction[2].u.operand;
+
+ emitAllocateJSArray(values, length, regT0, regT1, regT2);
+ emitStoreCell(dst, regT0);
+}
+
+void JIT::emitSlow_op_new_array(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int length = currentInstruction[3].u.operand;
+ if (CopiedSpace::isOversize(JSArray::storageSize(length)))
+ return;
+ linkSlowCase(iter); // Not enough space in MarkedSpace for cell.
+ linkSlowCase(iter); // Not enough space in CopiedSpace for storage.
JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_array_buffer);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
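
The compileOpStrictEq() rework earlier in this file lets integers through the fast path for the first time, which is why the 32-bit compare became comparePtr: once strings (the both-cells case) and doubles are routed to the stub, two JSVALUE64 encodings are strictly equal exactly when all 64 bits match, whereas low-32-bit equality could falsely match, say, a boxed int32 against a cell pointer with the same low bits. A minimal model of the new filtering, with the tag tests passed in rather than decoded:

    #include <cstdint>

    enum class StrictEqResult { SlowCase, Equal, NotEqual };

    // Model of the reworked JSVALUE64 strict-equality fast path. The boolean
    // parameters stand in for the emitJumpIfJSCell / integer-then-number tag
    // tests the JIT emits.
    static StrictEqResult strictEqFastPath(uint64_t lhs, uint64_t rhs,
                                           bool bothCells, bool eitherDouble)
    {
        if (bothCells)
            return StrictEqResult::SlowCase; // could be two equal strings
        if (eitherDouble)
            return StrictEqResult::SlowCase; // 2 === 2.0 needs numeric compare
        // int32, bool, null, undefined, or a lone cell: strict equality is
        // full-width bit equality -- comparePtr, not compare32.
        return lhs == rhs ? StrictEqResult::Equal : StrictEqResult::NotEqual;
    }
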
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 1a09302cf..b67696f35 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -536,7 +536,7 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
- emitAllocateJSFinalObject(ImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
+ emitAllocateJSFinalObject(TrustedImmPtr(m_codeBlock->globalObject()->emptyObjectStructure()), regT0, regT1);
emitStoreCell(currentInstruction[1].u.operand, regT0);
}
@@ -701,8 +701,8 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -716,7 +716,7 @@ void JIT::emit_op_resolve_base(Instruction* currentInstruction)
void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -725,7 +725,7 @@ void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
}
@@ -766,7 +766,7 @@ void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<Slo
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(TrustedImm32(currentIndex));
stubCall.callWithValueProfiling(dst);
}
@@ -817,7 +817,7 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEnt
if (supportsFloatingPoint()) {
// regT1 contains the tag from the hot path.
- Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+ Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag));
emitLoadDouble(cond, fpRegT0);
emitJumpSlowToHot(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
@@ -853,7 +853,7 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntr
if (supportsFloatingPoint()) {
// regT1 contains the tag from the hot path.
- Jump notNumber = branch32(Above, regT1, Imm32(JSValue::LowestTag));
+ Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag));
emitLoadDouble(cond, fpRegT0);
emitJumpSlowToHot(branchDoubleNonZero(fpRegT0, fpRegT1), target);
@@ -1164,7 +1164,7 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_with_base);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
}
@@ -1172,7 +1172,7 @@ void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_with_this);
stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
}
@@ -1364,7 +1364,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
stubCall.call();
addJump(jump(), currentInstruction[2].u.operand);
}
@@ -1382,7 +1382,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_switch_imm);
stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
+ stubCall.addArgument(TrustedImm32(tableIndex));
stubCall.call();
jump(regT0);
}
@@ -1400,7 +1400,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_switch_char);
stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
+ stubCall.addArgument(TrustedImm32(tableIndex));
stubCall.call();
jump(regT0);
}
@@ -1417,7 +1417,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_switch_string);
stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
+ stubCall.addArgument(TrustedImm32(tableIndex));
stubCall.call();
jump(regT0);
}
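The Imm32 to TrustedImm32 churn throughout these two files marks operands that come from the CodeBlock itself (operand indices, switch table indices, tag constants) as safe to embed verbatim in generated code. As I understand the Imm32/TrustedImm32 split, a plain Imm32 remains a candidate for constant blinding, where the assembler avoids materializing a potentially attacker-influenced 32-bit value directly in executable memory. A self-contained sketch of the blinding idea; the key source and variable names are illustrative, not JSC's:

    #include <cstdint>
    #include <cstdio>
    #include <random>

    // An untrusted immediate is split into (value ^ key) and key, so its raw
    // bit pattern never appears verbatim in the emitted code; one extra XOR
    // at runtime recovers the original value.
    int main()
    {
        std::mt19937 rng(std::random_device{}());
        uint32_t untrusted = 0xdeadbeef; // stand-in for a script-chosen constant
        uint32_t key = rng();

        uint32_t blinded = untrusted ^ key; // what would be baked into the code
        uint32_t recovered = blinded ^ key; // what the emitted XOR computes

        std::printf("recovered %#x\n", (unsigned)recovered);
        return recovered == untrusted ? 0 : 1;
    }

Marking a constant trusted therefore isn't just cosmetic: it spares the extra instructions and scratch register that blinding would otherwise cost on every use.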
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 1ee2915dc..19abdbd89 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -57,7 +57,7 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_put_by_index);
stubCall.addArgument(base);
- stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(TrustedImm32(property));
stubCall.addArgument(value);
stubCall.call();
}
diff --git a/Source/JavaScriptCore/jit/JITStubCall.h b/Source/JavaScriptCore/jit/JITStubCall.h
index 51401a77f..a525ff227 100644
--- a/Source/JavaScriptCore/jit/JITStubCall.h
+++ b/Source/JavaScriptCore/jit/JITStubCall.h
@@ -104,12 +104,24 @@ namespace JSC {
m_jit->poke(argument, m_stackIndex);
m_stackIndex += stackIndexStep;
}
+
+ void addArgument(JIT::Imm32 argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
void addArgument(JIT::TrustedImmPtr argument)
{
m_jit->poke(argument, m_stackIndex);
m_stackIndex += stackIndexStep;
}
+
+ void addArgument(JIT::ImmPtr argument)
+ {
+ m_jit->poke(argument, m_stackIndex);
+ m_stackIndex += stackIndexStep;
+ }
void addArgument(JIT::RegisterID argument)
{
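The two new addArgument overloads exist so call sites can pass either wrapper without casts: the argument's type alone tells the underlying poke() whether the constant is trusted. A toy model of that compile-time dispatch; the structs here are simplified stand-ins for the MacroAssembler types, not their real definitions:

    #include <cstdint>
    #include <cstdio>

    struct TrustedImm32 { int32_t value; }; // safe to embed verbatim
    struct Imm32 { int32_t value; };        // may be blinded by the assembler

    void poke(TrustedImm32 v) { std::printf("poke trusted %d\n", v.value); }
    void poke(Imm32 v)        { std::printf("poke untrusted %d\n", v.value); }

    int main()
    {
        poke(TrustedImm32{42}); // e.g. an operand index from the CodeBlock
        poke(Imm32{42});        // e.g. a value that could be script-controlled
        return 0;
    }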
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index a0a816505..eebe90427 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -43,7 +43,7 @@
#include "ExceptionHelpers.h"
#include "GetterSetter.h"
#include "Heap.h"
-#include "InlineASM.h"
+#include <wtf/InlineASM.h>
#include "JIT.h"
#include "JITExceptions.h"
#include "JSActivation.h"
@@ -1929,12 +1929,16 @@ DEFINE_STUB_FUNCTION(void, optimize_from_loop)
CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned bytecodeIndex = stackFrame.args[0].int32();
+ unsigned bytecodeIndex = stackFrame.args[0].int32();
+
#if ENABLE(JIT_VERBOSE_OSR)
dataLog("%p: Entered optimize_from_loop with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock, codeBlock->jitExecuteCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
#endif
+ if (!codeBlock->checkIfOptimizationThresholdReached())
+ return;
+
if (codeBlock->hasOptimizedReplacement()) {
#if ENABLE(JIT_VERBOSE_OSR)
dataLog("Considering loop OSR into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
@@ -2033,6 +2037,9 @@ DEFINE_STUB_FUNCTION(void, optimize_from_ret)
dataLog("Entered optimize_from_ret with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock->jitExecuteCounter(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter());
#endif
+ if (!codeBlock->checkIfOptimizationThresholdReached())
+ return;
+
if (codeBlock->hasOptimizedReplacement()) {
#if ENABLE(JIT_VERBOSE_OSR)
dataLog("Returning from old JIT call frame with optimized replacement %p(%p), with success/fail %u/%u", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter());
@@ -2546,7 +2553,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
if (jsArray->canSetIndex(i))
jsArray->setIndex(*globalData, i, value);
else
- JSArray::putByIndex(jsArray, callFrame, i, value);
+ JSArray::putByIndex(jsArray, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
} else if (isJSByteArray(baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
JSByteArray* jsByteArray = asByteArray(baseValue);
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
@@ -2561,9 +2568,9 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
}
}
- baseValue.put(callFrame, i, value);
+ baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
} else
- baseValue.put(callFrame, i, value);
+ baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
} else {
Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
@@ -2604,7 +2611,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
if (!isJSByteArray(baseValue))
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
- baseValue.put(callFrame, i, value);
+ baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
} else {
Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
@@ -3036,19 +3043,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
return JSValue::encode(result);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
-
- ASSERT(!src.isInt32());
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(~src.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
{
STUB_INIT_STACK_FRAME(stackFrame);
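The cti_op_bitnot stub disappears here (its extern declaration is removed further down in JITStubs.h). If I recall the surrounding change correctly, op_bitnot was dropped from the bytecode because ~x is expressible as x ^ -1, letting the existing bitxor paths absorb it; the identity is easy to check:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // ~x == x ^ -1 holds for every int32 value, which is what allows a
        // dedicated bitnot opcode to be replaced by the generic bitxor path.
        for (int32_t x : {INT32_MIN, -1, 0, 1, 42, INT32_MAX})
            assert(~x == (x ^ -1));
        return 0;
    }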
@@ -3307,7 +3301,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
-
+
bool result = JSValue::strictEqual(stackFrame.callFrame, src1, src2);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(jsBoolean(result));
@@ -3408,7 +3402,9 @@ DEFINE_STUB_FUNCTION(void, op_put_by_index)
CallFrame* callFrame = stackFrame.callFrame;
unsigned property = stackFrame.args[1].int32();
- stackFrame.args[0].jsValue().put(callFrame, property, stackFrame.args[2].jsValue());
+ JSValue arrayValue = stackFrame.args[0].jsValue();
+ ASSERT(isJSArray(arrayValue));
+ asArray(arrayValue)->putDirectIndex(callFrame, property, stackFrame.args[2].jsValue(), false);
}
DEFINE_STUB_FUNCTION(void*, op_switch_imm)
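The op_put_by_val changes above thread the caller's strictness into putByIndex, since a failed indexed write must throw a TypeError in strict mode rather than fail silently; op_put_by_index meanwhile now asserts a JSArray base and writes through putDirectIndex, bypassing setters. A toy of the strict-mode distinction; the ToyArray type is invented for illustration and is not a JSC class:

    #include <cstdio>
    #include <stdexcept>

    struct ToyArray {
        bool writable = false; // imagine a read-only element
        void putByIndex(unsigned, int, bool strictMode) {
            if (writable)
                return; // write succeeds
            if (strictMode)
                throw std::runtime_error("TypeError: cannot set index");
            // sloppy mode: silently ignore the failed write
        }
    };

    int main()
    {
        ToyArray a;
        a.putByIndex(0, 1, false); // ignored without complaint
        try {
            a.putByIndex(0, 1, true); // strict mode must throw
        } catch (const std::exception& e) {
            std::printf("%s\n", e.what());
            return 0;
        }
        return 1;
    }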
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index 890d99747..49f666465 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -32,6 +32,7 @@
#include "CallData.h"
#include "Intrinsic.h"
+#include "LowLevelInterpreter.h"
#include "MacroAssemblerCodeRef.h"
#include "Register.h"
#include "ThunkGenerators.h"
@@ -39,6 +40,8 @@
namespace JSC {
+#if ENABLE(JIT)
+
struct StructureStubInfo;
class CodeBlock;
@@ -261,8 +264,6 @@ namespace JSC {
#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
-#if ENABLE(JIT)
-
#define STUB_ARGS_DECLARATION void** args
#define STUB_ARGS (args)
@@ -306,8 +307,22 @@ namespace JSC {
MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
- MacroAssemblerCodePtr ctiNativeCall() { return m_trampolineStructure.ctiNativeCall; }
- MacroAssemblerCodePtr ctiNativeConstruct() { return m_trampolineStructure.ctiNativeConstruct; }
+ MacroAssemblerCodePtr ctiNativeCall()
+ {
+#if ENABLE(LLINT)
+ if (!m_executableMemory)
+ return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline);
+#endif
+ return m_trampolineStructure.ctiNativeCall;
+ }
+ MacroAssemblerCodePtr ctiNativeConstruct()
+ {
+#if ENABLE(LLINT)
+ if (!m_executableMemory)
+ return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
+#endif
+ return m_trampolineStructure.ctiNativeConstruct;
+ }
MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
MacroAssemblerCodeRef ctiStub(JSGlobalData*, ThunkGenerator);
@@ -330,7 +345,6 @@ namespace JSC {
extern "C" {
EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
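ctiNativeCall() and ctiNativeConstruct() become conditional: when no JIT trampolines were ever generated (m_executableMemory is null, e.g. an LLInt-only configuration), they hand back the LLInt's native trampolines instead, which is also why LowLevelInterpreter.h is now included at the top of the header. A compressed sketch of the fallback shape; types are reduced to strings purely for illustration:

    #include <cstdio>

    using CodePtr = const char*; // stand-in for MacroAssemblerCodePtr

    struct Trampolines {
        void* executableMemory = nullptr; // null when the JIT never generated code
        CodePtr jitNativeCall = "jit native call";

        CodePtr nativeCall() const {
            if (!executableMemory)
                return "llint_native_call_trampoline"; // LLInt fallback
            return jitNativeCall;
        }
    };

    int main()
    {
        Trampolines t;
        std::puts(t.nativeCall()); // prints the LLInt fallback
        return 0;
    }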
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 099796986..371aff2f9 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -27,7 +27,7 @@
#include "ThunkGenerators.h"
#include "CodeBlock.h"
-#include "InlineASM.h"
+#include <wtf/InlineASM.h>
#include "SpecializedThunkJIT.h"
#include <wtf/text/StringImpl.h>