Diffstat (limited to 'Source/JavaScriptCore/llint')
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCLoop.cpp                48
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCLoop.h                  24
-rw-r--r--  Source/JavaScriptCore/llint/LLIntCommon.h                 15
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.cpp                176
-rw-r--r--  Source/JavaScriptCore/llint/LLIntData.h                   36
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoint.cpp          142
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoint.h (renamed from Source/JavaScriptCore/llint/LLIntEntrypoints.h)  34
-rw-r--r--  Source/JavaScriptCore/llint/LLIntEntrypoints.cpp          92
-rw-r--r--  Source/JavaScriptCore/llint/LLIntExceptions.cpp           49
-rw-r--r--  Source/JavaScriptCore/llint/LLIntExceptions.h             23
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h       90
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp     22
-rw-r--r--  Source/JavaScriptCore/llint/LLIntOpcode.h                 44
-rw-r--r--  Source/JavaScriptCore/llint/LLIntPCRanges.h               54
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.cpp          1408
-rw-r--r--  Source/JavaScriptCore/llint/LLIntSlowPaths.h             132
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.cpp               70
-rw-r--r--  Source/JavaScriptCore/llint/LLIntThunks.h                 15
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.asm     1818
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.cpp      260
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter.h         20
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm  2046
-rw-r--r--  Source/JavaScriptCore/llint/LowLevelInterpreter64.asm    1905
23 files changed, 4966 insertions, 3557 deletions
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.cpp b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
index 14fc04930..e3c6c6ce9 100644
--- a/Source/JavaScriptCore/llint/LLIntCLoop.cpp
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,51 +26,19 @@
#include "config.h"
#include "LLIntCLoop.h"
-#include "Instruction.h"
+#if !ENABLE(JIT)
-namespace JSC {
+#include "LLIntData.h"
+namespace JSC {
namespace LLInt {
-#if ENABLE(LLINT_C_LOOP)
-
void CLoop::initialize()
{
- execute(0, llint_unused, true);
-}
-
-void* CLoop::catchRoutineFor(Instruction* catchPCForInterpreter)
-{
- return reinterpret_cast<Instruction*>(catchPCForInterpreter->u.opcode);
-}
-
-MacroAssemblerCodePtr CLoop::hostCodeEntryFor(CodeSpecializationKind kind)
-{
- MacroAssemblerCodePtr codePtr;
- codePtr = (kind == CodeForCall) ?
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline) :
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
- return codePtr;
-}
-
-MacroAssemblerCodePtr CLoop::jsCodeEntryWithArityCheckFor(CodeSpecializationKind kind)
-{
- MacroAssemblerCodePtr codePtr;
- codePtr = (kind == CodeForCall) ?
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check) :
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check);
- return codePtr;
-}
-
-MacroAssemblerCodePtr CLoop::jsCodeEntryFor(CodeSpecializationKind kind)
-{
- MacroAssemblerCodePtr codePtr;
- codePtr = (kind == CodeForCall) ?
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_prologue) :
- MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_prologue);
- return codePtr;
+ execute(llint_entry, 0, 0, 0, true);
}
-#endif // ENABLE(LLINT_C_LOOP)
+} // namespace LLInt
+} // namespace JSC
-} } // namespace JSC::LLInt
+#endif // !ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.h b/Source/JavaScriptCore/llint/LLIntCLoop.h
index 231e52f66..8782b369c 100644
--- a/Source/JavaScriptCore/llint/LLIntCLoop.h
+++ b/Source/JavaScriptCore/llint/LLIntCLoop.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,34 +26,26 @@
#ifndef LLIntCLoop_h
#define LLIntCLoop_h
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
-#include "CodeSpecializationKind.h"
+#include "CallFrame.h"
#include "JSCJSValue.h"
-#include "MacroAssemblerCodeRef.h"
#include "Opcode.h"
-#include "Register.h"
+#include "ProtoCallFrame.h"
namespace JSC {
-
namespace LLInt {
-const OpcodeID llint_unused = llint_end;
-
class CLoop {
public:
static void initialize();
- static JSValue execute(CallFrame*, OpcodeID bootstrapOpcodeId, bool isInitializationPass = false);
-
- static void* catchRoutineFor(Instruction* catchPCForInterpreter);
-
- static MacroAssemblerCodePtr hostCodeEntryFor(CodeSpecializationKind);
- static MacroAssemblerCodePtr jsCodeEntryWithArityCheckFor(CodeSpecializationKind);
- static MacroAssemblerCodePtr jsCodeEntryFor(CodeSpecializationKind);
+ static JSValue execute(OpcodeID entryOpcodeID, void* executableAddress, VM*, ProtoCallFrame*, bool isInitializationPass = false);
};
} } // namespace JSC::LLInt
-#endif // ENABLE(LLINT_C_LOOP)
+using JSC::LLInt::CLoop;
+
+#endif // !ENABLE(JIT)
#endif // LLIntCLoop_h
diff --git a/Source/JavaScriptCore/llint/LLIntCommon.h b/Source/JavaScriptCore/llint/LLIntCommon.h
index 1797ff02e..d32a264e5 100644
--- a/Source/JavaScriptCore/llint/LLIntCommon.h
+++ b/Source/JavaScriptCore/llint/LLIntCommon.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,19 +34,14 @@
// Disable inline allocation in the interpreter. This is great if you're changing
// how the GC allocates.
+#if ENABLE(ALLOCATION_LOGGING)
+#define LLINT_ALWAYS_ALLOCATE_SLOW 1
+#else
#define LLINT_ALWAYS_ALLOCATE_SLOW 0
+#endif
// Disable inline caching of get_by_id and put_by_id.
#define LLINT_ALWAYS_ACCESS_SLOW 0
-// Enable OSR into the JIT. Disabling this while the LLInt is enabled effectively
-// turns off all JIT'ing, since in LLInt's parlance, OSR subsumes any form of JIT
-// invocation.
-#if ENABLE(JIT)
-#define LLINT_OSR_TO_JIT 1
-#else
-#define LLINT_OSR_TO_JIT 0
-#endif
-
#endif // LLIntCommon_h
diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp
index f1e367510..77b5614fc 100644
--- a/Source/JavaScriptCore/llint/LLIntData.cpp
+++ b/Source/JavaScriptCore/llint/LLIntData.cpp
@@ -25,37 +25,42 @@
#include "config.h"
#include "LLIntData.h"
-
-#if ENABLE(LLINT)
-
#include "BytecodeConventions.h"
+#include "CodeBlock.h"
#include "CodeType.h"
#include "Instruction.h"
+#include "JSScope.h"
#include "LLIntCLoop.h"
+#include "MaxFrameExtentForSlowPathCall.h"
#include "Opcode.h"
+#include "PropertyOffset.h"
+#include "WriteBarrier.h"
+
+#define STATIC_ASSERT(cond) static_assert(cond, "LLInt assumes " #cond)
namespace JSC { namespace LLInt {
Instruction* Data::s_exceptionInstructions = 0;
-Opcode* Data::s_opcodeMap = 0;
+Opcode Data::s_opcodeMap[numOpcodeIDs] = { };
+
+#if ENABLE(JIT)
+extern "C" void llint_entry(void*);
+#endif
void initialize()
{
Data::s_exceptionInstructions = new Instruction[maxOpcodeLength + 1];
- Data::s_opcodeMap = new Opcode[numOpcodeIDs];
- #if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
CLoop::initialize();
- #else // !ENABLE(LLINT_C_LOOP)
+#else // ENABLE(JIT)
+ llint_entry(&Data::s_opcodeMap);
+
for (int i = 0; i < maxOpcodeLength + 1; ++i)
Data::s_exceptionInstructions[i].u.pointer =
LLInt::getCodePtr(llint_throw_from_slow_path_trampoline);
- #define OPCODE_ENTRY(opcode, length) \
- Data::s_opcodeMap[opcode] = LLInt::getCodePtr(llint_##opcode);
- FOR_EACH_OPCODE_ID(OPCODE_ENTRY);
- #undef OPCODE_ENTRY
- #endif // !ENABLE(LLINT_C_LOOP)
+#endif // ENABLE(JIT)
}
#if COMPILER(CLANG)
@@ -68,14 +73,31 @@ void Data::performAssertions(VM& vm)
// Assertions to match LowLevelInterpreter.asm. If you change any of this code, be
// prepared to change LowLevelInterpreter.asm as well!!
- ASSERT(JSStack::CallFrameHeaderSize * 8 == 48);
- ASSERT(JSStack::ArgumentCount * 8 == -48);
- ASSERT(JSStack::CallerFrame * 8 == -40);
- ASSERT(JSStack::Callee * 8 == -32);
- ASSERT(JSStack::ScopeChain * 8 == -24);
- ASSERT(JSStack::ReturnPC * 8 == -16);
- ASSERT(JSStack::CodeBlock * 8 == -8);
- ASSERT(CallFrame::argumentOffsetIncludingThis(0) == -JSStack::CallFrameHeaderSize - 1);
+
+#if USE(JSVALUE64)
+ const ptrdiff_t PtrSize = 8;
+ const ptrdiff_t CallFrameHeaderSlots = 5;
+#else // USE(JSVALUE64) // i.e. 32-bit version
+ const ptrdiff_t PtrSize = 4;
+ const ptrdiff_t CallFrameHeaderSlots = 4;
+#endif
+ const ptrdiff_t SlotSize = 8;
+
+ STATIC_ASSERT(sizeof(void*) == PtrSize);
+ STATIC_ASSERT(sizeof(Register) == SlotSize);
+ STATIC_ASSERT(JSStack::CallFrameHeaderSize == CallFrameHeaderSlots);
+
+ ASSERT(!CallFrame::callerFrameOffset());
+ STATIC_ASSERT(JSStack::CallerFrameAndPCSize == (PtrSize * 2) / SlotSize);
+ ASSERT(CallFrame::returnPCOffset() == CallFrame::callerFrameOffset() + PtrSize);
+ ASSERT(JSStack::CodeBlock * sizeof(Register) == CallFrame::returnPCOffset() + PtrSize);
+ STATIC_ASSERT(JSStack::Callee * sizeof(Register) == JSStack::CodeBlock * sizeof(Register) + SlotSize);
+ STATIC_ASSERT(JSStack::ArgumentCount * sizeof(Register) == JSStack::Callee * sizeof(Register) + SlotSize);
+ STATIC_ASSERT(JSStack::ThisArgument * sizeof(Register) == JSStack::ArgumentCount * sizeof(Register) + SlotSize);
+ STATIC_ASSERT(JSStack::CallFrameHeaderSize == JSStack::ThisArgument);
+
+ ASSERT(CallFrame::argumentOffsetIncludingThis(0) == JSStack::ThisArgument);
+
#if CPU(BIG_ENDIAN)
ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) == 0);
ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 4);
@@ -84,52 +106,102 @@ void Data::performAssertions(VM& vm)
ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0);
#endif
#if USE(JSVALUE32_64)
- ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
- ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
- ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
- ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
- ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
- ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
- ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
- ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
+ STATIC_ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1));
+ STATIC_ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2));
+ STATIC_ASSERT(JSValue::NullTag == static_cast<unsigned>(-3));
+ STATIC_ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4));
+ STATIC_ASSERT(JSValue::CellTag == static_cast<unsigned>(-5));
+ STATIC_ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6));
+ STATIC_ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7));
+ STATIC_ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7));
#else
- ASSERT(TagBitTypeOther == 0x2);
- ASSERT(TagBitBool == 0x4);
- ASSERT(TagBitUndefined == 0x8);
- ASSERT(ValueEmpty == 0x0);
- ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
- ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
- ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
- ASSERT(ValueNull == TagBitTypeOther);
+ STATIC_ASSERT(TagBitTypeOther == 0x2);
+ STATIC_ASSERT(TagBitBool == 0x4);
+ STATIC_ASSERT(TagBitUndefined == 0x8);
+ STATIC_ASSERT(ValueEmpty == 0x0);
+ STATIC_ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool));
+ STATIC_ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1));
+ STATIC_ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined));
+ STATIC_ASSERT(ValueNull == TagBitTypeOther);
#endif
- ASSERT(StringType == 5);
- ASSERT(ObjectType == 17);
- ASSERT(MasqueradesAsUndefined == 1);
- ASSERT(ImplementsHasInstance == 2);
- ASSERT(ImplementsDefaultHasInstance == 8);
- ASSERT(FirstConstantRegisterIndex == 0x40000000);
- ASSERT(GlobalCode == 0);
- ASSERT(EvalCode == 1);
- ASSERT(FunctionCode == 2);
+#if (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) || !ENABLE(JIT)
+ STATIC_ASSERT(!maxFrameExtentForSlowPathCall);
+#elif CPU(ARM) || CPU(SH4)
+ STATIC_ASSERT(maxFrameExtentForSlowPathCall == 24);
+#elif CPU(X86) || CPU(MIPS)
+ STATIC_ASSERT(maxFrameExtentForSlowPathCall == 40);
+#elif CPU(X86_64) && OS(WINDOWS)
+ STATIC_ASSERT(maxFrameExtentForSlowPathCall == 64);
+#endif
+
+#if !ENABLE(JIT) || USE(JSVALUE32_64)
+ ASSERT(!CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters());
+#elif (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64)
+ ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);
+#elif (CPU(X86_64) && OS(WINDOWS))
+ ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3);
+#endif
+
+ STATIC_ASSERT(StringType == 6);
+ STATIC_ASSERT(SymbolType == 7);
+ STATIC_ASSERT(ObjectType == 21);
+ STATIC_ASSERT(FinalObjectType == 22);
+ STATIC_ASSERT(MasqueradesAsUndefined == 1);
+ STATIC_ASSERT(ImplementsDefaultHasInstance == 2);
+ STATIC_ASSERT(FirstConstantRegisterIndex == 0x40000000);
+ STATIC_ASSERT(GlobalCode == 0);
+ STATIC_ASSERT(EvalCode == 1);
+ STATIC_ASSERT(FunctionCode == 2);
+ STATIC_ASSERT(ModuleCode == 3);
+
+ ASSERT(!(reinterpret_cast<ptrdiff_t>((reinterpret_cast<WriteBarrier<JSCell>*>(0x4000)->slot())) - 0x4000));
+ static_assert(PutByIdPrimaryTypeMask == 0x6, "LLInt assumes PutByIdPrimaryTypeMask is == 0x6");
+ static_assert(PutByIdPrimaryTypeSecondary == 0x0, "LLInt assumes PutByIdPrimaryTypeSecondary is == 0x0");
+ static_assert(PutByIdPrimaryTypeObjectWithStructure == 0x2, "LLInt assumes PutByIdPrimaryTypeObjectWithStructure is == 0x2");
+ static_assert(PutByIdPrimaryTypeObjectWithStructureOrOther == 0x4, "LLInt assumes PutByIdPrimaryTypeObjectWithStructureOrOther is == 0x4");
+ static_assert(PutByIdSecondaryTypeMask == -0x8, "LLInt assumes PutByIdSecondaryTypeMask is == -0x8");
+ static_assert(PutByIdSecondaryTypeBottom == 0x0, "LLInt assumes PutByIdSecondaryTypeBottom is == 0x0");
+ static_assert(PutByIdSecondaryTypeBoolean == 0x8, "LLInt assumes PutByIdSecondaryTypeBoolean is == 0x8");
+ static_assert(PutByIdSecondaryTypeOther == 0x10, "LLInt assumes PutByIdSecondaryTypeOther is == 0x10");
+ static_assert(PutByIdSecondaryTypeInt32 == 0x18, "LLInt assumes PutByIdSecondaryTypeInt32 is == 0x18");
+ static_assert(PutByIdSecondaryTypeNumber == 0x20, "LLInt assumes PutByIdSecondaryTypeNumber is == 0x20");
+ static_assert(PutByIdSecondaryTypeString == 0x28, "LLInt assumes PutByIdSecondaryTypeString is == 0x28");
+ static_assert(PutByIdSecondaryTypeSymbol == 0x30, "LLInt assumes PutByIdSecondaryTypeSymbol is == 0x30");
+ static_assert(PutByIdSecondaryTypeObject == 0x38, "LLInt assumes PutByIdSecondaryTypeObject is == 0x38");
+ static_assert(PutByIdSecondaryTypeObjectOrOther == 0x40, "LLInt assumes PutByIdSecondaryTypeObjectOrOther is == 0x40");
+ static_assert(PutByIdSecondaryTypeTop == 0x48, "LLInt assumes PutByIdSecondaryTypeTop is == 0x48");
+
+ static_assert(GlobalProperty == 0, "LLInt assumes GlobalProperty ResultType is == 0");
+ static_assert(GlobalVar == 1, "LLInt assumes GlobalVar ResultType is == 1");
+ static_assert(GlobalLexicalVar == 2, "LLInt assumes GlobalLexicalVar ResultType is == 2");
+ static_assert(ClosureVar == 3, "LLInt assumes ClosureVar ResultType is == 3");
+ static_assert(LocalClosureVar == 4, "LLInt assumes LocalClosureVar ResultType is == 4");
+ static_assert(ModuleVar == 5, "LLInt assumes ModuleVar ResultType is == 5");
+ static_assert(GlobalPropertyWithVarInjectionChecks == 6, "LLInt assumes GlobalPropertyWithVarInjectionChecks ResultType is == 6");
+ static_assert(GlobalVarWithVarInjectionChecks == 7, "LLInt assumes GlobalVarWithVarInjectionChecks ResultType is == 7");
+ static_assert(GlobalLexicalVarWithVarInjectionChecks == 8, "LLInt assumes GlobalLexicalVarWithVarInjectionChecks ResultType is == 8");
+ static_assert(ClosureVarWithVarInjectionChecks == 9, "LLInt assumes ClosureVarWithVarInjectionChecks ResultType is == 9");
+
+ static_assert(InitializationMode::Initialization == 0, "LLInt assumes that InitializationMode::Initialization is 0");
+ STATIC_ASSERT(GetPutInfo::typeBits == 0x3ff);
+ STATIC_ASSERT(GetPutInfo::initializationShift == 10);
+ STATIC_ASSERT(GetPutInfo::initializationBits == 0xffc00);
+
+ STATIC_ASSERT(MarkedBlock::blockMask == ~static_cast<decltype(MarkedBlock::blockMask)>(0x3fff));
+
// FIXME: make these assertions less horrible.
#if !ASSERT_DISABLED
Vector<int> testVector;
testVector.resize(42);
-#if USE(JSVALUE64) && OS(WINDOWS)
- ASSERT(bitwise_cast<uint32_t*>(&testVector)[4] == 42);
-#else
ASSERT(bitwise_cast<uint32_t*>(&testVector)[sizeof(void*)/sizeof(uint32_t) + 1] == 42);
-#endif
ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin());
#endif
- ASSERT(StringImpl::s_hashFlag8BitBuffer == 64);
+ ASSERT(StringImpl::s_hashFlag8BitBuffer == 8);
}
#if COMPILER(CLANG)
#pragma clang diagnostic pop
#endif
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h
index 8ed2bceda..7e7794b14 100644
--- a/Source/JavaScriptCore/llint/LLIntData.h
+++ b/Source/JavaScriptCore/llint/LLIntData.h
@@ -28,14 +28,13 @@
#include "JSCJSValue.h"
#include "Opcode.h"
-#include <wtf/Platform.h>
namespace JSC {
class VM;
struct Instruction;
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
typedef OpcodeID LLIntCode;
#else
typedef void (*LLIntCode)();
@@ -43,15 +42,13 @@ typedef void (*LLIntCode)();
namespace LLInt {
-#if ENABLE(LLINT)
-
class Data {
public:
static void performAssertions(VM&);
private:
static Instruction* s_exceptionInstructions;
- static Opcode* s_opcodeMap;
+ static Opcode s_opcodeMap[numOpcodeIDs];
friend void initialize();
@@ -87,33 +84,12 @@ ALWAYS_INLINE void* getCodePtr(OpcodeID id)
return reinterpret_cast<void*>(getOpcode(id));
}
-#else // !ENABLE(LLINT)
-
-#if COMPILER(CLANG)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wmissing-noreturn"
-#endif
-
-class Data {
-public:
- static void performAssertions(VM&) { }
-};
-
-#if COMPILER(CLANG)
-#pragma clang diagnostic pop
-#endif
-
-#endif // !ENABLE(LLINT)
-
-ALWAYS_INLINE void* getOpcode(void llintOpcode())
-{
- return bitwise_cast<void*>(llintOpcode);
-}
-
-ALWAYS_INLINE void* getCodePtr(void glueHelper())
+#if ENABLE(JIT)
+ALWAYS_INLINE LLIntCode getCodeFunctionPtr(OpcodeID codeId)
{
- return bitwise_cast<void*>(glueHelper);
+ return reinterpret_cast<LLIntCode>(getCodePtr(codeId));
}
+#endif
ALWAYS_INLINE void* getCodePtr(JSC::EncodedJSValue glueHelper())
{
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
new file mode 100644
index 000000000..f5918b721
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "LLIntEntrypoint.h"
+#include "CodeBlock.h"
+#include "HeapInlines.h"
+#include "JITCode.h"
+#include "JSCellInlines.h"
+#include "JSObject.h"
+#include "LLIntThunks.h"
+#include "LowLevelInterpreter.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "StackAlignment.h"
+#include "VM.h"
+
+namespace JSC { namespace LLInt {
+
+static void setFunctionEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+ CodeSpecializationKind kind = codeBlock->specializationKind();
+
+#if ENABLE(JIT)
+ if (vm.canUseJIT()) {
+ if (kind == CodeForCall) {
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), vm.getCTIStub(functionForCallArityCheckThunkGenerator).code(), JITCode::InterpreterThunk)));
+ return;
+ }
+ ASSERT(kind == CodeForConstruct);
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code(), JITCode::InterpreterThunk)));
+ return;
+ }
+#endif // ENABLE(JIT)
+
+ UNUSED_PARAM(vm);
+ if (kind == CodeForCall) {
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check), JITCode::InterpreterThunk)));
+ return;
+ }
+ ASSERT(kind == CodeForConstruct);
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check), JITCode::InterpreterThunk)));
+}
+
+static void setEvalEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+ if (vm.canUseJIT()) {
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(vm.getCTIStub(evalEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+ return;
+ }
+#endif // ENABLE(JIT)
+
+ UNUSED_PARAM(vm);
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+}
+
+static void setProgramEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+ if (vm.canUseJIT()) {
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(vm.getCTIStub(programEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+ return;
+ }
+#endif // ENABLE(JIT)
+
+ UNUSED_PARAM(vm);
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+}
+
+static void setModuleProgramEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+#if ENABLE(JIT)
+ if (vm.canUseJIT()) {
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(vm.getCTIStub(moduleProgramEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+ return;
+ }
+#endif // ENABLE(JIT)
+
+ UNUSED_PARAM(vm);
+ codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_module_program_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk)));
+}
+
+void setEntrypoint(VM& vm, CodeBlock* codeBlock)
+{
+ switch (codeBlock->codeType()) {
+ case GlobalCode:
+ setProgramEntrypoint(vm, codeBlock);
+ return;
+ case ModuleCode:
+ setModuleProgramEntrypoint(vm, codeBlock);
+ return;
+ case EvalCode:
+ setEvalEntrypoint(vm, codeBlock);
+ return;
+ case FunctionCode:
+ setFunctionEntrypoint(vm, codeBlock);
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+unsigned frameRegisterCountFor(CodeBlock* codeBlock)
+{
+ ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));
+
+ return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
+}
+
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.h b/Source/JavaScriptCore/llint/LLIntEntrypoint.h
index 1cecba0a6..5b8fd51cd 100644
--- a/Source/JavaScriptCore/llint/LLIntEntrypoints.h
+++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,42 +23,22 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef LLIntEntrypoints_h
-#define LLIntEntrypoints_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
+#ifndef LLIntEntrypoint_h
+#define LLIntEntrypoint_h
#include "CodeSpecializationKind.h"
namespace JSC {
-class EvalCodeBlock;
-class JITCode;
+class CodeBlock;
class VM;
-class MacroAssemblerCodePtr;
-class MacroAssemblerCodeRef;
-class ProgramCodeBlock;
namespace LLInt {
-void getFunctionEntrypoint(VM&, CodeSpecializationKind, JITCode&, MacroAssemblerCodePtr& arityCheck);
-void getEvalEntrypoint(VM&, JITCode&);
-void getProgramEntrypoint(VM&, JITCode&);
+void setEntrypoint(VM&, CodeBlock*);
-inline void getEntrypoint(VM& vm, EvalCodeBlock*, JITCode& jitCode)
-{
- getEvalEntrypoint(vm, jitCode);
-}
-
-inline void getEntrypoint(VM& vm, ProgramCodeBlock*, JITCode& jitCode)
-{
- getProgramEntrypoint(vm, jitCode);
-}
+unsigned frameRegisterCountFor(CodeBlock*);
} } // namespace JSC::LLInt
-#endif // ENABLE(LLINT)
-
-#endif // LLIntEntrypoints_h
+#endif // LLIntEntrypoint_h
diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
deleted file mode 100644
index c044568b5..000000000
--- a/Source/JavaScriptCore/llint/LLIntEntrypoints.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "LLIntEntrypoints.h"
-
-#if ENABLE(LLINT)
-
-#include "JITCode.h"
-#include "VM.h"
-#include "JSObject.h"
-#include "LLIntThunks.h"
-#include "LowLevelInterpreter.h"
-
-
-namespace JSC { namespace LLInt {
-
-void getFunctionEntrypoint(VM& vm, CodeSpecializationKind kind, JITCode& jitCode, MacroAssemblerCodePtr& arityCheck)
-{
- if (!vm.canUseJIT()) {
- if (kind == CodeForCall) {
- jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), JITCode::InterpreterThunk);
- arityCheck = MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check);
- return;
- }
-
- ASSERT(kind == CodeForConstruct);
- jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), JITCode::InterpreterThunk);
- arityCheck = MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check);
- return;
- }
-
-#if ENABLE(JIT)
- if (kind == CodeForCall) {
- jitCode = JITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk);
- arityCheck = vm.getCTIStub(functionForCallArityCheckThunkGenerator).code();
- return;
- }
-
- ASSERT(kind == CodeForConstruct);
- jitCode = JITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk);
- arityCheck = vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code();
-#endif // ENABLE(JIT)
-}
-
-void getEvalEntrypoint(VM& vm, JITCode& jitCode)
-{
- if (!vm.canUseJIT()) {
- jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), JITCode::InterpreterThunk);
- return;
- }
-#if ENABLE(JIT)
- jitCode = JITCode(vm.getCTIStub(evalEntryThunkGenerator), JITCode::InterpreterThunk);
-#endif
-}
-
-void getProgramEntrypoint(VM& vm, JITCode& jitCode)
-{
- if (!vm.canUseJIT()) {
- jitCode = JITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk);
- return;
- }
-#if ENABLE(JIT)
- jitCode = JITCode(vm.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk);
-#endif
-}
-
-} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
index d88c16e7e..039936e73 100644
--- a/Source/JavaScriptCore/llint/LLIntExceptions.cpp
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp
@@ -25,74 +25,39 @@
#include "config.h"
#include "LLIntExceptions.h"
-
-#if ENABLE(LLINT)
-
#include "CallFrame.h"
#include "CodeBlock.h"
#include "Instruction.h"
-#include "JITExceptions.h"
#include "LLIntCommon.h"
#include "LowLevelInterpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC { namespace LLInt {
-static void fixupPCforExceptionIfNeeded(ExecState* exec)
-{
- CodeBlock* codeBlock = exec->codeBlock();
- ASSERT(!!codeBlock);
- Instruction* pc = exec->currentVPC();
- exec->setCurrentVPC(codeBlock->adjustPCIfAtCallSite(pc));
-}
-
-void interpreterThrowInCaller(ExecState* exec, ReturnAddressPtr pc)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-#if LLINT_SLOW_PATH_TRACING
- dataLog("Throwing exception ", vm->exception, ".\n");
-#endif
- fixupPCforExceptionIfNeeded(exec);
- genericThrow(
- vm, exec, vm->exception,
- exec->codeBlock()->bytecodeOffset(exec, pc));
-}
-
Instruction* returnToThrowForThrownException(ExecState* exec)
{
UNUSED_PARAM(exec);
return LLInt::exceptionInstructions();
}
-static void doThrow(ExecState* exec, Instruction* pc)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
- fixupPCforExceptionIfNeeded(exec);
- genericThrow(vm, exec, vm->exception, pc - exec->codeBlock()->instructions().begin());
-}
-
-Instruction* returnToThrow(ExecState* exec, Instruction* pc)
+Instruction* returnToThrow(ExecState* exec)
{
+ UNUSED_PARAM(exec);
#if LLINT_SLOW_PATH_TRACING
VM* vm = &exec->vm();
- dataLog("Throwing exception ", vm->exception, " (returnToThrow).\n");
+ dataLog("Throwing exception ", vm->exception(), " (returnToThrow).\n");
#endif
- doThrow(exec, pc);
return LLInt::exceptionInstructions();
}
-void* callToThrow(ExecState* exec, Instruction* pc)
+void* callToThrow(ExecState* exec)
{
+ UNUSED_PARAM(exec);
#if LLINT_SLOW_PATH_TRACING
VM* vm = &exec->vm();
- dataLog("Throwing exception ", vm->exception, " (callToThrow).\n");
+ dataLog("Throwing exception ", vm->exception(), " (callToThrow).\n");
#endif
- doThrow(exec, pc);
return LLInt::getCodePtr(llint_throw_during_call_trampoline);
}
} } // namespace JSC::LLInt
-
-#endif // ENABLE(LLINT)
diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.h b/Source/JavaScriptCore/llint/LLIntExceptions.h
index 3baa3f4a5..bdeb5e4a7 100644
--- a/Source/JavaScriptCore/llint/LLIntExceptions.h
+++ b/Source/JavaScriptCore/llint/LLIntExceptions.h
@@ -26,11 +26,7 @@
#ifndef LLIntExceptions_h
#define LLIntExceptions_h
-#include <wtf/Platform.h>
#include <wtf/StdLibExtras.h>
-
-#if ENABLE(LLINT)
-
#include "MacroAssemblerCodeRef.h"
namespace JSC {
@@ -40,27 +36,18 @@ struct Instruction;
namespace LLInt {
-// Throw the currently active exception in the context of the caller's call frame.
-void interpreterThrowInCaller(ExecState* callerFrame, ReturnAddressPtr);
-
// Tells you where to jump to if you want to return-to-throw, after you've already
// set up all information needed to throw the exception.
Instruction* returnToThrowForThrownException(ExecState*);
-// Saves the current PC in the global data for safe-keeping, and gives you a PC
-// that you can tell the interpreter to go to, which when advanced between 1
-// and 9 slots will give you an "instruction" that threads to the interpreter's
-// exception handler. Note that if you give it the PC for exception handling,
-// it's smart enough to just return that PC without doing anything else; this
-// lets you thread exception handling through common helper functions used by
-// other helpers.
-Instruction* returnToThrow(ExecState*, Instruction*);
+// Gives you a PC that you can tell the interpreter to go to, which when advanced
+// between 1 and 9 slots will give you an "instruction" that threads to the
+// interpreter's exception handler.
+Instruction* returnToThrow(ExecState*);
// Use this when you're throwing to a call thunk.
-void* callToThrow(ExecState*, Instruction*);
+void* callToThrow(ExecState*);
} } // namespace JSC::LLInt
-#endif // ENABLE(LLINT)
-
#endif // LLIntExceptions_h
diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
index 9010757b4..51ada4fbf 100644
--- a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
+++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h
@@ -29,30 +29,44 @@
#include "LLIntCommon.h"
#include <wtf/Assertions.h>
#include <wtf/InlineASM.h>
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
#define OFFLINE_ASM_C_LOOP 1
#define OFFLINE_ASM_X86 0
+#define OFFLINE_ASM_X86_WIN 0
#define OFFLINE_ASM_ARM 0
#define OFFLINE_ASM_ARMv7 0
#define OFFLINE_ASM_ARMv7_TRADITIONAL 0
+#define OFFLINE_ASM_ARM64 0
#define OFFLINE_ASM_X86_64 0
+#define OFFLINE_ASM_X86_64_WIN 0
+#define OFFLINE_ASM_ARMv7k 0
#define OFFLINE_ASM_ARMv7s 0
#define OFFLINE_ASM_MIPS 0
#define OFFLINE_ASM_SH4 0
-#else // !ENABLE(LLINT_C_LOOP)
+#else // ENABLE(JIT)
#define OFFLINE_ASM_C_LOOP 0
-#if CPU(X86)
+#if CPU(X86) && !COMPILER(MSVC)
#define OFFLINE_ASM_X86 1
#else
#define OFFLINE_ASM_X86 0
#endif
+#if CPU(X86) && COMPILER(MSVC)
+#define OFFLINE_ASM_X86_WIN 1
+#else
+#define OFFLINE_ASM_X86_WIN 0
+#endif
+
+#ifdef __ARM_ARCH_7K__
+#define OFFLINE_ASM_ARMv7k 1
+#else
+#define OFFLINE_ASM_ARMv7k 0
+#endif
+
#ifdef __ARM_ARCH_7S__
#define OFFLINE_ASM_ARMv7s 1
#else
@@ -78,12 +92,18 @@
#define OFFLINE_ASM_ARM 0
#endif
-#if CPU(X86_64)
+#if CPU(X86_64) && !COMPILER(MSVC)
#define OFFLINE_ASM_X86_64 1
#else
#define OFFLINE_ASM_X86_64 0
#endif
+#if CPU(X86_64) && COMPILER(MSVC)
+#define OFFLINE_ASM_X86_64_WIN 1
+#else
+#define OFFLINE_ASM_X86_64_WIN 0
+#endif
+
#if CPU(MIPS)
#define OFFLINE_ASM_MIPS 1
#else
@@ -96,18 +116,31 @@
#define OFFLINE_ASM_SH4 0
#endif
-#endif // !ENABLE(LLINT_C_LOOP)
+#if CPU(ARM64)
+#define OFFLINE_ASM_ARM64 1
+#else
+#define OFFLINE_ASM_ARM64 0
+#endif
-#if USE(JSVALUE64)
-#define OFFLINE_ASM_JSVALUE64 1
+#if CPU(MIPS)
+#ifdef WTF_MIPS_PIC
+#define S(x) #x
+#define SX(x) S(x)
+#define OFFLINE_ASM_CPLOAD(reg) \
+ ".set noreorder\n" \
+ ".cpload " SX(reg) "\n" \
+ ".set reorder\n"
#else
-#define OFFLINE_ASM_JSVALUE64 0
+#define OFFLINE_ASM_CPLOAD(reg)
+#endif
#endif
-#if USE(JSVALUE64) && OS(WINDOWS)
-#define OFFLINE_ASM_WIN64 1
+#endif // ENABLE(JIT)
+
+#if USE(JSVALUE64)
+#define OFFLINE_ASM_JSVALUE64 1
#else
-#define OFFLINE_ASM_WIN64 0
+#define OFFLINE_ASM_JSVALUE64 0
#endif
#if !ASSERT_DISABLED
@@ -122,41 +155,10 @@
#define OFFLINE_ASM_BIG_ENDIAN 0
#endif
-#if LLINT_OSR_TO_JIT
-#define OFFLINE_ASM_JIT_ENABLED 1
-#else
-#define OFFLINE_ASM_JIT_ENABLED 0
-#endif
-
#if LLINT_EXECUTION_TRACING
#define OFFLINE_ASM_EXECUTION_TRACING 1
#else
#define OFFLINE_ASM_EXECUTION_TRACING 0
#endif
-#if LLINT_ALWAYS_ALLOCATE_SLOW
-#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 1
-#else
-#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 0
-#endif
-
-#if ENABLE(VALUE_PROFILER)
-#define OFFLINE_ASM_VALUE_PROFILER 1
-#else
-#define OFFLINE_ASM_VALUE_PROFILER 0
-#endif
-
-#if CPU(MIPS)
-#ifdef WTF_MIPS_PIC
-#define S(x) #x
-#define SX(x) S(x)
-#define OFFLINE_ASM_CPLOAD(reg) \
- ".set noreorder\n" \
- ".cpload " SX(reg) "\n" \
- ".set reorder\n"
-#else
-#define OFFLINE_ASM_CPLOAD(reg)
-#endif
-#endif
-
#endif // LLIntOfflineAsmConfig_h
diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
index 0cd2f68a1..2b4e61986 100644
--- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
+++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,30 +27,38 @@
#include "ArrayProfile.h"
#include "CodeBlock.h"
+#include "CommonSlowPaths.h"
+#include "Debugger.h"
+#include "DirectArguments.h"
+#include "Exception.h"
#include "Executable.h"
#include "Heap.h"
#include "Interpreter.h"
-#include "JITStubs.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
#include "VM.h"
+#include "JSEnvironmentRecord.h"
#include "JSGlobalObject.h"
+#include "JSModuleRecord.h"
#include "JSObject.h"
-#include "JSPropertyNameIterator.h"
#include "JSStack.h"
#include "JSString.h"
#include "JSTypeInfo.h"
-#include "JSVariableObject.h"
#include "JumpTable.h"
#include "LLIntOfflineAsmConfig.h"
#include "MarkedSpace.h"
-
+#include "ProtoCallFrame.h"
#include "Structure.h"
#include "StructureChain.h"
+#include "TypeProfiler.h"
+#include "TypeProfilerLog.h"
+#include "VMEntryRecord.h"
#include "ValueProfile.h"
+#include "Watchdog.h"
#include <wtf/text/StringImpl.h>
+
namespace JSC {
#define OFFLINE_ASM_OFFSETOF(clazz, field) (static_cast<unsigned>(OBJECT_OFFSETOF(clazz, field)))
@@ -62,7 +70,6 @@ public:
const unsigned* LLIntOffsetsExtractor::dummy()
{
-#if ENABLE(LLINT)
// This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code
// to create a table of offsets, sizes, and a header identifying what combination of
// Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor
@@ -71,9 +78,6 @@ const unsigned* LLIntOffsetsExtractor::dummy()
// compiler to kindly step aside and yield to our best intentions.
#include "LLIntDesiredOffsets.h"
return extractorTable;
-#else
- return 0;
-#endif
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/llint/LLIntOpcode.h b/Source/JavaScriptCore/llint/LLIntOpcode.h
index 3588f4ff3..9b26676c4 100644
--- a/Source/JavaScriptCore/llint/LLIntOpcode.h
+++ b/Source/JavaScriptCore/llint/LLIntOpcode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,56 +26,26 @@
#ifndef LLIntOpcode_h
#define LLIntOpcode_h
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
-
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
- macro(getHostCallReturnValue, 1) \
- macro(ctiOpThrowNotCaught, 1)
+ FOR_EACH_CLOOP_BYTECODE_HELPER_ID(macro)
-#else // !ENABLE(LLINT_C_LOOP)
+#else // ENABLE(JIT)
#define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
// Nothing to do here. Use the JIT impl instead.
-#endif // !ENABLE(LLINT_C_LOOP)
+#endif // !ENABLE(JIT)
#define FOR_EACH_LLINT_NATIVE_HELPER(macro) \
FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \
\
- macro(llint_begin, 1) \
- \
- macro(llint_program_prologue, 1) \
- macro(llint_eval_prologue, 1) \
- macro(llint_function_for_call_prologue, 1) \
- macro(llint_function_for_construct_prologue, 1) \
- macro(llint_function_for_call_arity_check, 1) \
- macro(llint_function_for_construct_arity_check, 1) \
- macro(llint_generic_return_point, 1) \
- macro(llint_throw_from_slow_path_trampoline, 1) \
- macro(llint_throw_during_call_trampoline, 1) \
- \
- /* Native call trampolines */ \
- macro(llint_native_call_trampoline, 1) \
- macro(llint_native_construct_trampoline, 1) \
- \
- macro(llint_end, 1)
+ FOR_EACH_BYTECODE_HELPER_ID(macro)
-#if ENABLE(LLINT_C_LOOP)
-#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) FOR_EACH_LLINT_NATIVE_HELPER(macro)
-#else
-#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add.
-#endif
-
-#else // !ENABLE(LLINT)
-#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add.
-
-#endif // !ENABLE(LLINT)
+#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) FOR_EACH_LLINT_NATIVE_HELPER(macro)
#endif // LLIntOpcode_h
diff --git a/Source/JavaScriptCore/llint/LLIntPCRanges.h b/Source/JavaScriptCore/llint/LLIntPCRanges.h
new file mode 100644
index 000000000..fdb48598f
--- /dev/null
+++ b/Source/JavaScriptCore/llint/LLIntPCRanges.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef LLIntPCRanges_h
+#define LLIntPCRanges_h
+
+namespace JSC {
+
+namespace LLInt {
+
+// These are used just to denote where LLInt code begins and where it ends.
+extern "C" {
+ void llintPCRangeStart();
+ void llintPCRangeEnd();
+}
+
+ALWAYS_INLINE bool isLLIntPC(void* pc)
+{
+ uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+ uintptr_t llintStart = bitwise_cast<uintptr_t>(llintPCRangeStart);
+ uintptr_t llintEnd = bitwise_cast<uintptr_t>(llintPCRangeEnd);
+ RELEASE_ASSERT(llintStart < llintEnd);
+ return llintStart <= pcAsInt && pcAsInt <= llintEnd;
+}
+
+#if ENABLE(JIT)
+static const GPRReg LLIntPC = GPRInfo::regT4;
+#endif
+
+} } // namespace JSC::LLInt
+
+#endif // LLIntPCRanges_h
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
index 36a43f586..700af9cff 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,30 +26,35 @@
#include "config.h"
#include "LLIntSlowPaths.h"
-#if ENABLE(LLINT)
-
-#include "Arguments.h"
#include "ArrayConstructor.h"
#include "CallFrame.h"
#include "CommonSlowPaths.h"
+#include "CommonSlowPathsExceptions.h"
+#include "Error.h"
+#include "ErrorHandlingScope.h"
+#include "Exception.h"
+#include "ExceptionFuzz.h"
#include "GetterSetter.h"
#include "HostCallReturnValue.h"
#include "Interpreter.h"
#include "JIT.h"
-#include "JITDriver.h"
-#include "JSActivation.h"
+#include "JITExceptions.h"
+#include "JSLexicalEnvironment.h"
+#include "JSCInlines.h"
#include "JSCJSValue.h"
+#include "JSGeneratorFunction.h"
#include "JSGlobalObjectFunctions.h"
-#include "JSNameScope.h"
-#include "JSPropertyNameIterator.h"
+#include "JSStackInlines.h"
#include "JSString.h"
#include "JSWithScope.h"
#include "LLIntCommon.h"
#include "LLIntExceptions.h"
+#include "LegacyProfiler.h"
#include "LowLevelInterpreter.h"
#include "ObjectConstructor.h"
-#include "Operations.h"
+#include "ProtoCallFrame.h"
#include "StructureRareDataInlines.h"
+#include "VMInlines.h"
#include <wtf/StringPrintStream.h>
namespace JSC { namespace LLInt {
@@ -80,17 +85,18 @@ namespace JSC { namespace LLInt {
return encodeResult(first, second); \
} while (false)
-#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, exec)
+#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, 0)
#define LLINT_THROW(exceptionToThrow) do { \
- vm.exception = (exceptionToThrow); \
- pc = returnToThrow(exec, pc); \
+ vm.throwException(exec, exceptionToThrow); \
+ pc = returnToThrow(exec); \
LLINT_END_IMPL(); \
} while (false)
#define LLINT_CHECK_EXCEPTION() do { \
- if (UNLIKELY(vm.exception)) { \
- pc = returnToThrow(exec, pc); \
+ doExceptionFuzzingIfEnabled(exec, "LLIntSlowPaths", pc); \
+ if (UNLIKELY(vm.exception())) { \
+ pc = returnToThrow(exec); \
LLINT_END_IMPL(); \
} \
} while (false)
@@ -117,7 +123,14 @@ namespace JSC { namespace LLInt {
LLINT_END_IMPL(); \
} while (false)
-#if ENABLE(VALUE_PROFILER)
+#define LLINT_RETURN_WITH_PC_ADJUSTMENT(value, pcAdjustment) do { \
+ JSValue __r_returnValue = (value); \
+ LLINT_CHECK_EXCEPTION(); \
+ LLINT_OP(1) = __r_returnValue; \
+ pc += (pcAdjustment); \
+ LLINT_END_IMPL(); \
+ } while (false)
+
#define LLINT_RETURN_PROFILED(opcode, value) do { \
JSValue __rp_returnValue = (value); \
LLINT_CHECK_EXCEPTION(); \
@@ -131,37 +144,35 @@ namespace JSC { namespace LLInt {
JSValue::encode(value); \
} while (false)
-#else // ENABLE(VALUE_PROFILER)
-#define LLINT_RETURN_PROFILED(opcode, value) LLINT_RETURN(value)
-
-#define LLINT_PROFILE_VALUE(opcode, value) do { } while (false)
-
-#endif // ENABLE(VALUE_PROFILER)
-
#define LLINT_CALL_END_IMPL(exec, callTarget) LLINT_RETURN_TWO((callTarget), (exec))
-#define LLINT_CALL_THROW(exec, pc, exceptionToThrow) do { \
+#define LLINT_CALL_THROW(exec, exceptionToThrow) do { \
ExecState* __ct_exec = (exec); \
- Instruction* __ct_pc = (pc); \
- vm.exception = (exceptionToThrow); \
- LLINT_CALL_END_IMPL(__ct_exec, callToThrow(__ct_exec, __ct_pc)); \
+ vm.throwException(__ct_exec, exceptionToThrow); \
+ LLINT_CALL_END_IMPL(0, callToThrow(__ct_exec)); \
} while (false)
-#define LLINT_CALL_CHECK_EXCEPTION(exec, pc) do { \
+#define LLINT_CALL_CHECK_EXCEPTION(exec, execCallee) do { \
ExecState* __cce_exec = (exec); \
- Instruction* __cce_pc = (pc); \
- if (UNLIKELY(vm.exception)) \
- LLINT_CALL_END_IMPL(__cce_exec, callToThrow(__cce_exec, __cce_pc)); \
+ ExecState* __cce_execCallee = (execCallee); \
+ doExceptionFuzzingIfEnabled(__cce_exec, "LLIntSlowPaths/call", nullptr); \
+ if (UNLIKELY(vm.exception())) \
+ LLINT_CALL_END_IMPL(0, callToThrow(__cce_execCallee)); \
} while (false)
-#define LLINT_CALL_RETURN(exec, pc, callTarget) do { \
+#define LLINT_CALL_RETURN(exec, execCallee, callTarget) do { \
ExecState* __cr_exec = (exec); \
- Instruction* __cr_pc = (pc); \
+ ExecState* __cr_execCallee = (execCallee); \
void* __cr_callTarget = (callTarget); \
- LLINT_CALL_CHECK_EXCEPTION(__cr_exec->callerFrame(), __cr_pc); \
- LLINT_CALL_END_IMPL(__cr_exec, __cr_callTarget); \
+ LLINT_CALL_CHECK_EXCEPTION(__cr_exec, __cr_execCallee); \
+ LLINT_CALL_END_IMPL(__cr_execCallee, __cr_callTarget); \
} while (false)
+#define LLINT_RETURN_CALLEE_FRAME(execCallee) do { \
+ ExecState* __rcf_exec = (execCallee); \
+ LLINT_RETURN_TWO(pc, __rcf_exec); \
+ } while (false)
+
extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* pc, int fromWhere, int operand)
{
LLINT_BEGIN();
@@ -212,10 +223,10 @@ static void traceFunctionPrologue(ExecState* exec, const char* comment, CodeSpec
{
JSFunction* callee = jsCast<JSFunction*>(exec->callee());
FunctionExecutable* executable = callee->jsExecutable();
- CodeBlock* codeBlock = &executable->generatedBytecodeFor(kind);
- dataLogF("%p / %p: in %s of function %p, executable %p; numVars = %u, numParameters = %u, numCalleeRegisters = %u, caller = %p.\n",
+ CodeBlock* codeBlock = executable->codeBlockFor(kind);
+ dataLogF("%p / %p: in %s of function %p, executable %p; numVars = %u, numParameters = %u, numCalleeLocals = %u, caller = %p.\n",
codeBlock, exec, comment, callee, executable,
- codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeRegisters,
+ codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeLocals,
exec->callerFrame());
}
@@ -245,12 +256,15 @@ LLINT_SLOW_PATH_DECL(trace_arityCheck_for_construct)
LLINT_SLOW_PATH_DECL(trace)
{
- dataLogF("%p / %p: executing bc#%zu, %s, scope %p\n",
+ dataLogF("%p / %p: executing bc#%zu, %s, pc = %p\n",
exec->codeBlock(),
exec,
static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()),
- opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)],
- exec->scope());
+ opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)], pc);
+ if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_enter) {
+ dataLogF("Frame will eventually return to %p\n", exec->returnPC().value());
+ *bitwise_cast<volatile char*>(exec->returnPC().value());
+ }
if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) {
dataLogF("Will be returning to %p\n", exec->returnPC().value());
dataLogF("The new cfr will be %p\n", exec->callerFrame());
@@ -269,8 +283,10 @@ LLINT_SLOW_PATH_DECL(special_trace)
LLINT_END_IMPL();
}
+enum EntryKind { Prologue, ArityCheck };
+
#if ENABLE(JIT)
-inline bool shouldJIT(ExecState* exec)
+inline bool shouldJIT(ExecState* exec, CodeBlock*)
{
// You can modify this to turn off JITting without rebuilding the world.
return exec->vm().canUseJIT();
@@ -279,61 +295,77 @@ inline bool shouldJIT(ExecState* exec)
// Returns true if we should try to OSR.
inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec)
{
- codeBlock->updateAllValueProfilePredictions();
+ VM& vm = exec->vm();
+ DeferGCForAWhile deferGC(vm.heap); // My callers don't set top callframe, so we don't want to GC here at all.
+ codeBlock->updateAllValueProfilePredictions();
+
if (!codeBlock->checkIfJITThresholdReached()) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF(" JIT threshold should be lifted.\n");
-#endif
+ if (Options::verboseOSR())
+ dataLogF(" JIT threshold should be lifted.\n");
return false;
}
-
- CodeBlock::JITCompilationResult result = codeBlock->jitCompile(exec);
- switch (result) {
- case CodeBlock::AlreadyCompiled:
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF(" Code was already compiled.\n");
-#endif
+
+ switch (codeBlock->jitType()) {
+ case JITCode::BaselineJIT: {
+ if (Options::verboseOSR())
+ dataLogF(" Code was already compiled.\n");
codeBlock->jitSoon();
return true;
- case CodeBlock::CouldNotCompile:
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF(" JIT compilation failed.\n");
-#endif
- codeBlock->dontJITAnytimeSoon();
+ }
+ case JITCode::InterpreterThunk: {
+ CompilationResult result = JIT::compile(&vm, codeBlock, JITCompilationCanFail);
+ switch (result) {
+ case CompilationFailed:
+ if (Options::verboseOSR())
+ dataLogF(" JIT compilation failed.\n");
+ codeBlock->dontJITAnytimeSoon();
+ return false;
+ case CompilationSuccessful:
+ if (Options::verboseOSR())
+ dataLogF(" JIT compilation successful.\n");
+ codeBlock->ownerScriptExecutable()->installCode(codeBlock);
+ codeBlock->jitSoon();
+ return true;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+ }
+ default:
+ dataLog("Unexpected code block in LLInt: ", *codeBlock, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
return false;
- case CodeBlock::CompiledSuccessfully:
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLogF(" JIT compilation successful.\n");
-#endif
- codeBlock->jitSoon();
- return true;
}
- RELEASE_ASSERT_NOT_REACHED();
- return false;
}
-enum EntryKind { Prologue, ArityCheck };
static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char *name, EntryKind kind)
{
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog(*codeBlock, ": Entered ", name, " with executeCounter = ", codeBlock->llintExecuteCounter(), "\n");
-#else
- UNUSED_PARAM(name);
-#endif
+ if (Options::verboseOSR()) {
+ dataLog(
+ *codeBlock, ": Entered ", name, " with executeCounter = ",
+ codeBlock->llintExecuteCounter(), "\n");
+ }
- if (!shouldJIT(exec)) {
+ if (!shouldJIT(exec, codeBlock)) {
codeBlock->dontJITAnytimeSoon();
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
}
if (!jitCompileAndSetHeuristics(codeBlock, exec))
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
if (kind == Prologue)
- LLINT_RETURN_TWO(codeBlock->getJITCode().executableAddressAtOffset(0), exec);
+ LLINT_RETURN_TWO(codeBlock->jitCode()->executableAddress(), 0);
ASSERT(kind == ArityCheck);
- LLINT_RETURN_TWO(codeBlock->getJITCodeWithArityCheck().executableAddress(), exec);
+ LLINT_RETURN_TWO(codeBlock->jitCode()->addressForCall(MustCheckArity).executableAddress(), 0);
}
+#else // ENABLE(JIT)
+static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char*, EntryKind)
+{
+ codeBlock->dontJITAnytimeSoon();
+ LLINT_RETURN_TWO(0, exec);
+}
+#endif // ENABLE(JIT)
LLINT_SLOW_PATH_DECL(entry_osr)
{
@@ -342,41 +374,44 @@ LLINT_SLOW_PATH_DECL(entry_osr)
LLINT_SLOW_PATH_DECL(entry_osr_function_for_call)
{
- return entryOSR(exec, pc, &jsCast<JSFunction*>(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForCall), "entry_osr_function_for_call", Prologue);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call", Prologue);
}
LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct)
{
- return entryOSR(exec, pc, &jsCast<JSFunction*>(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForConstruct), "entry_osr_function_for_construct", Prologue);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct", Prologue);
}
LLINT_SLOW_PATH_DECL(entry_osr_function_for_call_arityCheck)
{
- return entryOSR(exec, pc, &jsCast<JSFunction*>(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForCall), "entry_osr_function_for_call_arityCheck", ArityCheck);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForCall(), "entry_osr_function_for_call_arityCheck", ArityCheck);
}
LLINT_SLOW_PATH_DECL(entry_osr_function_for_construct_arityCheck)
{
- return entryOSR(exec, pc, &jsCast<JSFunction*>(exec->callee())->jsExecutable()->generatedBytecodeFor(CodeForConstruct), "entry_osr_function_for_construct_arityCheck", ArityCheck);
+ return entryOSR(exec, pc, jsCast<JSFunction*>(exec->callee())->jsExecutable()->codeBlockForConstruct(), "entry_osr_function_for_construct_arityCheck", ArityCheck);
}
LLINT_SLOW_PATH_DECL(loop_osr)
{
CodeBlock* codeBlock = exec->codeBlock();
+
+#if ENABLE(JIT)
+ if (Options::verboseOSR()) {
+ dataLog(
+ *codeBlock, ": Entered loop_osr with executeCounter = ",
+ codeBlock->llintExecuteCounter(), "\n");
+ }
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog(*codeBlock, ": Entered loop_osr with executeCounter = ", codeBlock->llintExecuteCounter(), "\n");
-#endif
-
- if (!shouldJIT(exec)) {
+ if (!shouldJIT(exec, codeBlock)) {
codeBlock->dontJITAnytimeSoon();
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
}
if (!jitCompileAndSetHeuristics(codeBlock, exec))
- LLINT_RETURN_TWO(0, exec);
+ LLINT_RETURN_TWO(0, 0);
- ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
+ ASSERT(codeBlock->jitType() == JITCode::BaselineJIT);
Vector<BytecodeAndMachineOffset> map;
codeBlock->jitCodeMap()->decode(map);
@@ -384,27 +419,38 @@ LLINT_SLOW_PATH_DECL(loop_osr)
ASSERT(mapping);
ASSERT(mapping->m_bytecodeIndex == static_cast<unsigned>(pc - codeBlock->instructions().begin()));
- void* jumpTarget = codeBlock->getJITCode().executableAddressAtOffset(mapping->m_machineCodeOffset);
+ void* jumpTarget = codeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);
ASSERT(jumpTarget);
- LLINT_RETURN_TWO(jumpTarget, exec);
+ LLINT_RETURN_TWO(jumpTarget, exec->topOfFrame());
+#else // ENABLE(JIT)
+ UNUSED_PARAM(pc);
+ codeBlock->dontJITAnytimeSoon();
+ LLINT_RETURN_TWO(0, 0);
+#endif // ENABLE(JIT)
}
LLINT_SLOW_PATH_DECL(replace)
{
CodeBlock* codeBlock = exec->codeBlock();
+
+#if ENABLE(JIT)
+ if (Options::verboseOSR()) {
+ dataLog(
+ *codeBlock, ": Entered replace with executeCounter = ",
+ codeBlock->llintExecuteCounter(), "\n");
+ }
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog(*codeBlock, ": Entered replace with executeCounter = ", codeBlock->llintExecuteCounter(), "\n");
-#endif
-
- if (shouldJIT(exec))
+ if (shouldJIT(exec, codeBlock))
jitCompileAndSetHeuristics(codeBlock, exec);
else
codeBlock->dontJITAnytimeSoon();
LLINT_END_IMPL();
-}
+#else // ENABLE(JIT)
+ codeBlock->dontJITAnytimeSoon();
+ LLINT_END_IMPL();
#endif // ENABLE(JIT)
+}
LLINT_SLOW_PATH_DECL(stack_check)
{
@@ -412,95 +458,34 @@ LLINT_SLOW_PATH_DECL(stack_check)
#if LLINT_SLOW_PATH_TRACING
dataLogF("Checking stack height with exec = %p.\n", exec);
dataLogF("CodeBlock = %p.\n", exec->codeBlock());
- dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeRegisters);
+ dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeLocals);
dataLogF("Num vars = %u.\n", exec->codeBlock()->m_numVars);
- dataLogF("Current end is at %p.\n", exec->vm().interpreter->stack().end());
-#endif
- ASSERT(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters] > exec->vm().interpreter->stack().end());
- if (UNLIKELY(!vm.interpreter->stack().grow(&exec->registers()[exec->codeBlock()->m_numCalleeRegisters]))) {
- ReturnAddressPtr returnPC = exec->returnPC();
- exec = exec->callerFrame();
- vm.exception = createStackOverflowError(exec);
- interpreterThrowInCaller(exec, returnPC);
- pc = returnToThrowForThrownException(exec);
- }
- LLINT_END_IMPL();
-}
-LLINT_SLOW_PATH_DECL(slow_path_call_arityCheck)
-{
- LLINT_BEGIN();
- ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &vm.interpreter->stack(), CodeForCall);
- if (!newExec) {
- ReturnAddressPtr returnPC = exec->returnPC();
- exec = exec->callerFrame();
- vm.exception = createStackOverflowError(exec);
- interpreterThrowInCaller(exec, returnPC);
- LLINT_RETURN_TWO(bitwise_cast<void*>(static_cast<uintptr_t>(1)), exec);
- }
- LLINT_RETURN_TWO(0, newExec);
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_construct_arityCheck)
-{
- LLINT_BEGIN();
- ExecState* newExec = CommonSlowPaths::arityCheckFor(exec, &vm.interpreter->stack(), CodeForConstruct);
- if (!newExec) {
- ReturnAddressPtr returnPC = exec->returnPC();
- exec = exec->callerFrame();
- vm.exception = createStackOverflowError(exec);
- interpreterThrowInCaller(exec, returnPC);
- LLINT_RETURN_TWO(bitwise_cast<void*>(static_cast<uintptr_t>(1)), exec);
- }
- LLINT_RETURN_TWO(0, newExec);
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_create_activation)
-{
- LLINT_BEGIN();
-#if LLINT_SLOW_PATH_TRACING
- dataLogF("Creating an activation, exec = %p!\n", exec);
+#if ENABLE(JIT)
+ dataLogF("Current end is at %p.\n", exec->vm().stackLimit());
+#else
+ dataLogF("Current end is at %p.\n", exec->vm().jsStackLimit());
#endif
- JSActivation* activation = JSActivation::create(vm, exec, exec->codeBlock());
- exec->setScope(activation);
- LLINT_RETURN(JSValue(activation));
-}
-LLINT_SLOW_PATH_DECL(slow_path_create_arguments)
-{
- LLINT_BEGIN();
- JSValue arguments = JSValue(Arguments::create(vm, exec));
- LLINT_CHECK_EXCEPTION();
- exec->uncheckedR(pc[1].u.operand) = arguments;
- exec->uncheckedR(unmodifiedArgumentsRegister(pc[1].u.operand)) = arguments;
- LLINT_END();
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_create_this)
-{
- LLINT_BEGIN();
- JSFunction* constructor = jsCast<JSFunction*>(LLINT_OP(2).jsValue().asCell());
-
-#if !ASSERT_DISABLED
- ConstructData constructData;
- ASSERT(constructor->methodTable()->getConstructData(constructor, constructData) == ConstructTypeJS);
#endif
-
- size_t inlineCapacity = pc[3].u.operand;
- Structure* structure = constructor->allocationProfile(exec, inlineCapacity)->structure();
- LLINT_RETURN(constructEmptyObject(exec, structure));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_convert_this)
-{
- LLINT_BEGIN();
- JSValue v1 = LLINT_OP(1).jsValue();
- ASSERT(v1.isPrimitive());
-#if ENABLE(VALUE_PROFILER)
- pc[OPCODE_LENGTH(op_convert_this) - 1].u.profile->m_buckets[0] =
- JSValue::encode(v1.structureOrUndefined());
+ // If the stack check succeeds and we don't need to throw the error, then
+ // we'll return 0 instead. The prologue will check for a non-zero value
+ // when determining whether to set the callFrame or not.
+
+ // For JIT enabled builds, which use the C stack, the stack is not growable.
+ // Hence, if we get here, then we know a stack overflow is imminent. So, just
+ // throw the StackOverflowError unconditionally.
+#if !ENABLE(JIT)
+ ASSERT(!vm.interpreter->stack().containsAddress(exec->topOfFrame()));
+ if (LIKELY(vm.interpreter->stack().ensureCapacityFor(exec->topOfFrame())))
+ LLINT_RETURN_TWO(pc, 0);
#endif
- LLINT_RETURN(v1.toThisObject(exec));
+
+ vm.topCallFrame = exec;
+ ErrorHandlingScope errorScope(vm);
+ vm.throwException(exec, createStackOverflowError(exec));
+ pc = returnToThrow(exec);
+ LLINT_RETURN_TWO(pc, exec);
}
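A minimal sketch of how the two-word result returned above could be unpacked on the C++ side, assuming the decodeResult helper and the llint_stack_check entry point declared for this slow path; the real consumer is the offlineasm prologue, so this is illustrative only, following the comment's convention that a zero second word means the stack check succeeded.

    void* newPC;
    ExecState* frameForThrow;
    decodeResult(llint_stack_check(exec, pc), newPC, frameForThrow);
    if (!frameForThrow) {
        // Stack check passed (second word is 0): resume at newPC with the current frame.
    } else {
        // Overflow path: the slow path already threw a StackOverflowError; unwind from frameForThrow.
    }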
LLINT_SLOW_PATH_DECL(slow_path_new_object)
@@ -512,7 +497,7 @@ LLINT_SLOW_PATH_DECL(slow_path_new_object)
LLINT_SLOW_PATH_DECL(slow_path_new_array)
{
LLINT_BEGIN();
- LLINT_RETURN(constructArray(exec, pc[4].u.arrayAllocationProfile, bitwise_cast<JSValue*>(&LLINT_OP(2)), pc[3].u.operand));
+ LLINT_RETURN(constructArrayNegativeIndexed(exec, pc[4].u.arrayAllocationProfile, bitwise_cast<JSValue*>(&LLINT_OP(2)), pc[3].u.operand));
}
LLINT_SLOW_PATH_DECL(slow_path_new_array_with_size)
@@ -533,205 +518,7 @@ LLINT_SLOW_PATH_DECL(slow_path_new_regexp)
RegExp* regExp = exec->codeBlock()->regexp(pc[2].u.operand);
if (!regExp->isValid())
LLINT_THROW(createSyntaxError(exec, "Invalid flag supplied to RegExp constructor."));
- LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject(), exec->lexicalGlobalObject()->regExpStructure(), regExp));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_not)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(!LLINT_OP_C(2).jsValue().toBoolean(exec)));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_eq)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(JSValue::equal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_neq)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(!JSValue::equal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_stricteq)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(JSValue::strictEqual(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_nstricteq)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(!JSValue::strictEqual(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_less)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(jsLess<true>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_lesseq)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(jsLessEq<true>(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_greater)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(jsLess<false>(exec, LLINT_OP_C(3).jsValue(), LLINT_OP_C(2).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_greatereq)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(jsLessEq<false>(exec, LLINT_OP_C(3).jsValue(), LLINT_OP_C(2).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_pre_inc)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsNumber(LLINT_OP(1).jsValue().toNumber(exec) + 1));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_pre_dec)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsNumber(LLINT_OP(1).jsValue().toNumber(exec) - 1));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_to_number)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsNumber(LLINT_OP_C(2).jsValue().toNumber(exec)));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_negate)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsNumber(-LLINT_OP_C(2).jsValue().toNumber(exec)));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_add)
-{
- LLINT_BEGIN();
- JSValue v1 = LLINT_OP_C(2).jsValue();
- JSValue v2 = LLINT_OP_C(3).jsValue();
-
-#if LLINT_SLOW_PATH_TRACING
- dataLog("Trying to add ", v1, " to ", v2, ".\n");
-#endif
-
- if (v1.isString() && !v2.isObject())
- LLINT_RETURN(jsString(exec, asString(v1), v2.toString(exec)));
-
- if (v1.isNumber() && v2.isNumber())
- LLINT_RETURN(jsNumber(v1.asNumber() + v2.asNumber()));
-
- LLINT_RETURN(jsAddSlowCase(exec, v1, v2));
-}
-
-// The following arithmetic and bitwise operations need to be sure to run
-// toNumber() on their operands in order. (A call to toNumber() is idempotent
-// if an exception is already set on the ExecState.)
-
-LLINT_SLOW_PATH_DECL(slow_path_mul)
-{
- LLINT_BEGIN();
- double a = LLINT_OP_C(2).jsValue().toNumber(exec);
- double b = LLINT_OP_C(3).jsValue().toNumber(exec);
- LLINT_RETURN(jsNumber(a * b));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_sub)
-{
- LLINT_BEGIN();
- double a = LLINT_OP_C(2).jsValue().toNumber(exec);
- double b = LLINT_OP_C(3).jsValue().toNumber(exec);
- LLINT_RETURN(jsNumber(a - b));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_div)
-{
- LLINT_BEGIN();
- double a = LLINT_OP_C(2).jsValue().toNumber(exec);
- double b = LLINT_OP_C(3).jsValue().toNumber(exec);
- LLINT_RETURN(jsNumber(a / b));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_mod)
-{
- LLINT_BEGIN();
- double a = LLINT_OP_C(2).jsValue().toNumber(exec);
- double b = LLINT_OP_C(3).jsValue().toNumber(exec);
- LLINT_RETURN(jsNumber(fmod(a, b)));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_lshift)
-{
- LLINT_BEGIN();
- int32_t a = LLINT_OP_C(2).jsValue().toInt32(exec);
- uint32_t b = LLINT_OP_C(3).jsValue().toUInt32(exec);
- LLINT_RETURN(jsNumber(a << (b & 31)));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_rshift)
-{
- LLINT_BEGIN();
- int32_t a = LLINT_OP_C(2).jsValue().toInt32(exec);
- uint32_t b = LLINT_OP_C(3).jsValue().toUInt32(exec);
- LLINT_RETURN(jsNumber(a >> (b & 31)));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_urshift)
-{
- LLINT_BEGIN();
- uint32_t a = LLINT_OP_C(2).jsValue().toUInt32(exec);
- uint32_t b = LLINT_OP_C(3).jsValue().toUInt32(exec);
- LLINT_RETURN(jsNumber(a >> (b & 31)));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_bitand)
-{
- LLINT_BEGIN();
- int32_t a = LLINT_OP_C(2).jsValue().toInt32(exec);
- int32_t b = LLINT_OP_C(3).jsValue().toInt32(exec);
- LLINT_RETURN(jsNumber(a & b));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_bitor)
-{
- LLINT_BEGIN();
- int32_t a = LLINT_OP_C(2).jsValue().toInt32(exec);
- int32_t b = LLINT_OP_C(3).jsValue().toInt32(exec);
- LLINT_RETURN(jsNumber(a | b));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_bitxor)
-{
- LLINT_BEGIN();
- int32_t a = LLINT_OP_C(2).jsValue().toInt32(exec);
- int32_t b = LLINT_OP_C(3).jsValue().toInt32(exec);
- LLINT_RETURN(jsNumber(a ^ b));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_check_has_instance)
-{
- LLINT_BEGIN();
-
- JSValue value = LLINT_OP_C(2).jsValue();
- JSValue baseVal = LLINT_OP_C(3).jsValue();
- if (baseVal.isObject()) {
- JSObject* baseObject = asObject(baseVal);
- ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance());
- if (baseObject->structure()->typeInfo().implementsHasInstance()) {
- pc += pc[4].u.operand;
- LLINT_RETURN(jsBoolean(baseObject->methodTable()->customHasInstance(baseObject, exec, value)));
- }
- }
- LLINT_THROW(createInvalidParameterError(exec, "instanceof", baseVal));
+ LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regExp));
}
LLINT_SLOW_PATH_DECL(slow_path_instanceof)
@@ -743,158 +530,28 @@ LLINT_SLOW_PATH_DECL(slow_path_instanceof)
LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto)));
}
-LLINT_SLOW_PATH_DECL(slow_path_typeof)
+LLINT_SLOW_PATH_DECL(slow_path_instanceof_custom)
{
LLINT_BEGIN();
- LLINT_RETURN(jsTypeStringForValue(exec, LLINT_OP_C(2).jsValue()));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_is_object)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(jsIsObjectType(exec, LLINT_OP_C(2).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_is_function)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(jsIsFunctionType(LLINT_OP_C(2).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_in)
-{
- LLINT_BEGIN();
- LLINT_RETURN(jsBoolean(CommonSlowPaths::opIn(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_resolve)
-{
- LLINT_BEGIN();
- Identifier ident = exec->codeBlock()->identifier(pc[2].u.operand);
- ResolveOperations* operations = pc[3].u.resolveOperations;
- JSValue result = JSScope::resolve(exec, ident, operations);
- ASSERT(operations->size());
- if (operations->isEmpty())
- LLINT_RETURN_PROFILED(op_resolve, result);
-
- switch (operations->data()[0].m_operation) {
- case ResolveOperation::GetAndReturnGlobalProperty:
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_global_property);
- break;
-
- case ResolveOperation::GetAndReturnGlobalVar:
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_global_var);
- break;
-
- case ResolveOperation::SkipTopScopeNode:
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_scoped_var_with_top_scope_check);
- break;
-
- case ResolveOperation::SkipScopes:
- if (operations->data()[0].m_scopesToSkip)
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_scoped_var);
- else
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_scoped_var_on_top_scope);
- break;
-
- default:
- break;
- }
- LLINT_RETURN_PROFILED(op_resolve, result);
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_put_to_base)
-{
- LLINT_BEGIN();
- PutToBaseOperation* operation = pc[4].u.putToBaseOperation;
- JSScope::resolvePut(exec, LLINT_OP_C(1).jsValue(), exec->codeBlock()->identifier(pc[2].u.operand), LLINT_OP_C(3).jsValue(), operation);
- switch (operation->m_kind) {
- case PutToBaseOperation::VariablePut:
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_to_base_variable);
- break;
-
- default:
- break;
- }
- LLINT_END();
-}
-LLINT_SLOW_PATH_DECL(slow_path_resolve_base)
-{
- LLINT_BEGIN();
- Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand);
- ResolveOperations* operations = pc[4].u.resolveOperations;
- JSValue result;
- if (pc[3].u.operand) {
- result = JSScope::resolveBase(exec, ident, true, operations, pc[5].u.putToBaseOperation);
- if (!result)
- LLINT_THROW(vm.exception);
- } else
- result = JSScope::resolveBase(exec, ident, false, operations, pc[5].u.putToBaseOperation);
-
- ASSERT(operations->size());
- if (operations->isEmpty()) {
- LLINT_PROFILE_VALUE(op_resolve_base, result);
- LLINT_RETURN(result);
- }
-
- switch (operations->data()[0].m_operation) {
- case ResolveOperation::ReturnGlobalObjectAsBase:
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_base_to_global);
- break;
-
- case ResolveOperation::SkipTopScopeNode:
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_base_to_scope_with_top_scope_check);
- break;
+ JSValue value = LLINT_OP_C(2).jsValue();
+ JSValue constructor = LLINT_OP_C(3).jsValue();
+ JSValue hasInstanceValue = LLINT_OP_C(4).jsValue();
- case ResolveOperation::SkipScopes:
- pc[0].u.opcode = LLInt::getOpcode(llint_op_resolve_base_to_scope);
- break;
+ ASSERT(constructor.isObject());
+ ASSERT(hasInstanceValue != exec->lexicalGlobalObject()->functionProtoHasInstanceSymbolFunction() || !constructor.getObject()->structure()->typeInfo().implementsDefaultHasInstance());
- default:
- break;
- }
- LLINT_PROFILE_VALUE(op_resolve_base, result);
+ JSValue result = jsBoolean(constructor.getObject()->hasInstance(exec, value, hasInstanceValue));
LLINT_RETURN(result);
}
-LLINT_SLOW_PATH_DECL(slow_path_resolve_with_base)
-{
- LLINT_BEGIN();
- ResolveOperations* operations = pc[4].u.resolveOperations;
- JSValue result = JSScope::resolveWithBase(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1), operations, pc[5].u.putToBaseOperation);
- LLINT_CHECK_EXCEPTION();
- LLINT_OP(2) = result;
- LLINT_PROFILE_VALUE(op_resolve_with_base, result);
- LLINT_END();
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_resolve_with_this)
-{
- LLINT_BEGIN();
- ResolveOperations* operations = pc[4].u.resolveOperations;
- JSValue result = JSScope::resolveWithThis(exec, exec->codeBlock()->identifier(pc[3].u.operand), &LLINT_OP(1), operations);
- LLINT_CHECK_EXCEPTION();
- LLINT_OP(2) = result;
- LLINT_PROFILE_VALUE(op_resolve_with_this, result);
- LLINT_END();
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_init_global_const_check)
-{
- LLINT_BEGIN();
- CodeBlock* codeBlock = exec->codeBlock();
- symbolTablePut(codeBlock->globalObject(), exec, codeBlock->identifier(pc[4].u.operand), LLINT_OP_C(2).jsValue(), true);
- LLINT_END();
-}
-
LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
- Identifier& ident = codeBlock->identifier(pc[3].u.operand);
+ const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
JSValue baseValue = LLINT_OP_C(2).jsValue();
- PropertySlot slot(baseValue);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
JSValue result = baseValue.get(exec, ident, slot);
LLINT_CHECK_EXCEPTION();
@@ -904,39 +561,38 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id)
&& baseValue.isCell()
&& slot.isCacheable()
&& slot.slotBase() == baseValue
- && slot.cachedPropertyType() == PropertySlot::Value) {
+ && slot.isCacheableValue()) {
JSCell* baseCell = baseValue.asCell();
Structure* structure = baseCell->structure();
+ // Start out by clearing out the old cache.
+ pc[0].u.opcode = LLInt::getOpcode(op_get_by_id);
+ pc[4].u.pointer = nullptr; // old structure
+ pc[5].u.pointer = nullptr; // offset
+
if (!structure->isUncacheableDictionary()
- && !structure->typeInfo().prohibitsPropertyCaching()) {
- pc[4].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure);
- if (isInlineOffset(slot.cachedOffset())) {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id);
- pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
- } else {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id_out_of_line);
- pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
- }
+ && !structure->typeInfo().prohibitsPropertyCaching()
+ && !structure->typeInfo().newImpurePropertyFiresWatchpoints()) {
+ vm.heap.writeBarrier(codeBlock);
+
+ ConcurrentJITLocker locker(codeBlock->m_lock);
+
+ pc[4].u.structureID = structure->id();
+ pc[5].u.operand = slot.cachedOffset();
}
}
if (!LLINT_ALWAYS_ACCESS_SLOW
&& isJSArray(baseValue)
&& ident == exec->propertyNames().length) {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_get_array_length);
-#if ENABLE(VALUE_PROFILER)
+ pc[0].u.opcode = LLInt::getOpcode(op_get_array_length);
ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(pc - codeBlock->instructions().begin());
arrayProfile->observeStructure(baseValue.asCell()->structure());
pc[4].u.arrayProfile = arrayProfile;
-#endif
}
-#if ENABLE(VALUE_PROFILER)
pc[OPCODE_LENGTH(op_get_by_id) - 1].u.profile->m_buckets[0] = JSValue::encode(result);
-#endif
LLINT_END();
}
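For orientation, a sketch of how a fast path could consume the inline cache that this slow path writes into the instruction stream (pc[4] = structure ID, pc[5] = property offset). The helper name tryCachedGetById is hypothetical, and the shipping fast path lives in the offlineasm interpreter, so this is only an approximation of the scheme:

    // Illustrative only: consume the cache populated by slow_path_get_by_id above.
    static inline bool tryCachedGetById(JSCell* baseCell, Instruction* pc, JSValue& result)
    {
        StructureID cachedID = pc[4].u.structureID;     // null means "not cached yet"
        if (!cachedID || baseCell->structureID() != cachedID)
            return false;                               // structures differ: take the slow path above
        PropertyOffset offset = pc[5].u.operand;
        result = asObject(baseCell)->getDirect(offset); // inline or out-of-line (butterfly) storage
        return true;
    }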
@@ -944,9 +600,9 @@ LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
- Identifier& ident = codeBlock->identifier(pc[3].u.operand);
+ const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
JSValue baseValue = LLINT_OP(2).jsValue();
- PropertySlot slot(baseValue);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
LLINT_RETURN(baseValue.get(exec, ident, slot));
}
@@ -954,19 +610,27 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
- Identifier& ident = codeBlock->identifier(pc[2].u.operand);
+ const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
JSValue baseValue = LLINT_OP_C(1).jsValue();
- PutPropertySlot slot(codeBlock->isStrictMode());
- if (pc[8].u.operand)
+ PutPropertySlot slot(baseValue, codeBlock->isStrictMode(), codeBlock->putByIdContext());
+ if (pc[8].u.putByIdFlags & PutByIdIsDirect)
asObject(baseValue)->putDirect(vm, ident, LLINT_OP_C(3).jsValue(), slot);
else
- baseValue.put(exec, ident, LLINT_OP_C(3).jsValue(), slot);
+ baseValue.putInline(exec, ident, LLINT_OP_C(3).jsValue(), slot);
LLINT_CHECK_EXCEPTION();
if (!LLINT_ALWAYS_ACCESS_SLOW
&& baseValue.isCell()
- && slot.isCacheable()) {
+ && slot.isCacheablePut()) {
+
+ // Start out by clearing out the old cache.
+ pc[4].u.pointer = nullptr; // old structure
+ pc[5].u.pointer = nullptr; // offset
+ pc[6].u.pointer = nullptr; // new structure
+ pc[7].u.pointer = nullptr; // structure chain
+ pc[8].u.putByIdFlags =
+ static_cast<PutByIdFlags>(pc[8].u.putByIdFlags & PutByIdPersistentFlagsMask);
JSCell* baseCell = baseValue.asCell();
Structure* structure = baseCell->structure();
@@ -974,53 +638,38 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id)
if (!structure->isUncacheableDictionary()
&& !structure->typeInfo().prohibitsPropertyCaching()
&& baseCell == slot.base()) {
+
+ vm.heap.writeBarrier(codeBlock);
if (slot.type() == PutPropertySlot::NewProperty) {
+ GCSafeConcurrentJITLocker locker(codeBlock->m_lock, vm.heap);
+
if (!structure->isDictionary() && structure->previousID()->outOfLineCapacity() == structure->outOfLineCapacity()) {
ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated());
-
- // This is needed because some of the methods we call
- // below may GC.
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id);
- if (normalizePrototypeChain(exec, baseCell) != InvalidPrototypeChain) {
+ if (normalizePrototypeChain(exec, structure) != InvalidPrototypeChain) {
ASSERT(structure->previousID()->isObject());
- pc[4].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure->previousID());
- if (isInlineOffset(slot.cachedOffset()))
- pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
- else
- pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
- pc[6].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure);
- StructureChain* chain = structure->prototypeChain(exec);
- ASSERT(chain);
- pc[7].u.structureChain.set(
- vm, codeBlock->ownerExecutable(), chain);
-
- if (pc[8].u.operand) {
- if (isInlineOffset(slot.cachedOffset()))
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct);
- else
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line);
- } else {
- if (isInlineOffset(slot.cachedOffset()))
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal);
- else
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line);
+ pc[4].u.structureID = structure->previousID()->id();
+ pc[5].u.operand = slot.cachedOffset();
+ pc[6].u.structureID = structure->id();
+ if (!(pc[8].u.putByIdFlags & PutByIdIsDirect)) {
+ StructureChain* chain = structure->prototypeChain(exec);
+ ASSERT(chain);
+ pc[7].u.structureChain.set(
+ vm, codeBlock, chain);
}
+ pc[8].u.putByIdFlags = static_cast<PutByIdFlags>(
+ pc[8].u.putByIdFlags |
+ structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags());
}
}
} else {
- pc[4].u.structure.set(
- vm, codeBlock->ownerExecutable(), structure);
- if (isInlineOffset(slot.cachedOffset())) {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id);
- pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage();
- } else {
- pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_out_of_line);
- pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue);
- }
+ structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+ pc[4].u.structureID = structure->id();
+ pc[5].u.operand = slot.cachedOffset();
+ pc[8].u.putByIdFlags = static_cast<PutByIdFlags>(
+ pc[8].u.putByIdFlags |
+ structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags());
}
}
}
@@ -1043,8 +692,14 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_id)
inline JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript)
{
if (LIKELY(baseValue.isCell() && subscript.isString())) {
- if (JSValue result = baseValue.asCell()->fastGetOwnProperty(exec, asString(subscript)->value(exec)))
- return result;
+ VM& vm = exec->vm();
+ Structure& structure = *baseValue.asCell()->structure(vm);
+ if (JSCell::canUseFastGetOwnProperty(structure)) {
+ if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) {
+ if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get()))
+ return result;
+ }
+ }
}
if (subscript.isUInt32()) {
@@ -1055,10 +710,12 @@ inline JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript)
return baseValue.get(exec, i);
}
- if (isName(subscript))
- return baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
-
- Identifier property(exec, subscript.toString(exec)->value(exec));
+ baseValue.requireObjectCoercible(exec);
+ if (exec->hadException())
+ return jsUndefined();
+ auto property = subscript.toPropertyKey(exec);
+ if (exec->hadException())
+ return jsUndefined();
return baseValue.get(exec, property);
}
@@ -1068,26 +725,6 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_val)
LLINT_RETURN_PROFILED(op_get_by_val, getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue()));
}
-LLINT_SLOW_PATH_DECL(slow_path_get_argument_by_val)
-{
- LLINT_BEGIN();
- JSValue arguments = LLINT_OP(2).jsValue();
- if (!arguments) {
- arguments = Arguments::create(vm, exec);
- LLINT_CHECK_EXCEPTION();
- LLINT_OP(2) = arguments;
- exec->uncheckedR(unmodifiedArgumentsRegister(pc[2].u.operand)) = arguments;
- }
-
- LLINT_RETURN_PROFILED(op_get_argument_by_val, getByVal(exec, arguments, LLINT_OP_C(3).jsValue()));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_get_by_pname)
-{
- LLINT_BEGIN();
- LLINT_RETURN(getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue()));
-}
-
LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
{
LLINT_BEGIN();
@@ -1110,16 +747,50 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val)
LLINT_END();
}
- if (isName(subscript)) {
- PutPropertySlot slot(exec->codeBlock()->isStrictMode());
- baseValue.put(exec, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
+ auto property = subscript.toPropertyKey(exec);
+ LLINT_CHECK_EXCEPTION();
+ PutPropertySlot slot(baseValue, exec->codeBlock()->isStrictMode());
+ baseValue.put(exec, property, value, slot);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_by_val_direct)
+{
+ LLINT_BEGIN();
+
+ JSValue baseValue = LLINT_OP_C(1).jsValue();
+ JSValue subscript = LLINT_OP_C(2).jsValue();
+ JSValue value = LLINT_OP_C(3).jsValue();
+ RELEASE_ASSERT(baseValue.isObject());
+ JSObject* baseObject = asObject(baseValue);
+ bool isStrictMode = exec->codeBlock()->isStrictMode();
+ if (LIKELY(subscript.isUInt32())) {
+ // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices.
+ ASSERT(isIndex(subscript.asUInt32()));
+ baseObject->putDirectIndex(exec, subscript.asUInt32(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
LLINT_END();
}
- Identifier property(exec, subscript.toString(exec)->value(exec));
- LLINT_CHECK_EXCEPTION();
- PutPropertySlot slot(exec->codeBlock()->isStrictMode());
- baseValue.put(exec, property, value, slot);
+ if (subscript.isDouble()) {
+ double subscriptAsDouble = subscript.asDouble();
+ uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble);
+ if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) {
+ baseObject->putDirectIndex(exec, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ LLINT_END();
+ }
+ }
+
+ // Don't put to an object if toString threw an exception.
+ auto property = subscript.toPropertyKey(exec);
+ if (exec->vm().exception())
+ LLINT_END();
+
+ if (Optional<uint32_t> index = parseIndex(property))
+ baseObject->putDirectIndex(exec, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ else {
+ PutPropertySlot slot(baseObject, isStrictMode);
+ baseObject->putDirect(exec->vm(), property, value, slot);
+ }
LLINT_END();
}
@@ -1136,11 +807,9 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_val)
uint32_t i;
if (subscript.getUInt32(i))
couldDelete = baseObject->methodTable()->deletePropertyByIndex(baseObject, exec, i);
- else if (isName(subscript))
- couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
else {
LLINT_CHECK_EXCEPTION();
- Identifier property(exec, subscript.toString(exec)->value(exec));
+ auto property = subscript.toPropertyKey(exec);
LLINT_CHECK_EXCEPTION();
couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, property);
}
@@ -1160,29 +829,97 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_index)
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter)
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_id)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue getter = LLINT_OP(4).jsValue();
+ ASSERT(getter.isObject());
+
+ baseObj->putGetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(getter), options);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_id)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue setter = LLINT_OP(4).jsValue();
+ ASSERT(setter.isObject());
+
+ baseObj->putSetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(setter), options);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter_by_id)
{
LLINT_BEGIN();
ASSERT(LLINT_OP(1).jsValue().isObject());
JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
- GetterSetter* accessor = GetterSetter::create(exec);
+ GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
LLINT_CHECK_EXCEPTION();
-
- JSValue getter = LLINT_OP(3).jsValue();
- JSValue setter = LLINT_OP(4).jsValue();
+
+ JSValue getter = LLINT_OP(4).jsValue();
+ JSValue setter = LLINT_OP(5).jsValue();
ASSERT(getter.isObject() || getter.isUndefined());
ASSERT(setter.isObject() || setter.isUndefined());
ASSERT(getter.isObject() || setter.isObject());
if (!getter.isUndefined())
- accessor->setGetter(vm, asObject(getter));
+ accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter));
if (!setter.isUndefined())
- accessor->setSetter(vm, asObject(setter));
+ accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter));
baseObj->putDirectAccessor(
exec,
exec->codeBlock()->identifier(pc[2].u.operand),
- accessor, Accessor);
+ accessor, pc[3].u.operand);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_val)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+ JSValue subscript = LLINT_OP_C(2).jsValue();
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue getter = LLINT_OP(4).jsValue();
+ ASSERT(getter.isObject());
+
+ auto property = subscript.toPropertyKey(exec);
+ LLINT_CHECK_EXCEPTION();
+
+ baseObj->putGetter(exec, property, asObject(getter), options);
+ LLINT_END();
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_val)
+{
+ LLINT_BEGIN();
+ ASSERT(LLINT_OP(1).jsValue().isObject());
+ JSObject* baseObj = asObject(LLINT_OP(1).jsValue());
+ JSValue subscript = LLINT_OP_C(2).jsValue();
+
+ unsigned options = pc[3].u.operand;
+
+ JSValue setter = LLINT_OP(4).jsValue();
+ ASSERT(setter.isObject());
+
+ auto property = subscript.toPropertyKey(exec);
+ LLINT_CHECK_EXCEPTION();
+
+ baseObj->putSetter(exec, property, asObject(setter), options);
LLINT_END();
}
@@ -1256,7 +993,7 @@ LLINT_SLOW_PATH_DECL(slow_path_switch_imm)
int defaultOffset = pc[2].u.operand;
if (value == intValue) {
CodeBlock* codeBlock = exec->codeBlock();
- pc += codeBlock->immediateSwitchJumpTable(pc[1].u.operand).offsetForValue(intValue, defaultOffset);
+ pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue(intValue, defaultOffset);
} else
pc += defaultOffset;
LLINT_END();
@@ -1272,7 +1009,7 @@ LLINT_SLOW_PATH_DECL(slow_path_switch_char)
int defaultOffset = pc[2].u.operand;
StringImpl* impl = string->value(exec).impl();
CodeBlock* codeBlock = exec->codeBlock();
- pc += codeBlock->characterSwitchJumpTable(pc[1].u.operand).offsetForValue((*impl)[0], defaultOffset);
+ pc += codeBlock->switchJumpTable(pc[1].u.operand).offsetForValue((*impl)[0], defaultOffset);
LLINT_END();
}
@@ -1294,31 +1031,68 @@ LLINT_SLOW_PATH_DECL(slow_path_new_func)
{
LLINT_BEGIN();
CodeBlock* codeBlock = exec->codeBlock();
- ASSERT(codeBlock->codeType() != FunctionCode
- || !codeBlock->needsFullScopeChain()
- || exec->uncheckedR(codeBlock->activationRegister()).jsValue());
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
#if LLINT_SLOW_PATH_TRACING
dataLogF("Creating function!\n");
#endif
- LLINT_RETURN(JSFunction::create(exec, codeBlock->functionDecl(pc[2].u.operand), exec->scope()));
+ LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_new_generator_func)
+{
+ LLINT_BEGIN();
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+#if LLINT_SLOW_PATH_TRACING
+ dataLogF("Creating function!\n");
+#endif
+ LLINT_RETURN(JSGeneratorFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope));
}
LLINT_SLOW_PATH_DECL(slow_path_new_func_exp)
{
LLINT_BEGIN();
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+ FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand);
+
+ LLINT_RETURN(JSFunction::create(vm, executable, scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_new_generator_func_exp)
+{
+ LLINT_BEGIN();
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+ FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand);
+
+ LLINT_RETURN(JSGeneratorFunction::create(vm, executable, scope));
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_new_arrow_func_exp)
+{
+ LLINT_BEGIN();
+
CodeBlock* codeBlock = exec->codeBlock();
- FunctionExecutable* function = codeBlock->functionExpr(pc[2].u.operand);
- JSFunction* func = JSFunction::create(exec, function, exec->scope());
+ JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope();
+ FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand);
- LLINT_RETURN(func);
+ LLINT_RETURN(JSFunction::create(vm, executable, scope));
}
static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind kind)
{
+ UNUSED_PARAM(pc);
+
+#if LLINT_SLOW_PATH_TRACING
+ dataLog("Performing host call.\n");
+#endif
+
ExecState* exec = execCallee->callerFrame();
VM& vm = exec->vm();
- execCallee->setScope(exec->scope());
execCallee->setCodeBlock(0);
execCallee->clearReturnPC();
@@ -1333,7 +1107,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
execCallee->setCallee(asObject(callee));
vm.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
- LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
+ LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
}
#if LLINT_SLOW_PATH_TRACING
@@ -1341,7 +1115,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
#endif
ASSERT(callType == CallTypeNone);
- LLINT_CALL_THROW(exec, pc, createNotAFunctionError(exec, callee));
+ LLINT_CALL_THROW(exec, createNotAFunctionError(exec, callee));
}
ASSERT(kind == CodeForConstruct);
@@ -1356,7 +1130,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
execCallee->setCallee(asObject(callee));
vm.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
- LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
+ LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
}
#if LLINT_SLOW_PATH_TRACING
@@ -1364,15 +1138,17 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc,
#endif
ASSERT(constructType == ConstructTypeNone);
- LLINT_CALL_THROW(exec, pc, createNotAConstructorError(exec, callee));
+ LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee));
}
inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = 0)
{
+ ExecState* exec = execCallee->callerFrame();
+
#if LLINT_SLOW_PATH_TRACING
- dataLogF("Performing call with recorded PC = %p\n", execCallee->callerFrame()->currentVPC());
+ dataLogF("Performing call with recorded PC = %p\n", exec->currentVPC());
#endif
-
+
JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
if (!calleeAsFunctionCell)
return handleHostCall(execCallee, pc, calleeAsValue, kind);
@@ -1380,38 +1156,66 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code
JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
JSScope* scope = callee->scopeUnchecked();
VM& vm = *scope->vm();
- execCallee->setScope(scope);
ExecutableBase* executable = callee->executable();
-
+
MacroAssemblerCodePtr codePtr;
CodeBlock* codeBlock = 0;
- if (executable->isHostFunction())
- codePtr = executable->hostCodeEntryFor(kind);
- else {
+ bool isWebAssemblyExecutable = false;
+#if ENABLE(WEBASSEMBLY)
+ isWebAssemblyExecutable = executable->isWebAssemblyExecutable();
+#endif
+
+ if (executable->isHostFunction()) {
+ codePtr = executable->entrypointFor(kind, MustCheckArity);
+ } else if (!isWebAssemblyExecutable) {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- JSObject* error = functionExecutable->compileFor(execCallee, callee->scope(), kind);
+
+ if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct)
+ LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee));
+
+ JSObject* error = functionExecutable->prepareForExecution(execCallee, callee, scope, kind);
if (error)
- LLINT_CALL_THROW(execCallee->callerFrame(), pc, error);
- codeBlock = &functionExecutable->generatedBytecodeFor(kind);
+ LLINT_CALL_THROW(exec, error);
+ codeBlock = functionExecutable->codeBlockFor(kind);
ASSERT(codeBlock);
+ ArityCheckMode arity;
if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
- codePtr = functionExecutable->jsCodeWithArityCheckEntryFor(kind);
+ arity = MustCheckArity;
else
- codePtr = functionExecutable->jsCodeEntryFor(kind);
+ arity = ArityCheckNotRequired;
+ codePtr = functionExecutable->entrypointFor(kind, arity);
+ } else {
+#if ENABLE(WEBASSEMBLY)
+ WebAssemblyExecutable* webAssemblyExecutable = static_cast<WebAssemblyExecutable*>(executable);
+ webAssemblyExecutable->prepareForExecution(execCallee);
+ codeBlock = webAssemblyExecutable->codeBlockForCall();
+ ASSERT(codeBlock);
+ ArityCheckMode arity;
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
+ arity = MustCheckArity;
+ else
+ arity = ArityCheckNotRequired;
+ codePtr = webAssemblyExecutable->entrypointFor(kind, arity);
+#endif
}
+ ASSERT(!!codePtr);
+
if (!LLINT_ALWAYS_ACCESS_SLOW && callLinkInfo) {
+ CodeBlock* callerCodeBlock = exec->codeBlock();
+
+ ConcurrentJITLocker locker(callerCodeBlock->m_lock);
+
if (callLinkInfo->isOnList())
callLinkInfo->remove();
- ExecState* execCaller = execCallee->callerFrame();
- callLinkInfo->callee.set(vm, execCaller->codeBlock()->ownerExecutable(), callee);
- callLinkInfo->lastSeenCallee.set(vm, execCaller->codeBlock()->ownerExecutable(), callee);
+ callLinkInfo->callee.set(vm, callerCodeBlock, callee);
+ callLinkInfo->lastSeenCallee.set(vm, callerCodeBlock, callee);
callLinkInfo->machineCodeTarget = codePtr;
if (codeBlock)
- codeBlock->linkIncomingCall(callLinkInfo);
+ codeBlock->linkIncomingCall(exec, callLinkInfo);
}
- LLINT_CALL_RETURN(execCallee, pc, codePtr.executableAddress());
+ LLINT_CALL_RETURN(exec, execCallee, codePtr.executableAddress());
}
inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpecializationKind kind)
@@ -1422,16 +1226,16 @@ inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpec
// - If possible, link the call's inline cache.
// - Return a tuple of machine code address to call and the new call frame.
- JSValue calleeAsValue = LLINT_OP_C(1).jsValue();
+ JSValue calleeAsValue = LLINT_OP_C(2).jsValue();
- ExecState* execCallee = exec + pc[3].u.operand;
+ ExecState* execCallee = exec - pc[4].u.operand;
- execCallee->setArgumentCountIncludingThis(pc[2].u.operand);
+ execCallee->setArgumentCountIncludingThis(pc[3].u.operand);
execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
execCallee->setCallerFrame(exec);
- ASSERT(pc[4].u.callLinkInfo);
- return setUpCall(execCallee, pc, kind, calleeAsValue, pc[4].u.callLinkInfo);
+ ASSERT(pc[5].u.callLinkInfo);
+ return setUpCall(execCallee, pc, kind, calleeAsValue, pc[5].u.callLinkInfo);
}
LLINT_SLOW_PATH_DECL(slow_path_call)
@@ -1446,74 +1250,91 @@ LLINT_SLOW_PATH_DECL(slow_path_construct)
return genericCall(exec, pc, CodeForConstruct);
}
-LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
+LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_varargs)
{
LLINT_BEGIN();
// This needs to:
// - Set up a call frame while respecting the variable arguments.
+
+ unsigned numUsedStackSlots = -pc[5].u.operand;
+ unsigned length = sizeFrameForVarargs(exec, &vm.interpreter->stack(),
+ LLINT_OP_C(4).jsValue(), numUsedStackSlots, pc[6].u.operand);
+ LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+
+ ExecState* execCallee = calleeFrameForVarargs(exec, numUsedStackSlots, length + 1);
+ vm.varargsLength = length;
+ vm.newCallFrameReturnValue = execCallee;
+
+ LLINT_RETURN_CALLEE_FRAME(execCallee);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_call_varargs)
+{
+ LLINT_BEGIN_NO_SET_PC();
+ // This needs to:
// - Figure out what to call and compile it if necessary.
// - Return a tuple of machine code address to call and the new call frame.
- JSValue calleeAsValue = LLINT_OP_C(1).jsValue();
+ JSValue calleeAsValue = LLINT_OP_C(2).jsValue();
- ExecState* execCallee = loadVarargs(
- exec, &vm.interpreter->stack(),
- LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue(), pc[4].u.operand);
- LLINT_CALL_CHECK_EXCEPTION(exec, pc);
+ ExecState* execCallee = vm.newCallFrameReturnValue;
+
+ setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength);
+ LLINT_CALL_CHECK_EXCEPTION(exec, exec);
execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
execCallee->setCallerFrame(exec);
- exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_varargs));
+ exec->setCurrentVPC(pc);
return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
}
-
+
+LLINT_SLOW_PATH_DECL(slow_path_construct_varargs)
+{
+ LLINT_BEGIN_NO_SET_PC();
+ // This needs to:
+ // - Figure out what to call and compile it if necessary.
+ // - Return a tuple of machine code address to call and the new call frame.
+
+ JSValue calleeAsValue = LLINT_OP_C(2).jsValue();
+
+ ExecState* execCallee = vm.newCallFrameReturnValue;
+
+ setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength);
+ LLINT_CALL_CHECK_EXCEPTION(exec, exec);
+
+ execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
+ execCallee->setCallerFrame(exec);
+ exec->setCurrentVPC(pc);
+
+ return setUpCall(execCallee, pc, CodeForConstruct, calleeAsValue);
+}
+
LLINT_SLOW_PATH_DECL(slow_path_call_eval)
{
LLINT_BEGIN_NO_SET_PC();
- JSValue calleeAsValue = LLINT_OP(1).jsValue();
+ JSValue calleeAsValue = LLINT_OP(2).jsValue();
- ExecState* execCallee = exec + pc[3].u.operand;
+ ExecState* execCallee = exec - pc[4].u.operand;
- execCallee->setArgumentCountIncludingThis(pc[2].u.operand);
+ execCallee->setArgumentCountIncludingThis(pc[3].u.operand);
execCallee->setCallerFrame(exec);
execCallee->uncheckedR(JSStack::Callee) = calleeAsValue;
- execCallee->setScope(exec->scope());
execCallee->setReturnPC(LLInt::getCodePtr(llint_generic_return_point));
execCallee->setCodeBlock(0);
- exec->setCurrentVPC(pc + OPCODE_LENGTH(op_call_eval));
+ exec->setCurrentVPC(pc);
if (!isHostFunction(calleeAsValue, globalFuncEval))
return setUpCall(execCallee, pc, CodeForCall, calleeAsValue);
vm.hostCallReturnValue = eval(execCallee);
- LLINT_CALL_RETURN(execCallee, pc, LLInt::getCodePtr(getHostCallReturnValue));
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation)
-{
- LLINT_BEGIN();
- ASSERT(exec->codeBlock()->needsFullScopeChain());
- jsCast<JSActivation*>(LLINT_OP(1).jsValue())->tearOff(vm);
- LLINT_END();
-}
-
-LLINT_SLOW_PATH_DECL(slow_path_tear_off_arguments)
-{
- LLINT_BEGIN();
- ASSERT(exec->codeBlock()->usesArguments());
- Arguments* arguments = jsCast<Arguments*>(exec->uncheckedR(unmodifiedArgumentsRegister(pc[1].u.operand)).jsValue());
- if (JSValue activationValue = LLINT_OP_C(2).jsValue())
- arguments->didTearOffActivation(exec, jsCast<JSActivation*>(activationValue));
- else
- arguments->tearOff(exec);
- LLINT_END();
+ LLINT_CALL_RETURN(exec, execCallee, LLInt::getCodePtr(getHostCallReturnValue));
}
LLINT_SLOW_PATH_DECL(slow_path_strcat)
{
LLINT_BEGIN();
- LLINT_RETURN(jsString(exec, &LLINT_OP(2), pc[3].u.operand));
+ LLINT_RETURN(jsStringFromRegisterArray(exec, &LLINT_OP(2), pc[3].u.operand));
}
LLINT_SLOW_PATH_DECL(slow_path_to_primitive)
@@ -1522,129 +1343,180 @@ LLINT_SLOW_PATH_DECL(slow_path_to_primitive)
LLINT_RETURN(LLINT_OP_C(2).jsValue().toPrimitive(exec));
}
-LLINT_SLOW_PATH_DECL(slow_path_get_pnames)
+LLINT_SLOW_PATH_DECL(slow_path_throw)
{
LLINT_BEGIN();
- JSValue v = LLINT_OP(2).jsValue();
- if (v.isUndefinedOrNull()) {
- pc += pc[5].u.operand;
- LLINT_END();
- }
-
- JSObject* o = v.toObject(exec);
- Structure* structure = o->structure();
- JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
- if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(exec))
- jsPropertyNameIterator = JSPropertyNameIterator::create(exec, o);
-
- LLINT_OP(1) = JSValue(jsPropertyNameIterator);
- LLINT_OP(2) = JSValue(o);
- LLINT_OP(3) = Register::withInt(0);
- LLINT_OP(4) = Register::withInt(jsPropertyNameIterator->size());
-
- pc += OPCODE_LENGTH(op_get_pnames);
- LLINT_END();
+ LLINT_THROW(LLINT_OP_C(1).jsValue());
}
-LLINT_SLOW_PATH_DECL(slow_path_next_pname)
+LLINT_SLOW_PATH_DECL(slow_path_throw_static_error)
{
LLINT_BEGIN();
- JSObject* base = asObject(LLINT_OP(2).jsValue());
- JSString* property = asString(LLINT_OP(1).jsValue());
- if (base->hasProperty(exec, Identifier(exec, property->value(exec)))) {
- // Go to target.
- pc += pc[6].u.operand;
- } // Else, don't change the PC, so the interpreter will reloop.
- LLINT_END();
+ JSValue errorMessageValue = LLINT_OP_C(1).jsValue();
+ RELEASE_ASSERT(errorMessageValue.isString());
+ String errorMessage = asString(errorMessageValue)->value(exec);
+ if (pc[2].u.operand)
+ LLINT_THROW(createReferenceError(exec, errorMessage));
+ else
+ LLINT_THROW(createTypeError(exec, errorMessage));
}
-LLINT_SLOW_PATH_DECL(slow_path_push_with_scope)
+LLINT_SLOW_PATH_DECL(slow_path_handle_watchdog_timer)
+{
+ LLINT_BEGIN_NO_SET_PC();
+ ASSERT(vm.watchdog());
+ if (UNLIKELY(vm.shouldTriggerTermination(exec)))
+ LLINT_THROW(createTerminatedExecutionException(&vm));
+ LLINT_RETURN_TWO(0, exec);
+}
+
+LLINT_SLOW_PATH_DECL(slow_path_debug)
{
LLINT_BEGIN();
- JSValue v = LLINT_OP_C(1).jsValue();
- JSObject* o = v.toObject(exec);
- LLINT_CHECK_EXCEPTION();
-
- exec->setScope(JSWithScope::create(exec, o));
+ int debugHookID = pc[1].u.operand;
+ vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID));
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_pop_scope)
+LLINT_SLOW_PATH_DECL(slow_path_profile_will_call)
{
LLINT_BEGIN();
- exec->setScope(exec->scope()->next());
+ if (LegacyProfiler* profiler = vm.enabledProfiler())
+ profiler->willExecute(exec, LLINT_OP(1).jsValue());
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_push_name_scope)
+LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
{
LLINT_BEGIN();
- CodeBlock* codeBlock = exec->codeBlock();
- JSNameScope* scope = JSNameScope::create(exec, codeBlock->identifier(pc[1].u.operand), LLINT_OP(2).jsValue(), pc[3].u.operand);
- exec->setScope(scope);
+ if (LegacyProfiler* profiler = vm.enabledProfiler())
+ profiler->didExecute(exec, LLINT_OP(1).jsValue());
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_throw)
+LLINT_SLOW_PATH_DECL(slow_path_handle_exception)
{
- LLINT_BEGIN();
- LLINT_THROW(LLINT_OP_C(1).jsValue());
+ LLINT_BEGIN_NO_SET_PC();
+ genericUnwind(&vm, exec);
+ LLINT_END_IMPL();
}
-LLINT_SLOW_PATH_DECL(slow_path_throw_static_error)
+LLINT_SLOW_PATH_DECL(slow_path_get_from_scope)
{
LLINT_BEGIN();
- if (pc[2].u.operand)
- LLINT_THROW(createReferenceError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec)));
- else
- LLINT_THROW(createTypeError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec)));
-}
-LLINT_SLOW_PATH_DECL(slow_path_handle_watchdog_timer)
-{
- LLINT_BEGIN_NO_SET_PC();
- if (UNLIKELY(vm.watchdog.didFire(exec)))
- LLINT_THROW(createTerminatedExecutionException(&vm));
- LLINT_RETURN_TWO(0, exec);
+ const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand);
+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue());
+ GetPutInfo getPutInfo(pc[4].u.operand);
+
+ // ModuleVar is always converted to ClosureVar for get_from_scope.
+ ASSERT(getPutInfo.resolveType() != ModuleVar);
+
+ PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
+ if (!scope->getPropertySlot(exec, ident, slot)) {
+ if (getPutInfo.resolveMode() == ThrowIfNotFound)
+ LLINT_RETURN(exec->vm().throwException(exec, createUndefinedVariableError(exec, ident)));
+ LLINT_RETURN(jsUndefined());
+ }
+
+ JSValue result = JSValue();
+ if (jsDynamicCast<JSGlobalLexicalEnvironment*>(scope)) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ result = slot.getValue(exec, ident);
+ if (result == jsTDZValue())
+ LLINT_THROW(createTDZError(exec));
+ }
+
+ CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident);
+
+ if (!result)
+ result = slot.getValue(exec, ident);
+ LLINT_RETURN(result);
}
-LLINT_SLOW_PATH_DECL(slow_path_debug)
+LLINT_SLOW_PATH_DECL(slow_path_put_to_scope)
{
LLINT_BEGIN();
- int debugHookID = pc[1].u.operand;
- int firstLine = pc[2].u.operand;
- int lastLine = pc[3].u.operand;
- int column = pc[4].u.operand;
- vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
+ CodeBlock* codeBlock = exec->codeBlock();
+ const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
+ JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue());
+ JSValue value = LLINT_OP_C(3).jsValue();
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope);
+ environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value);
+
+ // Have to do this *after* the write, because if this puts the set into IsWatched, then we need
+ // to have already changed the value of the variable. Otherwise we might watch and constant-fold
+ // to the Undefined value from before the assignment.
+ if (WatchpointSet* set = pc[5].u.watchpointSet)
+ set->touch("Executed op_put_scope<LocalClosureVar>");
+ LLINT_END();
+ }
+
+ bool hasProperty = scope->hasProperty(exec, ident);
+ if (hasProperty
+ && jsDynamicCast<JSGlobalLexicalEnvironment*>(scope)
+ && getPutInfo.initializationMode() != Initialization) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
+ JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot);
+ if (slot.getValue(exec, ident) == jsTDZValue())
+ LLINT_THROW(createTDZError(exec));
+ }
+
+ if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty)
+ LLINT_THROW(createUndefinedVariableError(exec, ident));
+
+ PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, getPutInfo.initializationMode() == Initialization);
+ scope->methodTable()->put(scope, exec, ident, value, slot);
+ CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident);
+
LLINT_END();
}
-LLINT_SLOW_PATH_DECL(slow_path_profile_will_call)
+LLINT_SLOW_PATH_DECL(slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
{
LLINT_BEGIN();
+ RELEASE_ASSERT(!!vm.exception());
+
if (LegacyProfiler* profiler = vm.enabledProfiler())
- profiler->willExecute(exec, LLINT_OP(1).jsValue());
- LLINT_END();
+ profiler->exceptionUnwind(exec);
+
+ if (isTerminatedExecutionException(vm.exception()))
+ LLINT_RETURN_TWO(pc, bitwise_cast<void*>(static_cast<uintptr_t>(1)));
+ LLINT_RETURN_TWO(pc, 0);
}
-LLINT_SLOW_PATH_DECL(slow_path_profile_did_call)
+extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame)
{
- LLINT_BEGIN();
- if (LegacyProfiler* profiler = vm.enabledProfiler())
- profiler->didExecute(exec, LLINT_OP(1).jsValue());
- LLINT_END();
+ ExecState* exec = vm->topCallFrame;
+ if (!exec)
+ exec = protoFrame->callee()->globalObject()->globalExec();
+ throwStackOverflowError(exec);
+ return encodeResult(0, 0);
}
-LLINT_SLOW_PATH_DECL(throw_from_native_call)
+#if !ENABLE(JIT)
+extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM* vm, Register* newTopOfStack)
{
- LLINT_BEGIN();
- ASSERT(vm.exception);
- LLINT_END();
+ bool success = vm->interpreter->stack().ensureCapacityFor(newTopOfStack);
+ return encodeResult(reinterpret_cast<void*>(success), 0);
+}
+#endif
+
+extern "C" void llint_write_barrier_slow(ExecState* exec, JSCell* cell)
+{
+ VM& vm = exec->vm();
+ vm.heap.writeBarrier(cell);
}
-} } // namespace JSC::LLInt
+extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash()
+{
+ CRASH();
+}
-#endif // ENABLE(LLINT)
+} } // namespace JSC::LLInt
diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
index dbf68b2f9..0d5c8da43 100644
--- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h
+++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,71 +26,20 @@
#ifndef LLIntSlowPaths_h
#define LLIntSlowPaths_h
-#include <wtf/Platform.h>
+#include "CommonSlowPaths.h"
#include <wtf/StdLibExtras.h>
-#if ENABLE(LLINT)
-
namespace JSC {
class ExecState;
struct Instruction;
+struct ProtoCallFrame;
namespace LLInt {
-#if USE(JSVALUE64)
-// According to C++ rules, a type used for the return signature of function with C linkage (i.e.
-// 'extern "C"') needs to be POD; hence putting any constructors into it could cause either compiler
-// warnings, or worse, a change in the ABI used to return these types.
-struct SlowPathReturnType {
- void* a;
- ExecState* b;
-};
-
-inline SlowPathReturnType encodeResult(void* a, ExecState* b)
-{
- SlowPathReturnType result;
- result.a = a;
- result.b = b;
- return result;
-}
-
-inline void decodeResult(SlowPathReturnType result, void*& a, ExecState*& b)
-{
- a = result.a;
- b = result.b;
-}
-
-#else // USE(JSVALUE32_64)
-typedef int64_t SlowPathReturnType;
-
-typedef union {
- struct {
- void* a;
- ExecState* b;
- } pair;
- int64_t i;
-} SlowPathReturnTypeEncoding;
-
-inline SlowPathReturnType encodeResult(void* a, ExecState* b)
-{
- SlowPathReturnTypeEncoding u;
- u.pair.a = a;
- u.pair.b = b;
- return u.i;
-}
-
-inline void decodeResult(SlowPathReturnType result, void*& a, ExecState*& b)
-{
- SlowPathReturnTypeEncoding u;
- u.i = result;
- a = u.pair.a;
- b = u.pair.b;
-}
-#endif // USE(JSVALUE32_64)
-
extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand);
extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand);
+extern "C" void llint_write_barrier_slow(ExecState*, JSCell*) WTF_INTERNAL;
#define LLINT_SLOW_PATH_DECL(name) \
extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc)
@@ -113,64 +62,28 @@ LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_construct_arityCheck);
LLINT_SLOW_PATH_HIDDEN_DECL(loop_osr);
LLINT_SLOW_PATH_HIDDEN_DECL(replace);
LLINT_SLOW_PATH_HIDDEN_DECL(stack_check);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_arityCheck);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct_arityCheck);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_create_activation);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_create_arguments);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_create_this);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_convert_this);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_object);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_with_size);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_buffer);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_regexp);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_not);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_eq);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_neq);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_stricteq);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_nstricteq);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_less);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_lesseq);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_greater);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_greatereq);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pre_inc);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pre_dec);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_number);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_negate);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_add);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_mul);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_sub);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_div);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_mod);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_lshift);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_rshift);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_urshift);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_bitand);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_bitor);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_bitxor);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_check_has_instance);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_typeof);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_is_object);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_is_function);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_in);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_to_base);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_base);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_with_base);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_with_this);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_init_global_const_check);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof_custom);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_arguments_length);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_id);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_argument_by_val);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_pname);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val_direct);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_index);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_setter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter_by_id);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_by_val);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_setter_by_val);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jtrue);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jfalse);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jless);
@@ -186,30 +99,35 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_char);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_string);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func_exp);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_generator_func);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_generator_func_exp);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_arrow_func_exp);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_frame_for_varargs);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_varargs);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct_varargs);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_activation);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_arguments);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_strcat);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_primitive);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_pnames);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_next_pname);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_with_scope);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pop_scope);
-LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_name_scope);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_static_error);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_watchdog_timer);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_debug);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_will_call);
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_did_call);
-LLINT_SLOW_PATH_HIDDEN_DECL(throw_from_native_call);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_exception);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_from_scope);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_to_scope);
+LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_check_if_exception_is_uncatchable_and_notify_profiler);
+extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL;
+#if !ENABLE(JIT)
+extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM*, Register*) WTF_INTERNAL;
+#endif
+extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash() WTF_INTERNAL;
} } // namespace JSC::LLInt
-#endif // ENABLE(LLINT)
-
#endif // LLIntSlowPaths_h
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp
index fe57aa374..af6884e5e 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.cpp
+++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,62 +26,100 @@
#include "config.h"
#include "LLIntThunks.h"
-#if ENABLE(LLINT)
-
+#include "CallData.h"
+#include "ExceptionHelpers.h"
+#include "Interpreter.h"
+#include "JSCJSValueInlines.h"
#include "JSInterfaceJIT.h"
#include "JSObject.h"
+#include "JSStackInlines.h"
+#include "LLIntCLoop.h"
#include "LinkBuffer.h"
#include "LowLevelInterpreter.h"
+#include "ProtoCallFrame.h"
+#include "StackAlignment.h"
+#include "VM.h"
+namespace JSC {
-namespace JSC { namespace LLInt {
+#if ENABLE(JIT)
-#if !ENABLE(LLINT_C_LOOP)
+namespace LLInt {
static MacroAssemblerCodeRef generateThunkWithJumpTo(VM* vm, void (*target)(), const char *thunkKind)
{
- JSInterfaceJIT jit;
+ JSInterfaceJIT jit(vm);
// FIXME: there's probably a better way to do it on X86, but I'm not sure I care.
jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0);
jit.jump(JSInterfaceJIT::regT0);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("LLInt %s prologue thunk", thunkKind));
}
MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_call_prologue, "function for call");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_prologue), "function for call");
}
MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_construct_prologue, "function for construct");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_prologue), "function for construct");
}
MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_call_arity_check, "function for call with arity check");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_arity_check), "function for call with arity check");
}
MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_function_for_construct_arity_check, "function for construct with arity check");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_arity_check), "function for construct with arity check");
}
MacroAssemblerCodeRef evalEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_eval_prologue, "eval");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_eval_prologue), "eval");
}
MacroAssemblerCodeRef programEntryThunkGenerator(VM* vm)
{
- return generateThunkWithJumpTo(vm, llint_program_prologue, "program");
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_program_prologue), "program");
+}
+
+MacroAssemblerCodeRef moduleProgramEntryThunkGenerator(VM* vm)
+{
+ return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_module_program_prologue), "module_program");
+}
+
+} // namespace LLInt
+
+#else // ENABLE(JIT)
+
+// Non-JIT (i.e. C Loop LLINT) case:
+
+EncodedJSValue vmEntryToJavaScript(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame)
+{
+ JSValue result = CLoop::execute(llint_vm_entry_to_javascript, executableAddress, vm, protoCallFrame);
+ return JSValue::encode(result);
+}
+
+EncodedJSValue vmEntryToNative(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame)
+{
+ JSValue result = CLoop::execute(llint_vm_entry_to_native, executableAddress, vm, protoCallFrame);
+ return JSValue::encode(result);
+}
+
+extern "C" VMEntryRecord* vmEntryRecord(VMEntryFrame* entryFrame)
+{
+ // The C Loop doesn't have any callee save registers, so the VMEntryRecord is allocated at the base of the frame.
+ intptr_t stackAlignment = stackAlignmentBytes();
+ intptr_t VMEntryTotalFrameSize = (sizeof(VMEntryRecord) + (stackAlignment - 1)) & ~(stackAlignment - 1);
+ return reinterpret_cast<VMEntryRecord*>(static_cast<char*>(entryFrame) - VMEntryTotalFrameSize);
}
-#endif // !ENABLE(LLINT_C_LOOP)
-} } // namespace JSC::LLInt
+#endif // ENABLE(JIT)
-#endif // ENABLE(LLINT)
+} // namespace JSC
diff --git a/Source/JavaScriptCore/llint/LLIntThunks.h b/Source/JavaScriptCore/llint/LLIntThunks.h
index b46cc00e7..95b0f4484 100644
--- a/Source/JavaScriptCore/llint/LLIntThunks.h
+++ b/Source/JavaScriptCore/llint/LLIntThunks.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,15 +26,17 @@
#ifndef LLIntThunks_h
#define LLIntThunks_h
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
-
#include "MacroAssemblerCodeRef.h"
namespace JSC {
class VM;
+struct ProtoCallFrame;
+
+extern "C" {
+ EncodedJSValue vmEntryToJavaScript(void*, VM*, ProtoCallFrame*);
+ EncodedJSValue vmEntryToNative(void*, VM*, ProtoCallFrame*);
+}
namespace LLInt {
@@ -44,9 +46,8 @@ MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM*);
MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM*);
MacroAssemblerCodeRef evalEntryThunkGenerator(VM*);
MacroAssemblerCodeRef programEntryThunkGenerator(VM*);
+MacroAssemblerCodeRef moduleProgramEntryThunkGenerator(VM*);
} } // namespace JSC::LLInt
-#endif // ENABLE(LLINT)
-
#endif // LLIntThunks_h
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
index 22ba11164..8e77c0e22 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2015 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -21,40 +21,262 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
+# Crash course on the language that this is written in (which I just call
+# "assembly" even though it's more than that):
+#
+# - Mostly gas-style operand ordering. The last operand tends to be the
+# destination. So "a := b" is written as "mov b, a". But unlike gas,
+# comparisons are in-order, so "if (a < b)" is written as
+# "bilt a, b, ...".
+#
+# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
+# For 32-bit, "i" and "p" are interchangeable except when an op supports one
+# but not the other.
+#
+# - In general, valid operands for macro invocations and instructions are
+# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
+# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
+# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
+# macros as operands. Instructions cannot take anonymous macros.
+#
+# - Labels must have names that begin with either "_" or ".". A "." label
+# is local and gets renamed before code gen to minimize namespace
+# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
+# may or may not be removed during code gen depending on whether the asm
+# conventions for C name mangling on the target platform mandate a "_"
+# prefix.
+#
+# - A "macro" is a lambda expression, which may be either anonymous or
+# named. But this has caveats. "macro" can take zero or more arguments,
+# which may be macros or any valid operands, but it can only return
+# code. But you can do Turing-complete things via continuation passing
+# style: "macro foo (a, b) b(a, a) end foo(foo, foo)". Actually, don't do
+# that, since you'll just crash the assembler.
+#
+# - An "if" is a conditional on settings. Any identifier supplied in the
+# predicate of an "if" is assumed to be a #define that is available
+# during code gen. So you can't use "if" for computation in a macro, but
+# you can use it to select different pieces of code for different
+# platforms.
+#
+# - Arguments to macros follow lexical scoping rather than dynamic scoping.
+# Const's also follow lexical scoping and may override (hide) arguments
+# or other consts. All variables (arguments and constants) can be bound
+# to operands. Additionally, arguments (but not constants) can be bound
+# to macros.
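+#
+# A short, non-normative illustration of the syntax described above (the
+# register and label names here are placeholders, not real interpreter code):
+#
+#     move 8, t0              # t0 := 8
+#     addp t0, t1             # t1 := t1 + t0
+#     bilt t1, 16, .done      # if (t1 < 16) goto .done
+#     storep t1, 8[t0]        # *(t0 + 8) := t1
+# .done: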
+
+# The following general-purpose registers are available:
+#
+# - cfr and sp hold the call frame and (native) stack pointer respectively.
+# They are callee-save registers, and guaranteed to be distinct from all other
+# registers on all architectures.
+#
+# - lr is defined on non-X86 architectures (ARM64, ARMv7, ARM,
+# ARMv7_TRADITIONAL, MIPS, SH4 and CLOOP) and holds the return PC
+#
+# - pc holds the (native) program counter on 32-bits ARM architectures (ARM,
+# ARMv7, ARMv7_TRADITIONAL)
+#
+# - t0, t1, t2, t3, t4 and optionally t5 are temporary registers that can get trashed on
+# calls, and are pairwise distinct registers. t4 holds the JS program counter, so use
+# with caution in opcodes (actually, don't use it in opcodes at all, except as PC).
+#
+# - r0 and r1 are the platform's customary return registers, and thus are
+# two distinct registers
+#
+# - a0, a1, a2 and a3 are the platform's customary argument registers, and
+# thus are pairwise distinct registers. Be mindful that:
+# + On X86, there are no argument registers. a0 and a1 are edx and
+# ecx following the fastcall convention, but you should still use the stack
+# to pass your arguments. The cCall2 and cCall4 macros do this for you.
+# + On X86_64_WIN, you should allocate space on the stack for the arguments,
+#     and the return convention is weird for > 8 byte types. The only place we
+# use > 8 bytes return values is on a cCall, and cCall2 and cCall4 handle
+# this for you.
+#
+# - The only registers guaranteed to be caller-saved are r0, r1, a0, a1 and a2, and
+# you should be mindful of that in functions that are called directly from C.
+# If you need more registers, you should push and pop them like a good
+# assembly citizen, because any other register will be callee-saved on X86.
+#
+# You can additionally assume:
+#
+# - a3, t2, t3, t4 and t5 are never return registers; t0, t1, a0, a1 and a2
+# can be return registers.
+#
+# - t4 and t5 are never argument registers, t3 can only be a3, t1 can only be
+# a1; but t0 and t2 can be either a0 or a2.
+#
+# - On 64 bits, there are callee-save registers named csr0, csr1, ... csrN.
+#   The last three csr registers are used to store the PC base and
+# two special tag values. Don't use them for anything else.
+#
+# Additional platform-specific details (you shouldn't rely on this remaining
+# true):
+#
+# - For consistency with the baseline JIT, t0 is always r0 (and t1 is always
+# r1 on 32 bits platforms). You should use the r version when you need return
+# registers, and the t version otherwise: code using t0 (or t1) should still
+# work if swapped with e.g. t3, while code using r0 (or r1) should not. There
+# *may* be legacy code relying on this.
+#
+# - On all platforms other than X86, t0 can only be a0 and t2 can only be a2.
+#
+# - On all platforms other than X86 and X86_64, a2 is not a return register.
+# a2 is r0 on X86 (because we have so few registers) and r1 on X86_64 (because
+# the ABI enforces it).
+#
+# The following floating-point registers are available:
+#
+# - ft0-ft5 are temporary floating-point registers that get trashed on calls,
+# and are pairwise distinct.
+#
+# - fa0 and fa1 are the platform's customary floating-point argument
+# registers, and are both distinct. On 64-bits platforms, fa2 and fa3 are
+# additional floating-point argument registers.
+#
+# - fr is the platform's customary floating-point return register
+#
+# You can assume that ft1-ft5 or fa1-fa3 are never fr, and that ftX is never
+# faY if X != Y.
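+#
+# For instance, a typical slow-path C call in this file (sketched with a
+# hypothetical helper name, _llint_slow_path_example) loads its arguments into
+# a0/a1, lets cCall2 deal with the platform's calling convention, and picks up
+# the two result words from r0/r1:
+#
+#     move cfr, a0
+#     move PC, a1
+#     cCall2(_llint_slow_path_example)
+#     # r0 and r1 now hold the SlowPathReturnType pair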
+
# First come the common protocols that both interpreters use. Note that each
# of these must have an ASSERT() in LLIntData.cpp
-# Work-around for the fact that the toolchain's awareness of armv7s results in
-# a separate slab in the fat binary, yet the offlineasm doesn't know to expect
-# it.
+# Work-around for the fact that the toolchain's awareness of armv7k / armv7s
+# results in a separate slab in the fat binary, yet the offlineasm doesn't know
+# to expect it.
+if ARMv7k
+end
if ARMv7s
end
# These declarations must match interpreter/JSStack.h.
-const CallFrameHeaderSize = 48
-const ArgumentCount = -48
-const CallerFrame = -40
-const Callee = -32
-const ScopeChain = -24
-const ReturnPC = -16
-const CodeBlock = -8
-const ThisArgumentOffset = -CallFrameHeaderSize - 8
+if JSVALUE64
+ const PtrSize = 8
+ const CallFrameHeaderSlots = 5
+else
+ const PtrSize = 4
+ const CallFrameHeaderSlots = 4
+ const CallFrameAlignSlots = 1
+end
+const SlotSize = 8
+
+const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1)
+const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1)
+
+const StackAlignment = 16
+const StackAlignmentSlots = 2
+const StackAlignmentMask = StackAlignment - 1
+
+const CallerFrameAndPCSize = 2 * PtrSize
+
+const CallerFrame = 0
+const ReturnPC = CallerFrame + PtrSize
+const CodeBlock = ReturnPC + PtrSize
+const Callee = CodeBlock + SlotSize
+const ArgumentCount = Callee + SlotSize
+const ThisArgumentOffset = ArgumentCount + SlotSize
+const FirstArgumentOffset = ThisArgumentOffset + SlotSize
+const CallFrameHeaderSize = ThisArgumentOffset
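+
+# Worked example (informative only): with JSVALUE64 (PtrSize = SlotSize = 8),
+# the header above occupies CallFrameHeaderSlots = 5 slots:
+#   CallerFrame = 0, ReturnPC = 8, CodeBlock = 16, Callee = 24,
+#   ArgumentCount = 32, ThisArgumentOffset = CallFrameHeaderSize = 40,
+#   FirstArgumentOffset = 48.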
+
+# Some value representation constants.
+if JSVALUE64
+ const TagBitTypeOther = 0x2
+ const TagBitBool = 0x4
+ const TagBitUndefined = 0x8
+ const ValueEmpty = 0x0
+ const ValueFalse = TagBitTypeOther | TagBitBool
+ const ValueTrue = TagBitTypeOther | TagBitBool | 1
+ const ValueUndefined = TagBitTypeOther | TagBitUndefined
+ const ValueNull = TagBitTypeOther
+ const TagTypeNumber = 0xffff000000000000
+ const TagMask = TagTypeNumber | TagBitTypeOther
+else
+ const Int32Tag = -1
+ const BooleanTag = -2
+ const NullTag = -3
+ const UndefinedTag = -4
+ const CellTag = -5
+ const EmptyValueTag = -6
+ const DeletedValueTag = -7
+ const LowestTag = DeletedValueTag
+end
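+
+# Informative note (not used by the code below): on JSVALUE64 the constants
+# above give ValueFalse = 0x6, ValueTrue = 0x7, ValueNull = 0x2 and
+# ValueUndefined = 0xa; a value is a number iff (value & TagTypeNumber) is
+# non-zero, and an int32 iff all of the TagTypeNumber bits are set.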
+
+# NOTE: The values below must be in sync with what is in PutByIdFlags.h.
+const PutByIdPrimaryTypeMask = 0x6
+const PutByIdPrimaryTypeSecondary = 0x0
+const PutByIdPrimaryTypeObjectWithStructure = 0x2
+const PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4
+const PutByIdSecondaryTypeMask = -0x8
+const PutByIdSecondaryTypeBottom = 0x0
+const PutByIdSecondaryTypeBoolean = 0x8
+const PutByIdSecondaryTypeOther = 0x10
+const PutByIdSecondaryTypeInt32 = 0x18
+const PutByIdSecondaryTypeNumber = 0x20
+const PutByIdSecondaryTypeString = 0x28
+const PutByIdSecondaryTypeSymbol = 0x30
+const PutByIdSecondaryTypeObject = 0x38
+const PutByIdSecondaryTypeObjectOrOther = 0x40
+const PutByIdSecondaryTypeTop = 0x48
+
+const CopyBarrierSpaceBits = 3
+
+const CallOpCodeSize = 9
+
+if X86_64 or ARM64 or C_LOOP
+ const maxFrameExtentForSlowPathCall = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4
+ const maxFrameExtentForSlowPathCall = 24
+elsif X86 or X86_WIN
+ const maxFrameExtentForSlowPathCall = 40
+elsif MIPS
+ const maxFrameExtentForSlowPathCall = 40
+elsif X86_64_WIN
+ const maxFrameExtentForSlowPathCall = 64
+end
+
+if X86_64 or X86_64_WIN or ARM64
+ const CalleeSaveSpaceAsVirtualRegisters = 3
+else
+ const CalleeSaveSpaceAsVirtualRegisters = 0
+end
+
+const CalleeSaveSpaceStackAligned = (CalleeSaveSpaceAsVirtualRegisters * SlotSize + StackAlignment - 1) & ~StackAlignmentMask
+
+
+# Watchpoint states
+const ClearWatchpoint = 0
+const IsWatched = 1
+const IsInvalidated = 2
# Some register conventions.
if JSVALUE64
# - Use a pair of registers to represent the PC: one register for the
- # base of the stack, and one register for the index.
- # - The PC base (or PB for short) should be stored in the csr. It will
- # get clobbered on calls to other JS code, but will get saved on calls
- # to C functions.
+ # base of the bytecodes, and one register for the index.
+ # - The PC base (or PB for short) must be stored in a callee-save register.
# - C calls are still given the Instruction* rather than the PC index.
# This requires an add before the call, and a sub after.
- const PC = t4
- const PB = t6
- const tagTypeNumber = csr1
- const tagMask = csr2
-
+ const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
+ if ARM64
+ const PB = csr7
+ const tagTypeNumber = csr8
+ const tagMask = csr9
+ elsif X86_64
+ const PB = csr2
+ const tagTypeNumber = csr3
+ const tagMask = csr4
+ elsif X86_64_WIN
+ const PB = csr4
+ const tagTypeNumber = csr5
+ const tagMask = csr6
+ elsif C_LOOP
+ const PB = csr0
+ const tagTypeNumber = csr1
+ const tagMask = csr2
+ end
+
macro loadisFromInstruction(offset, dest)
loadis offset * 8[PB, PC, 8], dest
end
@@ -68,7 +290,7 @@ if JSVALUE64
end
else
- const PC = t4
+ const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h
macro loadisFromInstruction(offset, dest)
loadis offset * 4[PC], dest
end
@@ -78,6 +300,12 @@ else
end
end
+if X86_64_WIN
+ const extraTempReg = t0
+else
+ const extraTempReg = t5
+end
+
# Constants for reasoning about value representation.
if BIG_ENDIAN
const TagOffset = 0
@@ -87,12 +315,6 @@ else
const PayloadOffset = 0
end
-if JSVALUE64
- const JSCellPayloadOffset = 0
-else
- const JSCellPayloadOffset = PayloadOffset
-end
-
# Constant for reasoning about butterflies.
const IsArray = 1
const IndexingShapeMask = 30
@@ -104,13 +326,14 @@ const ArrayStorageShape = 28
const SlowPutArrayStorageShape = 30
# Type constants.
-const StringType = 5
-const ObjectType = 17
+const StringType = 6
+const SymbolType = 7
+const ObjectType = 21
+const FinalObjectType = 22
# Type flags constants.
const MasqueradesAsUndefined = 1
-const ImplementsHasInstance = 2
-const ImplementsDefaultHasInstance = 8
+const ImplementsDefaultHasInstance = 2
# Bytecode operand constants.
const FirstConstantRegisterIndex = 0x40000000
@@ -119,38 +342,36 @@ const FirstConstantRegisterIndex = 0x40000000
const GlobalCode = 0
const EvalCode = 1
const FunctionCode = 2
+const ModuleCode = 3
# The interpreter steals the tag word of the argument count.
const LLIntReturnPC = ArgumentCount + TagOffset
# String flags.
-const HashFlags8BitBuffer = 64
+const HashFlags8BitBuffer = 8
# Copied from PropertyOffset.h
const firstOutOfLineOffset = 100
-# From ResolveOperations.h
-const ResolveOperationFail = 0
-const ResolveOperationSetBaseToUndefined = 1
-const ResolveOperationReturnScopeAsBase = 2
-const ResolveOperationSetBaseToScope = 3
-const ResolveOperationSetBaseToGlobal = 4
-const ResolveOperationGetAndReturnScopedVar = 5
-const ResolveOperationGetAndReturnGlobalVar = 6
-const ResolveOperationGetAndReturnGlobalVarWatchable = 7
-const ResolveOperationSkipTopScopeNode = 8
-const ResolveOperationSkipScopes = 9
-const ResolveOperationReturnGlobalObjectAsBase = 10
-const ResolveOperationGetAndReturnGlobalProperty = 11
-const ResolveOperationCheckForDynamicEntriesBeforeGlobalScope = 12
-
-const PutToBaseOperationKindUninitialised = 0
-const PutToBaseOperationKindGeneric = 1
-const PutToBaseOperationKindReadonly = 2
-const PutToBaseOperationKindGlobalVariablePut = 3
-const PutToBaseOperationKindGlobalVariablePutChecked = 4
-const PutToBaseOperationKindGlobalPropertyPut = 5
-const PutToBaseOperationKindVariablePut = 6
+# ResolveType
+const GlobalProperty = 0
+const GlobalVar = 1
+const GlobalLexicalVar = 2
+const ClosureVar = 3
+const LocalClosureVar = 4
+const ModuleVar = 5
+const GlobalPropertyWithVarInjectionChecks = 6
+const GlobalVarWithVarInjectionChecks = 7
+const GlobalLexicalVarWithVarInjectionChecks = 8
+const ClosureVarWithVarInjectionChecks = 9
+
+const ResolveTypeMask = 0x3ff
+const InitializationModeMask = 0xffc00
+const InitializationModeShift = 10
+const Initialization = 0
+
+const MarkedBlockSize = 16 * 1024
+const MarkedBlockMask = ~(MarkedBlockSize - 1)
# Allocation constants
if JSVALUE64
@@ -161,9 +382,7 @@ end
# This must match wtf/Vector.h
const VectorBufferOffset = 0
-if WIN64
- const VectorSizeOffset = 16
-elsif JSVALUE64
+if JSVALUE64
const VectorSizeOffset = 12
else
const VectorSizeOffset = 8
@@ -174,9 +393,7 @@ macro crash()
if C_LOOP
cloopCrash
else
- storei t0, 0xbbadbeef[]
- move 0, t0
- call t0
+ call _llint_crash
end
end
@@ -188,29 +405,306 @@ macro assert(assertion)
end
end
+macro checkStackPointerAlignment(tempReg, location)
+ if ARM64 or C_LOOP or SH4
+ # ARM64 will check for us!
+ # C_LOOP does not need the alignment, and can use a little perf
+ # improvement from avoiding useless work.
+ # SH4 does not need specific alignment (4 bytes).
+ else
+ if ARM or ARMv7 or ARMv7_TRADITIONAL
+ # ARM can't do logical ops with the sp as a source
+ move sp, tempReg
+ andp StackAlignmentMask, tempReg
+ else
+ andp sp, StackAlignmentMask, tempReg
+ end
+ btpz tempReg, .stackPointerOkay
+ move location, tempReg
+ break
+ .stackPointerOkay:
+ end
+end
+
+if C_LOOP or ARM64 or X86_64 or X86_64_WIN
+ const CalleeSaveRegisterCount = 0
+elsif ARM or ARMv7_TRADITIONAL or ARMv7
+ const CalleeSaveRegisterCount = 7
+elsif SH4
+ const CalleeSaveRegisterCount = 5
+elsif MIPS
+ const CalleeSaveRegisterCount = 1
+elsif X86 or X86_WIN
+ const CalleeSaveRegisterCount = 3
+end
+
+const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize
+
+# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the
+# callee save registers rounded up to keep the stack aligned
+const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask
+
+macro pushCalleeSaves()
+ if C_LOOP or ARM64 or X86_64 or X86_64_WIN
+ elsif ARM or ARMv7_TRADITIONAL
+ emit "push {r4-r10}"
+ elsif ARMv7
+ emit "push {r4-r6, r8-r11}"
+ elsif MIPS
+ emit "addiu $sp, $sp, -4"
+ emit "sw $s4, 0($sp)"
+ # save $gp to $s4 so that we can restore it after a function call
+ emit "move $s4, $gp"
+ elsif SH4
+ emit "mov.l r13, @-r15"
+ emit "mov.l r11, @-r15"
+ emit "mov.l r10, @-r15"
+ emit "mov.l r9, @-r15"
+ emit "mov.l r8, @-r15"
+ elsif X86
+ emit "push %esi"
+ emit "push %edi"
+ emit "push %ebx"
+ elsif X86_WIN
+ emit "push esi"
+ emit "push edi"
+ emit "push ebx"
+ end
+end
+
+macro popCalleeSaves()
+ if C_LOOP or ARM64 or X86_64 or X86_64_WIN
+ elsif ARM or ARMv7_TRADITIONAL
+ emit "pop {r4-r10}"
+ elsif ARMv7
+ emit "pop {r4-r6, r8-r11}"
+ elsif MIPS
+ emit "lw $s4, 0($sp)"
+ emit "addiu $sp, $sp, 4"
+ elsif SH4
+ emit "mov.l @r15+, r8"
+ emit "mov.l @r15+, r9"
+ emit "mov.l @r15+, r10"
+ emit "mov.l @r15+, r11"
+ emit "mov.l @r15+, r13"
+ elsif X86
+ emit "pop %ebx"
+ emit "pop %edi"
+ emit "pop %esi"
+ elsif X86_WIN
+ emit "pop ebx"
+ emit "pop edi"
+ emit "pop esi"
+ end
+end
+
+macro preserveCallerPCAndCFR()
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ push lr
+ push cfr
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ push cfr
+ elsif ARM64
+ push cfr, lr
+ else
+ error
+ end
+ move sp, cfr
+end
+
+macro restoreCallerPCAndCFR()
+ move cfr, sp
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ pop cfr
+ pop lr
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
+ pop cfr
+ elsif ARM64
+ pop lr, cfr
+ end
+end
+
+macro preserveCalleeSavesUsedByLLInt()
+ subp CalleeSaveSpaceStackAligned, sp
+ if C_LOOP
+ elsif ARM or ARMv7_TRADITIONAL
+ elsif ARMv7
+ elsif ARM64
+ emit "stp x27, x28, [x29, #-16]"
+ emit "stp xzr, x26, [x29, #-32]"
+ elsif MIPS
+ elsif SH4
+ elsif X86
+ elsif X86_WIN
+ elsif X86_64
+ storep csr4, -8[cfr]
+ storep csr3, -16[cfr]
+ storep csr2, -24[cfr]
+ elsif X86_64_WIN
+ storep csr6, -8[cfr]
+ storep csr5, -16[cfr]
+ storep csr4, -24[cfr]
+ end
+end
+
+macro restoreCalleeSavesUsedByLLInt()
+ if C_LOOP
+ elsif ARM or ARMv7_TRADITIONAL
+ elsif ARMv7
+ elsif ARM64
+ emit "ldp xzr, x26, [x29, #-32]"
+ emit "ldp x27, x28, [x29, #-16]"
+ elsif MIPS
+ elsif SH4
+ elsif X86
+ elsif X86_WIN
+ elsif X86_64
+ loadp -24[cfr], csr2
+ loadp -16[cfr], csr3
+ loadp -8[cfr], csr4
+ elsif X86_64_WIN
+ loadp -24[cfr], csr4
+ loadp -16[cfr], csr5
+ loadp -8[cfr], csr6
+ end
+end
+
+macro copyCalleeSavesToVMCalleeSavesBuffer(vm, temp)
+ if ARM64 or X86_64 or X86_64_WIN
+ leap VM::calleeSaveRegistersBuffer[vm], temp
+ if ARM64
+ storep csr0, [temp]
+ storep csr1, 8[temp]
+ storep csr2, 16[temp]
+ storep csr3, 24[temp]
+ storep csr4, 32[temp]
+ storep csr5, 40[temp]
+ storep csr6, 48[temp]
+ storep csr7, 56[temp]
+ storep csr8, 64[temp]
+ storep csr9, 72[temp]
+ stored csfr0, 80[temp]
+ stored csfr1, 88[temp]
+ stored csfr2, 96[temp]
+ stored csfr3, 104[temp]
+ stored csfr4, 112[temp]
+ stored csfr5, 120[temp]
+ stored csfr6, 128[temp]
+ stored csfr7, 136[temp]
+ elsif X86_64
+ storep csr0, [temp]
+ storep csr1, 8[temp]
+ storep csr2, 16[temp]
+ storep csr3, 24[temp]
+ storep csr4, 32[temp]
+ elsif X86_64_WIN
+ storep csr0, [temp]
+ storep csr1, 8[temp]
+ storep csr2, 16[temp]
+ storep csr3, 24[temp]
+ storep csr4, 32[temp]
+ storep csr5, 40[temp]
+ storep csr6, 48[temp]
+ end
+ end
+end
+
+macro restoreCalleeSavesFromVMCalleeSavesBuffer(vm, temp)
+ if ARM64 or X86_64 or X86_64_WIN
+ leap VM::calleeSaveRegistersBuffer[vm], temp
+ if ARM64
+ loadp [temp], csr0
+ loadp 8[temp], csr1
+ loadp 16[temp], csr2
+ loadp 24[temp], csr3
+ loadp 32[temp], csr4
+ loadp 40[temp], csr5
+ loadp 48[temp], csr6
+ loadp 56[temp], csr7
+ loadp 64[temp], csr8
+ loadp 72[temp], csr9
+ loadd 80[temp], csfr0
+ loadd 88[temp], csfr1
+ loadd 96[temp], csfr2
+ loadd 104[temp], csfr3
+ loadd 112[temp], csfr4
+ loadd 120[temp], csfr5
+ loadd 128[temp], csfr6
+ loadd 136[temp], csfr7
+ elsif X86_64
+ loadp [temp], csr0
+ loadp 8[temp], csr1
+ loadp 16[temp], csr2
+ loadp 24[temp], csr3
+ loadp 32[temp], csr4
+ elsif X86_64_WIN
+ loadp [temp], csr0
+ loadp 8[temp], csr1
+ loadp 16[temp], csr2
+ loadp 24[temp], csr3
+ loadp 32[temp], csr4
+ loadp 40[temp], csr5
+ loadp 48[temp], csr6
+ end
+ end
+end
+
macro preserveReturnAddressAfterCall(destinationRegister)
- if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
+ if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4
# In C_LOOP case, we're only preserving the bytecode vPC.
move lr, destinationRegister
- elsif SH4
- stspr destinationRegister
- elsif X86 or X86_64
+ elsif X86 or X86_WIN or X86_64 or X86_64_WIN
pop destinationRegister
else
error
end
end
-macro restoreReturnAddressBeforeReturn(sourceRegister)
- if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS
- # In C_LOOP case, we're only restoring the bytecode vPC.
- move sourceRegister, lr
- elsif SH4
- ldspr sourceRegister
- elsif X86 or X86_64
- push sourceRegister
+macro copyBarrier(value, slow)
+ btpnz value, CopyBarrierSpaceBits, slow
+end
+
+macro functionPrologue()
+ if X86 or X86_WIN or X86_64 or X86_64_WIN
+ push cfr
+ elsif ARM64
+ push cfr, lr
+ elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ push lr
+ push cfr
+ end
+ move sp, cfr
+end
+
+macro functionEpilogue()
+ if X86 or X86_WIN or X86_64 or X86_64_WIN
+ pop cfr
+ elsif ARM64
+ pop lr, cfr
+ elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ pop cfr
+ pop lr
+ end
+end
+
+macro vmEntryRecord(entryFramePointer, resultReg)
+ subp entryFramePointer, VMEntryTotalFrameSize, resultReg
+end
+
+macro getFrameRegisterSizeForCodeBlock(codeBlock, size)
+ loadi CodeBlock::m_numCalleeLocals[codeBlock], size
+ lshiftp 3, size
+ addp maxFrameExtentForSlowPathCall, size
+end
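+# (Informative example: a code block with 64 callee locals needs 64 * 8 = 512
+# bytes on X86_64, where maxFrameExtentForSlowPathCall is 0, and 552 bytes on
+# MIPS, where it is 40.)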
+
+macro restoreStackPointerAfterCall()
+ loadp CodeBlock[cfr], t2
+ getFrameRegisterSizeForCodeBlock(t2, t2)
+ if ARMv7
+ subp cfr, t2, t2
+ move t2, sp
else
- error
+ subp cfr, t2, sp
end
end
@@ -220,45 +714,104 @@ macro traceExecution()
end
end
-macro callTargetFunction(callLinkInfo)
+macro callTargetFunction(callee)
if C_LOOP
- cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
+ cloopCallJSFunction callee
else
- call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo]
- dispatchAfterCall()
+ call callee
end
+ restoreStackPointerAfterCall()
+ dispatchAfterCall()
+end
+
+macro prepareForRegularCall(callee, temp1, temp2, temp3)
+ addp CallerFrameAndPCSize, sp
end
-macro slowPathForCall(advance, slowPath)
+# sp points to the new frame
+macro prepareForTailCall(callee, temp1, temp2, temp3)
+ restoreCalleeSavesUsedByLLInt()
+
+ loadi PayloadOffset + ArgumentCount[cfr], temp2
+ loadp CodeBlock[cfr], temp1
+ loadp CodeBlock::m_numParameters[temp1], temp1
+ bilteq temp1, temp2, .noArityFixup
+ move temp1, temp2
+
+.noArityFixup:
+ # We assume < 2^28 arguments
+ muli SlotSize, temp2
+ addi StackAlignment - 1 + CallFrameHeaderSize, temp2
+ andi ~StackAlignmentMask, temp2
+
+ move cfr, temp1
+ addp temp2, temp1
+
+ loadi PayloadOffset + ArgumentCount[sp], temp2
+ # We assume < 2^28 arguments
+ muli SlotSize, temp2
+ addi StackAlignment - 1 + CallFrameHeaderSize, temp2
+ andi ~StackAlignmentMask, temp2
+
+ if ARM or ARMv7_TRADITIONAL or ARMv7 or SH4 or ARM64 or C_LOOP or MIPS
+ addp 2 * PtrSize, sp
+ subi 2 * PtrSize, temp2
+ loadp PtrSize[cfr], lr
+ else
+ addp PtrSize, sp
+ subi PtrSize, temp2
+ loadp PtrSize[cfr], temp3
+ storep temp3, [sp]
+ end
+
+ subp temp2, temp1
+ loadp [cfr], cfr
+
+.copyLoop:
+ subi PtrSize, temp2
+ loadp [sp, temp2, 1], temp3
+ storep temp3, [temp1, temp2, 1]
+ btinz temp2, .copyLoop
+
+ move temp1, sp
+ jmp callee
+end
+
+macro slowPathForCall(slowPath, prepareCall)
callCallSlowPath(
- advance,
slowPath,
- macro (callee)
- if C_LOOP
- cloopCallJSFunction callee
- else
- call callee
- dispatchAfterCall()
- end
+ # Those are r0 and r1
+ macro (callee, calleeFramePtr)
+ btpz calleeFramePtr, .dontUpdateSP
+ move calleeFramePtr, sp
+ prepareCall(callee, t2, t3, t4)
+ .dontUpdateSP:
+ callTargetFunction(callee)
end)
end
-macro arrayProfile(structureAndIndexingType, profile, scratch)
- const structure = structureAndIndexingType
- const indexingType = structureAndIndexingType
- if VALUE_PROFILER
- storep structure, ArrayProfile::m_lastSeenStructure[profile]
- end
- loadb Structure::m_indexingType[structure], indexingType
+macro arrayProfile(cellAndIndexingType, profile, scratch)
+ const cell = cellAndIndexingType
+ const indexingType = cellAndIndexingType
+ loadi JSCell::m_structureID[cell], scratch
+ storei scratch, ArrayProfile::m_lastSeenStructureID[profile]
+ loadb JSCell::m_indexingType[cell], indexingType
+end
+
+macro skipIfIsRememberedOrInEden(cell, scratch1, scratch2, continuation)
+ loadb JSCell::m_cellState[cell], scratch1
+ continuation(scratch1)
+end
+
+macro notifyWrite(set, slow)
+ bbneq WatchpointSet::m_state[set], IsInvalidated, slow
end
macro checkSwitchToJIT(increment, action)
- if JIT_ENABLED
- loadp CodeBlock[cfr], t0
- baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue
- action()
+ loadp CodeBlock[cfr], t0
+ baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue
+ action()
.continue:
- end
end
macro checkSwitchToJITForEpilogue()
@@ -274,13 +827,21 @@ macro assertNotConstant(index)
end
macro functionForCallCodeBlockGetter(targetRegister)
- loadp Callee + JSCellPayloadOffset[cfr], targetRegister
+ if JSVALUE64
+ loadp Callee[cfr], targetRegister
+ else
+ loadp Callee + PayloadOffset[cfr], targetRegister
+ end
loadp JSFunction::m_executable[targetRegister], targetRegister
loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister
end
macro functionForConstructCodeBlockGetter(targetRegister)
- loadp Callee + JSCellPayloadOffset[cfr], targetRegister
+ if JSVALUE64
+ loadp Callee[cfr], targetRegister
+ else
+ loadp Callee + PayloadOffset[cfr], targetRegister
+ end
loadp JSFunction::m_executable[targetRegister], targetRegister
loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister
end
@@ -300,28 +861,51 @@ end
# Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock*
# in t1. May also trigger prologue entry OSR.
macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
- preserveReturnAddressAfterCall(t2)
-
# Set up the call frame and check if we should OSR.
- storep t2, ReturnPC[cfr]
+ preserveCallerPCAndCFR()
+
if EXECUTION_TRACING
+ subp maxFrameExtentForSlowPathCall, sp
callSlowPath(traceSlowPath)
+ addp maxFrameExtentForSlowPathCall, sp
end
codeBlockGetter(t1)
- if JIT_ENABLED
- baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue
- cCall2(osrSlowPath, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- loadp ReturnPC[cfr], t2
- restoreReturnAddressBeforeReturn(t2)
- jmp t0
+ if not C_LOOP
+ baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue
+ if JSVALUE64
+ move cfr, a0
+ move PC, a1
+ cCall2(osrSlowPath)
+ else
+ # We are after the function prologue, but before we have set up sp from the CodeBlock.
+ # Temporarily align stack pointer for this call.
+ subp 8, sp
+ move cfr, a0
+ move PC, a1
+ cCall2(osrSlowPath)
+ addp 8, sp
+ end
+ btpz r0, .recover
+ move cfr, sp # restore the previous sp
+ # pop the callerFrame since we will jump to a function that wants to save it
+ if ARM64
+ pop lr, cfr
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
+ pop cfr
+ pop lr
+ else
+ pop cfr
+ end
+ jmp r0
.recover:
codeBlockGetter(t1)
.continue:
end
+
codeBlockSetter(t1)
-
+
+ preserveCalleeSavesUsedByLLInt()
+
# Set up the PC.
if JSVALUE64
loadp CodeBlock::m_instructions[t1], PB
@@ -329,97 +913,250 @@ macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath)
else
loadp CodeBlock::m_instructions[t1], PC
end
-end
-# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-# Must call dispatch(0) after calling this.
-macro functionInitialization(profileArgSkip)
- if VALUE_PROFILER
- # Profile the arguments. Unfortunately, we have no choice but to do this. This
- # code is pretty horrendous because of the difference in ordering between
- # arguments and value profiles, the desire to have a simple loop-down-to-zero
- # loop, and the desire to use only three registers so as to preserve the PC and
- # the code block. It is likely that this code should be rewritten in a more
- # optimal way for architectures that have more than five registers available
- # for arbitrary use in the interpreter.
- loadi CodeBlock::m_numParameters[t1], t0
- addp -profileArgSkip, t0 # Use addi because that's what has the peephole
- assert(macro (ok) bpgteq t0, 0, ok end)
- btpz t0, .argumentProfileDone
- loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
- mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
- negp t0
- lshiftp 3, t0
- addp t2, t3
- .argumentProfileLoop:
- if JSVALUE64
- loadq ThisArgumentOffset + 8 - profileArgSkip * 8[cfr, t0], t2
- subp sizeof ValueProfile, t3
- storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
- else
- loadi ThisArgumentOffset + TagOffset + 8 - profileArgSkip * 8[cfr, t0], t2
- subp sizeof ValueProfile, t3
- storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
- loadi ThisArgumentOffset + PayloadOffset + 8 - profileArgSkip * 8[cfr, t0], t2
- storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
- end
- baddpnz 8, t0, .argumentProfileLoop
- .argumentProfileDone:
- end
-
- # Check stack height.
- loadi CodeBlock::m_numCalleeRegisters[t1], t0
+ # Get new sp in t0 and check stack height.
+ getFrameRegisterSizeForCodeBlock(t1, t0)
+ subp cfr, t0, t0
loadp CodeBlock::m_vm[t1], t2
- loadp VM::interpreter[t2], t2 # FIXME: Can get to the JSStack from the JITStackFrame
- lshifti 3, t0
- addp t0, cfr, t0
- bpaeq Interpreter::m_stack + JSStack::m_end[t2], t0, .stackHeightOK
+ bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK
# Stack height check failed - need to call a slow_path.
+ # Set up temporary stack pointer for call including callee saves
+ subp maxFrameExtentForSlowPathCall, sp
callSlowPath(_llint_stack_check)
+ bpeq r1, 0, .stackHeightOKGetCodeBlock
+ move r1, cfr
+ dispatch(0) # Go to exception handler in PC
+
+.stackHeightOKGetCodeBlock:
+ # Stack check slow path returned that the stack was ok.
+    # Since the CodeBlock register and the new sp were clobbered by the call, reload them.
+ codeBlockGetter(t1)
+ getFrameRegisterSizeForCodeBlock(t1, t0)
+ subp cfr, t0, t0
+
.stackHeightOK:
+ move t0, sp
+
+ if JSVALUE64
+ move TagTypeNumber, tagTypeNumber
+ addp TagBitTypeOther, tagTypeNumber, tagMask
+ end
end
-macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
- if ALWAYS_ALLOCATE_SLOW
- jmp slowCase
+# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
+# Must call dispatch(0) after calling this.
+macro functionInitialization(profileArgSkip)
+ # Profile the arguments. Unfortunately, we have no choice but to do this. This
+ # code is pretty horrendous because of the difference in ordering between
+ # arguments and value profiles, the desire to have a simple loop-down-to-zero
+ # loop, and the desire to use only three registers so as to preserve the PC and
+ # the code block. It is likely that this code should be rewritten in a more
+ # optimal way for architectures that have more than five registers available
+ # for arbitrary use in the interpreter.
+ loadi CodeBlock::m_numParameters[t1], t0
+ addp -profileArgSkip, t0 # Use addi because that's what has the peephole
+ assert(macro (ok) bpgteq t0, 0, ok end)
+ btpz t0, .argumentProfileDone
+ loadp CodeBlock::m_argumentValueProfiles + VectorBufferOffset[t1], t3
+ mulp sizeof ValueProfile, t0, t2 # Aaaaahhhh! Need strength reduction!
+ lshiftp 3, t0
+ addp t2, t3
+.argumentProfileLoop:
+ if JSVALUE64
+ loadq ThisArgumentOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+ subp sizeof ValueProfile, t3
+ storeq t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets[t3]
else
- const offsetOfFirstFreeCell =
- MarkedAllocator::m_freeList +
- MarkedBlock::FreeList::head
-
- # Get the object from the free list.
- loadp offsetOfFirstFreeCell[allocator], result
- btpz result, slowCase
-
- # Remove the object from the free list.
- loadp [result], scratch1
- storep scratch1, offsetOfFirstFreeCell[allocator]
-
- # Initialize the object.
- storep structure, JSCell::m_structure[result]
- storep 0, JSObject::m_butterfly[result]
+ loadi ThisArgumentOffset + TagOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+ subp sizeof ValueProfile, t3
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + TagOffset[t3]
+ loadi ThisArgumentOffset + PayloadOffset - 8 + profileArgSkip * 8[cfr, t0], t2
+ storei t2, profileArgSkip * sizeof ValueProfile + ValueProfile::m_buckets + PayloadOffset[t3]
end
+ baddpnz -8, t0, .argumentProfileLoop
+.argumentProfileDone:
+end
+
+macro allocateJSObject(allocator, structure, result, scratch1, slowCase)
+ const offsetOfFirstFreeCell =
+ MarkedAllocator::m_freeList +
+ MarkedBlock::FreeList::head
+
+ # Get the object from the free list.
+ loadp offsetOfFirstFreeCell[allocator], result
+ btpz result, slowCase
+
+ # Remove the object from the free list.
+ loadp [result], scratch1
+ storep scratch1, offsetOfFirstFreeCell[allocator]
+
+ # Initialize the object.
+ storep 0, JSObject::m_butterfly[result]
+ storeStructureWithTypeInfo(result, structure, scratch1)
end
macro doReturn()
- loadp ReturnPC[cfr], t2
- loadp CallerFrame[cfr], cfr
- restoreReturnAddressBeforeReturn(t2)
+ restoreCalleeSavesUsedByLLInt()
+ restoreCallerPCAndCFR()
ret
end
+# stub to call into JavaScript or Native functions
+# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame)
# EncodedJSValue vmEntryToNative(void* code, VM* vm, ProtoCallFrame* protoFrame)
-# Indicate the beginning of LLInt.
-_llint_begin:
- crash()
+if C_LOOP
+ _llint_vm_entry_to_javascript:
+else
+ global _vmEntryToJavaScript
+ _vmEntryToJavaScript:
+end
+ doVMEntry(makeJavaScriptCall)
+
+
+if C_LOOP
+ _llint_vm_entry_to_native:
+else
+ global _vmEntryToNative
+ _vmEntryToNative:
+end
+ doVMEntry(makeHostFunctionCall)
+
+
+if not C_LOOP
+ # void sanitizeStackForVMImpl(VM* vm)
+ global _sanitizeStackForVMImpl
+ _sanitizeStackForVMImpl:
+ # We need three non-aliased caller-save registers. We are guaranteed
+ # this for a0, a1 and a2 on all architectures.
+ if X86 or X86_WIN
+ loadp 4[sp], a0
+ end
+ const vm = a0
+ const address = a1
+ const zeroValue = a2
+
+ loadp VM::m_lastStackTop[vm], address
+ bpbeq sp, address, .zeroFillDone
+
+ move 0, zeroValue
+ .zeroFillLoop:
+ storep zeroValue, [address]
+ addp PtrSize, address
+ bpa sp, address, .zeroFillLoop
+
+ .zeroFillDone:
+ move sp, address
+ storep address, VM::m_lastStackTop[vm]
+ ret
+
+ # VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame)
+ global _vmEntryRecord
+ _vmEntryRecord:
+ if X86 or X86_WIN
+ loadp 4[sp], a0
+ end
+
+ vmEntryRecord(a0, r0)
+ ret
+end
+
+if C_LOOP
+ # Dummy entry point the C Loop uses to initialize.
+ _llint_entry:
+ crash()
+else
+ macro initPCRelative(pcBase)
+ if X86_64 or X86_64_WIN or X86 or X86_WIN
+ call _relativePCBase
+ _relativePCBase:
+ pop pcBase
+ elsif ARM64
+ elsif ARMv7
+ _relativePCBase:
+ move pc, pcBase
+ subp 3, pcBase # Need to back up the PC and set the Thumb2 bit
+ elsif ARM or ARMv7_TRADITIONAL
+ _relativePCBase:
+ move pc, pcBase
+ subp 8, pcBase
+ elsif MIPS
+ la _relativePCBase, pcBase
+ setcallreg pcBase # needed to set $t9 to the right value for the .cpload created by the label.
+ _relativePCBase:
+ elsif SH4
+ mova _relativePCBase, t0
+ move t0, pcBase
+ alignformova
+ _relativePCBase:
+ end
+end
+# The PC base is in t1, as this is what _llint_entry leaves behind through
+# initPCRelative(t1)
+macro setEntryAddress(index, label)
+ if X86_64 or X86_64_WIN
+ leap (label - _relativePCBase)[t1], t3
+ move index, t4
+ storep t3, [a0, t4, 8]
+ elsif X86 or X86_WIN
+ leap (label - _relativePCBase)[t1], t3
+ move index, t4
+ storep t3, [a0, t4, 4]
+ elsif ARM64
+ pcrtoaddr label, t1
+ move index, t4
+ storep t1, [a0, t4, 8]
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+ mvlbl (label - _relativePCBase), t4
+ addp t4, t1, t4
+ move index, t3
+ storep t4, [a0, t3, 4]
+ elsif SH4
+ move (label - _relativePCBase), t4
+ addp t4, t1, t4
+ move index, t3
+ storep t4, [a0, t3, 4]
+ flushcp # Force constant pool flush to avoid "pcrel too far" link error.
+ elsif MIPS
+ la label, t4
+ la _relativePCBase, t3
+ subp t3, t4
+ addp t4, t1, t4
+ move index, t3
+ storep t4, [a0, t3, 4]
+ end
+end
+
+global _llint_entry
+# Entry point for the llint to initialize.
+_llint_entry:
+ functionPrologue()
+ pushCalleeSaves()
+ if X86 or X86_WIN
+ loadp 20[sp], a0
+ end
+ initPCRelative(t1)
+
+ # Include generated bytecode initialization file.
+ include InitBytecodes
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+end
_llint_program_prologue:
prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
dispatch(0)
+_llint_module_program_prologue:
+ prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
+ dispatch(0)
+
+
_llint_eval_prologue:
prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue)
dispatch(0)
@@ -427,26 +1164,30 @@ _llint_eval_prologue:
_llint_function_for_call_prologue:
prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call)
-.functionForCallBegin:
functionInitialization(0)
dispatch(0)
_llint_function_for_construct_prologue:
prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct)
-.functionForConstructBegin:
functionInitialization(1)
dispatch(0)
_llint_function_for_call_arity_check:
prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call)
- functionArityCheck(.functionForCallBegin, _llint_slow_path_call_arityCheck)
+ functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck)
+.functionForCallBegin:
+ functionInitialization(0)
+ dispatch(0)
_llint_function_for_construct_arity_check:
prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct)
- functionArityCheck(.functionForConstructBegin, _llint_slow_path_construct_arityCheck)
+ functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck)
+.functionForConstructBegin:
+ functionInitialization(1)
+ dispatch(0)
# Value-representation-specific code.
@@ -458,6 +1199,36 @@ end
# Value-representation-agnostic code.
+_llint_op_create_direct_arguments:
+ traceExecution()
+ callSlowPath(_slow_path_create_direct_arguments)
+ dispatch(2)
+
+
+_llint_op_create_scoped_arguments:
+ traceExecution()
+ callSlowPath(_slow_path_create_scoped_arguments)
+ dispatch(3)
+
+
+_llint_op_create_out_of_band_arguments:
+ traceExecution()
+ callSlowPath(_slow_path_create_out_of_band_arguments)
+ dispatch(2)
+
+
+_llint_op_new_func:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_func)
+ dispatch(4)
+
+
+_llint_op_new_generator_func:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_generator_func)
+ dispatch(4)
+
+
_llint_op_new_array:
traceExecution()
callSlowPath(_llint_slow_path_new_array)
@@ -484,498 +1255,102 @@ _llint_op_new_regexp:
_llint_op_less:
traceExecution()
- callSlowPath(_llint_slow_path_less)
+ callSlowPath(_slow_path_less)
dispatch(4)
_llint_op_lesseq:
traceExecution()
- callSlowPath(_llint_slow_path_lesseq)
+ callSlowPath(_slow_path_lesseq)
dispatch(4)
_llint_op_greater:
traceExecution()
- callSlowPath(_llint_slow_path_greater)
+ callSlowPath(_slow_path_greater)
dispatch(4)
_llint_op_greatereq:
traceExecution()
- callSlowPath(_llint_slow_path_greatereq)
+ callSlowPath(_slow_path_greatereq)
dispatch(4)
_llint_op_mod:
traceExecution()
- callSlowPath(_llint_slow_path_mod)
+ callSlowPath(_slow_path_mod)
dispatch(4)
_llint_op_typeof:
traceExecution()
- callSlowPath(_llint_slow_path_typeof)
+ callSlowPath(_slow_path_typeof)
dispatch(3)
-_llint_op_is_object:
+_llint_op_is_object_or_null:
traceExecution()
- callSlowPath(_llint_slow_path_is_object)
+ callSlowPath(_slow_path_is_object_or_null)
dispatch(3)
-
_llint_op_is_function:
traceExecution()
- callSlowPath(_llint_slow_path_is_function)
+ callSlowPath(_slow_path_is_function)
dispatch(3)
_llint_op_in:
traceExecution()
- callSlowPath(_llint_slow_path_in)
+ callSlowPath(_slow_path_in)
dispatch(4)
-macro getPutToBaseOperationField(scratch, scratch1, fieldOffset, fieldGetter)
- loadpFromInstruction(4, scratch)
- fieldGetter(fieldOffset[scratch])
-end
-
-macro moveJSValueFromRegisterWithoutProfiling(value, destBuffer, destOffsetReg)
- storeq value, [destBuffer, destOffsetReg, 8]
-end
-
-
-macro moveJSValueFromRegistersWithoutProfiling(tag, payload, destBuffer, destOffsetReg)
- storei tag, TagOffset[destBuffer, destOffsetReg, 8]
- storei payload, PayloadOffset[destBuffer, destOffsetReg, 8]
-end
-
-macro putToBaseVariableBody(variableOffset, scratch1, scratch2, scratch3)
- loadisFromInstruction(1, scratch1)
- loadp PayloadOffset[cfr, scratch1, 8], scratch1
- loadp JSVariableObject::m_registers[scratch1], scratch1
- loadisFromInstruction(3, scratch2)
- if JSVALUE64
- loadConstantOrVariable(scratch2, scratch3)
- moveJSValueFromRegisterWithoutProfiling(scratch3, scratch1, variableOffset)
- else
- loadConstantOrVariable2Reg(scratch2, scratch3, scratch2) # scratch3=tag, scratch2=payload
- moveJSValueFromRegistersWithoutProfiling(scratch3, scratch2, scratch1, variableOffset)
- end
-end
-
-_llint_op_put_to_base_variable:
- traceExecution()
- getPutToBaseOperationField(t0, t1, PutToBaseOperation::m_offset, macro(addr)
- loadis addr, t0
- end)
- putToBaseVariableBody(t0, t1, t2, t3)
- dispatch(5)
-
-_llint_op_put_to_base:
- traceExecution()
- getPutToBaseOperationField(t0, t1, 0, macro(addr)
- leap addr, t0
- bbneq PutToBaseOperation::m_kindAsUint8[t0], PutToBaseOperationKindVariablePut, .notPutToBaseVariable
- loadis PutToBaseOperation::m_offset[t0], t0
- putToBaseVariableBody(t0, t1, t2, t3)
- dispatch(5)
- .notPutToBaseVariable:
- end)
- callSlowPath(_llint_slow_path_put_to_base)
- dispatch(5)
-
-macro getResolveOperation(resolveOperationIndex, dest)
- loadpFromInstruction(resolveOperationIndex, dest)
- loadp VectorBufferOffset[dest], dest
-end
-
-macro getScope(loadInitialScope, scopeCount, dest, scratch)
- loadInitialScope(dest)
- loadi scopeCount, scratch
- btiz scratch, .done
-.loop:
- loadp JSScope::m_next[dest], dest
- subi 1, scratch
- btinz scratch, .loop
-
-.done:
-end
-
-macro moveJSValue(sourceBuffer, sourceOffsetReg, destBuffer, destOffsetReg, profileOffset, scratchRegister)
- if JSVALUE64
- loadq [sourceBuffer, sourceOffsetReg, 8], scratchRegister
- storeq scratchRegister, [destBuffer, destOffsetReg, 8]
- loadpFromInstruction(profileOffset, destOffsetReg)
- valueProfile(scratchRegister, destOffsetReg)
- else
- loadi PayloadOffset[sourceBuffer, sourceOffsetReg, 8], scratchRegister
- storei scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8]
- loadi TagOffset[sourceBuffer, sourceOffsetReg, 8], sourceOffsetReg
- storei sourceOffsetReg, TagOffset[destBuffer, destOffsetReg, 8]
- loadpFromInstruction(profileOffset, destOffsetReg)
- valueProfile(sourceOffsetReg, scratchRegister, destOffsetReg)
- end
-end
-
-macro moveJSValueFromSlot(slot, destBuffer, destOffsetReg, profileOffset, scratchRegister)
- if JSVALUE64
- loadq [slot], scratchRegister
- storeq scratchRegister, [destBuffer, destOffsetReg, 8]
- loadpFromInstruction(profileOffset, destOffsetReg)
- valueProfile(scratchRegister, destOffsetReg)
- else
- loadi PayloadOffset[slot], scratchRegister
- storei scratchRegister, PayloadOffset[destBuffer, destOffsetReg, 8]
- loadi TagOffset[slot], slot
- storei slot, TagOffset[destBuffer, destOffsetReg, 8]
- loadpFromInstruction(profileOffset, destOffsetReg)
- valueProfile(slot, scratchRegister, destOffsetReg)
- end
-end
-
-macro moveJSValueFromRegister(value, destBuffer, destOffsetReg, profileOffset)
- storeq value, [destBuffer, destOffsetReg, 8]
- loadpFromInstruction(profileOffset, destOffsetReg)
- valueProfile(value, destOffsetReg)
-end
-
-macro moveJSValueFromRegisters(tag, payload, destBuffer, destOffsetReg, profileOffset)
- storei tag, TagOffset[destBuffer, destOffsetReg, 8]
- storei payload, PayloadOffset[destBuffer, destOffsetReg, 8]
- loadpFromInstruction(profileOffset, destOffsetReg)
- valueProfile(tag, payload, destOffsetReg)
-end
-
-_llint_op_resolve_global_property:
- traceExecution()
- getResolveOperation(3, t0)
- loadp CodeBlock[cfr], t1
- loadp CodeBlock::m_globalObject[t1], t1
- loadp ResolveOperation::m_structure[t0], t2
- bpneq JSCell::m_structure[t1], t2, .llint_op_resolve_local
- loadis ResolveOperation::m_offset[t0], t0
- if JSVALUE64
- loadPropertyAtVariableOffsetKnownNotInline(t0, t1, t2)
- loadisFromInstruction(1, t0)
- moveJSValueFromRegister(t2, cfr, t0, 4)
- else
- loadPropertyAtVariableOffsetKnownNotInline(t0, t1, t2, t3)
- loadisFromInstruction(1, t0)
- moveJSValueFromRegisters(t2, t3, cfr, t0, 4)
- end
- dispatch(5)
-
-_llint_op_resolve_global_var:
- traceExecution()
- getResolveOperation(3, t0)
- loadp ResolveOperation::m_registerAddress[t0], t0
- loadisFromInstruction(1, t1)
- moveJSValueFromSlot(t0, cfr, t1, 4, t3)
- dispatch(5)
-
-macro resolveScopedVarBody(resolveOperations)
- # First ResolveOperation is to skip scope chain nodes
- getScope(macro(dest)
- loadp ScopeChain + JSCellPayloadOffset[cfr], dest
- end,
- ResolveOperation::m_scopesToSkip[resolveOperations], t1, t2)
- loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
-
- # Second ResolveOperation tells us what offset to use
- loadis ResolveOperation::m_offset + sizeof ResolveOperation[resolveOperations], t2
- loadisFromInstruction(1, t3)
- moveJSValue(t1, t2, cfr, t3, 4, t0)
-end
-
-_llint_op_resolve_scoped_var:
- traceExecution()
- getResolveOperation(3, t0)
- resolveScopedVarBody(t0)
- dispatch(5)
-
-_llint_op_resolve_scoped_var_on_top_scope:
+_llint_op_del_by_id:
traceExecution()
- getResolveOperation(3, t0)
-
- # Load destination index
- loadisFromInstruction(1, t3)
-
- # We know we want the top scope chain entry
- loadp ScopeChain + JSCellPayloadOffset[cfr], t1
- loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
-
- # Second ResolveOperation tells us what offset to use
- loadis ResolveOperation::m_offset + sizeof ResolveOperation[t0], t2
-
- moveJSValue(t1, t2, cfr, t3, 4, t0)
- dispatch(5)
+ callSlowPath(_llint_slow_path_del_by_id)
+ dispatch(4)
-_llint_op_resolve_scoped_var_with_top_scope_check:
- traceExecution()
- getResolveOperation(3, t0)
- # First ResolveOperation tells us what register to check
- loadis ResolveOperation::m_activationRegister[t0], t1
-
- loadp PayloadOffset[cfr, t1, 8], t1
-
- getScope(macro(dest)
- btpz t1, .scopeChainNotCreated
- loadp JSScope::m_next[t1], dest
- jmp .done
- .scopeChainNotCreated:
- loadp ScopeChain + JSCellPayloadOffset[cfr], dest
- .done:
- end,
- # Second ResolveOperation tells us how many more nodes to skip
- ResolveOperation::m_scopesToSkip + sizeof ResolveOperation[t0], t1, t2)
- loadp JSVariableObject::m_registers[t1], t1 # t1 now contains the activation registers
-
- # Third operation tells us what offset to use
- loadis ResolveOperation::m_offset + 2 * sizeof ResolveOperation[t0], t2
- loadisFromInstruction(1, t3)
- moveJSValue(t1, t2, cfr, t3, 4, t0)
- dispatch(5)
-_llint_op_resolve:
-.llint_op_resolve_local:
- traceExecution()
- getResolveOperation(3, t0)
- btpz t0, .noInstructions
- loadis ResolveOperation::m_operation[t0], t1
- bineq t1, ResolveOperationSkipScopes, .notSkipScopes
- resolveScopedVarBody(t0)
- dispatch(5)
-.notSkipScopes:
- bineq t1, ResolveOperationGetAndReturnGlobalVar, .notGetAndReturnGlobalVar
- loadp ResolveOperation::m_registerAddress[t0], t0
- loadisFromInstruction(1, t1)
- moveJSValueFromSlot(t0, cfr, t1, 4, t3)
- dispatch(5)
-.notGetAndReturnGlobalVar:
-
-.noInstructions:
- callSlowPath(_llint_slow_path_resolve)
- dispatch(5)
-
-_llint_op_resolve_base_to_global:
+_llint_op_del_by_val:
traceExecution()
- loadp CodeBlock[cfr], t1
- loadp CodeBlock::m_globalObject[t1], t1
- loadisFromInstruction(1, t3)
- if JSVALUE64
- moveJSValueFromRegister(t1, cfr, t3, 6)
- else
- move CellTag, t2
- moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
- end
- dispatch(7)
-
-_llint_op_resolve_base_to_global_dynamic:
- jmp _llint_op_resolve_base
+ callSlowPath(_llint_slow_path_del_by_val)
+ dispatch(4)
-_llint_op_resolve_base_to_scope:
- traceExecution()
- getResolveOperation(4, t0)
- # First ResolveOperation is to skip scope chain nodes
- getScope(macro(dest)
- loadp ScopeChain + JSCellPayloadOffset[cfr], dest
- end,
- ResolveOperation::m_scopesToSkip[t0], t1, t2)
- loadisFromInstruction(1, t3)
- if JSVALUE64
- moveJSValueFromRegister(t1, cfr, t3, 6)
- else
- move CellTag, t2
- moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
- end
- dispatch(7)
-_llint_op_resolve_base_to_scope_with_top_scope_check:
+_llint_op_put_by_index:
traceExecution()
- getResolveOperation(4, t0)
- # First ResolveOperation tells us what register to check
- loadis ResolveOperation::m_activationRegister[t0], t1
-
- loadp PayloadOffset[cfr, t1, 8], t1
-
- getScope(macro(dest)
- btpz t1, .scopeChainNotCreated
- loadp JSScope::m_next[t1], dest
- jmp .done
- .scopeChainNotCreated:
- loadp ScopeChain + JSCellPayloadOffset[cfr], dest
- .done:
- end,
- # Second ResolveOperation tells us how many more nodes to skip
- ResolveOperation::m_scopesToSkip + sizeof ResolveOperation[t0], t1, t2)
-
- loadisFromInstruction(1, t3)
- if JSVALUE64
- moveJSValueFromRegister(t1, cfr, t3, 6)
- else
- move CellTag, t2
- moveJSValueFromRegisters(t2, t1, cfr, t3, 6)
- end
- dispatch(7)
+ callSlowPath(_llint_slow_path_put_by_index)
+ dispatch(4)
-_llint_op_resolve_base:
- traceExecution()
- callSlowPath(_llint_slow_path_resolve_base)
- dispatch(7)
-macro interpretResolveWithBase(opcodeLength, slowPath)
+_llint_op_put_getter_by_id:
traceExecution()
- getResolveOperation(4, t0)
- btpz t0, .slowPath
-
- loadp ScopeChain + JSCellPayloadOffset[cfr], t3
- # Get the base
- loadis ResolveOperation::m_operation[t0], t2
-
- bineq t2, ResolveOperationSkipScopes, .notSkipScopes
- getScope(macro(dest) move t3, dest end,
- ResolveOperation::m_scopesToSkip[t0], t1, t2)
- move t1, t3
- addp sizeof ResolveOperation, t0, t0
- jmp .haveCorrectScope
-
- .notSkipScopes:
-
- bineq t2, ResolveOperationSkipTopScopeNode, .notSkipTopScopeNode
- loadis ResolveOperation::m_activationRegister[t0], t1
- loadp PayloadOffset[cfr, t1, 8], t1
-
- getScope(macro(dest)
- btpz t1, .scopeChainNotCreated
- loadp JSScope::m_next[t1], dest
- jmp .done
- .scopeChainNotCreated:
- loadp ScopeChain + JSCellPayloadOffset[cfr], dest
- .done:
- end,
- sizeof ResolveOperation + ResolveOperation::m_scopesToSkip[t0], t1, t2)
- move t1, t3
- # We've handled two opcodes here
- addp 2 * sizeof ResolveOperation, t0, t0
-
- .notSkipTopScopeNode:
-
- .haveCorrectScope:
-
- # t3 now contains the correct Scope
- # t0 contains a pointer to the current ResolveOperation
-
- loadis ResolveOperation::m_operation[t0], t2
- # t2 contains the next instruction
-
- loadisFromInstruction(1, t1)
- # t1 now contains the index for the base register
-
- bineq t2, ResolveOperationSetBaseToScope, .notSetBaseToScope
- if JSVALUE64
- storeq t3, [cfr, t1, 8]
- else
- storei t3, PayloadOffset[cfr, t1, 8]
- storei CellTag, TagOffset[cfr, t1, 8]
- end
- jmp .haveSetBase
-
- .notSetBaseToScope:
-
- bineq t2, ResolveOperationSetBaseToUndefined, .notSetBaseToUndefined
- if JSVALUE64
- storeq ValueUndefined, [cfr, t1, 8]
- else
- storei 0, PayloadOffset[cfr, t1, 8]
- storei UndefinedTag, TagOffset[cfr, t1, 8]
- end
- jmp .haveSetBase
-
- .notSetBaseToUndefined:
- bineq t2, ResolveOperationSetBaseToGlobal, .slowPath
- loadp JSCell::m_structure[t3], t2
- loadp Structure::m_globalObject[t2], t2
- if JSVALUE64
- storeq t2, [cfr, t1, 8]
- else
- storei t2, PayloadOffset[cfr, t1, 8]
- storei CellTag, TagOffset[cfr, t1, 8]
- end
-
- .haveSetBase:
-
- # Get the value
-
- # Load the operation into t2
- loadis ResolveOperation::m_operation + sizeof ResolveOperation[t0], t2
-
- # Load the index for the value register into t1
- loadisFromInstruction(2, t1)
-
- bineq t2, ResolveOperationGetAndReturnScopedVar, .notGetAndReturnScopedVar
- loadp JSVariableObject::m_registers[t3], t3 # t3 now contains the activation registers
-
- # Second ResolveOperation tells us what offset to use
- loadis ResolveOperation::m_offset + sizeof ResolveOperation[t0], t2
- moveJSValue(t3, t2, cfr, t1, opcodeLength - 1, t0)
- dispatch(opcodeLength)
-
- .notGetAndReturnScopedVar:
- bineq t2, ResolveOperationGetAndReturnGlobalProperty, .slowPath
- callSlowPath(slowPath)
- dispatch(opcodeLength)
-
-.slowPath:
- callSlowPath(slowPath)
- dispatch(opcodeLength)
-end
-
-_llint_op_resolve_with_base:
- interpretResolveWithBase(7, _llint_slow_path_resolve_with_base)
-
-
-_llint_op_resolve_with_this:
- interpretResolveWithBase(6, _llint_slow_path_resolve_with_this)
-
-
-macro withInlineStorage(object, propertyStorage, continuation)
- # Indicate that the object is the property storage, and that the
- # property storage register is unused.
- continuation(object, propertyStorage)
-end
-
-macro withOutOfLineStorage(object, propertyStorage, continuation)
- loadp JSObject::m_butterfly[object], propertyStorage
- # Indicate that the propertyStorage register now points to the
- # property storage, and that the object register may be reused
- # if the object pointer is not needed anymore.
- continuation(propertyStorage, object)
-end
+ callSlowPath(_llint_slow_path_put_getter_by_id)
+ dispatch(5)
-_llint_op_del_by_id:
+_llint_op_put_setter_by_id:
traceExecution()
- callSlowPath(_llint_slow_path_del_by_id)
- dispatch(4)
+ callSlowPath(_llint_slow_path_put_setter_by_id)
+ dispatch(5)
-_llint_op_del_by_val:
+_llint_op_put_getter_setter_by_id:
traceExecution()
- callSlowPath(_llint_slow_path_del_by_val)
- dispatch(4)
+ callSlowPath(_llint_slow_path_put_getter_setter_by_id)
+ dispatch(6)
-_llint_op_put_by_index:
+_llint_op_put_getter_by_val:
traceExecution()
- callSlowPath(_llint_slow_path_put_by_index)
- dispatch(4)
+ callSlowPath(_llint_slow_path_put_getter_by_val)
+ dispatch(5)
-_llint_op_put_getter_setter:
+_llint_op_put_setter_by_val:
traceExecution()
- callSlowPath(_llint_slow_path_put_getter_setter)
+ callSlowPath(_llint_slow_path_put_setter_by_val)
dispatch(5)
@@ -1059,18 +1434,27 @@ _llint_op_jngreatereq:
_llint_op_loop_hint:
traceExecution()
- loadp JITStackFrame::vm[sp], t1
- loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0
- btbnz t0, .handleWatchdogTimer
-.afterWatchdogTimerCheck:
checkSwitchToJITForLoop()
dispatch(1)
+
+
+_llint_op_watchdog:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ loadp VM::m_watchdog[t1], t0
+ btpnz t0, .handleWatchdogTimer
+.afterWatchdogTimerCheck:
+ dispatch(1)
.handleWatchdogTimer:
+ loadb Watchdog::m_timerDidFire[t0], t0
+ btbz t0, .afterWatchdogTimerCheck
callWatchdogTimerHandler(.throwHandler)
jmp .afterWatchdogTimerCheck
.throwHandler:
jmp _llint_throw_from_slow_path_trampoline
+
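
The new _llint_op_watchdog handler above only reaches the slow call when the VM actually owns a watchdog (VM::m_watchdog is non-null) and that watchdog's m_timerDidFire byte is set. A minimal C++ sketch of that two-level fast check, using simplified stand-in types rather than the real VM/Watchdog classes:

    #include <cstdint>

    // Simplified stand-ins; the real classes have many more members.
    struct Watchdog { uint8_t timerDidFire { 0 }; };   // ~ Watchdog::m_timerDidFire
    struct VM       { Watchdog* watchdog { nullptr }; }; // ~ VM::m_watchdog

    // Mirrors the handler's fast path: no watchdog, or a watchdog whose timer
    // has not fired, means we dispatch without calling the timer handler.
    inline bool watchdogNeedsSlowPath(const VM& vm)
    {
        return vm.watchdog && vm.watchdog->timerDidFire;
    }
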
_llint_op_switch_string:
traceExecution()
callSlowPath(_llint_slow_path_switch_string)
@@ -1080,23 +1464,65 @@ _llint_op_switch_string:
_llint_op_new_func_exp:
traceExecution()
callSlowPath(_llint_slow_path_new_func_exp)
- dispatch(3)
+ dispatch(4)
+_llint_op_new_generator_func_exp:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_generator_func_exp)
+ dispatch(4)
+
+_llint_op_new_arrow_func_exp:
+ traceExecution()
+ callSlowPath(_llint_slow_path_new_arrow_func_exp)
+ dispatch(4)
_llint_op_call:
traceExecution()
arrayProfileForCall()
- doCall(_llint_slow_path_call)
+ doCall(_llint_slow_path_call, prepareForRegularCall)
+_llint_op_tail_call:
+ traceExecution()
+ arrayProfileForCall()
+ checkSwitchToJITForEpilogue()
+ doCall(_llint_slow_path_call, prepareForTailCall)
_llint_op_construct:
traceExecution()
- doCall(_llint_slow_path_construct)
+ doCall(_llint_slow_path_construct, prepareForRegularCall)
+macro doCallVarargs(slowPath, prepareCall)
+ callSlowPath(_llint_slow_path_size_frame_for_varargs)
+ branchIfException(_llint_throw_from_slow_path_trampoline)
+ # calleeFrame in r1
+ if JSVALUE64
+ move r1, sp
+ else
+ # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align
+ if ARMv7
+ subp r1, CallerFrameAndPCSize, t2
+ move t2, sp
+ else
+ subp r1, CallerFrameAndPCSize, sp
+ end
+ end
+ slowPathForCall(slowPath, prepareCall)
+end
_llint_op_call_varargs:
traceExecution()
- slowPathForCall(6, _llint_slow_path_call_varargs)
+ doCallVarargs(_llint_slow_path_call_varargs, prepareForRegularCall)
+
+_llint_op_tail_call_varargs:
+ traceExecution()
+ checkSwitchToJITForEpilogue()
+ # We lie and perform the tail call instead of preparing it since we can't
+ # prepare the frame for a call opcode
+ doCallVarargs(_llint_slow_path_call_varargs, prepareForTailCall)
+
+_llint_op_construct_varargs:
+ traceExecution()
+ doCallVarargs(_llint_slow_path_construct_varargs, prepareForRegularCall)
_llint_op_call_eval:
@@ -1135,7 +1561,7 @@ _llint_op_call_eval:
# and a PC to call, and that PC may be a dummy thunk that just
# returns the JS value that the eval returned.
- slowPathForCall(4, _llint_slow_path_call_eval)
+ slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall)
_llint_generic_return_point:
@@ -1144,32 +1570,38 @@ _llint_generic_return_point:
_llint_op_strcat:
traceExecution()
- callSlowPath(_llint_slow_path_strcat)
+ callSlowPath(_slow_path_strcat)
dispatch(4)
-_llint_op_get_pnames:
+_llint_op_push_with_scope:
traceExecution()
- callSlowPath(_llint_slow_path_get_pnames)
- dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else.
+ callSlowPath(_slow_path_push_with_scope)
+ dispatch(4)
-_llint_op_push_with_scope:
+_llint_op_assert:
traceExecution()
- callSlowPath(_llint_slow_path_push_with_scope)
- dispatch(2)
+ callSlowPath(_slow_path_assert)
+ dispatch(3)
-_llint_op_pop_scope:
+_llint_op_save:
traceExecution()
- callSlowPath(_llint_slow_path_pop_scope)
- dispatch(1)
+ callSlowPath(_slow_path_save)
+ dispatch(4)
-_llint_op_push_name_scope:
+_llint_op_resume:
traceExecution()
- callSlowPath(_llint_slow_path_push_name_scope)
- dispatch(4)
+ callSlowPath(_slow_path_resume)
+ dispatch(3)
+
+
+_llint_op_create_lexical_environment:
+ traceExecution()
+ callSlowPath(_slow_path_create_lexical_environment)
+ dispatch(5)
_llint_op_throw:
@@ -1186,20 +1618,34 @@ _llint_op_throw_static_error:
_llint_op_profile_will_call:
traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_vm[t0], t0
+ loadi VM::m_enabledProfiler[t0], t0
+ btpz t0, .opProfilerWillCallDone
callSlowPath(_llint_slow_path_profile_will_call)
+.opProfilerWillCallDone:
dispatch(2)
_llint_op_profile_did_call:
traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_vm[t0], t0
+ loadi VM::m_enabledProfiler[t0], t0
+ btpz t0, .opProfilerDidCallDone
callSlowPath(_llint_slow_path_profile_did_call)
+.opProfilerDidCallDone:
dispatch(2)
_llint_op_debug:
traceExecution()
+ loadp CodeBlock[cfr], t0
+ loadi CodeBlock::m_debuggerRequests[t0], t0
+ btiz t0, .opDebugDone
callSlowPath(_llint_slow_path_debug)
- dispatch(5)
+.opDebugDone:
+ dispatch(3)
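
The profiling and debug hunks above follow the same pattern: load one word from the VM or CodeBlock and skip the slow-path call entirely when nobody is listening (VM::m_enabledProfiler is null, or CodeBlock::m_debuggerRequests is zero). A hedged C++ sketch of those guards, with hypothetical simplified types:

    #include <cstdint>

    struct Profiler;                                        // opaque stand-in
    struct VM        { Profiler* enabledProfiler { nullptr }; }; // ~ VM::m_enabledProfiler
    struct CodeBlock {
        VM* vm { nullptr };
        uint32_t debuggerRequests { 0 };                    // ~ CodeBlock::m_debuggerRequests
    };

    // Fast-path guards: only fall into the slow path when someone is listening.
    inline bool shouldNotifyProfiler(const CodeBlock& cb) { return cb.vm && cb.vm->enabledProfiler; }
    inline bool shouldNotifyDebugger(const CodeBlock& cb) { return cb.debuggerRequests != 0; }
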
_llint_native_call_trampoline:
@@ -1209,6 +1655,56 @@ _llint_native_call_trampoline:
_llint_native_construct_trampoline:
nativeCallTrampoline(NativeExecutable::m_constructor)
+_llint_op_get_enumerable_length:
+ traceExecution()
+ callSlowPath(_slow_path_get_enumerable_length)
+ dispatch(3)
+
+_llint_op_has_indexed_property:
+ traceExecution()
+ callSlowPath(_slow_path_has_indexed_property)
+ dispatch(5)
+
+_llint_op_has_structure_property:
+ traceExecution()
+ callSlowPath(_slow_path_has_structure_property)
+ dispatch(5)
+
+_llint_op_has_generic_property:
+ traceExecution()
+ callSlowPath(_slow_path_has_generic_property)
+ dispatch(4)
+
+_llint_op_get_direct_pname:
+ traceExecution()
+ callSlowPath(_slow_path_get_direct_pname)
+ dispatch(7)
+
+_llint_op_get_property_enumerator:
+ traceExecution()
+ callSlowPath(_slow_path_get_property_enumerator)
+ dispatch(3)
+
+_llint_op_enumerator_structure_pname:
+ traceExecution()
+ callSlowPath(_slow_path_next_structure_enumerator_pname)
+ dispatch(4)
+
+_llint_op_enumerator_generic_pname:
+ traceExecution()
+ callSlowPath(_slow_path_next_generic_enumerator_pname)
+ dispatch(4)
+
+_llint_op_to_index_string:
+ traceExecution()
+ callSlowPath(_slow_path_to_index_string)
+ dispatch(3)
+
+_llint_op_copy_rest:
+ traceExecution()
+ callSlowPath(_slow_path_copy_rest)
+ dispatch(4)
+
# Lastly, make sure that we can link even though we don't support all opcodes.
# These opcodes should never arise when using LLInt or either JIT. We assert
@@ -1227,53 +1723,3 @@ macro notSupported()
break
end
end
-
-_llint_op_get_by_id_chain:
- notSupported()
-
-_llint_op_get_by_id_custom_chain:
- notSupported()
-
-_llint_op_get_by_id_custom_proto:
- notSupported()
-
-_llint_op_get_by_id_custom_self:
- notSupported()
-
-_llint_op_get_by_id_generic:
- notSupported()
-
-_llint_op_get_by_id_getter_chain:
- notSupported()
-
-_llint_op_get_by_id_getter_proto:
- notSupported()
-
-_llint_op_get_by_id_getter_self:
- notSupported()
-
-_llint_op_get_by_id_proto:
- notSupported()
-
-_llint_op_get_by_id_self:
- notSupported()
-
-_llint_op_get_string_length:
- notSupported()
-
-_llint_op_put_by_id_generic:
- notSupported()
-
-_llint_op_put_by_id_replace:
- notSupported()
-
-_llint_op_put_by_id_transition:
- notSupported()
-
-_llint_op_init_global_const_nop:
- dispatch(5)
-
-# Indicate the end of LLInt.
-_llint_end:
- crash()
-
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
index c8f7254a4..72bcddf57 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -25,18 +25,15 @@
#include "config.h"
#include "LowLevelInterpreter.h"
-
-#if ENABLE(LLINT)
-
#include "LLIntOfflineAsmConfig.h"
#include <wtf/InlineASM.h>
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
#include "CodeBlock.h"
+#include "CommonSlowPaths.h"
#include "LLIntCLoop.h"
#include "LLIntSlowPaths.h"
-#include "Operations.h"
-#include "VMInspector.h"
+#include "JSCInlines.h"
#include <wtf/Assertions.h>
#include <wtf/MathExtras.h>
@@ -89,15 +86,30 @@ using namespace JSC::LLInt;
#define OFFLINE_ASM_BEGIN
#define OFFLINE_ASM_END
+#if ENABLE(OPCODE_TRACING)
+#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode)
+#else
+#define TRACE_OPCODE(opcode)
+#endif
+
+// To keep compilers happy in case of unused labels, force usage of the label:
+#define USE_LABEL(label) \
+ do { \
+ if (false) \
+ goto label; \
+ } while (false)
+
+#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode);
+
+#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label)
-#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode)
#if ENABLE(COMPUTED_GOTO_OPCODES)
- #define OFFLINE_ASM_GLUE_LABEL(label) label:
+#define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label);
#else
- #define OFFLINE_ASM_GLUE_LABEL(label) case label: label:
+#define OFFLINE_ASM_GLUE_LABEL(label) case label: label: USE_LABEL(label);
#endif
-#define OFFLINE_ASM_LOCAL_LABEL(label) label:
+#define OFFLINE_ASM_LOCAL_LABEL(label) label: USE_LABEL(label);
//============================================================================
@@ -139,6 +151,7 @@ static void Double2Ints(double val, uint32_t& lo, uint32_t& hi)
// pseudo register, as well as hides endianness differences.
struct CLoopRegister {
+ CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); }
union {
intptr_t i;
uintptr_t u;
@@ -204,10 +217,15 @@ struct CLoopRegister {
#endif // !CPU(BIG_ENDIAN)
#endif // !USE(JSVALUE64)
+ intptr_t* ip;
int8_t* i8p;
void* vp;
+ CallFrame* callFrame;
ExecState* execState;
void* instruction;
+ VM* vm;
+ JSCell* cell;
+ ProtoCallFrame* protoCallFrame;
NativeFunction nativeFunc;
#if USE(JSVALUE64)
int64_t i64;
@@ -218,6 +236,13 @@ struct CLoopRegister {
Opcode opcode;
};
+ operator ExecState*() { return execState; }
+ operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); }
+ operator VM*() { return vm; }
+ operator ProtoCallFrame*() { return protoCallFrame; }
+ operator Register*() { return reinterpret_cast<Register*>(vp); }
+ operator JSCell*() { return cell; }
+
#if USE(JSVALUE64)
inline void clearHighWord() { i32padding = 0; }
#else
@@ -229,8 +254,7 @@ struct CLoopRegister {
// The llint C++ interpreter loop:
//
-JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
- bool isInitializationPass)
+JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass)
{
#define CAST reinterpret_cast
#define SIGN_BIT32(x) ((x) & 0x80000000)
@@ -265,8 +289,6 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
return JSValue();
}
- ASSERT(callFrame->vm().topCallFrame == callFrame);
-
// Define the pseudo registers used by the LLINT C Loop backend:
ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t));
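
The CLoopRegister additions above give each machine-word pseudo register extra typed views (ip, callFrame, vm, cell, protoCallFrame) plus implicit conversion operators, so a register value can be handed straight to C++ helpers that expect an ExecState*, VM*, and so on, and a poisoning default constructor makes use of an uninitialized register obvious. A minimal sketch of the idea, with opaque stand-in types rather than the real JSC ones:

    #include <cstdint>

    struct ExecState;
    struct VM;

    // One machine-word "register" readable through several typed views.
    struct SimRegister {
        union {
            intptr_t i;
            uintptr_t u;
            intptr_t* ip;
            void* vp;
            ExecState* execState;
            VM* vm;
        };
        SimRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); } // poison, like the new CLoopRegister()
        // Implicit conversions let call sites pass the register directly.
        operator ExecState*() { return execState; }
        operator VM*() { return vm; }
    };
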
@@ -301,78 +323,66 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
// 2. 32 bit result values will be in the low 32-bit of t0.
// 3. 64 bit result values will be in t0.
- CLoopRegister t0, t1, t2, t3;
+ CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc;
#if USE(JSVALUE64)
- CLoopRegister rBasePC, tagTypeNumber, tagMask;
+ CLoopRegister pcBase, tagTypeNumber, tagMask;
#endif
- CLoopRegister rRetVPC;
CLoopDoubleRegister d0, d1;
- // Keep the compiler happy. We don't really need this, but the compiler
- // will complain. This makes the warning go away.
- t0.i = 0;
- t1.i = 0;
-
- // Instantiate the pseudo JIT stack frame used by the LLINT C Loop backend:
- JITStackFrame jitStackFrame;
-
- // The llint expects the native stack pointer, sp, to be pointing to the
- // jitStackFrame (which is the simulation of the native stack frame):
- JITStackFrame* const sp = &jitStackFrame;
- sp->vm = &callFrame->vm();
-
- // Set up an alias for the vm ptr in the JITStackFrame:
- VM* &vm = sp->vm;
-
- CodeBlock* codeBlock = callFrame->codeBlock();
- Instruction* vPC;
-
- // rPC is an alias for vPC. Set up the alias:
- CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC);
+ lr.opcode = getOpcode(llint_return_to_host);
+ sp.vp = vm->interpreter->stack().topOfStack() + 1;
+ cfr.callFrame = vm->topCallFrame;
+#ifndef NDEBUG
+ void* startSP = sp.vp;
+ CallFrame* startCFR = cfr.callFrame;
+#endif
-#if USE(JSVALUE32_64)
- vPC = codeBlock->instructions().begin();
-#else // USE(JSVALUE64)
- vPC = 0;
- rBasePC.vp = codeBlock->instructions().begin();
+ // Initialize the incoming args for doVMEntryToJavaScript:
+ t0.vp = executableAddress;
+ t1.vm = vm;
+ t2.protoCallFrame = protoCallFrame;
+#if USE(JSVALUE64)
// For the ASM llint, JITStubs takes care of this initialization. We do
// it explicitly here for the C loop:
tagTypeNumber.i = 0xFFFF000000000000;
tagMask.i = 0xFFFF000000000002;
#endif // USE(JSVALUE64)
- // cfr is an alias for callFrame. Set up this alias:
- CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame);
-
- // Simulate a native return PC which should never be used:
- rRetVPC.i = 0xbbadbeef;
-
// Interpreter variables for value passing between opcodes and/or helpers:
NativeFunction nativeFunc = 0;
JSValue functionReturnValue;
- Opcode opcode;
-
- opcode = LLInt::getOpcode(bootstrapOpcodeId);
-
- #if ENABLE(OPCODE_STATS)
- #define RECORD_OPCODE_STATS(__opcode) \
- OpcodeStats::recordInstruction(__opcode)
- #else
- #define RECORD_OPCODE_STATS(__opcode)
- #endif
+ Opcode opcode = getOpcode(entryOpcodeID);
+
+#define PUSH(cloopReg) \
+ do { \
+ sp.ip--; \
+ *sp.ip = cloopReg.i; \
+ } while (false)
+
+#define POP(cloopReg) \
+ do { \
+ cloopReg.i = *sp.ip; \
+ sp.ip++; \
+ } while (false)
+
+#if ENABLE(OPCODE_STATS)
+#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode)
+#else
+#define RECORD_OPCODE_STATS(__opcode)
+#endif
- #if USE(JSVALUE32_64)
- #define FETCH_OPCODE() vPC->u.opcode
- #else // USE(JSVALUE64)
- #define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8)
- #endif // USE(JSVALUE64)
+#if USE(JSVALUE32_64)
+#define FETCH_OPCODE() pc.opcode
+#else // USE(JSVALUE64)
+#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8)
+#endif // USE(JSVALUE64)
- #define NEXT_INSTRUCTION() \
- do { \
- opcode = FETCH_OPCODE(); \
- DISPATCH_OPCODE(); \
- } while (false)
+#define NEXT_INSTRUCTION() \
+ do { \
+ opcode = FETCH_OPCODE(); \
+ DISPATCH_OPCODE(); \
+ } while (false)
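
NEXT_INSTRUCTION() is the heart of the C loop: FETCH_OPCODE() reads the next Opcode (directly from the instruction on JSVALUE32_64, or via pcBase plus a bytecode offset on JSVALUE64) and DISPATCH_OPCODE() transfers control to its handler, either by computed goto when COMPUTED_GOTO_OPCODES is enabled or by a switch otherwise. A compact, self-contained sketch of the switch flavour with made-up opcodes:

    #include <cstdio>

    enum Opcode { op_add_one, op_print, op_halt };

    // A tiny switch-dispatched loop in the spirit of the C loop interpreter.
    // The real interpreter can instead use label addresses and a computed goto.
    int run(const Opcode* pc)
    {
        int acc = 0;
        for (;;) {
            switch (*pc++) {            // FETCH_OPCODE(); DISPATCH_OPCODE();
            case op_add_one: ++acc; break;
            case op_print:   std::printf("%d\n", acc); break;
            case op_halt:    return acc;
            }
        }
    }

    // Usage: Opcode program[] = { op_add_one, op_add_one, op_print, op_halt };
    //        run(program) returns 2 and prints it.
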
#if ENABLE(COMPUTED_GOTO_OPCODES)
@@ -414,14 +424,22 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
#include "LLIntAssembly.h"
+ OFFLINE_ASM_GLUE_LABEL(llint_return_to_host)
+ {
+ ASSERT(startSP == sp.vp);
+ ASSERT(startCFR == cfr.callFrame);
+#if USE(JSVALUE32_64)
+ return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
+#else
+ return JSValue::decode(t0.encodedJSValue);
+#endif
+ }
+
// In the ASM llint, getHostCallReturnValue() is a piece of glue
- // function provided by the JIT (see dfg/DFGOperations.cpp).
+ // function provided by the JIT (see jit/JITOperations.cpp).
    // We simulate it here with a pseudo-opcode handler.
OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue)
{
- // The ASM part pops the frame:
- callFrame = callFrame->callerFrame();
-
// The part in getHostCallReturnValueWithExecState():
JSValue result = vm->hostCallReturnValue;
#if USE(JSVALUE32_64)
@@ -430,12 +448,8 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
#else
t0.encodedJSValue = JSValue::encode(result);
#endif
- goto doReturnHelper;
- }
-
- OFFLINE_ASM_GLUE_LABEL(ctiOpThrowNotCaught)
- {
- return vm->exception;
+ opcode = lr.opcode;
+ DISPATCH_OPCODE();
}
#if !ENABLE(COMPUTED_GOTO_OPCODES)
@@ -445,56 +459,7 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
} // END bytecode handler cases.
- //========================================================================
- // Bytecode helpers:
-
- doReturnHelper: {
- ASSERT(!!callFrame);
- if (callFrame->hasHostCallFrameFlag()) {
-#if USE(JSVALUE32_64)
- return JSValue(t1.i, t0.i); // returning JSValue(tag, payload);
-#else
- return JSValue::decode(t0.encodedJSValue);
-#endif
- }
-
- // The normal ASM llint call implementation returns to the caller as
- // recorded in rRetVPC, and the caller would fetch the return address
- // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in
- // the callTargetFunction() macro in the llint asm files).
- //
- // For the C loop, we don't have the JIT stub to this work for us.
- // So, we need to implement the equivalent of dispatchAfterCall() here
- // before dispatching to the PC.
-
- vPC = callFrame->currentVPC();
-
-#if USE(JSVALUE64)
- // Based on LowLevelInterpreter64.asm's dispatchAfterCall():
-
- // When returning from a native trampoline call, unlike the assembly
- // LLInt, we can't simply return to the caller. In our case, we grab
- // the caller's VPC and resume execution there. However, the caller's
- // VPC returned by callFrame->currentVPC() is in the form of the real
- // address of the target bytecode, but the 64-bit llint expects the
- // VPC to be a bytecode offset. Hence, we need to map it back to a
- // bytecode offset before we dispatch via the usual dispatch mechanism
- // i.e. NEXT_INSTRUCTION():
-
- codeBlock = callFrame->codeBlock();
- ASSERT(codeBlock);
- rPC.vp = callFrame->currentVPC();
- rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin());
- rPC.i >>= 3;
-
- rBasePC.vp = codeBlock->instructions().begin();
-#endif // USE(JSVALUE64)
-
- NEXT_INSTRUCTION();
-
- } // END doReturnHelper.
-
-
+#if ENABLE(COMPUTED_GOTO_OPCODES)
// Keep the compiler happy so that it doesn't complain about unused
// labels for the LLInt trampoline glue. The labels are automatically
// emitted by label macros above, and some of them are referenced by
@@ -505,7 +470,7 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
UNUSED_LABEL(__opcode);
FOR_EACH_OPCODE_ID(LLINT_OPCODE_ENTRY);
#undef LLINT_OPCODE_ENTRY
-
+#endif
#undef NEXT_INSTRUCTION
#undef DEFINE_OPCODE
@@ -513,11 +478,12 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
#undef CAST
#undef SIGN_BIT32
+ return JSValue(); // to suppress a compiler warning.
} // Interpreter::llintCLoopExecute()
} // namespace JSC
-#else // !ENABLE(LLINT_C_LOOP)
+#elif !OS(WINDOWS)
//============================================================================
// Define the opcode dispatch mechanism when using an ASM loop:
@@ -527,14 +493,32 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
#define OFFLINE_ASM_BEGIN asm (
#define OFFLINE_ASM_END );
-#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode)
-#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(__opcode)
-
+#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode)
+#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode)
+
+#if CPU(ARM_THUMB2)
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".text\n" \
+ ".align 4\n" \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(label) "\n" \
+ ".thumb\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#elif CPU(ARM64)
+#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".text\n" \
+ ".align 4\n" \
+ ".globl " SYMBOL_STRING(label) "\n" \
+ HIDE_SYMBOL(label) "\n" \
+ SYMBOL_STRING(label) ":\n"
+#else
#define OFFLINE_ASM_GLOBAL_LABEL(label) \
+ ".text\n" \
".globl " SYMBOL_STRING(label) "\n" \
HIDE_SYMBOL(label) "\n" \
- INLINE_ARM_FUNCTION(label) \
SYMBOL_STRING(label) ":\n"
+#endif
#define OFFLINE_ASM_LOCAL_LABEL(label) LOCAL_LABEL_STRING(label) ":\n"
@@ -542,6 +526,4 @@ JSValue CLoop::execute(CallFrame* callFrame, OpcodeID bootstrapOpcodeId,
// for the interpreter, as compiled from LowLevelInterpreter.asm.
#include "LLIntAssembly.h"
-#endif // !ENABLE(LLINT_C_LOOP)
-
-#endif // ENABLE(LLINT)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.h b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
index f45a07303..8621dbd5a 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter.h
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.h
@@ -26,13 +26,9 @@
#ifndef LowLevelInterpreter_h
#define LowLevelInterpreter_h
-#include <wtf/Platform.h>
-
-#if ENABLE(LLINT)
-
#include "Opcode.h"
-#if ENABLE(LLINT_C_LOOP)
+#if !ENABLE(JIT)
namespace JSC {
@@ -49,18 +45,6 @@ FOR_EACH_CORE_OPCODE_ID(LLINT_OPCODE_ALIAS)
} // namespace JSC
-#else // !ENABLE(LLINT_C_LOOP)
-
-#define LLINT_INSTRUCTION_DECL(opcode, length) extern "C" void llint_##opcode();
- FOR_EACH_OPCODE_ID(LLINT_INSTRUCTION_DECL);
-#undef LLINT_INSTRUCTION_DECL
-
-#define DECLARE_LLINT_NATIVE_HELPER(name, length) extern "C" void name();
- FOR_EACH_LLINT_NATIVE_HELPER(DECLARE_LLINT_NATIVE_HELPER)
-#undef DECLARE_LLINT_NATIVE_HELPER
-
-#endif // !ENABLE(LLINT_C_LOOP)
-
-#endif // ENABLE(LLINT)
+#endif // !ENABLE(JIT)
#endif // LowLevelInterpreter_h
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
index 87aa09eab..a92d55aa9 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -22,66 +22,6 @@
# THE POSSIBILITY OF SUCH DAMAGE.
-# Crash course on the language that this is written in (which I just call
-# "assembly" even though it's more than that):
-#
-# - Mostly gas-style operand ordering. The last operand tends to be the
-# destination. So "a := b" is written as "mov b, a". But unlike gas,
-# comparisons are in-order, so "if (a < b)" is written as
-# "bilt a, b, ...".
-#
-# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer.
-# Currently this is just 32-bit so "i" and "p" are interchangeable
-# except when an op supports one but not the other.
-#
-# - In general, valid operands for macro invocations and instructions are
-# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses
-# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels
-# (eg "_foo" or ".foo"). Macro invocations can also take anonymous
-# macros as operands. Instructions cannot take anonymous macros.
-#
-# - Labels must have names that begin with either "_" or ".". A "." label
-# is local and gets renamed before code gen to minimize namespace
-# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_"
-# may or may not be removed during code gen depending on whether the asm
-# conventions for C name mangling on the target platform mandate a "_"
-# prefix.
-#
-# - A "macro" is a lambda expression, which may be either anonymous or
-# named. But this has caveats. "macro" can take zero or more arguments,
-# which may be macros or any valid operands, but it can only return
-# code. But you can do Turing-complete things via continuation passing
-# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do
-# that, since you'll just crash the assembler.
-#
-# - An "if" is a conditional on settings. Any identifier supplied in the
-# predicate of an "if" is assumed to be a #define that is available
-# during code gen. So you can't use "if" for computation in a macro, but
-# you can use it to select different pieces of code for different
-# platforms.
-#
-# - Arguments to macros follow lexical scoping rather than dynamic scoping.
-# Const's also follow lexical scoping and may override (hide) arguments
-# or other consts. All variables (arguments and constants) can be bound
-# to operands. Additionally, arguments (but not constants) can be bound
-# to macros.
-
-
-# Below we have a bunch of constant declarations. Each constant must have
-# a corresponding ASSERT() in LLIntData.cpp.
-
-
-# Value representation constants.
-const Int32Tag = -1
-const BooleanTag = -2
-const NullTag = -3
-const UndefinedTag = -4
-const CellTag = -5
-const EmptyValueTag = -6
-const DeletedValueTag = -7
-const LowestTag = DeletedValueTag
-
-
# Utilities
macro dispatch(advance)
addp advance * 4, PC
@@ -101,49 +41,47 @@ end
macro dispatchAfterCall()
loadi ArgumentCount + TagOffset[cfr], PC
- jmp [PC]
+ loadi 4[PC], t3
+ storei r1, TagOffset[cfr, t3, 8]
+ storei r0, PayloadOffset[cfr, t3, 8]
+ valueProfile(r1, r0, 4 * (CallOpCodeSize - 1), t3)
+ dispatch(CallOpCodeSize)
end
-macro cCall2(function, arg1, arg2)
- if ARM or ARMv7 or ARMv7_TRADITIONAL
- move arg1, t0
- move arg2, t1
- call function
- elsif X86
- poke arg1, 0
- poke arg2, 1
+macro cCall2(function)
+ if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
call function
- elsif MIPS or SH4
- move arg1, a0
- move arg2, a1
+ elsif X86 or X86_WIN
+ subp 8, sp
+ push a1
+ push a0
call function
+ addp 16, sp
elsif C_LOOP
- cloopCallSlowPath function, arg1, arg2
+ cloopCallSlowPath function, a0, a1
else
error
end
end
-# This barely works. arg3 and arg4 should probably be immediates.
-macro cCall4(function, arg1, arg2, arg3, arg4)
- if ARM or ARMv7 or ARMv7_TRADITIONAL
- move arg1, t0
- move arg2, t1
- move arg3, t2
- move arg4, t3
- call function
- elsif X86
- poke arg1, 0
- poke arg2, 1
- poke arg3, 2
- poke arg4, 3
+macro cCall2Void(function)
+ if C_LOOP
+ cloopCallSlowPathVoid function, a0, a1
+ else
+ cCall2(function)
+ end
+end
+
+macro cCall4(function)
+ if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4
call function
- elsif MIPS or SH4
- move arg1, a0
- move arg2, a1
- move arg3, a2
- move arg4, a3
+ elsif X86 or X86_WIN
+ push a3
+ push a2
+ push a1
+ push a0
call function
+ addp 16, sp
elsif C_LOOP
error
else
@@ -152,9 +90,251 @@ macro cCall4(function, arg1, arg2, arg3, arg4)
end
macro callSlowPath(slowPath)
- cCall2(slowPath, cfr, PC)
- move t0, PC
- move t1, cfr
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ move r0, PC
+end
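
With this change, callSlowPath() loads the argument registers itself (cfr into a0, PC into a1), calls the C slow path, and takes the updated PC back from r0; other call sites below also consume r1, for example as a callee frame. A hedged C++ sketch of that two-value return protocol, using hypothetical simplified types rather than JSC's actual slow-path return encoding:

    #include <cstdint>

    struct CallFrame;                 // opaque stand-in for cfr
    using Instruction = uint32_t;

    // Two machine words coming back from a slow path.
    struct SlowPathResult {
        const Instruction* pc;        // comes back in r0
        void* aux;                    // comes back in r1 (e.g. a callee frame)
    };

    // Shape of a slow path under this protocol: take (callFrame, pc) and
    // return the bytecode position the asm caller should resume at.
    SlowPathResult slowPathExample(CallFrame* callFrame, const Instruction* pc)
    {
        (void)callFrame;
        return { pc + 1, nullptr };   // e.g. resume at the following instruction word
    }
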
+
+macro doVMEntry(makeCall)
+ functionPrologue()
+ pushCalleeSaves()
+
+ # x86 needs to load arguments from the stack
+ if X86 or X86_WIN
+ loadp 16[cfr], a2
+ loadp 12[cfr], a1
+ loadp 8[cfr], a0
+ end
+
+ const entry = a0
+ const vm = a1
+ const protoCallFrame = a2
+
+ # We are using t3, t4 and t5 as temporaries through the function.
+ # Since we have the guarantee that tX != aY when X != Y, we are safe from
+ # aliasing problems with our arguments.
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
+
+ storep vm, VMEntryRecord::m_vm[sp]
+ loadp VM::topCallFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopCallFrame[sp]
+ loadp VM::topVMEntryFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp]
+
+ # Align stack pointer
+ if X86_WIN or MIPS
+ addp CallFrameAlignSlots * SlotSize, sp, t3
+ andp ~StackAlignmentMask, t3
+ subp t3, CallFrameAlignSlots * SlotSize, sp
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL
+ addp CallFrameAlignSlots * SlotSize, sp, t3
+ clrbp t3, StackAlignmentMask, t3
+ if ARMv7
+ subp t3, CallFrameAlignSlots * SlotSize, t3
+ move t3, sp
+ else
+ subp t3, CallFrameAlignSlots * SlotSize, sp
+ end
+ end
+
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4
+ addp CallFrameHeaderSlots, t4, t4
+ lshiftp 3, t4
+ subp sp, t4, t3
+
+ # Ensure that we have enough additional stack capacity for the incoming args,
+ # and the frame for the JS code we're executing. We need to do this check
+ # before we start copying the args from the protoCallFrame below.
+ bpaeq t3, VM::m_jsStackLimit[vm], .stackHeightOK
+
+ if C_LOOP
+ move entry, t4
+ move vm, t5
+ cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3
+ bpeq t0, 0, .stackCheckFailed
+ move t4, entry
+ move t5, vm
+ jmp .stackHeightOK
+
+.stackCheckFailed:
+ move t4, entry
+ move t5, vm
+ end
+
+ subp 8, sp # Align stack for cCall2() to make a call.
+ move vm, a0
+ move protoCallFrame, a1
+ cCall2(_llint_throw_stack_overflow_error)
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
+
+ loadp VMEntryRecord::m_vm[sp], t5
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t4
+ storep t4, VM::topCallFrame[t5]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4
+ storep t4, VM::topVMEntryFrame[t5]
+
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t5
+ move t5, sp
+ else
+ subp cfr, CalleeRegisterSaveSize, sp
+ end
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+.stackHeightOK:
+ move t3, sp
+ move 4, t3
+
+.copyHeaderLoop:
+ subi 1, t3
+ loadi TagOffset[protoCallFrame, t3, 8], t5
+ storei t5, TagOffset + CodeBlock[sp, t3, 8]
+ loadi PayloadOffset[protoCallFrame, t3, 8], t5
+ storei t5, PayloadOffset + CodeBlock[sp, t3, 8]
+ btinz t3, .copyHeaderLoop
+
+ loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4
+ subi 1, t4
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t5
+ subi 1, t5
+
+ bieq t4, t5, .copyArgs
+.fillExtraArgsLoop:
+ subi 1, t5
+ storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, t5, 8]
+ storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, t5, 8]
+ bineq t4, t5, .fillExtraArgsLoop
+
+.copyArgs:
+ loadp ProtoCallFrame::args[protoCallFrame], t3
+
+.copyArgsLoop:
+ btiz t4, .copyArgsDone
+ subi 1, t4
+ loadi TagOffset[t3, t4, 8], t5
+ storei t5, ThisArgumentOffset + 8 + TagOffset[sp, t4, 8]
+ loadi PayloadOffset[t3, t4, 8], t5
+ storei t5, ThisArgumentOffset + 8 + PayloadOffset[sp, t4, 8]
+ jmp .copyArgsLoop
+
+.copyArgsDone:
+ storep sp, VM::topCallFrame[vm]
+ storep cfr, VM::topVMEntryFrame[vm]
+
+ makeCall(entry, t3, t4)
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
+
+ loadp VMEntryRecord::m_vm[sp], t5
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t4
+ storep t4, VM::topCallFrame[t5]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4
+ storep t4, VM::topVMEntryFrame[t5]
+
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t5
+ move t5, sp
+ else
+ subp cfr, CalleeRegisterSaveSize, sp
+ end
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+end
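
doVMEntry() above builds the first JS call frame by hand: it records the previous topCallFrame/topVMEntryFrame in a VMEntryRecord, checks stack capacity, copies the header slots out of the ProtoCallFrame, pads missing arguments with undefined up to the padded (stack-aligned) argument count, copies the supplied arguments, and only then makes the call. A simplified C++ sketch of the copy-and-pad step, with hypothetical stand-in types:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct JSValueSlot { int64_t bits { 0 }; };
    static const JSValueSlot Undefined {};     // stand-in for the encoded undefined value

    struct ProtoCallFrame {
        std::size_t argCount { 0 };            // arguments actually supplied
        std::size_t paddedArgCount { 0 };      // rounded up for stack alignment
        const JSValueSlot* args { nullptr };
    };

    // Fill the whole padded argument area with undefined, then copy the
    // supplied arguments over it, mirroring .fillExtraArgsLoop / .copyArgsLoop.
    void copyArgsWithPadding(std::vector<JSValueSlot>& frameArgs, const ProtoCallFrame& proto)
    {
        frameArgs.assign(proto.paddedArgCount, Undefined);
        for (std::size_t i = 0; i < proto.argCount; ++i)
            frameArgs[i] = proto.args[i];
    }
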
+
+macro makeJavaScriptCall(entry, temp, unused)
+ addp CallerFrameAndPCSize, sp
+ checkStackPointerAlignment(temp, 0xbad0dc02)
+ if C_LOOP
+ cloopCallJSFunction entry
+ else
+ call entry
+ end
+ checkStackPointerAlignment(temp, 0xbad0dc03)
+ subp CallerFrameAndPCSize, sp
+end
+
+macro makeHostFunctionCall(entry, temp1, temp2)
+ move entry, temp1
+ storep cfr, [sp]
+ if C_LOOP
+ move sp, a0
+ storep lr, PtrSize[sp]
+ cloopCallNative temp1
+ elsif X86 or X86_WIN
+ # Put callee frame pointer on stack as arg0, also put it in ecx for "fastcall" targets
+ move 0, temp2
+ move temp2, 4[sp] # put 0 in ReturnPC
+ move sp, a0 # a0 is ecx
+ push temp2 # Push dummy arg1
+ push a0
+ call temp1
+ addp 8, sp
+ else
+ move sp, a0
+ call temp1
+ end
+end
+
+_handleUncaughtException:
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+
+ loadp CallerFrame[cfr], cfr
+
+ if ARMv7
+ vmEntryRecord(cfr, t3)
+ move t3, sp
+ else
+ vmEntryRecord(cfr, sp)
+ end
+
+ loadp VMEntryRecord::m_vm[sp], t3
+ loadp VMEntryRecord::m_prevTopCallFrame[sp], t5
+ storep t5, VM::topCallFrame[t3]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t5
+ storep t5, VM::topVMEntryFrame[t3]
+
+ if ARMv7
+ subp cfr, CalleeRegisterSaveSize, t3
+ move t3, sp
+ else
+ subp cfr, CalleeRegisterSaveSize, sp
+ end
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+macro doReturnFromHostFunction(extraStackSpace)
+ functionEpilogue(extraStackSpace)
+ ret
end
# Debugging operation if you'd like to print an operand in the instruction stream. fromWhere
@@ -162,34 +342,43 @@ end
# debugging from. operand should likewise be an immediate, and should identify the operand
# in the instruction stream you'd like to print out.
macro traceOperand(fromWhere, operand)
- cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_operand)
+ move r0, PC
+ move r1, cfr
end
# Debugging operation if you'd like to print the value of an operand in the instruction
# stream. Same as traceOperand(), but assumes that the operand is a register, and prints its
# value.
macro traceValue(fromWhere, operand)
- cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
- move t0, PC
- move t1, cfr
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_value)
+ move r0, PC
+ move r1, cfr
end
# Call a slowPath for call opcodes.
-macro callCallSlowPath(advance, slowPath, action)
- addp advance * 4, PC, t0
- storep t0, ArgumentCount + TagOffset[cfr]
- cCall2(slowPath, cfr, PC)
- move t1, cfr
- action(t0)
+macro callCallSlowPath(slowPath, action)
+ storep PC, ArgumentCount + TagOffset[cfr]
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ action(r0, r1)
end
macro callWatchdogTimerHandler(throwHandler)
storei PC, ArgumentCount + TagOffset[cfr]
- cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
- move t1, cfr
- btpnz t0, throwHandler
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_slow_path_handle_watchdog_timer)
+ btpnz r0, throwHandler
loadi ArgumentCount + TagOffset[cfr], PC
end
@@ -198,15 +387,23 @@ macro checkSwitchToJITForLoop()
1,
macro ()
storei PC, ArgumentCount + TagOffset[cfr]
- cCall2(_llint_loop_osr, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- jmp t0
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_loop_osr)
+ btpz r0, .recover
+ move r1, sp
+ jmp r0
.recover:
loadi ArgumentCount + TagOffset[cfr], PC
end)
end
+macro loadVariable(operand, index, tag, payload)
+ loadisFromInstruction(operand, index)
+ loadi TagOffset[cfr, index, 8], tag
+ loadi PayloadOffset[cfr, index, 8], payload
+end
+
# Index, tag, and payload must be different registers. Index is not
# changed.
macro loadConstantOrVariable(index, tag, payload)
@@ -289,30 +486,159 @@ macro loadConstantOrVariablePayloadUnchecked(index, payload)
payload)
end
-macro writeBarrier(tag, payload)
- # Nothing to do, since we don't have a generational or incremental collector.
+macro storeStructureWithTypeInfo(cell, structure, scratch)
+ storep structure, JSCell::m_structureID[cell]
+
+ loadi Structure::m_blob + StructureIDBlob::u.words.word2[structure], scratch
+ storei scratch, JSCell::m_indexingType[cell]
end
-macro valueProfile(tag, payload, profile)
- if VALUE_PROFILER
- storei tag, ValueProfile::m_buckets + TagOffset[profile]
- storei payload, ValueProfile::m_buckets + PayloadOffset[profile]
- end
+macro writeBarrierOnOperand(cellOperand)
+ loadisFromInstruction(cellOperand, t1)
+ loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone)
+ skipIfIsRememberedOrInEden(t2, t1, t3,
+ macro(cellState)
+ btbnz cellState, .writeBarrierDone
+ push cfr, PC
+ # We make two extra slots because cCall2 will poke.
+ subp 8, sp
+ move t2, a1 # t2 can be a0 on x86
+ move cfr, a0
+ cCall2Void(_llint_write_barrier_slow)
+ addp 8, sp
+ pop PC, cfr
+ end
+ )
+.writeBarrierDone:
+end
+
+macro writeBarrierOnOperands(cellOperand, valueOperand)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableTag(t1, t0)
+ bineq t0, CellTag, .writeBarrierDone
+
+ writeBarrierOnOperand(cellOperand)
+.writeBarrierDone:
+end
+
+macro writeBarrierOnGlobal(valueOperand, loadHelper)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableTag(t1, t0)
+ bineq t0, CellTag, .writeBarrierDone
+
+ loadHelper(t3)
+
+ skipIfIsRememberedOrInEden(t3, t1, t2,
+ macro(gcData)
+ btbnz gcData, .writeBarrierDone
+ push cfr, PC
+ # We make two extra slots because cCall2 will poke.
+ subp 8, sp
+ move cfr, a0
+ move t3, a1
+ cCall2Void(_llint_write_barrier_slow)
+ addp 8, sp
+ pop PC, cfr
+ end
+ )
+.writeBarrierDone:
+end
+
+macro writeBarrierOnGlobalObject(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
+
+macro writeBarrierOnGlobalLexicalEnvironment(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
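
The new writeBarrierOn* macros give stores a generational write-barrier fast path: bail out unless the stored value is a cell, then check the owning cell's GC state byte via skipIfIsRememberedOrInEden() and only call _llint_write_barrier_slow when the cell still has to be added to the remembered set. A rough C++ sketch of that shape, with hypothetical simplified types (not JSC's real cell-state or heap API):

    #include <cstdint>

    enum class SimCellState : uint8_t { NeedsBarrier, RememberedOrNew };

    struct SimCell { SimCellState gcState { SimCellState::NeedsBarrier }; };

    // Out-of-line part; stands in for adding the cell to the remembered set.
    inline void writeBarrierSlow(SimCell* cell)
    {
        cell->gcState = SimCellState::RememberedOrNew;
    }

    // Fast path: most stores never reach the slow call.
    inline void writeBarrier(SimCell* owner, bool storedValueIsCell)
    {
        if (!storedValueIsCell)
            return;                                          // non-cell stores need no barrier
        if (owner->gcState == SimCellState::RememberedOrNew)
            return;                                          // skipIfIsRememberedOrInEden fast exit
        writeBarrierSlow(owner);                             // _llint_write_barrier_slow equivalent
    }
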
+
+macro valueProfile(tag, payload, operand, scratch)
+ loadp operand[PC], scratch
+ storei tag, ValueProfile::m_buckets + TagOffset[scratch]
+ storei payload, ValueProfile::m_buckets + PayloadOffset[scratch]
end
# Entrypoints into the interpreter
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-macro functionArityCheck(doneLabel, slow_path)
+macro functionArityCheck(doneLabel, slowPath)
loadi PayloadOffset + ArgumentCount[cfr], t0
biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
- cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
- move t1, cfr
- btiz t0, .continue
- loadp JITStackFrame::vm[sp], t1
- loadp VM::callFrameForThrow[t1], t0
- jmp VM::targetMachinePCForThrow[t1]
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error
+ btiz r0, .noError
+ move r1, cfr # r1 contains caller frame
+ jmp _llint_throw_from_slow_path_trampoline
+
+.noError:
+ # r1 points to ArityCheckData.
+ loadp CommonSlowPaths::ArityCheckData::thunkToCall[r1], t3
+ btpz t3, .proceedInline
+
+ loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], a0
+ call t3
+ if ASSERT_ENABLED
+ loadp ReturnPC[cfr], t0
+ loadp [t0], t0
+ end
+ jmp .continue
+
+.proceedInline:
+ loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
+ btiz t1, .continue
+ loadi PayloadOffset + ArgumentCount[cfr], t2
+ addi CallFrameHeaderSlots, t2
+
+ // Check if there are some unaligned slots we can use
+ move t1, t3
+ andi StackAlignmentSlots - 1, t3
+ btiz t3, .noExtraSlot
+.fillExtraSlots:
+ move 0, t0
+ storei t0, PayloadOffset[cfr, t2, 8]
+ move UndefinedTag, t0
+ storei t0, TagOffset[cfr, t2, 8]
+ addi 1, t2
+ bsubinz 1, t3, .fillExtraSlots
+ andi ~(StackAlignmentSlots - 1), t1
+ btiz t1, .continue
+
+.noExtraSlot:
+ // Move frame up t1 slots
+ negi t1
+ move cfr, t3
+.copyLoop:
+ loadi PayloadOffset[t3], t0
+ storei t0, PayloadOffset[t3, t1, 8]
+ loadi TagOffset[t3], t0
+ storei t0, TagOffset[t3, t1, 8]
+ addp 8, t3
+ bsubinz 1, t2, .copyLoop
+
+ // Fill new slots with JSUndefined
+ move t1, t2
+.fillLoop:
+ move 0, t0
+ storei t0, PayloadOffset[t3, t1, 8]
+ move UndefinedTag, t0
+ storei t0, TagOffset[t3, t1, 8]
+ addp 8, t3
+ baddinz 1, t2, .fillLoop
+
+ lshiftp 3, t1
+ addp t1, cfr
+ addp t1, sp
.continue:
# Reload CodeBlock and PC, since the slow_path clobbered it.
loadp CodeBlock[cfr], t1
@@ -320,48 +646,44 @@ macro functionArityCheck(doneLabel, slow_path)
jmp doneLabel
end
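
functionArityCheck()'s inline path above (taken when the slow path supplies no thunk) fixes up an under-applied call in place: it zero-fills any unaligned extra slots, then slides the live header and argument slots by the remaining aligned padding and fills the vacated slots with undefined before continuing into the function body. The real code moves the frame toward lower stack addresses and adjusts cfr and sp; the following C++ sketch only shows the slide-and-fill shape, using a plain vector in place of the stack frame:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Slot { int64_t bits { 0 }; };
    static const Slot UndefinedSlot {};

    // Slide `liveSlots` existing entries by `padding` positions and fill the
    // vacated range with undefined (the missing arguments, in the real frame).
    void padFrame(std::vector<Slot>& frame, std::size_t liveSlots, std::size_t padding)
    {
        frame.resize(liveSlots + padding);
        for (std::size_t i = liveSlots; i-- > 0;)      // copy from the end to avoid overlap
            frame[i + padding] = frame[i];
        for (std::size_t i = 0; i < padding; ++i)
            frame[i] = UndefinedSlot;
    }
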
+macro branchIfException(label)
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ btiz VM::m_exception[t3], .noException
+ jmp label
+.noException:
+end
+
# Instruction implementations
_llint_op_enter:
traceExecution()
+ checkStackPointerAlignment(t2, 0xdead00e1)
loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
btiz t2, .opEnterDone
move UndefinedTag, t0
move 0, t1
+ negi t2
.opEnterLoop:
- subi 1, t2
storei t0, TagOffset[cfr, t2, 8]
storei t1, PayloadOffset[cfr, t2, 8]
+ addi 1, t2
btinz t2, .opEnterLoop
.opEnterDone:
+ callSlowPath(_slow_path_enter)
dispatch(1)
-_llint_op_create_activation:
- traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone
- callSlowPath(_llint_slow_path_create_activation)
-.opCreateActivationDone:
- dispatch(2)
-
-
-_llint_op_init_lazy_reg:
- traceExecution()
- loadi 4[PC], t0
- storei EmptyValueTag, TagOffset[cfr, t0, 8]
- storei 0, PayloadOffset[cfr, t0, 8]
- dispatch(2)
-
-
-_llint_op_create_arguments:
+_llint_op_get_scope:
traceExecution()
- loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone
- callSlowPath(_llint_slow_path_create_arguments)
-.opCreateArgumentsDone:
+ loadi Callee + PayloadOffset[cfr], t0
+ loadi JSCallee::m_scope[t0], t0
+ loadisFromInstruction(1, t1)
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
dispatch(2)
@@ -369,45 +691,39 @@ _llint_op_create_this:
traceExecution()
loadi 8[PC], t0
loadp PayloadOffset[cfr, t0, 8], t0
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
+ loadp JSFunction::m_rareData[t0], t5
+ btpz t5, .opCreateThisSlow
+ loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_allocator[t5], t1
+ loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_structure[t5], t2
btpz t1, .opCreateThisSlow
+ loadpFromInstruction(4, t5)
+ bpeq t5, 1, .hasSeenMultipleCallee
+ bpneq t5, t0, .opCreateThisSlow
+.hasSeenMultipleCallee:
allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
loadi 4[PC], t1
storei CellTag, TagOffset[cfr, t1, 8]
storei t0, PayloadOffset[cfr, t1, 8]
- dispatch(4)
+ dispatch(5)
.opCreateThisSlow:
- callSlowPath(_llint_slow_path_create_this)
- dispatch(4)
-
-
-_llint_op_get_callee:
- traceExecution()
- loadi 4[PC], t0
- loadp PayloadOffset + Callee[cfr], t1
- loadp 8[PC], t2
- valueProfile(CellTag, t1, t2)
- storei CellTag, TagOffset[cfr, t0, 8]
- storei t1, PayloadOffset[cfr, t0, 8]
- dispatch(3)
+ callSlowPath(_slow_path_create_this)
+ dispatch(5)
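
The reworked op_create_this above reads the callee's allocation profile out of JSFunction::m_rareData and also checks a callee value cached in the instruction stream; the comparison against the constant 1 (bpeq t5, 1, .hasSeenMultipleCallee) suggests 1 acts as a "seen multiple callees" sentinel, while any other mismatch with the current callee sends the op to the slow path. Reading the sentinel that way is an assumption here; a small C++ sketch of the check with opaque stand-ins:

    #include <cstdint>

    struct JSFunction;   // opaque stand-in

    // cachedCallee is the pointer-sized operand cached in the instruction stream.
    inline bool canUseCachedAllocationProfile(JSFunction* callee, uintptr_t cachedCallee)
    {
        if (cachedCallee == 1)
            return true;                      // assumed "multiple callees seen" sentinel
        return cachedCallee == reinterpret_cast<uintptr_t>(callee); // monomorphic hit
    }
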
-_llint_op_convert_this:
+_llint_op_to_this:
traceExecution()
loadi 4[PC], t0
- bineq TagOffset[cfr, t0, 8], CellTag, .opConvertThisSlow
+ bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow
loadi PayloadOffset[cfr, t0, 8], t0
- loadp JSCell::m_structure[t0], t0
- bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
- loadi 8[PC], t1
- valueProfile(CellTag, t0, t1)
- dispatch(3)
+ bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
+ loadpFromInstruction(2, t2)
+ bpneq JSCell::m_structureID[t0], t2, .opToThisSlow
+ dispatch(4)
-.opConvertThisSlow:
- callSlowPath(_llint_slow_path_convert_this)
- dispatch(3)
+.opToThisSlow:
+ callSlowPath(_slow_path_to_this)
+ dispatch(4)
_llint_op_new_object:
@@ -426,6 +742,17 @@ _llint_op_new_object:
dispatch(4)
+_llint_op_check_tdz:
+ traceExecution()
+ loadisFromInstruction(1, t0)
+ loadConstantOrVariableTag(t0, t1)
+ bineq t1, EmptyValueTag, .opNotTDZ
+ callSlowPath(_slow_path_throw_tdz_error)
+
+.opNotTDZ:
+ dispatch(2)
+
+
_llint_op_mov:
traceExecution()
loadi 8[PC], t1
@@ -448,7 +775,7 @@ _llint_op_not:
dispatch(3)
.opNotSlow:
- callSlowPath(_llint_slow_path_not)
+ callSlowPath(_slow_path_not)
dispatch(3)
@@ -468,7 +795,7 @@ _llint_op_eq:
dispatch(4)
.opEqSlow:
- callSlowPath(_llint_slow_path_eq)
+ callSlowPath(_slow_path_eq)
dispatch(4)
@@ -480,11 +807,11 @@ _llint_op_eq_null:
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .opEqNullImmediate
- loadp JSCell::m_structure[t0], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined
move 0, t1
jmp .opEqNullNotImmediate
.opEqNullMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t0], t1
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpeq Structure::m_globalObject[t1], t0, t1
@@ -515,7 +842,7 @@ _llint_op_neq:
dispatch(4)
.opNeqSlow:
- callSlowPath(_llint_slow_path_neq)
+ callSlowPath(_slow_path_neq)
dispatch(4)
@@ -527,11 +854,11 @@ _llint_op_neq_null:
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .opNeqNullImmediate
- loadp JSCell::m_structure[t0], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined
move 1, t1
jmp .opNeqNullNotImmediate
.opNeqNullMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t0], t1
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpneq Structure::m_globalObject[t1], t0, t1
@@ -553,12 +880,10 @@ macro strictEq(equalityOperation, slowPath)
loadConstantOrVariable2Reg(t0, t2, t0)
bineq t2, t3, .slow
bib t2, LowestTag, .slow
- bineq t2, CellTag, .notString
- loadp JSCell::m_structure[t0], t2
- loadp JSCell::m_structure[t1], t3
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString
- bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow
-.notString:
+ bineq t2, CellTag, .notStringOrSymbol
+ bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol
+ bbb JSCell::m_type[t1], ObjectType, .slow
+.notStringOrSymbol:
loadi 4[PC], t2
equalityOperation(t0, t1, t0)
storei BooleanTag, TagOffset[cfr, t2, 8]
@@ -572,12 +897,12 @@ end
_llint_op_stricteq:
traceExecution()
- strictEq(macro (left, right, result) cieq left, right, result end, _llint_slow_path_stricteq)
+ strictEq(macro (left, right, result) cieq left, right, result end, _slow_path_stricteq)
_llint_op_nstricteq:
traceExecution()
- strictEq(macro (left, right, result) cineq left, right, result end, _llint_slow_path_nstricteq)
+ strictEq(macro (left, right, result) cineq left, right, result end, _slow_path_nstricteq)
_llint_op_inc:
@@ -590,7 +915,7 @@ _llint_op_inc:
dispatch(2)
.opIncSlow:
- callSlowPath(_llint_slow_path_pre_inc)
+ callSlowPath(_slow_path_inc)
dispatch(2)
@@ -604,7 +929,7 @@ _llint_op_dec:
dispatch(2)
.opDecSlow:
- callSlowPath(_llint_slow_path_pre_dec)
+ callSlowPath(_slow_path_dec)
dispatch(2)
@@ -621,7 +946,24 @@ _llint_op_to_number:
dispatch(3)
.opToNumberSlow:
- callSlowPath(_llint_slow_path_to_number)
+ callSlowPath(_slow_path_to_number)
+ dispatch(3)
+
+
+_llint_op_to_string:
+ traceExecution()
+ loadi 8[PC], t0
+ loadi 4[PC], t1
+ loadConstantOrVariable(t0, t2, t3)
+ bineq t2, CellTag, .opToStringSlow
+ bbneq JSCell::m_type[t3], StringType, .opToStringSlow
+.opToStringIsString:
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+.opToStringSlow:
+ callSlowPath(_slow_path_to_string)
dispatch(3)
@@ -644,7 +986,7 @@ _llint_op_negate:
dispatch(3)
.opNegateSlow:
- callSlowPath(_llint_slow_path_negate)
+ callSlowPath(_slow_path_negate)
dispatch(3)
@@ -705,7 +1047,7 @@ _llint_op_add:
binaryOp(
macro (left, right, slow) baddio left, right, slow end,
macro (left, right) addd left, right end,
- _llint_slow_path_add)
+ _slow_path_add)
_llint_op_mul:
@@ -723,7 +1065,7 @@ _llint_op_mul:
storei scratch, PayloadOffset[cfr, index, 8]
end,
macro (left, right) muld left, right end,
- _llint_slow_path_mul)
+ _slow_path_mul)
_llint_op_sub:
@@ -731,7 +1073,7 @@ _llint_op_sub:
binaryOp(
macro (left, right, slow) bsubio left, right, slow end,
macro (left, right) subd left, right end,
- _llint_slow_path_sub)
+ _slow_path_sub)
_llint_op_div:
@@ -750,7 +1092,7 @@ _llint_op_div:
.done:
end,
macro (left, right) divd left, right end,
- _llint_slow_path_div)
+ _slow_path_div)
macro bitOp(operation, slowPath, advance)
@@ -761,7 +1103,7 @@ macro bitOp(operation, slowPath, advance)
bineq t3, Int32Tag, .slow
bineq t2, Int32Tag, .slow
loadi 4[PC], t2
- operation(t1, t0, .slow)
+ operation(t1, t0)
storei t3, TagOffset[cfr, t2, 8]
storei t0, PayloadOffset[cfr, t2, 8]
dispatch(advance)
@@ -774,66 +1116,94 @@ end
_llint_op_lshift:
traceExecution()
bitOp(
- macro (left, right, slow) lshifti left, right end,
- _llint_slow_path_lshift,
+ macro (left, right) lshifti left, right end,
+ _slow_path_lshift,
4)
_llint_op_rshift:
traceExecution()
bitOp(
- macro (left, right, slow) rshifti left, right end,
- _llint_slow_path_rshift,
+ macro (left, right) rshifti left, right end,
+ _slow_path_rshift,
4)
_llint_op_urshift:
traceExecution()
bitOp(
- macro (left, right, slow)
- urshifti left, right
- bilt right, 0, slow
- end,
- _llint_slow_path_urshift,
+ macro (left, right) urshifti left, right end,
+ _slow_path_urshift,
4)
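+# op_unsigned re-boxes an int32 known to come from an unsigned shift: a non-negative
+# result is stored as-is; anything larger takes the slow path (presumably to be
+# re-boxed as a double).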
+_llint_op_unsigned:
+ traceExecution()
+ loadi 4[PC], t0
+ loadi 8[PC], t1
+ loadConstantOrVariablePayload(t1, Int32Tag, t2, .opUnsignedSlow)
+ bilt t2, 0, .opUnsignedSlow
+ storei t2, PayloadOffset[cfr, t0, 8]
+ storei Int32Tag, TagOffset[cfr, t0, 8]
+ dispatch(3)
+.opUnsignedSlow:
+ callSlowPath(_slow_path_unsigned)
+ dispatch(3)
+
+
_llint_op_bitand:
traceExecution()
bitOp(
- macro (left, right, slow) andi left, right end,
- _llint_slow_path_bitand,
+ macro (left, right) andi left, right end,
+ _slow_path_bitand,
5)
_llint_op_bitxor:
traceExecution()
bitOp(
- macro (left, right, slow) xori left, right end,
- _llint_slow_path_bitxor,
+ macro (left, right) xori left, right end,
+ _slow_path_bitxor,
5)
_llint_op_bitor:
traceExecution()
bitOp(
- macro (left, right, slow) ori left, right end,
- _llint_slow_path_bitor,
+ macro (left, right) ori left, right end,
+ _slow_path_bitor,
5)
-_llint_op_check_has_instance:
+_llint_op_overrides_has_instance:
traceExecution()
- loadi 12[PC], t1
- loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
- dispatch(5)
-.opCheckHasInstanceSlow:
- callSlowPath(_llint_slow_path_check_has_instance)
- dispatch(0)
+ loadisFromInstruction(1, t3)
+ storei BooleanTag, TagOffset[cfr, t3, 8]
+ # First check if hasInstanceValue is the one on Function.prototype[Symbol.hasInstance]
+ loadisFromInstruction(3, t0)
+ loadConstantOrVariablePayload(t0, CellTag, t2, .opOverrideshasInstanceValueNotCell)
+ loadConstantOrVariable(t0, t1, t2)
+ bineq t1, CellTag, .opOverrideshasInstanceValueNotCell
+
+ # We don't need hasInstanceValue's tag register anymore.
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_globalObject[t1], t1
+ loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t1], t1
+ bineq t1, t2, .opOverrideshasInstanceValueNotDefault
+
+ # We know the constructor is a cell.
+ loadisFromInstruction(2, t0)
+ loadConstantOrVariablePayloadUnchecked(t0, t1)
+ tbz JSCell::m_flags[t1], ImplementsDefaultHasInstance, t0
+ storei t0, PayloadOffset[cfr, t3, 8]
+ dispatch(4)
+
+.opOverrideshasInstanceValueNotCell:
+.opOverrideshasInstanceValueNotDefault:
+ storei 1, PayloadOffset[cfr, t3, 8]
+ dispatch(4)
_llint_op_instanceof:
traceExecution()
@@ -841,15 +1211,14 @@ _llint_op_instanceof:
loadi 12[PC], t0
loadi 4[PC], t3
loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow)
- loadp JSCell::m_structure[t1], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
+ bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow
loadi 8[PC], t0
loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow)
# Register state: t1 = prototype, t2 = value
move 1, t0
.opInstanceofLoop:
- loadp JSCell::m_structure[t2], t2
+ loadp JSCell::m_structureID[t2], t2
loadi Structure::m_prototype + PayloadOffset[t2], t2
bpeq t2, t1, .opInstanceofDone
btinz t2, .opInstanceofLoop
@@ -864,6 +1233,11 @@ _llint_op_instanceof:
callSlowPath(_llint_slow_path_instanceof)
dispatch(4)
+_llint_op_instanceof_custom:
+ traceExecution()
+ callSlowPath(_llint_slow_path_instanceof_custom)
+ dispatch(5)
+
_llint_op_is_undefined:
traceExecution()
@@ -876,12 +1250,12 @@ _llint_op_is_undefined:
storei t3, PayloadOffset[cfr, t0, 8]
dispatch(3)
.opIsUndefinedCell:
- loadp JSCell::m_structure[t3], t1
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
+ btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined
move 0, t1
storei t1, PayloadOffset[cfr, t0, 8]
dispatch(3)
.opIsUndefinedMasqueradesAsUndefined:
+ loadp JSCell::m_structureID[t3], t1
loadp CodeBlock[cfr], t3
loadp CodeBlock::m_globalObject[t3], t3
cpeq Structure::m_globalObject[t1], t3, t1
@@ -919,8 +1293,7 @@ _llint_op_is_string:
loadConstantOrVariable(t1, t0, t3)
storei BooleanTag, TagOffset[cfr, t2, 8]
bineq t0, CellTag, .opIsStringNotCell
- loadp JSCell::m_structure[t3], t0
- cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
+ cbeq JSCell::m_type[t3], StringType, t1
storei t1, PayloadOffset[cfr, t2, 8]
dispatch(3)
.opIsStringNotCell:
@@ -928,6 +1301,21 @@ _llint_op_is_string:
dispatch(3)
+_llint_op_is_object:
+ traceExecution()
+ loadi 8[PC], t1
+ loadi 4[PC], t2
+ loadConstantOrVariable(t1, t0, t3)
+ storei BooleanTag, TagOffset[cfr, t2, 8]
+ bineq t0, CellTag, .opIsObjectNotCell
+ cbaeq JSCell::m_type[t3], ObjectType, t1
+ storei t1, PayloadOffset[cfr, t2, 8]
+ dispatch(3)
+.opIsObjectNotCell:
+ storep 0, PayloadOffset[cfr, t2, 8]
+ dispatch(3)
+
+
macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload)
assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end)
negi propertyOffset
@@ -948,50 +1336,18 @@ macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payloa
loadi PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffset, 8], payload
end
-macro resolveGlobal(size, slow)
- # Operands are as follows:
- # 4[PC] Destination for the load.
- # 8[PC] Property identifier index in the code block.
- # 12[PC] Structure pointer, initialized to 0 by bytecode generator.
- # 16[PC] Offset in global object, initialized to 0 by bytecode generator.
- loadp CodeBlock[cfr], t0
- loadp CodeBlock::m_globalObject[t0], t0
- loadp JSCell::m_structure[t0], t1
- bpneq t1, 12[PC], slow
- loadi 16[PC], t1
- loadPropertyAtVariableOffsetKnownNotInline(t1, t0, t2, t3)
- loadi 4[PC], t0
- storei t2, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- loadi (size - 1) * 4[PC], t0
- valueProfile(t2, t3, t0)
+macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag, payload)
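+    # Mirror of loadPropertyAtVariableOffset, but for stores: inline properties live
+    # right after the JSObject header, out-of-line properties live in the butterfly at
+    # negative offsets.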
+ bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
+ loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+ negi propertyOffsetAsInt
+ jmp .ready
+.isInline:
+ addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
+.ready:
+ storei tag, TagOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
+ storei payload, PayloadOffset + (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
end
-_llint_op_init_global_const:
- traceExecution()
- loadi 8[PC], t1
- loadi 4[PC], t0
- loadConstantOrVariable(t1, t2, t3)
- writeBarrier(t2, t3)
- storei t2, TagOffset[t0]
- storei t3, PayloadOffset[t0]
- dispatch(5)
-
-
-_llint_op_init_global_const_check:
- traceExecution()
- loadp 12[PC], t2
- loadi 8[PC], t1
- loadi 4[PC], t0
- btbnz [t2], .opInitGlobalConstCheckSlow
- loadConstantOrVariable(t1, t2, t3)
- writeBarrier(t2, t3)
- storei t2, TagOffset[t0]
- storei t3, PayloadOffset[t0]
- dispatch(5)
-.opInitGlobalConstCheckSlow:
- callSlowPath(_llint_slow_path_init_global_const_check)
- dispatch(5)
# We only do monomorphic get_by_id caching for now, and we do not modify the
# opcode. We do, however, allow for the cache to change any time it fails, since
@@ -999,38 +1355,23 @@ _llint_op_init_global_const_check:
# to take fast path on the new cache. At worst we take slow path, which is what
# we would have been doing anyway.
-macro getById(getPropertyStorage)
+_llint_op_get_by_id:
traceExecution()
loadi 8[PC], t0
loadi 16[PC], t1
loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow)
loadi 20[PC], t2
- getPropertyStorage(
- t3,
- t0,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
- loadi 4[PC], t1
- loadi TagOffset[propertyStorage, t2], scratch
- loadi PayloadOffset[propertyStorage, t2], t2
- storei scratch, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- loadi 32[PC], t1
- valueProfile(scratch, t2, t1)
- dispatch(9)
- end)
-
- .opGetByIdSlow:
- callSlowPath(_llint_slow_path_get_by_id)
- dispatch(9)
-end
-
-_llint_op_get_by_id:
- getById(withInlineStorage)
-
+ bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow
+ loadPropertyAtVariableOffset(t2, t3, t0, t1)
+ loadi 4[PC], t2
+ storei t0, TagOffset[cfr, t2, 8]
+ storei t1, PayloadOffset[cfr, t2, 8]
+ valueProfile(t0, t1, 32, t2)
+ dispatch(9)
-_llint_op_get_by_id_out_of_line:
- getById(withOutOfLineStorage)
+.opGetByIdSlow:
+ callSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
_llint_op_get_array_length:
@@ -1038,16 +1379,15 @@ _llint_op_get_array_length:
loadi 8[PC], t0
loadp 16[PC], t1
loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow)
- loadp JSCell::m_structure[t3], t2
+ move t3, t2
arrayProfile(t2, t1, t0)
btiz t2, IsArray, .opGetArrayLengthSlow
btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
loadi 4[PC], t1
- loadp 32[PC], t2
loadp JSObject::m_butterfly[t3], t0
loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
bilt t0, 0, .opGetArrayLengthSlow
- valueProfile(Int32Tag, t0, t2)
+ valueProfile(Int32Tag, t0, 32, t2)
storep t0, PayloadOffset[cfr, t1, 8]
storep Int32Tag, TagOffset[cfr, t1, 8]
dispatch(9)
@@ -1057,119 +1397,142 @@ _llint_op_get_array_length:
dispatch(9)
-_llint_op_get_arguments_length:
- traceExecution()
- loadi 8[PC], t0
- loadi 4[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow
- loadi ArgumentCount + PayloadOffset[cfr], t2
- subi 1, t2
- storei Int32Tag, TagOffset[cfr, t1, 8]
- storei t2, PayloadOffset[cfr, t1, 8]
- dispatch(4)
-
-.opGetArgumentsLengthSlow:
- callSlowPath(_llint_slow_path_get_arguments_length)
- dispatch(4)
-
-
-macro putById(getPropertyStorage)
+_llint_op_put_by_id:
traceExecution()
+ writeBarrierOnOperands(1, 3)
loadi 4[PC], t3
- loadi 16[PC], t1
loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- loadi 20[PC], t1
- loadConstantOrVariable2Reg(t2, scratch, t2)
- writeBarrier(scratch, t2)
- storei scratch, TagOffset[propertyStorage, t1]
- storei t2, PayloadOffset[propertyStorage, t1]
- dispatch(9)
- end)
-end
+ loadi JSCell::m_structureID[t0], t2
+ bineq t2, 16[PC], .opPutByIdSlow
-_llint_op_put_by_id:
- putById(withInlineStorage)
+ # At this point, we have:
+ # t2 -> currentStructureID
+ # t0 -> object base
+ # We will lose currentStructureID in the shenanigans below.
+
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 32[PC], t1
+
+ # At this point, we have:
+ # t0 -> object base
+ # t1 -> put by id flags
+ # t2 -> value tag
+ # t3 -> value payload
+
+ btinz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther
+
+ # We have one of the non-structure type checks. Find out which one.
+ andi PutByIdSecondaryTypeMask, t1
+ bilt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString
+
+ # We are one of the following: String, Symbol, Object, ObjectOrOther, Top
+ bilt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther
+
+ # We are either ObjectOrOther or Top.
+ bieq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes
+
+ # Check if we are ObjectOrOther.
+ bieq t2, CellTag, .opPutByIdTypeCheckObject
+.opPutByIdTypeCheckOther:
+ bieq t2, NullTag, .opPutByIdDoneCheckingTypes
+ bieq t2, UndefinedTag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanObjectOrOther:
+ # We are either String, Symbol or Object.
+ bineq t2, CellTag, .opPutByIdSlow
+ bieq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject
+ bieq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol
+ bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckObject:
+ bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckSymbol:
+ bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanString:
+ # We are one of the following: Bottom, Boolean, Other, Int32, Number.
+ bilt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32
+
+ # We are either Int32 or Number.
+ bieq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber
+
+ bieq t2, Int32Tag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckNumber:
+ bib t2, LowestTag + 1, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanInt32:
+ # We are one of the following: Bottom, Boolean, Other
+ bineq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther
+ bieq t2, BooleanTag, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckBottomOrOther:
+ bieq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructureOrOther:
+ bieq t2, CellTag, .opPutByIdTypeCheckObjectWithStructure
+ btinz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructure:
+ andi PutByIdSecondaryTypeMask, t1
+ bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow
+
+.opPutByIdDoneCheckingTypes:
+ loadi 24[PC], t1
+
+ btiz t1, .opPutByIdNotTransition
+
+ # This is the transition case. t1 holds the new Structure*. If we have a chain, we need to
+ # check it. t0 is the base. We may clobber t1 to use it as scratch.
+ loadp 28[PC], t3
+ btpz t3, .opPutByIdTransitionDirect
+
+ loadi 16[PC], t2 # Need old structure again.
+ loadp StructureChain::m_vector[t3], t3
+ assert(macro (ok) btpnz t3, ok end)
+
+ loadp Structure::m_prototype[t2], t2
+ btpz t2, .opPutByIdTransitionChainDone
+.opPutByIdTransitionChainLoop:
+ loadp [t3], t1
+ bpneq t1, JSCell::m_structureID[t2], .opPutByIdSlow
+ addp 4, t3
+ loadp Structure::m_prototype[t1], t2
+ btpnz t2, .opPutByIdTransitionChainLoop
+
+.opPutByIdTransitionChainDone:
+ loadi 24[PC], t1
+
+.opPutByIdTransitionDirect:
+ storei t1, JSCell::m_structureID[t0]
+
+.opPutByIdNotTransition:
+ # The only thing live right now is t0, which holds the base.
+ loadi 12[PC], t1
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 20[PC], t1
+ storePropertyAtVariableOffset(t1, t0, t2, t3)
+ dispatch(9)
.opPutByIdSlow:
callSlowPath(_llint_slow_path_put_by_id)
dispatch(9)
-_llint_op_put_by_id_out_of_line:
- putById(withOutOfLineStorage)
-
-
-macro putByIdTransition(additionalChecks, getPropertyStorage)
- traceExecution()
- loadi 4[PC], t3
- loadi 16[PC], t1
- loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow)
- loadi 12[PC], t2
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- additionalChecks(t1, t3)
- loadi 20[PC], t1
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- addp t1, propertyStorage, t3
- loadConstantOrVariable2Reg(t2, t1, t2)
- writeBarrier(t1, t2)
- storei t1, TagOffset[t3]
- loadi 24[PC], t1
- storei t2, PayloadOffset[t3]
- storep t1, JSCell::m_structure[t0]
- dispatch(9)
- end)
-end
-
-macro noAdditionalChecks(oldStructure, scratch)
-end
-
-macro structureChainChecks(oldStructure, scratch)
- const protoCell = oldStructure # Reusing the oldStructure register for the proto
-
- loadp 28[PC], scratch
- assert(macro (ok) btpnz scratch, ok end)
- loadp StructureChain::m_vector[scratch], scratch
- assert(macro (ok) btpnz scratch, ok end)
- bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done
-.loop:
- loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell
- loadp JSCell::m_structure[protoCell], oldStructure
- bpneq oldStructure, [scratch], .opPutByIdSlow
- addp 4, scratch
- bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop
-.done:
-end
-
-_llint_op_put_by_id_transition_direct:
- putByIdTransition(noAdditionalChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_direct_out_of_line:
- putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
-
-
-_llint_op_put_by_id_transition_normal:
- putByIdTransition(structureChainChecks, withInlineStorage)
-
-
-_llint_op_put_by_id_transition_normal_out_of_line:
- putByIdTransition(structureChainChecks, withOutOfLineStorage)
-
-
_llint_op_get_by_val:
traceExecution()
loadi 8[PC], t2
loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow)
- loadp JSCell::m_structure[t0], t2
+ move t0, t2
loadp 16[PC], t3
arrayProfile(t2, t3, t1)
loadi 12[PC], t3
@@ -1208,77 +1571,17 @@ _llint_op_get_by_val:
.opGetByValNotEmpty:
storei t2, TagOffset[cfr, t0, 8]
storei t1, PayloadOffset[cfr, t0, 8]
- loadi 20[PC], t0
- valueProfile(t2, t1, t0)
+ valueProfile(t2, t1, 20, t0)
dispatch(6)
.opGetByValOutOfBounds:
- if VALUE_PROFILER
- loadpFromInstruction(4, t0)
- storeb 1, ArrayProfile::m_outOfBounds[t0]
- end
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
.opGetByValSlow:
callSlowPath(_llint_slow_path_get_by_val)
dispatch(6)
-_llint_op_get_argument_by_val:
- # FIXME: At some point we should array profile this. Right now it isn't necessary
- # since the DFG will never turn a get_argument_by_val into a GetByVal.
- traceExecution()
- loadi 8[PC], t0
- loadi 12[PC], t1
- bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow
- loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow)
- addi 1, t2
- loadi ArgumentCount + PayloadOffset[cfr], t1
- biaeq t2, t1, .opGetArgumentByValSlow
- negi t2
- loadi 4[PC], t3
- loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0
- loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1
- loadi 20[PC], t2
- storei t0, TagOffset[cfr, t3, 8]
- storei t1, PayloadOffset[cfr, t3, 8]
- valueProfile(t0, t1, t2)
- dispatch(6)
-
-.opGetArgumentByValSlow:
- callSlowPath(_llint_slow_path_get_argument_by_val)
- dispatch(6)
-
-
-_llint_op_get_by_pname:
- traceExecution()
- loadi 12[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow)
- loadi 16[PC], t0
- bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow
- loadi 8[PC], t0
- loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow)
- loadi 20[PC], t0
- loadi PayloadOffset[cfr, t0, 8], t3
- loadp JSCell::m_structure[t2], t0
- bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow
- loadi 24[PC], t0
- loadi [cfr, t0, 8], t0
- subi 1, t0
- biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow
- bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty
- addi firstOutOfLineOffset, t0
- subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0
-.opGetByPnameInlineProperty:
- loadPropertyAtVariableOffset(t0, t2, t1, t3)
- loadi 4[PC], t0
- storei t1, TagOffset[cfr, t0, 8]
- storei t3, PayloadOffset[cfr, t0, 8]
- dispatch(7)
-
-.opGetByPnameSlow:
- callSlowPath(_llint_slow_path_get_by_pname)
- dispatch(7)
-
-
macro contiguousPutByVal(storeCallback)
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
@@ -1288,20 +1591,19 @@ macro contiguousPutByVal(storeCallback)
.outOfBounds:
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
- if VALUE_PROFILER
- loadp 16[PC], t2
- storeb 1, ArrayProfile::m_mayStoreToHole[t2]
- end
+ loadp 16[PC], t2
+ storeb 1, ArrayProfile::m_mayStoreToHole[t2]
addi 1, t3, t2
storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .storeResult
end
-_llint_op_put_by_val:
+macro putByVal(slowPath)
traceExecution()
+ writeBarrierOnOperands(1, 3)
loadi 4[PC], t0
loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow)
- loadp JSCell::m_structure[t1], t2
+ move t1, t2
loadp 16[PC], t3
arrayProfile(t2, t3, t0)
loadi 8[PC], t0
@@ -1340,7 +1642,6 @@ _llint_op_put_by_val:
const tag = scratch
const payload = operand
loadConstantOrVariable2Reg(operand, tag, payload)
- writeBarrier(tag, payload)
storei tag, TagOffset[base, index, 8]
storei payload, PayloadOffset[base, index, 8]
end)
@@ -1352,16 +1653,13 @@ _llint_op_put_by_val:
.opPutByValArrayStorageStoreResult:
loadi 12[PC], t2
loadConstantOrVariable2Reg(t2, t1, t2)
- writeBarrier(t1, t2)
storei t1, ArrayStorage::m_vector + TagOffset[t0, t3, 8]
storei t2, ArrayStorage::m_vector + PayloadOffset[t0, t3, 8]
dispatch(5)
.opPutByValArrayStorageEmpty:
- if VALUE_PROFILER
- loadp 16[PC], t1
- storeb 1, ArrayProfile::m_mayStoreToHole[t1]
- end
+ loadp 16[PC], t1
+ storeb 1, ArrayProfile::m_mayStoreToHole[t1]
addi 1, ArrayStorage::m_numValuesInVector[t0]
bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
addi 1, t3, t1
@@ -1369,14 +1667,18 @@ _llint_op_put_by_val:
jmp .opPutByValArrayStorageStoreResult
.opPutByValOutOfBounds:
- if VALUE_PROFILER
- loadpFromInstruction(4, t0)
- storeb 1, ArrayProfile::m_outOfBounds[t0]
- end
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
.opPutByValSlow:
- callSlowPath(_llint_slow_path_put_by_val)
+ callSlowPath(slowPath)
dispatch(5)
+end
+_llint_op_put_by_val:
+ putByVal(_llint_slow_path_put_by_val)
+
+_llint_op_put_by_val_direct:
+ putByVal(_llint_slow_path_put_by_val_direct)
_llint_op_jmp:
traceExecution()
@@ -1404,8 +1706,8 @@ macro equalNull(cellHandler, immediateHandler)
loadi TagOffset[cfr, t0, 8], t1
loadi PayloadOffset[cfr, t0, 8], t0
bineq t1, CellTag, .immediate
- loadp JSCell::m_structure[t0], t2
- cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ loadp JSCell::m_structureID[t0], t2
+ cellHandler(t2, JSCell::m_flags[t0], .target)
dispatch(3)
.target:
@@ -1504,7 +1806,7 @@ _llint_op_switch_imm:
loadp CodeBlock[cfr], t2
loadp CodeBlock::m_rareData[t2], t2
muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
- loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
+ loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
bineq t1, Int32Tag, .opSwitchImmNotInt
subi SimpleJumpTable::min[t2], t0
@@ -1532,11 +1834,10 @@ _llint_op_switch_char:
loadp CodeBlock[cfr], t2
loadp CodeBlock::m_rareData[t2], t2
muli sizeof SimpleJumpTable, t3
- loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
+ loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
bineq t1, CellTag, .opSwitchCharFallThrough
- loadp JSCell::m_structure[t0], t1
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough
+ bbneq JSCell::m_type[t0], StringType, .opSwitchCharFallThrough
bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough
loadp JSString::m_value[t0], t0
btpz t0, .opSwitchOnRope
@@ -1562,75 +1863,40 @@ _llint_op_switch_char:
dispatch(0)
-_llint_op_new_func:
- traceExecution()
- btiz 12[PC], .opNewFuncUnchecked
- loadi 4[PC], t1
- bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone
-.opNewFuncUnchecked:
- callSlowPath(_llint_slow_path_new_func)
-.opNewFuncDone:
- dispatch(4)
-
-
macro arrayProfileForCall()
- if VALUE_PROFILER
- loadi 12[PC], t3
- bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
- loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
- loadp JSCell::m_structure[t0], t0
- loadp 20[PC], t1
- storep t0, ArrayProfile::m_lastSeenStructure[t1]
- .done:
- end
+ loadi 16[PC], t3
+ negi t3
+ bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done
+ loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0
+ loadp JSCell::m_structureID[t0], t0
+ loadpFromInstruction(CallOpCodeSize - 2, t1)
+ storep t0, ArrayProfile::m_lastSeenStructureID[t1]
+.done:
end
-macro doCall(slowPath)
- loadi 4[PC], t0
- loadi 16[PC], t1
+macro doCall(slowPath, prepareCall)
+ loadi 8[PC], t0
+ loadi 20[PC], t1
loadp LLIntCallLinkInfo::callee[t1], t2
loadConstantOrVariablePayload(t0, CellTag, t3, .opCallSlow)
bineq t3, t2, .opCallSlow
- loadi 12[PC], t3
- addp 24, PC
+ loadi 16[PC], t3
lshifti 3, t3
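+    # The callee frame's register offset (operand 4) is negated because callee frames
+    # now sit at lower addresses than cfr.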
+ negi t3
addp cfr, t3 # t3 contains the new value of cfr
- loadp JSFunction::m_scope[t2], t0
storei t2, Callee + PayloadOffset[t3]
- storei t0, ScopeChain + PayloadOffset[t3]
- loadi 8 - 24[PC], t2
+ loadi 12[PC], t2
storei PC, ArgumentCount + TagOffset[cfr]
- storep cfr, CallerFrame[t3]
storei t2, ArgumentCount + PayloadOffset[t3]
storei CellTag, Callee + TagOffset[t3]
- storei CellTag, ScopeChain + TagOffset[t3]
- move t3, cfr
- callTargetFunction(t1)
+ move t3, sp
+ prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+ callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
.opCallSlow:
- slowPathForCall(6, slowPath)
+ slowPathForCall(slowPath, prepareCall)
end
-
-_llint_op_tear_off_activation:
- traceExecution()
- loadi 4[PC], t0
- bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated
- callSlowPath(_llint_slow_path_tear_off_activation)
-.opTearOffActivationNotCreated:
- dispatch(2)
-
-
-_llint_op_tear_off_arguments:
- traceExecution()
- loadi 4[PC], t0
- subi 1, t0 # Get the unmodifiedArgumentsRegister
- bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated
- callSlowPath(_llint_slow_path_tear_off_arguments)
-.opTearOffArgumentsNotCreated:
- dispatch(3)
-
-
_llint_op_ret:
traceExecution()
checkSwitchToJITForEpilogue()
@@ -1639,175 +1905,62 @@ _llint_op_ret:
doReturn()
-_llint_op_call_put_result:
- loadi 4[PC], t2
- loadi 8[PC], t3
- storei t1, TagOffset[cfr, t2, 8]
- storei t0, PayloadOffset[cfr, t2, 8]
- valueProfile(t1, t0, t3)
- traceExecution() # Needs to be here because it would clobber t1, t0
- dispatch(3)
-
-
-_llint_op_ret_object_or_this:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadi 4[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- bineq t1, CellTag, .opRetObjectOrThisNotObject
- loadp JSCell::m_structure[t0], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
- doReturn()
-
-.opRetObjectOrThisNotObject:
- loadi 8[PC], t2
- loadConstantOrVariable(t2, t1, t0)
- doReturn()
-
-
_llint_op_to_primitive:
traceExecution()
loadi 8[PC], t2
loadi 4[PC], t3
loadConstantOrVariable(t2, t1, t0)
bineq t1, CellTag, .opToPrimitiveIsImm
- loadp JSCell::m_structure[t0], t2
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+ bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
storei t1, TagOffset[cfr, t3, 8]
storei t0, PayloadOffset[cfr, t3, 8]
dispatch(3)
.opToPrimitiveSlowCase:
- callSlowPath(_llint_slow_path_to_primitive)
+ callSlowPath(_slow_path_to_primitive)
dispatch(3)
-_llint_op_next_pname:
- traceExecution()
- loadi 12[PC], t1
- loadi 16[PC], t2
- loadi PayloadOffset[cfr, t1, 8], t0
- bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
- loadi 20[PC], t2
- loadi PayloadOffset[cfr, t2, 8], t2
- loadp JSPropertyNameIterator::m_jsStrings[t2], t3
- loadi PayloadOffset[t3, t0, 8], t3
- addi 1, t0
- storei t0, PayloadOffset[cfr, t1, 8]
- loadi 4[PC], t1
- storei CellTag, TagOffset[cfr, t1, 8]
- storei t3, PayloadOffset[cfr, t1, 8]
- loadi 8[PC], t3
- loadi PayloadOffset[cfr, t3, 8], t3
- loadp JSCell::m_structure[t3], t1
- bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
- loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
- loadp StructureChain::m_vector[t0], t0
- btpz [t0], .opNextPnameTarget
-.opNextPnameCheckPrototypeLoop:
- bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow
- loadp Structure::m_prototype + PayloadOffset[t1], t2
- loadp JSCell::m_structure[t2], t1
- bpneq t1, [t0], .opNextPnameSlow
- addp 4, t0
- btpnz [t0], .opNextPnameCheckPrototypeLoop
-.opNextPnameTarget:
- dispatchBranch(24[PC])
-
-.opNextPnameEnd:
- dispatch(7)
-
-.opNextPnameSlow:
- callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
- dispatch(0)
-
-
_llint_op_catch:
# This is where we end up from the JIT's throw trampoline (because the
# machine code return address will be set to _llint_op_catch), and from
# the interpreter's throw trampoline (see _llint_throw_trampoline).
- # The JIT throwing protocol calls for the cfr to be in t0. The throwing
- # code must have known that we were throwing to the interpreter, and have
- # set VM::targetInterpreterPCForThrow.
- move t0, cfr
- loadp JITStackFrame::vm[sp], t3
- loadi VM::targetInterpreterPCForThrow[t3], PC
- loadi VM::exception + PayloadOffset[t3], t0
- loadi VM::exception + TagOffset[t3], t1
- storei 0, VM::exception + PayloadOffset[t3]
- storei EmptyValueTag, VM::exception + TagOffset[t3]
- loadi 4[PC], t2
- storei t0, PayloadOffset[cfr, t2, 8]
- storei t1, TagOffset[cfr, t2, 8]
- traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
- dispatch(2)
-
-
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
- loadp ScopeChain + PayloadOffset[cfr], t0
- loadi deBruijinIndexOperand, t2
-
- btiz t2, .done
+ # The throwing code must have known that we were throwing to the interpreter,
+ # and have set VM::targetInterpreterPCForThrow.
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+ restoreStackPointerAfterCall()
- loadp CodeBlock[cfr], t1
- bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
- btbz CodeBlock::m_needsActivation[t1], .loop
-
- loadi CodeBlock::m_activationRegister[t1], t1
-
- # Need to conditionally skip over one scope.
- bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
-.noActivation:
- subi 1, t2
+ loadi VM::targetInterpreterPCForThrow[t3], PC
- btiz t2, .done
-.loop:
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
- subi 1, t2
- btinz t2, .loop
+ callSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
+ bpeq r1, 0, .isCatchableException
+ jmp _llint_throw_from_slow_path_trampoline
-.done:
+.isCatchableException:
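+    # Store the Exception* into the first catch operand, then unwrap its value into the
+    # second operand.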
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
-end
+ loadi VM::m_exception[t3], t0
+ storei 0, VM::m_exception[t3]
+ loadi 4[PC], t2
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei CellTag, TagOffset[cfr, t2, 8]
-_llint_op_get_scoped_var:
- traceExecution()
- # Operands are as follows:
- # 4[PC] Destination for the load.
- # 8[PC] Index of register in the scope.
- # 12[PC] De Bruijin index.
- getDeBruijnScope(12[PC], macro (scope, scratch) end)
- loadi 4[PC], t1
+ loadi Exception::m_value + TagOffset[t0], t1
+ loadi Exception::m_value + PayloadOffset[t0], t0
loadi 8[PC], t2
- loadp JSVariableObject::m_registers[t0], t0
- loadi TagOffset[t0, t2, 8], t3
- loadi PayloadOffset[t0, t2, 8], t0
- storei t3, TagOffset[cfr, t1, 8]
- storei t0, PayloadOffset[cfr, t1, 8]
- loadi 16[PC], t1
- valueProfile(t3, t0, t1)
- dispatch(5)
-
+ storei t0, PayloadOffset[cfr, t2, 8]
+ storei t1, TagOffset[cfr, t2, 8]
-_llint_op_put_scoped_var:
- traceExecution()
- getDeBruijnScope(8[PC], macro (scope, scratch) end)
- loadi 12[PC], t1
- loadConstantOrVariable(t1, t3, t2)
- loadi 4[PC], t1
- writeBarrier(t3, t2)
- loadp JSVariableObject::m_registers[t0], t0
- storei t3, TagOffset[t0, t1, 8]
- storei t2, PayloadOffset[t0, t1, 8]
- dispatch(4)
+ traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above.
+ dispatch(3)
_llint_op_end:
traceExecution()
@@ -1820,89 +1973,490 @@ _llint_op_end:
_llint_throw_from_slow_path_trampoline:
+ callSlowPath(_llint_slow_path_handle_exception)
+
    # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), the
    # throw target is not necessarily interpreted code, so we come here.
# This essentially emulates the JIT's throwing protocol.
- loadp JITStackFrame::vm[sp], t1
- loadp VM::callFrameForThrow[t1], t0
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
+ copyCalleeSavesToVMCalleeSavesBuffer(t1, t2)
jmp VM::targetMachinePCForThrow[t1]
_llint_throw_during_call_trampoline:
preserveReturnAddressAfterCall(t2)
- loadp JITStackFrame::vm[sp], t1
- loadp VM::callFrameForThrow[t1], t0
- jmp VM::targetMachinePCForThrow[t1]
+ jmp _llint_throw_from_slow_path_trampoline
macro nativeCallTrampoline(executableOffsetToFunction)
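+    # Host call: publish cfr as VM::topCallFrame, pass the ExecState as the single
+    # argument, call through the executable, then check VM::m_exception on return.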
+
+ functionPrologue()
storep 0, CodeBlock[cfr]
- loadp CallerFrame[cfr], t0
- loadi ScopeChain + PayloadOffset[t0], t1
- storei CellTag, ScopeChain + TagOffset[cfr]
- storei t1, ScopeChain + PayloadOffset[cfr]
- if X86
- loadp JITStackFrame::vm + 4[sp], t3 # Additional offset for return address
+ loadi Callee + PayloadOffset[cfr], t1
+    # Callee is still in t1 for code below
+ if X86 or X86_WIN
+ subp 8, sp # align stack pointer
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t3
storep cfr, VM::topCallFrame[t3]
- peek 0, t1
- storep t1, ReturnPC[cfr]
- move cfr, t2 # t2 = ecx
- subp 16 - 4, sp
+ move cfr, a0 # a0 = ecx
+ storep a0, [sp]
loadi Callee + PayloadOffset[cfr], t1
loadp JSFunction::m_executable[t1], t1
- move t0, cfr
+ checkStackPointerAlignment(t3, 0xdead0001)
call executableOffsetToFunction[t1]
- addp 16 - 4, sp
- loadp JITStackFrame::vm + 4[sp], t3
- elsif ARM or ARMv7 or ARMv7_TRADITIONAL
- loadp JITStackFrame::vm[sp], t3
- storep cfr, VM::topCallFrame[t3]
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ addp 8, sp
+ elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP or MIPS or SH4
+ subp 8, sp # align stack pointer
+ # t1 already contains the Callee.
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
+ storep cfr, VM::topCallFrame[t1]
+ move cfr, a0
loadi Callee + PayloadOffset[cfr], t1
loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- call executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::vm[sp], t3
- elsif MIPS or SH4
- loadp JITStackFrame::vm[sp], t3
- storep cfr, VM::topCallFrame[t3]
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadi Callee + PayloadOffset[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- move t0, a0
- call executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::vm[sp], t3
- elsif C_LOOP
- loadp JITStackFrame::vm[sp], t3
- storep cfr, VM::topCallFrame[t3]
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadi Callee + PayloadOffset[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- cloopCallNative executableOffsetToFunction[t1]
- restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::vm[sp], t3
- else
+ checkStackPointerAlignment(t3, 0xdead0001)
+ if C_LOOP
+ cloopCallNative executableOffsetToFunction[t1]
+ else
+ call executableOffsetToFunction[t1]
+ end
+ loadp Callee + PayloadOffset[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ addp 8, sp
+ else
error
end
- bineq VM::exception + TagOffset[t3], EmptyValueTag, .exception
+
+ functionEpilogue()
+ btinz VM::m_exception[t3], .handleException
ret
-.exception:
- preserveReturnAddressAfterCall(t1) # This is really only needed on X86
- loadi ArgumentCount + TagOffset[cfr], PC
- callSlowPath(_llint_throw_from_native_call)
+
+.handleException:
+ storep cfr, VM::topCallFrame[t3]
+ restoreStackPointerAfterCall()
jmp _llint_throw_from_slow_path_trampoline
end
+
+macro getConstantScope(dst)
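+    # Operand 6 holds the resolved scope as a constant; dst is the operand index of the
+    # destination register.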
+ loadpFromInstruction(6, t0)
+ loadisFromInstruction(dst, t1)
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+end
+
+macro varInjectionCheck(slowPath)
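+    # Take the slow path if a var has been injected into the scope chain (e.g. by eval),
+    # as tracked by the global object's var injection watchpoint.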
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
+ bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
+end
+
+macro resolveScope()
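+    # Walk JSScope::m_next from the base scope (operand 2) for the number of hops given
+    # by operand 5, then store the resulting scope cell into the destination (operand 1).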
+ loadp CodeBlock[cfr], t0
+ loadisFromInstruction(5, t2)
+
+ loadisFromInstruction(2, t0)
+ loadp PayloadOffset[cfr, t0, 8], t0
+ btiz t2, .resolveScopeLoopEnd
+
+.resolveScopeLoop:
+ loadp JSScope::m_next[t0], t0
+ subi 1, t2
+ btinz t2, .resolveScopeLoop
+
+.resolveScopeLoopEnd:
+ loadisFromInstruction(1, t1)
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+end
+
+
+_llint_op_resolve_scope:
+ traceExecution()
+ loadisFromInstruction(4, t0)
+
+#rGlobalProperty:
+ bineq t0, GlobalProperty, .rGlobalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalVar:
+ bineq t0, GlobalVar, .rGlobalLexicalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .rClosureVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rClosureVar:
+ bineq t0, ClosureVar, .rModuleVar
+ resolveScope()
+ dispatch(7)
+
+.rModuleVar:
+ bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalPropertyWithVarInjectionChecks:
+ bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalVarWithVarInjectionChecks:
+ bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
+
+.rClosureVarWithVarInjectionChecks:
+ bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
+ varInjectionCheck(.rDynamic)
+ resolveScope()
+ dispatch(7)
+
+.rDynamic:
+ callSlowPath(_slow_path_resolve_scope)
+ dispatch(7)
+
+
+macro loadWithStructureCheck(operand, slowPath)
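+    # Load the scope object from the given operand and take the slow path unless its
+    # StructureID matches the structure cached in operand 5.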
+ loadisFromInstruction(operand, t0)
+ loadp PayloadOffset[cfr, t0, 8], t0
+ loadpFromInstruction(5, t1)
+ bpneq JSCell::m_structureID[t0], t1, slowPath
+end
+
+macro getProperty()
+ loadisFromInstruction(6, t3)
+ loadPropertyAtVariableOffset(t3, t0, t1, t2)
+ valueProfile(t1, t2, 28, t0)
+ loadisFromInstruction(1, t0)
+ storei t1, TagOffset[cfr, t0, 8]
+ storei t2, PayloadOffset[cfr, t0, 8]
+end
+
+macro getGlobalVar(tdzCheckIfNecessary)
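+    # Operand 6 points directly at the variable's slot; for lexical variables the
+    # tdzCheckIfNecessary callback branches to the slow path on the empty value (TDZ).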
+ loadpFromInstruction(6, t0)
+ loadp TagOffset[t0], t1
+ loadp PayloadOffset[t0], t2
+ tdzCheckIfNecessary(t1)
+ valueProfile(t1, t2, 28, t0)
+ loadisFromInstruction(1, t0)
+ storei t1, TagOffset[cfr, t0, 8]
+ storei t2, PayloadOffset[cfr, t0, 8]
+end
+
+macro getClosureVar()
+ loadisFromInstruction(6, t3)
+ loadp JSEnvironmentRecord_variables + TagOffset[t0, t3, 8], t1
+ loadp JSEnvironmentRecord_variables + PayloadOffset[t0, t3, 8], t2
+ valueProfile(t1, t2, 28, t0)
+ loadisFromInstruction(1, t0)
+ storei t1, TagOffset[cfr, t0, 8]
+ storei t2, PayloadOffset[cfr, t0, 8]
+end
+
+_llint_op_get_from_scope:
+ traceExecution()
+ loadisFromInstruction(4, t0)
+ andi ResolveTypeMask, t0
+
+#gGlobalProperty:
+ bineq t0, GlobalProperty, .gGlobalVar
+ loadWithStructureCheck(2, .gDynamic)
+ getProperty()
+ dispatch(8)
+
+.gGlobalVar:
+ bineq t0, GlobalVar, .gGlobalLexicalVar
+ getGlobalVar(macro(t) end)
+ dispatch(8)
+
+.gGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .gClosureVar
+ getGlobalVar(
+ macro(tag)
+ bieq tag, EmptyValueTag, .gDynamic
+ end)
+ dispatch(8)
+
+.gClosureVar:
+ bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
+ loadVariable(2, t2, t1, t0)
+ getClosureVar()
+ dispatch(8)
+
+.gGlobalPropertyWithVarInjectionChecks:
+ bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
+ loadWithStructureCheck(2, .gDynamic)
+ getProperty()
+ dispatch(8)
+
+.gGlobalVarWithVarInjectionChecks:
+ bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(macro(t) end)
+ dispatch(8)
+
+.gGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(
+ macro(tag)
+ bieq tag, EmptyValueTag, .gDynamic
+ end)
+ dispatch(8)
+
+.gClosureVarWithVarInjectionChecks:
+ bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
+ varInjectionCheck(.gDynamic)
+ loadVariable(2, t2, t1, t0)
+ getClosureVar()
+ dispatch(8)
+
+.gDynamic:
+ callSlowPath(_llint_slow_path_get_from_scope)
+ dispatch(8)
+
+
+macro putProperty()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadisFromInstruction(6, t1)
+ storePropertyAtVariableOffset(t1, t0, t2, t3)
+end
+
+macro putGlobalVariable()
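+    # Let notifyWrite take the slow path if the variable's watchpoint set still needs to
+    # be fired, then store through the slot pointer cached in operand 6.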
+ loadisFromInstruction(3, t0)
+ loadConstantOrVariable(t0, t1, t2)
+ loadpFromInstruction(5, t3)
+ notifyWrite(t3, .pDynamic)
+ loadpFromInstruction(6, t0)
+ storei t1, TagOffset[t0]
+ storei t2, PayloadOffset[t0]
+end
+
+macro putClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadisFromInstruction(6, t1)
+ storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+ storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
+end
+
+macro putLocalClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadpFromInstruction(5, t5)
+ btpz t5, .noVariableWatchpointSet
+ notifyWrite(t5, .pDynamic)
+.noVariableWatchpointSet:
+ loadisFromInstruction(6, t1)
+ storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8]
+ storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8]
+end
+
+
+_llint_op_put_to_scope:
+ traceExecution()
+ loadisFromInstruction(4, t0)
+ andi ResolveTypeMask, t0
+
+#pLocalClosureVar:
+ bineq t0, LocalClosureVar, .pGlobalProperty
+ writeBarrierOnOperands(1, 3)
+ loadVariable(1, t2, t1, t0)
+ putLocalClosureVar()
+ dispatch(7)
+
+.pGlobalProperty:
+ bineq t0, GlobalProperty, .pGlobalVar
+ writeBarrierOnOperands(1, 3)
+ loadWithStructureCheck(1, .pDynamic)
+ putProperty()
+ dispatch(7)
+
+.pGlobalVar:
+ bineq t0, GlobalVar, .pGlobalLexicalVar
+ writeBarrierOnGlobalObject(3)
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .pClosureVar
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ putGlobalVariable()
+ dispatch(7)
+
+.pClosureVar:
+ bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
+ writeBarrierOnOperands(1, 3)
+ loadVariable(1, t2, t1, t0)
+ putClosureVar()
+ dispatch(7)
+
+.pGlobalPropertyWithVarInjectionChecks:
+ bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
+ writeBarrierOnOperands(1, 3)
+ loadWithStructureCheck(1, .pDynamic)
+ putProperty()
+ dispatch(7)
+
+.pGlobalVarWithVarInjectionChecks:
+ bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks
+ writeBarrierOnGlobalObject(3)
+ varInjectionCheck(.pDynamic)
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ varInjectionCheck(.pDynamic)
+ putGlobalVariable()
+ dispatch(7)
+
+.pClosureVarWithVarInjectionChecks:
+ bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar
+ writeBarrierOnOperands(1, 3)
+ varInjectionCheck(.pDynamic)
+ loadVariable(1, t2, t1, t0)
+ putClosureVar()
+ dispatch(7)
+
+.pModuleVar:
+ bineq t0, ModuleVar, .pDynamic
+ callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error)
+ dispatch(7)
+
+.pDynamic:
+ callSlowPath(_llint_slow_path_put_to_scope)
+ dispatch(7)
+
+
+_llint_op_get_from_arguments:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadi 12[PC], t1
+ loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2
+ loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3
+ loadisFromInstruction(1, t1)
+ valueProfile(t2, t3, 16, t0)
+ storei t2, TagOffset[cfr, t1, 8]
+ storei t3, PayloadOffset[cfr, t1, 8]
+ dispatch(5)
+
+
+_llint_op_put_to_arguments:
+ traceExecution()
+ writeBarrierOnOperands(1, 3)
+ loadisFromInstruction(1, t0)
+ loadi PayloadOffset[cfr, t0, 8], t0
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2, t3)
+ loadi 8[PC], t1
+ storei t2, DirectArguments_storage + TagOffset[t0, t1, 8]
+ storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8]
+ dispatch(4)
+
+
+_llint_op_get_parent_scope:
+ traceExecution()
+ loadisFromInstruction(2, t0)
+ loadp PayloadOffset[cfr, t0, 8], t0
+ loadp JSScope::m_next[t0], t0
+ loadisFromInstruction(1, t1)
+ storei CellTag, TagOffset[cfr, t1, 8]
+ storei t0, PayloadOffset[cfr, t1, 8]
+ dispatch(3)
+
+
+_llint_op_profile_type:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ # t1 is holding the pointer to the typeProfilerLog.
+ loadp VM::m_typeProfilerLog[t1], t1
+
+ # t0 is holding the payload, t5 is holding the tag.
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t2, t5, t0)
+
+ bieq t5, EmptyValueTag, .opProfileTypeDone
+
+ # t2 is holding the pointer to the current log entry.
+ loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
+
+ # Store the JSValue onto the log entry.
+ storei t5, TypeProfilerLog::LogEntry::value + TagOffset[t2]
+ storei t0, TypeProfilerLog::LogEntry::value + PayloadOffset[t2]
+
+ # Store the TypeLocation onto the log entry.
+ loadpFromInstruction(2, t3)
+ storep t3, TypeProfilerLog::LogEntry::location[t2]
+
+ bieq t5, CellTag, .opProfileTypeIsCell
+ storei 0, TypeProfilerLog::LogEntry::structureID[t2]
+ jmp .opProfileTypeSkipIsCell
+.opProfileTypeIsCell:
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, TypeProfilerLog::LogEntry::structureID[t2]
+.opProfileTypeSkipIsCell:
+
+ # Increment the current log entry.
+ addp sizeof TypeProfilerLog::LogEntry, t2
+ storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
+
+ loadp TypeProfilerLog::m_logEndPtr[t1], t1
+ bpneq t2, t1, .opProfileTypeDone
+ callSlowPath(_slow_path_profile_type_clear_log)
+
+.opProfileTypeDone:
+ dispatch(6)
+
+
+_llint_op_profile_control_flow:
+ traceExecution()
+ loadpFromInstruction(1, t0)
+ loadi BasicBlockLocation::m_executionCount[t0], t1
+ addi 1, t1
+ bieq t1, 0, .done # We overflowed.
+ storei t1, BasicBlockLocation::m_executionCount[t0]
+.done:
+ dispatch(2)
+
+
+_llint_op_get_rest_length:
+ traceExecution()
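+    # rest length = max(argument count - 1 - number of parameters to skip (operand 2), 0)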
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ loadisFromInstruction(2, t1)
+ bilteq t0, t1, .storeZero
+ subi t1, t0
+ jmp .finish
+.storeZero:
+ move 0, t0
+.finish:
+ loadisFromInstruction(1, t1)
+ storei t0, PayloadOffset[cfr, t1, 8]
+ storei Int32Tag, TagOffset[cfr, t1, 8]
+ dispatch(3)
diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
index 741963573..85173bc82 100644
--- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
+++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm
@@ -1,4 +1,4 @@
-# Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
+# Copyright (C) 2011-2016 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -22,16 +22,6 @@
# THE POSSIBILITY OF SUCH DAMAGE.
-# Some value representation constants.
-const TagBitTypeOther = 0x2
-const TagBitBool = 0x4
-const TagBitUndefined = 0x8
-const ValueEmpty = 0x0
-const ValueFalse = TagBitTypeOther | TagBitBool
-const ValueTrue = TagBitTypeOther | TagBitBool | 1
-const ValueUndefined = TagBitTypeOther | TagBitUndefined
-const ValueNull = TagBitTypeOther
-
# Utilities.
macro jumpToInstruction()
jmp [PB, PC, 8]
@@ -55,84 +45,310 @@ macro dispatchAfterCall()
loadi ArgumentCount + TagOffset[cfr], PC
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
- jumpToInstruction()
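+    # The call's return value arrives in r0: store it into the dst operand and
+    # value-profile it before dispatching past the call opcode.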
+ loadisFromInstruction(1, t1)
+ storeq r0, [cfr, t1, 8]
+ valueProfile(r0, (CallOpCodeSize - 1), t3)
+ dispatch(CallOpCodeSize)
end
-macro cCall2(function, arg1, arg2)
- if X86_64
- move arg1, t5
- move arg2, t4
+macro cCall2(function)
+ checkStackPointerAlignment(t4, 0xbad0c002)
+ if X86_64 or ARM64
call function
+ elsif X86_64_WIN
+ # Note: this implementation is only correct if the return type size is > 8 bytes.
+    # See macro cCall2Void for an implementation when the return type is <= 8 bytes.
+ # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+    # On entry, rcx (a0) should contain a pointer to this stack space. The other parameters are shifted to the right:
+ # rdx (a1) should contain the first argument, and r8 (a2) should contain the second argument.
+    # On return, rax contains a pointer to this stack value, and we then need to copy the 16-byte return value into rax (r0) and rdx (r1)
+ # since the return value is expected to be split between the two.
+ # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx
+ move a1, a2
+ move a0, a1
+ subp 48, sp
+ move sp, a0
+ addp 32, a0
+ call function
+ addp 48, sp
+ move 8[r0], r1
+ move [r0], r0
elsif C_LOOP
- cloopCallSlowPath function, arg1, arg2
+ cloopCallSlowPath function, a0, a1
else
error
end
end
+macro cCall2Void(function)
+ if C_LOOP
+ cloopCallSlowPathVoid function, a0, a1
+ elsif X86_64_WIN
+ # Note: we cannot use the cCall2 macro for Win64 in this case,
+    # as the Win64 cCall2 implementation is only correct when the return type size is > 8 bytes.
+ # On Win64, rcx and rdx are used for passing the first two parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ subp 32, sp
+ call function
+ addp 32, sp
+ else
+ cCall2(function)
+ end
+end
+
# This barely works. arg3 and arg4 should probably be immediates.
-macro cCall4(function, arg1, arg2, arg3, arg4)
- if X86_64
- move arg1, t5
- move arg2, t4
- move arg3, t1
- move arg4, t2
+macro cCall4(function)
+ checkStackPointerAlignment(t4, 0xbad0c004)
+ if X86_64 or ARM64
call function
- elsif C_LOOP
- error
+ elsif X86_64_WIN
+ # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters.
+ # We also need to make room on the stack for all four parameter registers.
+ # See http://msdn.microsoft.com/en-us/library/ms235286.aspx
+ subp 64, sp
+ call function
+ addp 64, sp
else
error
end
end
+macro doVMEntry(makeCall)
+ functionPrologue()
+ pushCalleeSaves()
+
+ const entry = a0
+ const vm = a1
+ const protoCallFrame = a2
+
+ vmEntryRecord(cfr, sp)
+
+ checkStackPointerAlignment(t4, 0xbad0dc01)
+
+ storep vm, VMEntryRecord::m_vm[sp]
+ loadp VM::topCallFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopCallFrame[sp]
+ loadp VM::topVMEntryFrame[vm], t4
+ storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp]
+
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4
+ addp CallFrameHeaderSlots, t4, t4
+ lshiftp 3, t4
+ subp sp, t4, t3
+
+ # Ensure that we have enough additional stack capacity for the incoming args,
+ # and the frame for the JS code we're executing. We need to do this check
+ # before we start copying the args from the protoCallFrame below.
+ bpaeq t3, VM::m_jsStackLimit[vm], .stackHeightOK
+
+ if C_LOOP
+ move entry, t4
+ move vm, t5
+ cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3
+ bpeq t0, 0, .stackCheckFailed
+ move t4, entry
+ move t5, vm
+ jmp .stackHeightOK
+
+.stackCheckFailed:
+ move t4, entry
+ move t5, vm
+ end
+
+ move vm, a0
+ move protoCallFrame, a1
+ cCall2(_llint_throw_stack_overflow_error)
+
+ vmEntryRecord(cfr, t4)
+
+ loadp VMEntryRecord::m_vm[t4], vm
+ loadp VMEntryRecord::m_prevTopCallFrame[t4], extraTempReg
+ storep extraTempReg, VM::topCallFrame[vm]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], extraTempReg
+ storep extraTempReg, VM::topVMEntryFrame[vm]
+
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+.stackHeightOK:
+ move t3, sp
+ move 4, t3
+
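+    # Copy the first 4 ProtoCallFrame registers into the new frame's header, starting at
+    # the CodeBlock slot.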
+.copyHeaderLoop:
+ subi 1, t3
+ loadq [protoCallFrame, t3, 8], extraTempReg
+ storeq extraTempReg, CodeBlock[sp, t3, 8]
+ btinz t3, .copyHeaderLoop
+
+ loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4
+ subi 1, t4
+ loadi ProtoCallFrame::paddedArgCount[protoCallFrame], extraTempReg
+ subi 1, extraTempReg
+
+ bieq t4, extraTempReg, .copyArgs
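+    # Fill the padding slots between the actual and padded argument counts with undefined.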
+ move ValueUndefined, t3
+.fillExtraArgsLoop:
+ subi 1, extraTempReg
+ storeq t3, ThisArgumentOffset + 8[sp, extraTempReg, 8]
+ bineq t4, extraTempReg, .fillExtraArgsLoop
+
+.copyArgs:
+ loadp ProtoCallFrame::args[protoCallFrame], t3
+
+.copyArgsLoop:
+ btiz t4, .copyArgsDone
+ subi 1, t4
+ loadq [t3, t4, 8], extraTempReg
+ storeq extraTempReg, ThisArgumentOffset + 8[sp, t4, 8]
+ jmp .copyArgsLoop
+
+.copyArgsDone:
+ if ARM64
+ move sp, t4
+ storep t4, VM::topCallFrame[vm]
+ else
+ storep sp, VM::topCallFrame[vm]
+ end
+ storep cfr, VM::topVMEntryFrame[vm]
+
+ checkStackPointerAlignment(extraTempReg, 0xbad0dc02)
+
+ makeCall(entry, t3)
+
+ # We may have just made a call into a JS function, so we can't rely on sp
+ # for anything but the fact that our own locals (ie the VMEntryRecord) are
+ # not below it. It also still has to be aligned, though.
+ checkStackPointerAlignment(t2, 0xbad0dc03)
+
+ vmEntryRecord(cfr, t4)
+
+ loadp VMEntryRecord::m_vm[t4], vm
+ loadp VMEntryRecord::m_prevTopCallFrame[t4], t2
+ storep t2, VM::topCallFrame[vm]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], t2
+ storep t2, VM::topVMEntryFrame[vm]
+
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
+
+ ret
+end
+
+
+macro makeJavaScriptCall(entry, temp)
+ addp 16, sp
+ if C_LOOP
+ cloopCallJSFunction entry
+ else
+ call entry
+ end
+ subp 16, sp
+end
+
+
+macro makeHostFunctionCall(entry, temp)
+ move entry, temp
+ storep cfr, [sp]
+ move sp, a0
+ if C_LOOP
+ storep lr, 8[sp]
+ cloopCallNative temp
+ elsif X86_64_WIN
+ # We need to allocate 32 bytes on the stack for the shadow space.
+ subp 32, sp
+ call temp
+ addp 32, sp
+ else
+ call temp
+ end
+end
+
+
+_handleUncaughtException:
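+    # An exception escaped all JS frames: unwind to the VM entry frame, restore the
+    # previous topCallFrame / topVMEntryFrame, and return to the caller of doVMEntry.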
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+
+ loadp CallerFrame[cfr], cfr
+ vmEntryRecord(cfr, t2)
+
+ loadp VMEntryRecord::m_vm[t2], t3
+ loadp VMEntryRecord::m_prevTopCallFrame[t2], extraTempReg
+ storep extraTempReg, VM::topCallFrame[t3]
+ loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], extraTempReg
+ storep extraTempReg, VM::topVMEntryFrame[t3]
+
+ subp cfr, CalleeRegisterSaveSize, sp
+
+ popCalleeSaves()
+ functionEpilogue()
+ ret
+
+
macro prepareStateForCCall()
leap [PB, PC, 8], PC
- move PB, t3
end
macro restoreStateAfterCCall()
- move t0, PC
- move t1, cfr
- move t3, PB
+ move r0, PC
subp PB, PC
rshiftp 3, PC
end
macro callSlowPath(slowPath)
prepareStateForCCall()
- cCall2(slowPath, cfr, PC)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
restoreStateAfterCCall()
end
macro traceOperand(fromWhere, operand)
prepareStateForCCall()
- cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand)
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_operand)
restoreStateAfterCCall()
end
macro traceValue(fromWhere, operand)
prepareStateForCCall()
- cCall4(_llint_trace_value, cfr, PC, fromWhere, operand)
+ move fromWhere, a2
+ move operand, a3
+ move cfr, a0
+ move PC, a1
+ cCall4(_llint_trace_value)
restoreStateAfterCCall()
end
# Call a slow path for call opcodes.
-macro callCallSlowPath(advance, slowPath, action)
- addi advance, PC, t0
- storei t0, ArgumentCount + TagOffset[cfr]
+macro callCallSlowPath(slowPath, action)
+ storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(slowPath, cfr, PC)
- move t1, cfr
- action(t0)
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath)
+ action(r0, r1)
end
macro callWatchdogTimerHandler(throwHandler)
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC)
- move t1, cfr
- btpnz t0, throwHandler
- move t3, PB
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_slow_path_handle_watchdog_timer)
+ btpnz r0, throwHandler
loadi ArgumentCount + TagOffset[cfr], PC
end
@@ -142,16 +358,22 @@ macro checkSwitchToJITForLoop()
macro()
storei PC, ArgumentCount + TagOffset[cfr]
prepareStateForCCall()
- cCall2(_llint_loop_osr, cfr, PC)
- move t1, cfr
- btpz t0, .recover
- jmp t0
+ move cfr, a0
+ move PC, a1
+ cCall2(_llint_loop_osr)
+ btpz r0, .recover
+ move r1, sp
+ jmp r0
.recover:
- move t3, PB
loadi ArgumentCount + TagOffset[cfr], PC
end)
end
+macro loadVariable(operand, value)
+ loadisFromInstruction(operand, value)
+ loadq [cfr, value, 8], value
+end
+
# Index and value must be different registers. Index may be clobbered.
macro loadConstantOrVariable(index, value)
bpgteq index, FirstConstantRegisterIndex, .constant
@@ -175,30 +397,153 @@ macro loadConstantOrVariableCell(index, value, slow)
btqnz value, tagMask, slow
end
-macro writeBarrier(value)
- # Nothing to do, since we don't have a generational or incremental collector.
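+# Generational write barrier helpers: storing a new value into a cell requires a
+# barrier unless the cell is already remembered or still in Eden. These macros make
+# that check inline and only call _llint_write_barrier_slow when needed.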
+macro writeBarrierOnOperand(cellOperand)
+ loadisFromInstruction(cellOperand, t1)
+ loadConstantOrVariableCell(t1, t2, .writeBarrierDone)
+ skipIfIsRememberedOrInEden(t2, t1, t3,
+ macro(cellState)
+ btbnz cellState, .writeBarrierDone
+ push PB, PC
+ move t2, a1 # t2 can be a0 (not on 64 bits, but better safe than sorry)
+ move cfr, a0
+ cCall2Void(_llint_write_barrier_slow)
+ pop PC, PB
+ end
+ )
+.writeBarrierDone:
end
-macro valueProfile(value, profile)
- if VALUE_PROFILER
- storeq value, ValueProfile::m_buckets[profile]
- end
+macro writeBarrierOnOperands(cellOperand, valueOperand)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+ btpz t0, .writeBarrierDone
+
+ writeBarrierOnOperand(cellOperand)
+.writeBarrierDone:
end
+macro writeBarrierOnGlobal(valueOperand, loadHelper)
+ loadisFromInstruction(valueOperand, t1)
+ loadConstantOrVariableCell(t1, t0, .writeBarrierDone)
+ btpz t0, .writeBarrierDone
+
+ loadHelper(t3)
+ skipIfIsRememberedOrInEden(t3, t1, t2,
+ macro(gcData)
+ btbnz gcData, .writeBarrierDone
+ push PB, PC
+ move cfr, a0
+ move t3, a1
+ cCall2Void(_llint_write_barrier_slow)
+ pop PC, PB
+ end
+ )
+.writeBarrierDone:
+end
+
+macro writeBarrierOnGlobalObject(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
+
+macro writeBarrierOnGlobalLexicalEnvironment(valueOperand)
+ writeBarrierOnGlobal(valueOperand,
+ macro(registerToStoreGlobal)
+ loadp CodeBlock[cfr], registerToStoreGlobal
+ loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal
+ loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal
+ end)
+end
+
+macro valueProfile(value, operand, scratch)
+ loadpFromInstruction(operand, scratch)
+ storeq value, ValueProfile::m_buckets[scratch]
+end
+
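+# Cells reference their Structure by a 32-bit ID; these helpers map an ID back to
+# the Structure* through the VM's StructureIDTable.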
+macro structureIDToStructureWithScratch(structureIDThenStructure, scratch)
+ loadp CodeBlock[cfr], scratch
+ loadp CodeBlock::m_vm[scratch], scratch
+ loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch
+ loadp [scratch, structureIDThenStructure, 8], structureIDThenStructure
+end
+
+macro loadStructureWithScratch(cell, structure, scratch)
+ loadi JSCell::m_structureID[cell], structure
+ structureIDToStructureWithScratch(structure, scratch)
+end
+
+macro loadStructureAndClobberFirstArg(cell, structure)
+ loadi JSCell::m_structureID[cell], structure
+ loadp CodeBlock[cfr], cell
+ loadp CodeBlock::m_vm[cell], cell
+ loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[cell], cell
+ loadp [cell, structure, 8], structure
+end
+
+macro storeStructureWithTypeInfo(cell, structure, scratch)
+ loadq Structure::m_blob + StructureIDBlob::u.doubleWord[structure], scratch
+ storeq scratch, JSCell::m_structureID[cell]
+end
# Entrypoints into the interpreter.
# Expects that CodeBlock is in t1, which is what prologue() leaves behind.
-macro functionArityCheck(doneLabel, slow_path)
+macro functionArityCheck(doneLabel, slowPath)
loadi PayloadOffset + ArgumentCount[cfr], t0
biaeq t0, CodeBlock::m_numParameters[t1], doneLabel
prepareStateForCCall()
- cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error
- move t1, cfr
- btiz t0, .continue
- loadp JITStackFrame::vm[sp], t1
- loadp VM::callFrameForThrow[t1], t0
- jmp VM::targetMachinePCForThrow[t1]
+ move cfr, a0
+ move PC, a1
+ cCall2(slowPath) # This slowPath has the protocol: r0 = 0 => no error, r0 != 0 => error
+ btiz r0, .noError
+ move r1, cfr # r1 contains caller frame
+ jmp _llint_throw_from_slow_path_trampoline
+
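+    # r1 points at the slow path's ArityCheckData. If padded stack space is needed,
+    # the frame (including the virtual callee-save registers) is moved by that many
+    # slots and the newly exposed argument slots are filled with undefined.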
+.noError:
+ loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1
+ btiz t1, .continue
+ loadi PayloadOffset + ArgumentCount[cfr], t2
+ addi CallFrameHeaderSlots, t2
+
+ // Check if there are some unaligned slots we can use
+ move t1, t3
+ andi StackAlignmentSlots - 1, t3
+ btiz t3, .noExtraSlot
+ move ValueUndefined, t0
+.fillExtraSlots:
+ storeq t0, [cfr, t2, 8]
+ addi 1, t2
+ bsubinz 1, t3, .fillExtraSlots
+ andi ~(StackAlignmentSlots - 1), t1
+ btiz t1, .continue
+
+.noExtraSlot:
+ // Move frame up t1 slots
+ negq t1
+ move cfr, t3
+ subp CalleeSaveSpaceAsVirtualRegisters * 8, t3
+ addi CalleeSaveSpaceAsVirtualRegisters, t2
+.copyLoop:
+ loadq [t3], t0
+ storeq t0, [t3, t1, 8]
+ addp 8, t3
+ bsubinz 1, t2, .copyLoop
+
+ // Fill new slots with JSUndefined
+ move t1, t2
+ move ValueUndefined, t0
+.fillLoop:
+ storeq t0, [t3, t1, 8]
+ addp 8, t3
+ baddinz 1, t2, .fillLoop
+
+ lshiftp 3, t1
+ addp t1, cfr
+ addp t1, sp
+
.continue:
# Reload CodeBlock and reset PC, since the slow_path clobbered them.
loadp CodeBlock[cfr], t1
@@ -207,45 +552,44 @@ macro functionArityCheck(doneLabel, slow_path)
jmp doneLabel
end
+macro branchIfException(label)
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ btqz VM::m_exception[t3], .noException
+ jmp label
+.noException:
+end
+
# Instruction implementations
-
_llint_op_enter:
traceExecution()
+ checkStackPointerAlignment(t2, 0xdead00e1)
loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock
loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars
+ subq CalleeSaveSpaceAsVirtualRegisters, t2
+ move cfr, t1
+ subq CalleeSaveSpaceAsVirtualRegisters * 8, t1
btiz t2, .opEnterDone
move ValueUndefined, t0
+ negi t2
+ sxi2q t2, t2
.opEnterLoop:
- subi 1, t2
- storeq t0, [cfr, t2, 8]
- btinz t2, .opEnterLoop
+ storeq t0, [t1, t2, 8]
+ addq 1, t2
+ btqnz t2, .opEnterLoop
.opEnterDone:
+ callSlowPath(_slow_path_enter)
dispatch(1)
-_llint_op_create_activation:
- traceExecution()
- loadisFromInstruction(1, t0)
- bqneq [cfr, t0, 8], ValueEmpty, .opCreateActivationDone
- callSlowPath(_llint_slow_path_create_activation)
-.opCreateActivationDone:
- dispatch(2)
-
-
-_llint_op_init_lazy_reg:
+_llint_op_get_scope:
traceExecution()
- loadisFromInstruction(1, t0)
- storeq ValueEmpty, [cfr, t0, 8]
- dispatch(2)
-
-
-_llint_op_create_arguments:
- traceExecution()
- loadisFromInstruction(1, t0)
- bqneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone
- callSlowPath(_llint_slow_path_create_arguments)
-.opCreateArgumentsDone:
+ loadp Callee[cfr], t0
+ loadp JSCallee::m_scope[t0], t0
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
dispatch(2)
@@ -253,43 +597,39 @@ _llint_op_create_this:
traceExecution()
loadisFromInstruction(2, t0)
loadp [cfr, t0, 8], t0
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1
- loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2
+ loadp JSFunction::m_rareData[t0], t3
+ btpz t3, .opCreateThisSlow
+ loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_allocator[t3], t1
+ loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_structure[t3], t2
btpz t1, .opCreateThisSlow
+ loadpFromInstruction(4, t3)
+ bpeq t3, 1, .hasSeenMultipleCallee
+ bpneq t3, t0, .opCreateThisSlow
+.hasSeenMultipleCallee:
allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow)
loadisFromInstruction(1, t1)
storeq t0, [cfr, t1, 8]
- dispatch(4)
+ dispatch(5)
.opCreateThisSlow:
- callSlowPath(_llint_slow_path_create_this)
- dispatch(4)
-
-
-_llint_op_get_callee:
- traceExecution()
- loadisFromInstruction(1, t0)
- loadpFromInstruction(2, t2)
- loadp Callee[cfr], t1
- valueProfile(t1, t2)
- storep t1, [cfr, t0, 8]
- dispatch(3)
+ callSlowPath(_slow_path_create_this)
+ dispatch(5)
-_llint_op_convert_this:
+_llint_op_to_this:
traceExecution()
loadisFromInstruction(1, t0)
loadq [cfr, t0, 8], t0
- btqnz t0, tagMask, .opConvertThisSlow
- loadp JSCell::m_structure[t0], t0
- bbb Structure::m_typeInfo + TypeInfo::m_type[t0], ObjectType, .opConvertThisSlow
- loadpFromInstruction(2, t1)
- valueProfile(t0, t1)
- dispatch(3)
+ btqnz t0, tagMask, .opToThisSlow
+ bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow
+ loadStructureWithScratch(t0, t1, t2)
+ loadpFromInstruction(2, t2)
+ bpneq t1, t2, .opToThisSlow
+ dispatch(4)
-.opConvertThisSlow:
- callSlowPath(_llint_slow_path_convert_this)
- dispatch(3)
+.opToThisSlow:
+ callSlowPath(_slow_path_to_this)
+ dispatch(4)
_llint_op_new_object:
@@ -307,6 +647,17 @@ _llint_op_new_object:
dispatch(4)
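+# check_tdz: if the operand still holds the empty value (a let/const binding read
+# within its temporal dead zone), take the slow path and throw.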
+_llint_op_check_tdz:
+ traceExecution()
+ loadisFromInstruction(1, t0)
+ loadConstantOrVariable(t0, t1)
+ bqneq t1, ValueEmpty, .opNotTDZ
+ callSlowPath(_slow_path_throw_tdz_error)
+
+.opNotTDZ:
+ dispatch(2)
+
+
_llint_op_mov:
traceExecution()
loadisFromInstruction(2, t1)
@@ -328,7 +679,7 @@ _llint_op_not:
dispatch(3)
.opNotSlow:
- callSlowPath(_llint_slow_path_not)
+ callSlowPath(_slow_path_not)
dispatch(3)
@@ -352,24 +703,24 @@ end
_llint_op_eq:
equalityComparison(
macro (left, right, result) cieq left, right, result end,
- _llint_slow_path_eq)
+ _slow_path_eq)
_llint_op_neq:
equalityComparison(
macro (left, right, result) cineq left, right, result end,
- _llint_slow_path_neq)
+ _slow_path_neq)
macro equalNullComparison()
loadisFromInstruction(2, t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .immediate
- loadp JSCell::m_structure[t0], t2
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t2], MasqueradesAsUndefined, .masqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
move 0, t0
jmp .done
.masqueradesAsUndefined:
+ loadStructureWithScratch(t0, t2, t1)
loadp CodeBlock[cfr], t0
loadp CodeBlock::m_globalObject[t0], t0
cpeq Structure::m_globalObject[t2], t0, t0
@@ -427,13 +778,13 @@ end
_llint_op_stricteq:
strictEq(
macro (left, right, result) cqeq left, right, result end,
- _llint_slow_path_stricteq)
+ _slow_path_stricteq)
_llint_op_nstricteq:
strictEq(
macro (left, right, result) cqneq left, right, result end,
- _llint_slow_path_nstricteq)
+ _slow_path_nstricteq)
macro preOp(arithmeticOperation, slowPath)
@@ -454,13 +805,13 @@ end
_llint_op_inc:
preOp(
macro (value, slow) baddio 1, value, slow end,
- _llint_slow_path_pre_inc)
+ _slow_path_inc)
_llint_op_dec:
preOp(
macro (value, slow) bsubio 1, value, slow end,
- _llint_slow_path_pre_dec)
+ _slow_path_dec)
_llint_op_to_number:
@@ -475,7 +826,23 @@ _llint_op_to_number:
dispatch(3)
.opToNumberSlow:
- callSlowPath(_llint_slow_path_to_number)
+ callSlowPath(_slow_path_to_number)
+ dispatch(3)
+
+
+_llint_op_to_string:
+ traceExecution()
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t0)
+ btqnz t0, tagMask, .opToStringSlow
+ bbneq JSCell::m_type[t0], StringType, .opToStringSlow
+.opToStringIsString:
+ storeq t0, [cfr, t2, 8]
+ dispatch(3)
+
+.opToStringSlow:
+ callSlowPath(_slow_path_to_string)
dispatch(3)
@@ -497,7 +864,7 @@ _llint_op_negate:
dispatch(3)
.opNegateSlow:
- callSlowPath(_llint_slow_path_negate)
+ callSlowPath(_slow_path_negate)
dispatch(3)
@@ -565,7 +932,7 @@ _llint_op_add:
binaryOp(
macro (left, right, slow) baddio left, right, slow end,
macro (left, right) addd left, right end,
- _llint_slow_path_add)
+ _slow_path_add)
_llint_op_mul:
@@ -583,7 +950,7 @@ _llint_op_mul:
storeq t3, [cfr, index, 8]
end,
macro (left, right) muld left, right end,
- _llint_slow_path_mul)
+ _slow_path_mul)
_llint_op_sub:
@@ -591,31 +958,36 @@ _llint_op_sub:
binaryOp(
macro (left, right, slow) bsubio left, right, slow end,
macro (left, right) subd left, right end,
- _llint_slow_path_sub)
+ _slow_path_sub)
_llint_op_div:
traceExecution()
- binaryOpCustomStore(
- macro (left, right, slow, index)
- # Assume t3 is scratchable.
- btiz left, slow
- bineq left, -1, .notNeg2TwoThe31DivByNeg1
- bieq right, -2147483648, .slow
- .notNeg2TwoThe31DivByNeg1:
- btinz right, .intOK
- bilt left, 0, slow
- .intOK:
- move left, t3
- move right, t0
- cdqi
- idivi t3
- btinz t1, slow
- orq tagTypeNumber, t0
- storeq t0, [cfr, index, 8]
- end,
- macro (left, right) divd left, right end,
- _llint_slow_path_div)
+ if X86_64 or X86_64_WIN
+ binaryOpCustomStore(
+ macro (left, right, slow, index)
+ # Assume t3 is scratchable.
+ btiz left, slow
+ bineq left, -1, .notNeg2TwoThe31DivByNeg1
+ bieq right, -2147483648, .slow
+ .notNeg2TwoThe31DivByNeg1:
+ btinz right, .intOK
+ bilt left, 0, slow
+ .intOK:
+ move left, t3
+ move right, t0
+ cdqi
+ idivi t3
+ btinz t1, slow
+ orq tagTypeNumber, t0
+ storeq t0, [cfr, index, 8]
+ end,
+ macro (left, right) divd left, right end,
+ _slow_path_div)
+ else
+ callSlowPath(_slow_path_div)
+ dispatch(5)
+ end
macro bitOp(operation, slowPath, advance)
@@ -626,7 +998,7 @@ macro bitOp(operation, slowPath, advance)
loadConstantOrVariable(t2, t0)
bqb t0, tagTypeNumber, .slow
bqb t1, tagTypeNumber, .slow
- operation(t1, t0, .slow)
+ operation(t1, t0)
orq tagTypeNumber, t0
storeq t0, [cfr, t3, 8]
dispatch(advance)
@@ -639,89 +1011,108 @@ end
_llint_op_lshift:
traceExecution()
bitOp(
- macro (left, right, slow) lshifti left, right end,
- _llint_slow_path_lshift,
+ macro (left, right) lshifti left, right end,
+ _slow_path_lshift,
4)
_llint_op_rshift:
traceExecution()
bitOp(
- macro (left, right, slow) rshifti left, right end,
- _llint_slow_path_rshift,
+ macro (left, right) rshifti left, right end,
+ _slow_path_rshift,
4)
_llint_op_urshift:
traceExecution()
bitOp(
- macro (left, right, slow)
- urshifti left, right
- bilt right, 0, slow
- end,
- _llint_slow_path_urshift,
+ macro (left, right) urshifti left, right end,
+ _slow_path_urshift,
4)
+_llint_op_unsigned:
+ traceExecution()
+ loadisFromInstruction(1, t0)
+ loadisFromInstruction(2, t1)
+ loadConstantOrVariable(t1, t2)
+ bilt t2, 0, .opUnsignedSlow
+ storeq t2, [cfr, t0, 8]
+ dispatch(3)
+.opUnsignedSlow:
+ callSlowPath(_slow_path_unsigned)
+ dispatch(3)
+
+
_llint_op_bitand:
traceExecution()
bitOp(
- macro (left, right, slow) andi left, right end,
- _llint_slow_path_bitand,
+ macro (left, right) andi left, right end,
+ _slow_path_bitand,
5)
_llint_op_bitxor:
traceExecution()
bitOp(
- macro (left, right, slow) xori left, right end,
- _llint_slow_path_bitxor,
+ macro (left, right) xori left, right end,
+ _slow_path_bitxor,
5)
_llint_op_bitor:
traceExecution()
bitOp(
- macro (left, right, slow) ori left, right end,
- _llint_slow_path_bitor,
+ macro (left, right) ori left, right end,
+ _slow_path_bitor,
5)
-_llint_op_check_has_instance:
+_llint_op_overrides_has_instance:
traceExecution()
+ loadisFromInstruction(1, t3)
+
loadisFromInstruction(3, t1)
- loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow)
- loadp JSCell::m_structure[t0], t0
- btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow
- dispatch(5)
+ loadConstantOrVariable(t1, t0)
+ loadp CodeBlock[cfr], t2
+ loadp CodeBlock::m_globalObject[t2], t2
+ loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t2], t2
+ bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol
-.opCheckHasInstanceSlow:
- callSlowPath(_llint_slow_path_check_has_instance)
- dispatch(0)
+ loadisFromInstruction(2, t1)
+ loadConstantOrVariable(t1, t0)
+ tbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, t1
+ orq ValueFalse, t1
+ storeq t1, [cfr, t3, 8]
+ dispatch(4)
+
+.opOverridesHasInstanceNotDefaultSymbol:
+ storeq ValueTrue, [cfr, t3, 8]
+ dispatch(4)
_llint_op_instanceof:
traceExecution()
# Actually do the work.
loadisFromInstruction(3, t0)
- loadisFromInstruction(1, t3)
loadConstantOrVariableCell(t0, t1, .opInstanceofSlow)
- loadp JSCell::m_structure[t1], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow
+ bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow
loadisFromInstruction(2, t0)
loadConstantOrVariableCell(t0, t2, .opInstanceofSlow)
# Register state: t1 = prototype, t2 = value
move 1, t0
.opInstanceofLoop:
- loadp JSCell::m_structure[t2], t2
- loadq Structure::m_prototype[t2], t2
+ loadStructureAndClobberFirstArg(t2, t3)
+ loadq Structure::m_prototype[t3], t2
bqeq t2, t1, .opInstanceofDone
btqz t2, tagMask, .opInstanceofLoop
move 0, t0
.opInstanceofDone:
orq ValueFalse, t0
+ loadisFromInstruction(1, t3)
storeq t0, [cfr, t3, 8]
dispatch(4)
@@ -729,6 +1120,10 @@ _llint_op_instanceof:
callSlowPath(_llint_slow_path_instanceof)
dispatch(4)
+_llint_op_instanceof_custom:
+ traceExecution()
+ callSlowPath(_llint_slow_path_instanceof_custom)
+ dispatch(5)
_llint_op_is_undefined:
traceExecution()
@@ -741,17 +1136,17 @@ _llint_op_is_undefined:
storeq t3, [cfr, t2, 8]
dispatch(3)
.opIsUndefinedCell:
- loadp JSCell::m_structure[t0], t0
- btbnz Structure::m_typeInfo + TypeInfo::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
+ btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined
move ValueFalse, t1
storeq t1, [cfr, t2, 8]
dispatch(3)
.masqueradesAsUndefined:
+ loadStructureWithScratch(t0, t3, t1)
loadp CodeBlock[cfr], t1
loadp CodeBlock::m_globalObject[t1], t1
- cpeq Structure::m_globalObject[t0], t1, t3
- orq ValueFalse, t3
- storeq t3, [cfr, t2, 8]
+ cpeq Structure::m_globalObject[t3], t1, t0
+ orq ValueFalse, t0
+ storeq t0, [cfr, t2, 8]
dispatch(3)
@@ -784,8 +1179,7 @@ _llint_op_is_string:
loadisFromInstruction(1, t2)
loadConstantOrVariable(t1, t0)
btqnz t0, tagMask, .opIsStringNotCell
- loadp JSCell::m_structure[t0], t0
- cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1
+ cbeq JSCell::m_type[t0], StringType, t1
orq ValueFalse, t1
storeq t1, [cfr, t2, 8]
dispatch(3)
@@ -794,16 +1188,25 @@ _llint_op_is_string:
dispatch(3)
-macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffsetAsPointer, objectAndStorage, value)
- assert(macro (ok) bigteq propertyOffsetAsPointer, firstOutOfLineOffset, ok end)
- negp propertyOffsetAsPointer
- loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
- loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsPointer, 8], value
-end
+_llint_op_is_object:
+ traceExecution()
+ loadisFromInstruction(2, t1)
+ loadisFromInstruction(1, t2)
+ loadConstantOrVariable(t1, t0)
+ btqnz t0, tagMask, .opIsObjectNotCell
+ cbaeq JSCell::m_type[t0], ObjectType, t1
+ orq ValueFalse, t1
+ storeq t1, [cfr, t2, 8]
+ dispatch(3)
+.opIsObjectNotCell:
+ storeq ValueFalse, [cfr, t2, 8]
+ dispatch(3)
+
-macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
+macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value, slow)
bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+ copyBarrier(objectAndStorage, slow)
negi propertyOffsetAsInt
sxi2q propertyOffsetAsInt, propertyOffsetAsInt
jmp .ready
@@ -813,65 +1216,37 @@ macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value)
loadq (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8], value
end
-_llint_op_init_global_const:
- traceExecution()
- loadisFromInstruction(2, t1)
- loadpFromInstruction(1, t0)
- loadConstantOrVariable(t1, t2)
- writeBarrier(t2)
- storeq t2, [t0]
- dispatch(5)
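+# Mirror of loadPropertyAtVariableOffset above: offsets below firstOutOfLineOffset
+# address inline storage inside the JSObject itself, while larger offsets index into
+# the out-of-line butterfly (which may require a copy barrier).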
+macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value, slow)
+ bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline
+ loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage
+ copyBarrier(objectAndStorage, slow)
+ negi propertyOffsetAsInt
+ sxi2q propertyOffsetAsInt, propertyOffsetAsInt
+ jmp .ready
+.isInline:
+ addp sizeof JSObject - (firstOutOfLineOffset - 2) * 8, objectAndStorage
+.ready:
+ storeq value, (firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8]
+end
-_llint_op_init_global_const_check:
- traceExecution()
- loadpFromInstruction(3, t2)
- loadisFromInstruction(2, t1)
- loadpFromInstruction(1, t0)
- btbnz [t2], .opInitGlobalConstCheckSlow
- loadConstantOrVariable(t1, t2)
- writeBarrier(t2)
- storeq t2, [t0]
- dispatch(5)
-.opInitGlobalConstCheckSlow:
- callSlowPath(_llint_slow_path_init_global_const_check)
- dispatch(5)
-
-macro getById(getPropertyStorage)
+_llint_op_get_by_id:
traceExecution()
- # We only do monomorphic get_by_id caching for now, and we do not modify the
- # opcode. We do, however, allow for the cache to change anytime if fails, since
- # ping-ponging is free. At best we get lucky and the get_by_id will continue
- # to take fast path on the new cache. At worst we take slow path, which is what
- # we would have been doing anyway.
loadisFromInstruction(2, t0)
- loadpFromInstruction(4, t1)
loadConstantOrVariableCell(t0, t3, .opGetByIdSlow)
- loadisFromInstruction(5, t2)
- getPropertyStorage(
- t3,
- t0,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow
- loadisFromInstruction(1, t1)
- loadq [propertyStorage, t2], scratch
- storeq scratch, [cfr, t1, 8]
- loadpFromInstruction(8, t1)
- valueProfile(scratch, t1)
- dispatch(9)
- end)
-
- .opGetByIdSlow:
- callSlowPath(_llint_slow_path_get_by_id)
- dispatch(9)
-end
-
-_llint_op_get_by_id:
- getById(withInlineStorage)
-
+ loadi JSCell::m_structureID[t3], t1
+ loadisFromInstruction(4, t2)
+ bineq t2, t1, .opGetByIdSlow
+ loadisFromInstruction(5, t1)
+ loadisFromInstruction(1, t2)
+ loadPropertyAtVariableOffset(t1, t3, t0, .opGetByIdSlow)
+ storeq t0, [cfr, t2, 8]
+ valueProfile(t0, 8, t1)
+ dispatch(9)
-_llint_op_get_by_id_out_of_line:
- getById(withOutOfLineStorage)
+.opGetByIdSlow:
+ callSlowPath(_llint_slow_path_get_by_id)
+ dispatch(9)
_llint_op_get_array_length:
@@ -879,17 +1254,17 @@ _llint_op_get_array_length:
loadisFromInstruction(2, t0)
loadpFromInstruction(4, t1)
loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow)
- loadp JSCell::m_structure[t3], t2
+ move t3, t2
arrayProfile(t2, t1, t0)
btiz t2, IsArray, .opGetArrayLengthSlow
btiz t2, IndexingShapeMask, .opGetArrayLengthSlow
loadisFromInstruction(1, t1)
- loadpFromInstruction(8, t2)
loadp JSObject::m_butterfly[t3], t0
+ copyBarrier(t0, .opGetArrayLengthSlow)
loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0
bilt t0, 0, .opGetArrayLengthSlow
orq tagTypeNumber, t0
- valueProfile(t0, t2)
+ valueProfile(t0, 8, t2)
storeq t0, [cfr, t1, 8]
dispatch(9)
@@ -898,122 +1273,157 @@ _llint_op_get_array_length:
dispatch(9)
-_llint_op_get_arguments_length:
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(1, t1)
- btqnz [cfr, t0, 8], .opGetArgumentsLengthSlow
- loadi ArgumentCount + PayloadOffset[cfr], t2
- subi 1, t2
- orq tagTypeNumber, t2
- storeq t2, [cfr, t1, 8]
- dispatch(4)
-
-.opGetArgumentsLengthSlow:
- callSlowPath(_llint_slow_path_get_arguments_length)
- dispatch(4)
-
-
-macro putById(getPropertyStorage)
- traceExecution()
- loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
- loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
- loadisFromInstruction(3, t2)
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- loadisFromInstruction(5, t1)
- loadConstantOrVariable(t2, scratch)
- writeBarrier(t0)
- storeq scratch, [propertyStorage, t1]
- dispatch(9)
- end)
-end
-
_llint_op_put_by_id:
- putById(withInlineStorage)
-
-.opPutByIdSlow:
- callSlowPath(_llint_slow_path_put_by_id)
- dispatch(9)
-
-
-_llint_op_put_by_id_out_of_line:
- putById(withOutOfLineStorage)
-
-
-macro putByIdTransition(additionalChecks, getPropertyStorage)
traceExecution()
+ writeBarrierOnOperands(1, 3)
loadisFromInstruction(1, t3)
- loadpFromInstruction(4, t1)
loadConstantOrVariableCell(t3, t0, .opPutByIdSlow)
- loadisFromInstruction(3, t2)
- bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow
- additionalChecks(t1, t3)
- loadisFromInstruction(5, t1)
- getPropertyStorage(
- t0,
- t3,
- macro (propertyStorage, scratch)
- addp t1, propertyStorage, t3
- loadConstantOrVariable(t2, t1)
- writeBarrier(t1)
- storeq t1, [t3]
- loadpFromInstruction(6, t1)
- storep t1, JSCell::m_structure[t0]
- dispatch(9)
- end)
-end
+ loadisFromInstruction(4, t2)
+ bineq t2, JSCell::m_structureID[t0], .opPutByIdSlow
-macro noAdditionalChecks(oldStructure, scratch)
-end
+ # At this point, we have:
+ # t2 -> current structure ID
+ # t0 -> object base
-macro structureChainChecks(oldStructure, scratch)
- const protoCell = oldStructure # Reusing the oldStructure register for the proto
- loadpFromInstruction(7, scratch)
- assert(macro (ok) btpnz scratch, ok end)
- loadp StructureChain::m_vector[scratch], scratch
- assert(macro (ok) btpnz scratch, ok end)
- bqeq Structure::m_prototype[oldStructure], ValueNull, .done
-.loop:
- loadq Structure::m_prototype[oldStructure], protoCell
- loadp JSCell::m_structure[protoCell], oldStructure
- bpneq oldStructure, [scratch], .opPutByIdSlow
- addp 8, scratch
- bqneq Structure::m_prototype[oldStructure], ValueNull, .loop
-.done:
-end
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t3)
-_llint_op_put_by_id_transition_direct:
- putByIdTransition(noAdditionalChecks, withInlineStorage)
+ loadpFromInstruction(8, t1)
+
+ # At this point, we have:
+ # t0 -> object base
+ # t1 -> put by id flags
+ # t2 -> current structure ID
+ # t3 -> value to put
+
+ btpnz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther
+
+ # We have one of the non-structure type checks. Find out which one.
+ andp PutByIdSecondaryTypeMask, t1
+ bplt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString
+
+ # We are one of the following: String, Symbol, Object, ObjectOrOther, Top
+ bplt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther
+
+ # We are either ObjectOrOther or Top.
+ bpeq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes
+
+ # Check if we are ObjectOrOther.
+ btqz t3, tagMask, .opPutByIdTypeCheckObject
+.opPutByIdTypeCheckOther:
+ andq ~TagBitUndefined, t3
+ bqeq t3, ValueNull, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanObjectOrOther:
+ # We are either String, Symbol or Object.
+ btqnz t3, tagMask, .opPutByIdSlow
+ bpeq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject
+ bpeq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol
+ bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckObject:
+ bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+.opPutByIdTypeCheckSymbol:
+ bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanString:
+ # We are one of the following: Bottom, Boolean, Other, Int32, Number
+ bplt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32
+
+ # We are either Int32 or Number.
+ bpeq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber
+
+ bqaeq t3, tagTypeNumber, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckNumber:
+ btqnz t3, tagTypeNumber, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckLessThanInt32:
+ # We are one of the following: Bottom, Boolean, Other.
+ bpneq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther
+ xorq ValueFalse, t3
+ btqz t3, ~1, .opPutByIdDoneCheckingTypes
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckBottomOrOther:
+ bpeq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructureOrOther:
+ btqz t3, tagMask, .opPutByIdTypeCheckObjectWithStructure
+ btpnz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther
+ jmp .opPutByIdSlow
+
+.opPutByIdTypeCheckObjectWithStructure:
+ urshiftp 3, t1
+ bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow
+
+.opPutByIdDoneCheckingTypes:
+ loadisFromInstruction(6, t1)
+
+ btiz t1, .opPutByIdNotTransition
+ # This is the transition case. t1 holds the new structureID. t2 holds the old structure ID.
+ # If we have a chain, we need to check it. t0 is the base. We may clobber t1 to use it as
+ # scratch.
+ loadpFromInstruction(7, t3)
+ btpz t3, .opPutByIdTransitionDirect
-_llint_op_put_by_id_transition_direct_out_of_line:
- putByIdTransition(noAdditionalChecks, withOutOfLineStorage)
+ loadp StructureChain::m_vector[t3], t3
+ assert(macro (ok) btpnz t3, ok end)
+ structureIDToStructureWithScratch(t2, t1)
+ loadq Structure::m_prototype[t2], t2
+ bqeq t2, ValueNull, .opPutByIdTransitionChainDone
+.opPutByIdTransitionChainLoop:
+    # At this point, t2 contains a prototype, and [t3] contains the Structure* that we want that
+ # prototype to have. We don't want to have to load the Structure* for t2. Instead, we load
+ # the Structure* from [t3], and then we compare its id to the id in the header of t2.
+ loadp [t3], t1
+ loadi JSCell::m_structureID[t2], t2
+ # Now, t1 has the Structure* and t2 has the StructureID that we want that Structure* to have.
+ bineq t2, Structure::m_blob + StructureIDBlob::u.fields.structureID[t1], .opPutByIdSlow
+ addp 8, t3
+ loadq Structure::m_prototype[t1], t2
+ bqneq t2, ValueNull, .opPutByIdTransitionChainLoop
-_llint_op_put_by_id_transition_normal:
- putByIdTransition(structureChainChecks, withInlineStorage)
+.opPutByIdTransitionChainDone:
+ # Reload the new structure, since we clobbered it above.
+ loadisFromInstruction(6, t1)
+.opPutByIdTransitionDirect:
+ storei t1, JSCell::m_structureID[t0]
-_llint_op_put_by_id_transition_normal_out_of_line:
- putByIdTransition(structureChainChecks, withOutOfLineStorage)
+.opPutByIdNotTransition:
+ # The only thing live right now is t0, which holds the base.
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
+ loadisFromInstruction(5, t1)
+ storePropertyAtVariableOffset(t1, t0, t2, .opPutByIdSlow)
+ dispatch(9)
+
+.opPutByIdSlow:
+ callSlowPath(_llint_slow_path_put_by_id)
+ dispatch(9)
_llint_op_get_by_val:
traceExecution()
loadisFromInstruction(2, t2)
loadConstantOrVariableCell(t2, t0, .opGetByValSlow)
- loadp JSCell::m_structure[t0], t2
loadpFromInstruction(4, t3)
+ move t0, t2
arrayProfile(t2, t3, t1)
loadisFromInstruction(3, t3)
loadConstantOrVariableInt32(t3, t1, .opGetByValSlow)
sxi2q t1, t1
loadp JSObject::m_butterfly[t0], t3
+ copyBarrier(t3, .opGetByValSlow)
andi IndexingShapeMask, t2
bieq t2, Int32Shape, .opGetByValIsContiguous
bineq t2, ContiguousShape, .opGetByValNotContiguous
@@ -1045,77 +1455,17 @@ _llint_op_get_by_val:
.opGetByValDone:
storeq t2, [cfr, t0, 8]
- loadpFromInstruction(5, t0)
- valueProfile(t2, t0)
+ valueProfile(t2, 5, t0)
dispatch(6)
.opGetByValOutOfBounds:
- if VALUE_PROFILER
- loadpFromInstruction(4, t0)
- storeb 1, ArrayProfile::m_outOfBounds[t0]
- end
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
.opGetByValSlow:
callSlowPath(_llint_slow_path_get_by_val)
dispatch(6)
-_llint_op_get_argument_by_val:
- # FIXME: At some point we should array profile this. Right now it isn't necessary
- # since the DFG will never turn a get_argument_by_val into a GetByVal.
- traceExecution()
- loadisFromInstruction(2, t0)
- loadisFromInstruction(3, t1)
- btqnz [cfr, t0, 8], .opGetArgumentByValSlow
- loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow)
- addi 1, t2
- loadi ArgumentCount + PayloadOffset[cfr], t1
- biaeq t2, t1, .opGetArgumentByValSlow
- negi t2
- sxi2q t2, t2
- loadisFromInstruction(1, t3)
- loadpFromInstruction(5, t1)
- loadq ThisArgumentOffset[cfr, t2, 8], t0
- storeq t0, [cfr, t3, 8]
- valueProfile(t0, t1)
- dispatch(6)
-
-.opGetArgumentByValSlow:
- callSlowPath(_llint_slow_path_get_argument_by_val)
- dispatch(6)
-
-
-_llint_op_get_by_pname:
- traceExecution()
- loadisFromInstruction(3, t1)
- loadConstantOrVariable(t1, t0)
- loadisFromInstruction(4, t1)
- assertNotConstant(t1)
- bqneq t0, [cfr, t1, 8], .opGetByPnameSlow
- loadisFromInstruction(2, t2)
- loadisFromInstruction(5, t3)
- loadConstantOrVariableCell(t2, t0, .opGetByPnameSlow)
- assertNotConstant(t3)
- loadq [cfr, t3, 8], t1
- loadp JSCell::m_structure[t0], t2
- bpneq t2, JSPropertyNameIterator::m_cachedStructure[t1], .opGetByPnameSlow
- loadisFromInstruction(6, t3)
- loadi PayloadOffset[cfr, t3, 8], t3
- subi 1, t3
- biaeq t3, JSPropertyNameIterator::m_numCacheableSlots[t1], .opGetByPnameSlow
- bilt t3, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], .opGetByPnameInlineProperty
- addi firstOutOfLineOffset, t3
- subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], t3
-.opGetByPnameInlineProperty:
- loadPropertyAtVariableOffset(t3, t0, t0)
- loadisFromInstruction(1, t1)
- storeq t0, [cfr, t1, 8]
- dispatch(7)
-
-.opGetByPnameSlow:
- callSlowPath(_llint_slow_path_get_by_pname)
- dispatch(7)
-
-
macro contiguousPutByVal(storeCallback)
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds
.storeResult:
@@ -1125,26 +1475,26 @@ macro contiguousPutByVal(storeCallback)
.outOfBounds:
biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds
- if VALUE_PROFILER
- loadp 32[PB, PC, 8], t2
- storeb 1, ArrayProfile::m_mayStoreToHole[t2]
- end
+ loadp 32[PB, PC, 8], t2
+ storeb 1, ArrayProfile::m_mayStoreToHole[t2]
addi 1, t3, t2
storei t2, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0]
jmp .storeResult
end
-_llint_op_put_by_val:
+macro putByVal(slowPath)
traceExecution()
+ writeBarrierOnOperands(1, 3)
loadisFromInstruction(1, t0)
loadConstantOrVariableCell(t0, t1, .opPutByValSlow)
- loadp JSCell::m_structure[t1], t2
loadpFromInstruction(4, t3)
+ move t1, t2
arrayProfile(t2, t3, t0)
loadisFromInstruction(2, t0)
loadConstantOrVariableInt32(t0, t3, .opPutByValSlow)
sxi2q t3, t3
loadp JSObject::m_butterfly[t1], t0
+ copyBarrier(t0, .opPutByValSlow)
andi IndexingShapeMask, t2
bineq t2, Int32Shape, .opPutByValNotInt32
contiguousPutByVal(
@@ -1175,7 +1525,6 @@ _llint_op_put_by_val:
contiguousPutByVal(
macro (operand, scratch, address)
loadConstantOrVariable(operand, scratch)
- writeBarrier(scratch)
storep scratch, address
end)
@@ -1186,15 +1535,12 @@ _llint_op_put_by_val:
.opPutByValArrayStorageStoreResult:
loadisFromInstruction(3, t2)
loadConstantOrVariable(t2, t1)
- writeBarrier(t1)
storeq t1, ArrayStorage::m_vector[t0, t3, 8]
dispatch(5)
.opPutByValArrayStorageEmpty:
- if VALUE_PROFILER
- loadpFromInstruction(4, t1)
- storeb 1, ArrayProfile::m_mayStoreToHole[t1]
- end
+ loadpFromInstruction(4, t1)
+ storeb 1, ArrayProfile::m_mayStoreToHole[t1]
addi 1, ArrayStorage::m_numValuesInVector[t0]
bib t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .opPutByValArrayStorageStoreResult
addi 1, t3, t1
@@ -1202,13 +1548,18 @@ _llint_op_put_by_val:
jmp .opPutByValArrayStorageStoreResult
.opPutByValOutOfBounds:
- if VALUE_PROFILER
- loadpFromInstruction(4, t0)
- storeb 1, ArrayProfile::m_outOfBounds[t0]
- end
+ loadpFromInstruction(4, t0)
+ storeb 1, ArrayProfile::m_outOfBounds[t0]
.opPutByValSlow:
- callSlowPath(_llint_slow_path_put_by_val)
+ callSlowPath(slowPath)
dispatch(5)
+end
+
+_llint_op_put_by_val:
+ putByVal(_llint_slow_path_put_by_val)
+
+_llint_op_put_by_val_direct:
+ putByVal(_llint_slow_path_put_by_val_direct)
_llint_op_jmp:
@@ -1238,8 +1589,8 @@ macro equalNull(cellHandler, immediateHandler)
assertNotConstant(t0)
loadq [cfr, t0, 8], t0
btqnz t0, tagMask, .immediate
- loadp JSCell::m_structure[t0], t2
- cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target)
+ loadStructureWithScratch(t0, t2, t1)
+ cellHandler(t2, JSCell::m_flags[t0], .target)
dispatch(3)
.target:
@@ -1340,7 +1691,7 @@ _llint_op_switch_imm:
loadp CodeBlock[cfr], t2
loadp CodeBlock::m_rareData[t2], t2
muli sizeof SimpleJumpTable, t3 # FIXME: would be nice to peephole this!
- loadp CodeBlock::RareData::m_immediateSwitchJumpTables + VectorBufferOffset[t2], t2
+ loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
bqb t1, tagTypeNumber, .opSwitchImmNotInt
subi SimpleJumpTable::min[t2], t1
@@ -1368,11 +1719,10 @@ _llint_op_switch_char:
loadp CodeBlock[cfr], t2
loadp CodeBlock::m_rareData[t2], t2
muli sizeof SimpleJumpTable, t3
- loadp CodeBlock::RareData::m_characterSwitchJumpTables + VectorBufferOffset[t2], t2
+ loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2
addp t3, t2
btqnz t1, tagMask, .opSwitchCharFallThrough
- loadp JSCell::m_structure[t1], t0
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, .opSwitchCharFallThrough
+ bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough
bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough
loadp JSString::m_value[t1], t0
btpz t0, .opSwitchOnRope
@@ -1398,104 +1748,44 @@ _llint_op_switch_char:
dispatch(0)
-_llint_op_new_func:
- traceExecution()
- loadisFromInstruction(3, t2)
- btiz t2, .opNewFuncUnchecked
- loadisFromInstruction(1, t1)
- btqnz [cfr, t1, 8], .opNewFuncDone
-.opNewFuncUnchecked:
- callSlowPath(_llint_slow_path_new_func)
-.opNewFuncDone:
- dispatch(4)
-
-
macro arrayProfileForCall()
- if VALUE_PROFILER
- loadisFromInstruction(3, t3)
- loadq ThisArgumentOffset[cfr, t3, 8], t0
- btqnz t0, tagMask, .done
- loadp JSCell::m_structure[t0], t0
- loadpFromInstruction(5, t1)
- storep t0, ArrayProfile::m_lastSeenStructure[t1]
- .done:
- end
+ loadisFromInstruction(4, t3)
+ negp t3
+ loadq ThisArgumentOffset[cfr, t3, 8], t0
+ btqnz t0, tagMask, .done
+ loadpFromInstruction((CallOpCodeSize - 2), t1)
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, ArrayProfile::m_lastSeenStructureID[t1]
+.done:
end
-macro doCall(slowPath)
- loadisFromInstruction(1, t0)
- loadpFromInstruction(4, t1)
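+# doCall compares the call target against the callee cached in the LLIntCallLinkInfo.
+# On a hit it builds the callee frame below cfr and jumps to the cached machine code
+# target; otherwise it takes the slow path to (re)link the call.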
+macro doCall(slowPath, prepareCall)
+ loadisFromInstruction(2, t0)
+ loadpFromInstruction(5, t1)
loadp LLIntCallLinkInfo::callee[t1], t2
loadConstantOrVariable(t0, t3)
bqneq t3, t2, .opCallSlow
- loadisFromInstruction(3, t3)
- addi 6, PC
+ loadisFromInstruction(4, t3)
lshifti 3, t3
+ negp t3
addp cfr, t3
- loadp JSFunction::m_scope[t2], t0
storeq t2, Callee[t3]
- storeq t0, ScopeChain[t3]
- loadisFromInstruction(-4, t2)
+ loadisFromInstruction(3, t2)
storei PC, ArgumentCount + TagOffset[cfr]
- storeq cfr, CallerFrame[t3]
storei t2, ArgumentCount + PayloadOffset[t3]
- move t3, cfr
- callTargetFunction(t1)
+ move t3, sp
+ prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4)
+ callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1])
.opCallSlow:
- slowPathForCall(6, slowPath)
+ slowPathForCall(slowPath, prepareCall)
end
-
-_llint_op_tear_off_activation:
- traceExecution()
- loadisFromInstruction(1, t0)
- btqz [cfr, t0, 8], .opTearOffActivationNotCreated
- callSlowPath(_llint_slow_path_tear_off_activation)
-.opTearOffActivationNotCreated:
- dispatch(2)
-
-
-_llint_op_tear_off_arguments:
- traceExecution()
- loadisFromInstruction(1, t0)
- subi 1, t0 # Get the unmodifiedArgumentsRegister
- btqz [cfr, t0, 8], .opTearOffArgumentsNotCreated
- callSlowPath(_llint_slow_path_tear_off_arguments)
-.opTearOffArgumentsNotCreated:
- dispatch(3)
-
-
_llint_op_ret:
traceExecution()
checkSwitchToJITForEpilogue()
loadisFromInstruction(1, t2)
- loadConstantOrVariable(t2, t0)
- doReturn()
-
-
-_llint_op_call_put_result:
- loadisFromInstruction(1, t2)
- loadpFromInstruction(2, t3)
- storeq t0, [cfr, t2, 8]
- valueProfile(t0, t3)
- traceExecution()
- dispatch(3)
-
-
-_llint_op_ret_object_or_this:
- traceExecution()
- checkSwitchToJITForEpilogue()
- loadisFromInstruction(1, t2)
- loadConstantOrVariable(t2, t0)
- btqnz t0, tagMask, .opRetObjectOrThisNotObject
- loadp JSCell::m_structure[t0], t2
- bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject
- doReturn()
-
-.opRetObjectOrThisNotObject:
- loadisFromInstruction(2, t2)
- loadConstantOrVariable(t2, t0)
+ loadConstantOrVariable(t2, r0)
doReturn()
@@ -1505,80 +1795,56 @@ _llint_op_to_primitive:
loadisFromInstruction(1, t3)
loadConstantOrVariable(t2, t0)
btqnz t0, tagMask, .opToPrimitiveIsImm
- loadp JSCell::m_structure[t0], t2
- bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .opToPrimitiveSlowCase
+ bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase
.opToPrimitiveIsImm:
storeq t0, [cfr, t3, 8]
dispatch(3)
.opToPrimitiveSlowCase:
- callSlowPath(_llint_slow_path_to_primitive)
+ callSlowPath(_slow_path_to_primitive)
dispatch(3)
-_llint_op_next_pname:
- traceExecution()
- loadisFromInstruction(3, t1)
- loadisFromInstruction(4, t2)
- assertNotConstant(t1)
- assertNotConstant(t2)
- loadi PayloadOffset[cfr, t1, 8], t0
- bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd
- loadisFromInstruction(5, t2)
- assertNotConstant(t2)
- loadp [cfr, t2, 8], t2
- loadp JSPropertyNameIterator::m_jsStrings[t2], t3
- loadq [t3, t0, 8], t3
- addi 1, t0
- storei t0, PayloadOffset[cfr, t1, 8]
- loadisFromInstruction(1, t1)
- storeq t3, [cfr, t1, 8]
- loadisFromInstruction(2, t3)
- assertNotConstant(t3)
- loadq [cfr, t3, 8], t3
- loadp JSCell::m_structure[t3], t1
- bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow
- loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0
- loadp StructureChain::m_vector[t0], t0
- btpz [t0], .opNextPnameTarget
-.opNextPnameCheckPrototypeLoop:
- bqeq Structure::m_prototype[t1], ValueNull, .opNextPnameSlow
- loadq Structure::m_prototype[t1], t2
- loadp JSCell::m_structure[t2], t1
- bpneq t1, [t0], .opNextPnameSlow
- addp 8, t0
- btpnz [t0], .opNextPnameCheckPrototypeLoop
-.opNextPnameTarget:
- dispatchIntIndirect(6)
-
-.opNextPnameEnd:
- dispatch(7)
-
-.opNextPnameSlow:
- callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target.
- dispatch(0)
-
-
_llint_op_catch:
# This is where we end up from the JIT's throw trampoline (because the
# machine code return address will be set to _llint_op_catch), and from
# the interpreter's throw trampoline (see _llint_throw_trampoline).
- # The JIT throwing protocol calls for the cfr to be in t0. The throwing
- # code must have known that we were throwing to the interpreter, and have
- # set VM::targetInterpreterPCForThrow.
- move t0, cfr
+ # The throwing code must have known that we were throwing to the interpreter,
+ # and have set VM::targetInterpreterPCForThrow.
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+ restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0)
+ loadp VM::callFrameForCatch[t3], cfr
+ storep 0, VM::callFrameForCatch[t3]
+ restoreStackPointerAfterCall()
+
loadp CodeBlock[cfr], PB
loadp CodeBlock::m_instructions[PB], PB
- loadp JITStackFrame::vm[sp], t3
loadp VM::targetInterpreterPCForThrow[t3], PC
subp PB, PC
rshiftp 3, PC
- loadq VM::exception[t3], t0
- storeq 0, VM::exception[t3]
+
+ callSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler)
+ bpeq r1, 0, .isCatchableException
+ jmp _llint_throw_from_slow_path_trampoline
+
+.isCatchableException:
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
+
+ loadq VM::m_exception[t3], t0
+ storeq 0, VM::m_exception[t3]
loadisFromInstruction(1, t2)
storeq t0, [cfr, t2, 8]
+
+ loadq Exception::m_value[t0], t3
+ loadisFromInstruction(2, t2)
+ storeq t3, [cfr, t2, 8]
+
traceExecution()
- dispatch(2)
+ dispatch(3)
_llint_op_end:
@@ -1586,139 +1852,472 @@ _llint_op_end:
checkSwitchToJITForEpilogue()
loadisFromInstruction(1, t0)
assertNotConstant(t0)
- loadq [cfr, t0, 8], t0
+ loadq [cfr, t0, 8], r0
doReturn()
_llint_throw_from_slow_path_trampoline:
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
+ copyCalleeSavesToVMCalleeSavesBuffer(t1, t2)
+
+ callSlowPath(_llint_slow_path_handle_exception)
+
    # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), the
    # throw target is not necessarily interpreted code, so we come here.
# This essentially emulates the JIT's throwing protocol.
- loadp JITStackFrame::vm[sp], t1
- loadp VM::callFrameForThrow[t1], t0
+ loadp Callee[cfr], t1
+ andp MarkedBlockMask, t1
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
jmp VM::targetMachinePCForThrow[t1]
_llint_throw_during_call_trampoline:
preserveReturnAddressAfterCall(t2)
- loadp JITStackFrame::vm[sp], t1
- loadp VM::callFrameForThrow[t1], t0
- jmp VM::targetMachinePCForThrow[t1]
+ jmp _llint_throw_from_slow_path_trampoline
-# Gives you the scope in t0, while allowing you to optionally perform additional checks on the
-# scopes as they are traversed. scopeCheck() is called with two arguments: the register
-# holding the scope, and a register that can be used for scratch. Note that this does not
-# use t3, so you can hold stuff in t3 if need be.
-macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck)
- loadp ScopeChain[cfr], t0
- loadis deBruijinIndexOperand, t2
- btiz t2, .done
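+# Trampoline for calls to host (native) functions: record the frame in
+# VM::topCallFrame, call the C function stored in the callee's executable, then check
+# VM::m_exception on the way out and jump to the throw trampoline if one was set.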
+macro nativeCallTrampoline(executableOffsetToFunction)
- loadp CodeBlock[cfr], t1
- bineq CodeBlock::m_codeType[t1], FunctionCode, .loop
- btbz CodeBlock::m_needsActivation[t1], .loop
+ functionPrologue()
+ storep 0, CodeBlock[cfr]
+ loadp Callee[cfr], t0
+ andp MarkedBlockMask, t0, t1
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1
+ storep cfr, VM::topCallFrame[t1]
+ if ARM64 or C_LOOP
+ storep lr, ReturnPC[cfr]
+ end
+ move cfr, a0
+ loadp Callee[cfr], t1
+ loadp JSFunction::m_executable[t1], t1
+ checkStackPointerAlignment(t3, 0xdead0001)
+ if C_LOOP
+ cloopCallNative executableOffsetToFunction[t1]
+ else
+ if X86_64_WIN
+ subp 32, sp
+ end
+ call executableOffsetToFunction[t1]
+ if X86_64_WIN
+ addp 32, sp
+ end
+ end
+ loadp Callee[cfr], t3
+ andp MarkedBlockMask, t3
+ loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3
- loadis CodeBlock::m_activationRegister[t1], t1
+ functionEpilogue()
- # Need to conditionally skip over one scope.
- btpz [cfr, t1, 8], .noActivation
- scopeCheck(t0, t1)
- loadp JSScope::m_next[t0], t0
-.noActivation:
- subi 1, t2
+ btqnz VM::m_exception[t3], .handleException
+ ret
+
+.handleException:
+ storep cfr, VM::topCallFrame[t3]
+ restoreStackPointerAfterCall()
+ jmp _llint_throw_from_slow_path_trampoline
+end
- btiz t2, .done
-.loop:
- scopeCheck(t0, t1)
+macro getConstantScope(dst)
+ loadpFromInstruction(6, t0)
+ loadisFromInstruction(dst, t1)
+ storeq t0, [cfr, t1, 8]
+end
+
+macro varInjectionCheck(slowPath)
+ loadp CodeBlock[cfr], t0
+ loadp CodeBlock::m_globalObject[t0], t0
+ loadp JSGlobalObject::m_varInjectionWatchpoint[t0], t0
+ bbeq WatchpointSet::m_state[t0], IsInvalidated, slowPath
+end
+
+macro resolveScope()
+ loadisFromInstruction(5, t2)
+ loadisFromInstruction(2, t0)
+ loadp [cfr, t0, 8], t0
+ btiz t2, .resolveScopeLoopEnd
+
+.resolveScopeLoop:
loadp JSScope::m_next[t0], t0
subi 1, t2
- btinz t2, .loop
+ btinz t2, .resolveScopeLoop
-.done:
+.resolveScopeLoopEnd:
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
end
-_llint_op_get_scoped_var:
+
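+# resolve_scope dispatches on the ResolveType cached in the instruction: the global
+# cases store the constant scope baked into the instruction, ClosureVar walks the
+# scope chain, and the *WithVarInjectionChecks variants first verify that no var
+# injection has invalidated the resolution.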
+_llint_op_resolve_scope:
traceExecution()
- # Operands are as follows:
- # pc[1]: Destination for the load
- # pc[2]: Index of register in the scope
- # 24[PB, PC, 8] De Bruijin index.
- getDeBruijnScope(24[PB, PC, 8], macro (scope, scratch) end)
+ loadisFromInstruction(4, t0)
+
+#rGlobalProperty:
+ bineq t0, GlobalProperty, .rGlobalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalVar:
+ bineq t0, GlobalVar, .rGlobalLexicalVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .rClosureVar
+ getConstantScope(1)
+ dispatch(7)
+
+.rClosureVar:
+ bineq t0, ClosureVar, .rModuleVar
+ resolveScope()
+ dispatch(7)
+
+.rModuleVar:
+ bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalPropertyWithVarInjectionChecks:
+ bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalVarWithVarInjectionChecks:
+ bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
+
+.rGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks
+ varInjectionCheck(.rDynamic)
+ getConstantScope(1)
+ dispatch(7)
+
+.rClosureVarWithVarInjectionChecks:
+ bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic
+ varInjectionCheck(.rDynamic)
+ resolveScope()
+ dispatch(7)
+
+.rDynamic:
+ callSlowPath(_slow_path_resolve_scope)
+ dispatch(7)
+
+
+macro loadWithStructureCheck(operand, slowPath)
+ loadisFromInstruction(operand, t0)
+ loadq [cfr, t0, 8], t0
+ loadStructureWithScratch(t0, t2, t1)
+ loadpFromInstruction(5, t1)
+ bpneq t2, t1, slowPath
+end
+
+macro getProperty(slow)
+ loadisFromInstruction(6, t1)
+ loadPropertyAtVariableOffset(t1, t0, t2, slow)
+ valueProfile(t2, 7, t0)
+ loadisFromInstruction(1, t0)
+ storeq t2, [cfr, t0, 8]
+end
+
+macro getGlobalVar(tdzCheckIfNecessary)
+ loadpFromInstruction(6, t0)
+ loadq [t0], t0
+ tdzCheckIfNecessary(t0)
+ valueProfile(t0, 7, t1)
loadisFromInstruction(1, t1)
- loadisFromInstruction(2, t2)
+ storeq t0, [cfr, t1, 8]
+end
+
+macro getClosureVar()
+ loadisFromInstruction(6, t1)
+ loadq JSEnvironmentRecord_variables[t0, t1, 8], t0
+ valueProfile(t0, 7, t1)
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+end
+
+_llint_op_get_from_scope:
+ traceExecution()
+ loadisFromInstruction(4, t0)
+ andi ResolveTypeMask, t0
+
+#gGlobalProperty:
+ bineq t0, GlobalProperty, .gGlobalVar
+ loadWithStructureCheck(2, .gDynamic)
+ getProperty(.gDynamic)
+ dispatch(8)
+
+.gGlobalVar:
+ bineq t0, GlobalVar, .gGlobalLexicalVar
+ getGlobalVar(macro(v) end)
+ dispatch(8)
+
+.gGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .gClosureVar
+ getGlobalVar(
+ macro (value)
+ bqeq value, ValueEmpty, .gDynamic
+ end)
+ dispatch(8)
+
+.gClosureVar:
+ bineq t0, ClosureVar, .gGlobalPropertyWithVarInjectionChecks
+ loadVariable(2, t0)
+ getClosureVar()
+ dispatch(8)
+
+.gGlobalPropertyWithVarInjectionChecks:
+ bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks
+ loadWithStructureCheck(2, .gDynamic)
+ getProperty(.gDynamic)
+ dispatch(8)
+
+.gGlobalVarWithVarInjectionChecks:
+ bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(macro(v) end)
+ dispatch(8)
+
+.gGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks
+ varInjectionCheck(.gDynamic)
+ getGlobalVar(
+ macro (value)
+ bqeq value, ValueEmpty, .gDynamic
+ end)
+ dispatch(8)
+
+.gClosureVarWithVarInjectionChecks:
+ bineq t0, ClosureVarWithVarInjectionChecks, .gDynamic
+ varInjectionCheck(.gDynamic)
+ loadVariable(2, t0)
+ getClosureVar()
+ dispatch(8)
+
+.gDynamic:
+ callSlowPath(_llint_slow_path_get_from_scope)
+ dispatch(8)
+
+
+macro putProperty(slow)
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
+ loadisFromInstruction(6, t1)
+ storePropertyAtVariableOffset(t1, t0, t2, slow)
+end
+
+macro putGlobalVariable()
+ loadisFromInstruction(3, t0)
+ loadConstantOrVariable(t0, t1)
+ loadpFromInstruction(5, t2)
+ loadpFromInstruction(6, t0)
+ notifyWrite(t2, .pDynamic)
+ storeq t1, [t0]
+end
+
+macro putClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
+ loadisFromInstruction(6, t1)
+ storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
+end
+
+macro putLocalClosureVar()
+ loadisFromInstruction(3, t1)
+ loadConstantOrVariable(t1, t2)
+ loadpFromInstruction(5, t3)
+ btpz t3, .noVariableWatchpointSet
+ notifyWrite(t3, .pDynamic)
+.noVariableWatchpointSet:
+ loadisFromInstruction(6, t1)
+ storeq t2, JSEnvironmentRecord_variables[t0, t1, 8]
+end
+
+macro checkTDZInGlobalPutToScopeIfNecessary()
+ loadisFromInstruction(4, t0)
+ andi InitializationModeMask, t0
+ rshifti InitializationModeShift, t0
+ bieq t0, Initialization, .noNeedForTDZCheck
+ loadpFromInstruction(6, t0)
+ loadq [t0], t0
+ bqeq t0, ValueEmpty, .pDynamic
+.noNeedForTDZCheck:
+end
+
+
+_llint_op_put_to_scope:
+ traceExecution()
+ loadisFromInstruction(4, t0)
+ andi ResolveTypeMask, t0
+
+#pLocalClosureVar:
+ bineq t0, LocalClosureVar, .pGlobalProperty
+ writeBarrierOnOperands(1, 3)
+ loadVariable(1, t0)
+ putLocalClosureVar()
+ dispatch(7)
- loadp JSVariableObject::m_registers[t0], t0
- loadp [t0, t2, 8], t3
- storep t3, [cfr, t1, 8]
- loadp 32[PB, PC, 8], t1
- valueProfile(t3, t1)
+.pGlobalProperty:
+ bineq t0, GlobalProperty, .pGlobalVar
+ writeBarrierOnOperands(1, 3)
+ loadWithStructureCheck(1, .pDynamic)
+ putProperty(.pDynamic)
+ dispatch(7)
+
+.pGlobalVar:
+ bineq t0, GlobalVar, .pGlobalLexicalVar
+ writeBarrierOnGlobalObject(3)
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVar:
+ bineq t0, GlobalLexicalVar, .pClosureVar
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ checkTDZInGlobalPutToScopeIfNecessary()
+ putGlobalVariable()
+ dispatch(7)
+
+.pClosureVar:
+ bineq t0, ClosureVar, .pGlobalPropertyWithVarInjectionChecks
+ writeBarrierOnOperands(1, 3)
+ loadVariable(1, t0)
+ putClosureVar()
+ dispatch(7)
+
+.pGlobalPropertyWithVarInjectionChecks:
+ bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks
+ writeBarrierOnOperands(1, 3)
+ loadWithStructureCheck(1, .pDynamic)
+ putProperty(.pDynamic)
+ dispatch(7)
+
+.pGlobalVarWithVarInjectionChecks:
+ bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks
+ writeBarrierOnGlobalObject(3)
+ varInjectionCheck(.pDynamic)
+ putGlobalVariable()
+ dispatch(7)
+
+.pGlobalLexicalVarWithVarInjectionChecks:
+ bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks
+ writeBarrierOnGlobalLexicalEnvironment(3)
+ varInjectionCheck(.pDynamic)
+ checkTDZInGlobalPutToScopeIfNecessary()
+ putGlobalVariable()
+ dispatch(7)
+
+.pClosureVarWithVarInjectionChecks:
+ bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar
+ writeBarrierOnOperands(1, 3)
+ varInjectionCheck(.pDynamic)
+ loadVariable(1, t0)
+ putClosureVar()
+ dispatch(7)
+
+.pModuleVar:
+ bineq t0, ModuleVar, .pDynamic
+ callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error)
+ dispatch(7)
+
+.pDynamic:
+ callSlowPath(_llint_slow_path_put_to_scope)
+ dispatch(7)
+
+
+_llint_op_get_from_arguments:
+ traceExecution()
+ loadVariable(2, t0)
+ loadi 24[PB, PC, 8], t1
+ loadq DirectArguments_storage[t0, t1, 8], t0
+ valueProfile(t0, 4, t1)
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
dispatch(5)
-_llint_op_put_scoped_var:
+_llint_op_put_to_arguments:
traceExecution()
- getDeBruijnScope(16[PB, PC, 8], macro (scope, scratch) end)
- loadis 24[PB, PC, 8], t1
- loadConstantOrVariable(t1, t3)
- loadis 8[PB, PC, 8], t1
- writeBarrier(t3)
- loadp JSVariableObject::m_registers[t0], t0
- storep t3, [t0, t1, 8]
+ writeBarrierOnOperands(1, 3)
+ loadVariable(1, t0)
+ loadi 16[PB, PC, 8], t1
+ loadisFromInstruction(3, t3)
+ loadConstantOrVariable(t3, t2)
+ storeq t2, DirectArguments_storage[t0, t1, 8]
dispatch(4)
-macro nativeCallTrampoline(executableOffsetToFunction)
- storep 0, CodeBlock[cfr]
- if X86_64
- loadp JITStackFrame::vm + 8[sp], t0
- storep cfr, VM::topCallFrame[t0]
- loadp CallerFrame[cfr], t0
- loadq ScopeChain[t0], t1
- storeq t1, ScopeChain[cfr]
- peek 0, t1
- storep t1, ReturnPC[cfr]
- move cfr, t5 # t5 = rdi
- subp 16 - 8, sp
- loadp Callee[cfr], t4 # t4 = rsi
- loadp JSFunction::m_executable[t4], t1
- move t0, cfr # Restore cfr to avoid loading from stack
- call executableOffsetToFunction[t1]
- addp 16 - 8, sp
- loadp JITStackFrame::vm + 8[sp], t3
- elsif C_LOOP
- loadp CallerFrame[cfr], t0
- loadp ScopeChain[t0], t1
- storep t1, ScopeChain[cfr]
-
- loadp JITStackFrame::vm[sp], t3
- storep cfr, VM::topCallFrame[t3]
-
- move t0, t2
- preserveReturnAddressAfterCall(t3)
- storep t3, ReturnPC[cfr]
- move cfr, t0
- loadp Callee[cfr], t1
- loadp JSFunction::m_executable[t1], t1
- move t2, cfr
- cloopCallNative executableOffsetToFunction[t1]
+_llint_op_get_parent_scope:
+ traceExecution()
+ loadVariable(2, t0)
+ loadp JSScope::m_next[t0], t0
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(3)
- restoreReturnAddressBeforeReturn(t3)
- loadp JITStackFrame::vm[sp], t3
- else
- error
- end
- btqnz VM::exception[t3], .exception
- ret
-.exception:
- preserveReturnAddressAfterCall(t1)
- loadi ArgumentCount + TagOffset[cfr], PC
- loadp CodeBlock[cfr], PB
- loadp CodeBlock::m_instructions[PB], PB
- loadp JITStackFrame::vm[sp], t0
- storep cfr, VM::topCallFrame[t0]
- callSlowPath(_llint_throw_from_native_call)
- jmp _llint_throw_from_slow_path_trampoline
-end
+_llint_op_profile_type:
+ traceExecution()
+ loadp CodeBlock[cfr], t1
+ loadp CodeBlock::m_vm[t1], t1
+ # t1 is holding the pointer to the typeProfilerLog.
+ loadp VM::m_typeProfilerLog[t1], t1
+ # t2 is holding the pointer to the current log entry.
+ loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2
+
+ # t0 is holding the JSValue argument.
+ loadisFromInstruction(1, t3)
+ loadConstantOrVariable(t3, t0)
+
+ bqeq t0, ValueEmpty, .opProfileTypeDone
+ # Store the JSValue onto the log entry.
+ storeq t0, TypeProfilerLog::LogEntry::value[t2]
+
+ # Store the TypeLocation onto the log entry.
+ loadpFromInstruction(2, t3)
+ storep t3, TypeProfilerLog::LogEntry::location[t2]
+
+ btqz t0, tagMask, .opProfileTypeIsCell
+ storei 0, TypeProfilerLog::LogEntry::structureID[t2]
+ jmp .opProfileTypeSkipIsCell
+.opProfileTypeIsCell:
+ loadi JSCell::m_structureID[t0], t3
+ storei t3, TypeProfilerLog::LogEntry::structureID[t2]
+.opProfileTypeSkipIsCell:
+
+ # Increment the current log entry.
+ addp sizeof TypeProfilerLog::LogEntry, t2
+ storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1]
+
+ loadp TypeProfilerLog::m_logEndPtr[t1], t1
+ bpneq t2, t1, .opProfileTypeDone
+ callSlowPath(_slow_path_profile_type_clear_log)
+
+.opProfileTypeDone:
+ dispatch(6)
+
+_llint_op_profile_control_flow:
+ traceExecution()
+ loadpFromInstruction(1, t0)
+ addq 1, BasicBlockLocation::m_executionCount[t0]
+ dispatch(2)
+
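+# get_rest_length computes the number of rest arguments: the argument count (less
+# |this|) minus the instruction's second operand, clamped at zero and boxed as an
+# int32.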
+_llint_op_get_rest_length:
+ traceExecution()
+ loadi PayloadOffset + ArgumentCount[cfr], t0
+ subi 1, t0
+ loadisFromInstruction(2, t1)
+ bilteq t0, t1, .storeZero
+ subi t1, t0
+ jmp .boxUp
+.storeZero:
+ move 0, t0
+.boxUp:
+ orq tagTypeNumber, t0
+ loadisFromInstruction(1, t1)
+ storeq t0, [cfr, t1, 8]
+ dispatch(3)