author    Ryan Dahl <ry@tinyclouds.org>  2011-07-05 14:40:13 -0700
committer Ryan Dahl <ry@tinyclouds.org>  2011-07-05 14:51:29 -0700
commit    149562555c9bf56457dee9a1ad70c53ed670a776 (patch)
tree      f6217cf3c54ddbee03f37247a3c7c75203f868fd /deps/v8/src/x64/macro-assembler-x64.h
parent    f08720606757577d95bd09b48697c7decbf17f00 (diff)
Downgrade V8 to 3.1.8.25
There are serious performance regressions both in V8 and our own legacy networking stack. Until we correct our own problems we are going back to the old V8.
Diffstat (limited to 'deps/v8/src/x64/macro-assembler-x64.h')
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 1035
1 file changed, 797 insertions(+), 238 deletions(-)
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index f09fafc202..9557940a9a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -29,7 +29,6 @@
#define V8_X64_MACRO_ASSEMBLER_X64_H_
#include "assembler.h"
-#include "v8globals.h"
namespace v8 {
namespace internal {
@@ -45,24 +44,21 @@ enum AllocationFlags {
RESULT_CONTAINS_TOP = 1 << 1
};
-
// Default scratch register used by MacroAssembler (and other code that needs
// a spare register). The register isn't callee save, and not used by the
// function calling convention.
static const Register kScratchRegister = { 10 }; // r10.
-static const Register kSmiConstantRegister = { 12 }; // r12 (callee save).
+static const Register kSmiConstantRegister = { 15 }; // r15 (callee save).
static const Register kRootRegister = { 13 }; // r13 (callee save).
// Value of smi in kSmiConstantRegister.
static const int kSmiConstantRegisterValue = 1;
-// Actual value of root register is offset from the root array's start
-// to take advantage of negative 8-bit displacement values.
-static const int kRootRegisterBias = 128;
// Convenience for platform-independent signatures.
typedef Operand MemOperand;
// Forward declaration.
class JumpTarget;
+class PostCallGenerator;
struct SmiIndex {
SmiIndex(Register index_register, ScaleFactor scale)
@@ -75,64 +71,13 @@ struct SmiIndex {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
- // The isolate parameter can be NULL if the macro assembler should
- // not use isolate-dependent functionality. In this case, it's the
- // responsibility of the caller to never invoke such function on the
- // macro assembler.
- MacroAssembler(Isolate* isolate, void* buffer, int size);
-
- // Prevent the use of the RootArray during the lifetime of this
- // scope object.
- class NoRootArrayScope BASE_EMBEDDED {
- public:
- explicit NoRootArrayScope(MacroAssembler* assembler)
- : variable_(&assembler->root_array_available_),
- old_value_(assembler->root_array_available_) {
- assembler->root_array_available_ = false;
- }
- ~NoRootArrayScope() {
- *variable_ = old_value_;
- }
- private:
- bool* variable_;
- bool old_value_;
- };
-
- // Operand pointing to an external reference.
- // May emit code to set up the scratch register. The operand is
- // only guaranteed to be correct as long as the scratch register
- // isn't changed.
- // If the operand is used more than once, use a scratch register
- // that is guaranteed not to be clobbered.
- Operand ExternalOperand(ExternalReference reference,
- Register scratch = kScratchRegister);
- // Loads and stores the value of an external reference.
- // Special case code for load and store to take advantage of
- // load_rax/store_rax if possible/necessary.
- // For other operations, just use:
- // Operand operand = ExternalOperand(extref);
- // operation(operand, ..);
- void Load(Register destination, ExternalReference source);
- void Store(ExternalReference destination, Register source);
- // Loads the address of the external reference into the destination
- // register.
- void LoadAddress(Register destination, ExternalReference source);
- // Returns the size of the code generated by LoadAddress.
- // Used by CallSize(ExternalReference) to find the size of a call.
- int LoadAddressSize(ExternalReference source);
-
- // Operations on roots in the root-array.
+ MacroAssembler(void* buffer, int size);
+
void LoadRoot(Register destination, Heap::RootListIndex index);
- void StoreRoot(Register source, Heap::RootListIndex index);
- // Load a root value where the index (or part of it) is variable.
- // The variable_offset register is added to the fixed_offset value
- // to get the index into the root-array.
- void LoadRootIndexed(Register destination,
- Register variable_offset,
- int fixed_offset);
void CompareRoot(Register with, Heap::RootListIndex index);
void CompareRoot(const Operand& with, Heap::RootListIndex index);
void PushRoot(Heap::RootListIndex index);
+ void StoreRoot(Register source, Heap::RootListIndex index);
// ---------------------------------------------------------------------------
// GC Support
@@ -147,11 +92,11 @@ class MacroAssembler: public Assembler {
// Check if object is in new space. The condition cc can be equal or
// not_equal. If it is equal, the jump is taken if the object is in new
// space. The register scratch can be the object itself, but it will be clobbered.
+ template <typename LabelType>
void InNewSpace(Register object,
Register scratch,
Condition cc,
- Label* branch,
- Label::Distance near_jump = Label::kFar);
+ LabelType* branch);
// For page containing |object| mark region covering [object+offset]
// dirty. |object| is the object being stored into, |value| is the
@@ -231,56 +176,40 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Register src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
- void InitializeRootRegister() {
- ExternalReference roots_address =
- ExternalReference::roots_address(isolate());
- movq(kRootRegister, roots_address);
- addq(kRootRegister, Immediate(kRootRegisterBias));
- }
-
// ---------------------------------------------------------------------------
// JavaScript invokes
- // Setup call kind marking in rcx. The method takes rcx as an
- // explicit first parameter to make the code more readable at the
- // call sites.
- void SetCallKind(Register dst, CallKind kind);
-
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ PostCallGenerator* post_call_generator = NULL);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ PostCallGenerator* post_call_generator = NULL);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ PostCallGenerator* post_call_generator = NULL);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag,
- const CallWrapper& call_wrapper,
- CallKind call_kind);
+ PostCallGenerator* post_call_generator = NULL);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeFlag flag,
- const CallWrapper& call_wrapper = NullCallWrapper());
+ PostCallGenerator* post_call_generator = NULL);
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
@@ -333,20 +262,9 @@ class MacroAssembler: public Assembler {
Register src,
int power);
- // Perform the logical or of two smi values and return a smi value.
- // If either argument is not a smi, jump to on_not_smis and retain
- // the original values of source registers. The destination register
- // may be changed if it's not one of the source registers.
- void SmiOrIfSmis(Register dst,
- Register src1,
- Register src2,
- Label* on_not_smis,
- Label::Distance near_jump = Label::kFar);
-
- // Simple comparison of smis. Both sides must be known smis to use these,
- // otherwise use Cmp.
- void SmiCompare(Register smi1, Register smi2);
+ // Simple comparison of smis.
+ void SmiCompare(Register dst, Register src);
void SmiCompare(Register dst, Smi* src);
void SmiCompare(Register dst, const Operand& src);
void SmiCompare(const Operand& dst, Register src);
@@ -399,45 +317,42 @@ class MacroAssembler: public Assembler {
// above with a conditional jump.
// Jump if the value cannot be represented by a smi.
- void JumpIfNotValidSmiValue(Register src, Label* on_invalid,
- Label::Distance near_jump = Label::kFar);
+ template <typename LabelType>
+ void JumpIfNotValidSmiValue(Register src, LabelType* on_invalid);
// Jump if the unsigned integer value cannot be represented by a smi.
- void JumpIfUIntNotValidSmiValue(Register src, Label* on_invalid,
- Label::Distance near_jump = Label::kFar);
+ template <typename LabelType>
+ void JumpIfUIntNotValidSmiValue(Register src, LabelType* on_invalid);
// Jump to label if the value is a tagged smi.
- void JumpIfSmi(Register src,
- Label* on_smi,
- Label::Distance near_jump = Label::kFar);
+ template <typename LabelType>
+ void JumpIfSmi(Register src, LabelType* on_smi);
// Jump to label if the value is not a tagged smi.
- void JumpIfNotSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
+ template <typename LabelType>
+ void JumpIfNotSmi(Register src, LabelType* on_not_smi);
// Jump to label if the value is not a non-negative tagged smi.
- void JumpUnlessNonNegativeSmi(Register src,
- Label* on_not_smi,
- Label::Distance near_jump = Label::kFar);
+ template <typename LabelType>
+ void JumpUnlessNonNegativeSmi(Register src, LabelType* on_not_smi);
// Jump to label if the value, which must be a tagged smi, has value equal
// to the constant.
+ template <typename LabelType>
void JumpIfSmiEqualsConstant(Register src,
Smi* constant,
- Label* on_equals,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_equals);
// Jump if either or both register are not smi values.
+ template <typename LabelType>
void JumpIfNotBothSmi(Register src1,
Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_both_smi);
// Jump if either or both register are not non-negative smi values.
+ template <typename LabelType>
void JumpUnlessBothNonNegativeSmi(Register src1, Register src2,
- Label* on_not_both_smi,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_both_smi);
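
For context, these templated helpers dispatch on the label type instead of taking a Label::Distance argument: a 3.1-era NearLabel requests a short (8-bit) jump, a plain Label a far (32-bit) one. A minimal call-site sketch, assuming a MacroAssembler named masm (the register choices here are illustrative):

    NearLabel not_smi;                          // short jump target
    masm.JumpIfNotSmi(rax, &not_smi);           // instantiates LabelType = NearLabel
    // ... fast path for a smi in rax ...
    masm.bind(&not_smi);

    Label not_both;                             // far jump target
    masm.JumpIfNotBothSmi(rax, rbx, &not_both); // instantiates LabelType = Label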
// Operations on tagged smi values.
@@ -447,11 +362,11 @@ class MacroAssembler: public Assembler {
// Optimistically adds an integer constant to a supposed smi.
// If the src is not a smi, or the result is not a smi, jump to
// the label.
+ template <typename LabelType>
void SmiTryAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
// Add an integer constant to a tagged smi, giving a tagged smi as result.
// No overflow testing on the result is done.
@@ -463,11 +378,11 @@ class MacroAssembler: public Assembler {
// Add an integer constant to a tagged smi, giving a tagged smi as result,
// or jumping to a label if the result cannot be represented by a smi.
+ template <typename LabelType>
void SmiAddConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result. No testing on the result is done. Sets the N and Z flags
@@ -476,32 +391,27 @@ class MacroAssembler: public Assembler {
// Subtract an integer constant from a tagged smi, giving a tagged smi as
// result, or jumping to a label if the result cannot be represented by a smi.
+ template <typename LabelType>
void SmiSubConstant(Register dst,
Register src,
Smi* constant,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
// Negating a smi can give a negative zero or too large positive value.
// NOTICE: This operation jumps on success, not failure!
+ template <typename LabelType>
void SmiNeg(Register dst,
Register src,
- Label* on_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_smi_result);
// Adds smi values and returns the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiAdd(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
- void SmiAdd(Register dst,
- Register src1,
- const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
void SmiAdd(Register dst,
Register src1,
@@ -510,21 +420,21 @@ class MacroAssembler: public Assembler {
// Subtracts smi values and returns the result as a smi.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiSub(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
void SmiSub(Register dst,
Register src1,
Register src2);
+ template <typename LabelType>
void SmiSub(Register dst,
Register src1,
const Operand& src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
void SmiSub(Register dst,
Register src1,
@@ -534,27 +444,27 @@ class MacroAssembler: public Assembler {
// if possible.
// If dst is src1, then src1 will be destroyed, even if
// the operation is unsuccessful.
+ template <typename LabelType>
void SmiMul(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
// Divides one smi by another and returns the quotient.
// Clobbers rax and rdx registers.
+ template <typename LabelType>
void SmiDiv(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
// Divides one smi by another and returns the remainder.
// Clobbers rax and rdx registers.
+ template <typename LabelType>
void SmiMod(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
// Bitwise operations.
void SmiNot(Register dst, Register src);
@@ -568,11 +478,11 @@ class MacroAssembler: public Assembler {
void SmiShiftLeftConstant(Register dst,
Register src,
int shift_value);
+ template <typename LabelType>
void SmiShiftLogicalRightConstant(Register dst,
Register src,
int shift_value,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
void SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value);
@@ -585,11 +495,11 @@ class MacroAssembler: public Assembler {
// Shifts a smi value to the right, shifting in zero bits at the top, and
// returns the unsigned interpretation of the result if that is a smi.
// Uses and clobbers rcx, so dst may not be rcx.
+ template <typename LabelType>
void SmiShiftLogicalRight(Register dst,
Register src1,
Register src2,
- Label* on_not_smi_result,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smi_result);
// Shifts a smi value to the right, sign extending the top, and
// returns the signed interpretation of the result. That will always
// be a valid smi value, since it's numerically smaller than the
@@ -603,11 +513,11 @@ class MacroAssembler: public Assembler {
// Select the non-smi register of two registers where exactly one is a
// smi. If neither are smis, jump to the failure label.
+ template <typename LabelType>
void SelectNonSmi(Register dst,
Register src1,
Register src2,
- Label* on_not_smis,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_not_smis);
// Converts, if necessary, a smi to a combination of number and
// multiplier to be used as a scaled index.
@@ -622,10 +532,6 @@ class MacroAssembler: public Assembler {
// Converts a positive smi to a negative index.
SmiIndex SmiToNegativeIndex(Register dst, Register src, int shift);
- // Add the value of a smi in memory to an int32 register.
- // Sets flags as a normal add.
- void AddSmiField(Register dst, const Operand& src);
-
// Basic Smi operations.
void Move(Register dst, Smi* source) {
LoadSmiConstant(dst, source);
@@ -643,36 +549,35 @@ class MacroAssembler: public Assembler {
// String macros.
// If object is a string, its map is loaded into object_map.
+ template <typename LabelType>
void JumpIfNotString(Register object,
Register object_map,
- Label* not_string,
- Label::Distance near_jump = Label::kFar);
+ LabelType* not_string);
- void JumpIfNotBothSequentialAsciiStrings(
- Register first_object,
- Register second_object,
- Register scratch1,
- Register scratch2,
- Label* on_not_both_flat_ascii,
- Label::Distance near_jump = Label::kFar);
+ template <typename LabelType>
+ void JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ LabelType* on_not_both_flat_ascii);
// Check whether the instance type represents a flat ascii string. Jump to the
// label if not. If the instance type can be scratched specify same register
// for both instance type and scratch.
+ template <typename LabelType>
void JumpIfInstanceTypeIsNotSequentialAscii(
Register instance_type,
Register scratch,
- Label*on_not_flat_ascii_string,
- Label::Distance near_jump = Label::kFar);
+ LabelType *on_not_flat_ascii_string);
+ template <typename LabelType>
void JumpIfBothInstanceTypesAreNotSequentialAscii(
Register first_object_instance_type,
Register second_object_instance_type,
Register scratch1,
Register scratch2,
- Label* on_fail,
- Label::Distance near_jump = Label::kFar);
+ LabelType* on_fail);
// ---------------------------------------------------------------------------
// Macro instructions.
@@ -689,8 +594,6 @@ class MacroAssembler: public Assembler {
void Move(const Operand& dst, Handle<Object> source);
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
- void Cmp(Register dst, Smi* src);
- void Cmp(const Operand& dst, Smi* src);
void Push(Handle<Object> source);
// Emit code to discard a non-negative number of pointer-sized elements
@@ -706,27 +609,7 @@ class MacroAssembler: public Assembler {
void Call(Address destination, RelocInfo::Mode rmode);
void Call(ExternalReference ext);
- void Call(Handle<Code> code_object,
- RelocInfo::Mode rmode,
- unsigned ast_id = kNoASTId);
-
- // The size of the code generated for different call instructions.
- int CallSize(Address destination, RelocInfo::Mode rmode) {
- return kCallInstructionLength;
- }
- int CallSize(ExternalReference ext);
- int CallSize(Handle<Code> code_object) {
- // Code calls use 32-bit relative addressing.
- return kShortCallInstructionLength;
- }
- int CallSize(Register target) {
- // Opcode: REX_opt FF /2 m64
- return (target.high_bit() != 0) ? 3 : 2;
- }
- int CallSize(const Operand& target) {
- // Opcode: REX_opt FF /2 m64
- return (target.requires_rex() ? 2 : 1) + target.operand_size();
- }
+ void Call(Handle<Code> code_object, RelocInfo::Mode rmode);
// Emit call to the code we are currently generating.
void CallSelf() {
@@ -754,27 +637,13 @@ class MacroAssembler: public Assembler {
// Always use unsigned comparisons: above and below, not less and greater.
void CmpInstanceType(Register map, InstanceType type);
- // Check if a map for a JSObject indicates that the object has fast elements.
- // Jump to the specified label if it does not.
- void CheckFastElements(Register map,
- Label* fail,
- Label::Distance distance = Label::kFar);
-
// Check if the map of an object is equal to a specified map and
// branch to label if not. Skip the smi check if not required
// (object is known to be a heap object)
void CheckMap(Register obj,
Handle<Map> map,
Label* fail,
- SmiCheckType smi_check_type);
-
- // Check if the map of an object is equal to a specified map and branch to a
- // specified target if equal. Skip the smi check if not required (object is
- // known to be a heap object)
- void DispatchMap(Register obj,
- Handle<Map> map,
- Handle<Code> success,
- SmiCheckType smi_check_type);
+ bool is_heap_object);
// Check if the object in register heap_object is a string. Afterwards the
// register map contains the object map and the register instance_type
@@ -790,15 +659,6 @@ class MacroAssembler: public Assembler {
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
- void ClampUint8(Register reg);
-
- void ClampDoubleToUint8(XMMRegister input_reg,
- XMMRegister temp_xmm_reg,
- Register result_reg,
- Register temp_reg);
-
- void LoadInstanceDescriptors(Register map, Register descriptors);
-
// Abort execution if argument is not a number. Used in debug code.
void AbortIfNotNumber(Register object);
@@ -807,7 +667,6 @@ class MacroAssembler: public Assembler {
// Abort execution if argument is not a smi. Used in debug code.
void AbortIfNotSmi(Register object);
- void AbortIfNotSmi(const Operand& object);
// Abort execution if argument is a string. Used in debug code.
void AbortIfNotString(Register object);
@@ -971,7 +830,7 @@ class MacroAssembler: public Assembler {
// Runtime calls
// Call a code stub.
- void CallStub(CodeStub* stub, unsigned ast_id = kNoASTId);
+ void CallStub(CodeStub* stub);
// Call a code stub and return the code object called. Try to generate
// the code if necessary. Do not perform a GC but instead return a retry
@@ -990,7 +849,7 @@ class MacroAssembler: public Assembler {
void StubReturn(int argc);
// Call a runtime routine.
- void CallRuntime(const Runtime::Function* f, int num_arguments);
+ void CallRuntime(Runtime::Function* f, int num_arguments);
// Call a runtime function and save the value of XMM registers.
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
@@ -998,7 +857,7 @@ class MacroAssembler: public Assembler {
// Call a runtime function, returning the CodeStub object called.
// Try to generate the stub code if necessary. Do not perform a GC
// but instead return a retry after GC failure.
- MUST_USE_RESULT MaybeObject* TryCallRuntime(const Runtime::Function* f,
+ MUST_USE_RESULT MaybeObject* TryCallRuntime(Runtime::Function* f,
int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
@@ -1047,7 +906,7 @@ class MacroAssembler: public Assembler {
// Calls an API function. Allocates HandleScope, extracts
// returned value from handle and propagates exceptions.
- // Clobbers r14, r15, rbx and caller-save registers. Restores context.
+ // Clobbers r12, r14, rbx and caller-save registers. Restores context.
// On return removes stack_space * kPointerSize (GCed).
MUST_USE_RESULT MaybeObject* TryCallApiFunctionAndReturn(
ApiFunction* function, int stack_space);
@@ -1082,22 +941,7 @@ class MacroAssembler: public Assembler {
// may be bigger than 2^16 - 1. Requires a scratch register.
void Ret(int bytes_dropped, Register scratch);
- Handle<Object> CodeObject() {
- ASSERT(!code_object_.is_null());
- return code_object_;
- }
-
- // Copy length bytes from source to destination.
- // Uses scratch register internally (if you have a low-eight register
- // free, do use it, otherwise kScratchRegister will be used).
- // The min_length is a minimum limit on the value that length will have.
- // The algorithm has some special cases that might be omitted if the string
- // is known to always be long.
- void CopyBytes(Register destination,
- Register source,
- Register length,
- int min_length = 0,
- Register scratch = kScratchRegister);
+ Handle<Object> CodeObject() { return code_object_; }
// ---------------------------------------------------------------------------
@@ -1138,14 +982,12 @@ class MacroAssembler: public Assembler {
private:
// Order general registers are pushed by Pushad.
- // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r14, r15.
+ // rax, rcx, rdx, rbx, rsi, rdi, r8, r9, r11, r12, r14.
static int kSafepointPushRegisterIndices[Register::kNumRegisters];
static const int kNumSafepointSavedRegisters = 11;
- static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
bool generating_stub_;
bool allow_stub_calls_;
- bool root_array_available_;
// Returns a register holding the smi value. The register MUST NOT be
// modified. It may be the "smi 1 constant" register.
@@ -1158,15 +1000,14 @@ class MacroAssembler: public Assembler {
Handle<Object> code_object_;
// Helper functions for generating invokes.
+ template <typename LabelType>
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
Register code_register,
- Label* done,
+ LabelType* done,
InvokeFlag flag,
- Label::Distance near_jump = Label::kFar,
- const CallWrapper& call_wrapper = NullCallWrapper(),
- CallKind call_kind = CALL_AS_METHOD);
+ PostCallGenerator* post_call_generator);
// Activation support.
void EnterFrame(StackFrame::Type type);
@@ -1231,6 +1072,17 @@ class CodePatcher {
};
+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+ PostCallGenerator() { }
+ virtual ~PostCallGenerator() { }
+ virtual void Generate() = 0;
+};
+
+
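
To make the restored hook concrete, here is a minimal sketch of a PostCallGenerator subclass; LCodeGen and RecordSafepoint() are illustrative assumptions, not part of this header:

    // Sketch: emit safepoint data immediately after a call returns.
    // LCodeGen and RecordSafepoint() are hypothetical stand-ins.
    class SafepointGenerator : public PostCallGenerator {
     public:
      explicit SafepointGenerator(LCodeGen* codegen) : codegen_(codegen) { }
      virtual void Generate() { codegen_->RecordSafepoint(); }
     private:
      LCodeGen* codegen_;
    };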
// -----------------------------------------------------------------------------
// Static helper functions.
@@ -1292,6 +1144,713 @@ extern void LogGeneratedCodeCoverage(const char* file_line);
#define ACCESS_MASM(masm) masm->
#endif
+// -----------------------------------------------------------------------------
+// Template implementations.
+
+static int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+
+template <typename LabelType>
+void MacroAssembler::SmiNeg(Register dst,
+ Register src,
+ LabelType* on_smi_result) {
+ if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ movq(kScratchRegister, src);
+ neg(dst); // Low 32 bits are retained as zero by negation.
+ // Test if result is zero or Smi::kMinValue.
+ cmpq(dst, kScratchRegister);
+ j(not_equal, on_smi_result);
+ movq(src, kScratchRegister);
+ } else {
+ movq(dst, src);
+ neg(dst);
+ cmpq(dst, src);
+ // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+ j(not_equal, on_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAdd(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src1);
+ addq(kScratchRegister, src2);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ addq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ ASSERT(!dst.is(src2));
+ if (dst.is(src1)) {
+ cmpq(dst, src2);
+ j(overflow, on_not_smi_result);
+ subq(dst, src2);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSub(Register dst,
+ Register src1,
+ const Operand& src2,
+ LabelType* on_not_smi_result) {
+ ASSERT_NOT_NULL(on_not_smi_result);
+ if (dst.is(src1)) {
+ movq(kScratchRegister, src2);
+ cmpq(src1, kScratchRegister);
+ j(overflow, on_not_smi_result);
+ subq(src1, kScratchRegister);
+ } else {
+ movq(dst, src1);
+ subq(dst, src2);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMul(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(src2));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+
+ if (dst.is(src1)) {
+ NearLabel failure, zero_correct_result;
+ movq(kScratchRegister, src1); // Create backup for later testing.
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, &failure);
+
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ NearLabel correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+
+ movq(dst, kScratchRegister);
+ xor_(dst, src2);
+ j(positive, &zero_correct_result); // Result was positive zero.
+
+ bind(&failure); // Reused failure exit, restores src1.
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+
+ bind(&zero_correct_result);
+ Set(dst, 0);
+
+ bind(&correct_result);
+ } else {
+ SmiToInteger64(dst, src1);
+ imul(dst, src2);
+ j(overflow, on_not_smi_result);
+ // Check for negative zero result. If product is zero, and one
+ // argument is negative, go to slow case.
+ NearLabel correct_result;
+ testq(dst, dst);
+ j(not_zero, &correct_result);
+ // One of src1 and src2 is zero, so check whether the other is
+ // negative.
+ movq(kScratchRegister, src1);
+ xor_(kScratchRegister, src2);
+ j(negative, on_not_smi_result);
+ bind(&correct_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiTryAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ // Does not assume that src is a smi.
+ ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src.is(kScratchRegister));
+
+ JumpIfNotSmi(src, on_not_smi_result);
+ Register tmp = (dst.is(src) ? kScratchRegister : dst);
+ LoadSmiConstant(tmp, constant);
+ addq(tmp, src);
+ j(overflow, on_not_smi_result);
+ if (dst.is(src)) {
+ movq(dst, tmp);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiAddConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+
+ LoadSmiConstant(kScratchRegister, constant);
+ addq(kScratchRegister, src);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ } else {
+ LoadSmiConstant(dst, constant);
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiSubConstant(Register dst,
+ Register src,
+ Smi* constant,
+ LabelType* on_not_smi_result) {
+ if (constant->value() == 0) {
+ if (!dst.is(src)) {
+ movq(dst, src);
+ }
+ } else if (dst.is(src)) {
+ ASSERT(!dst.is(kScratchRegister));
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test the non-negativeness before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ LoadSmiConstant(kScratchRegister, constant);
+ subq(dst, kScratchRegister);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+ addq(kScratchRegister, dst);
+ j(overflow, on_not_smi_result);
+ movq(dst, kScratchRegister);
+ }
+ } else {
+ if (constant->value() == Smi::kMinValue) {
+ // Subtracting min-value from any non-negative value will overflow.
+ // We test the non-negativeness before doing the subtraction.
+ testq(src, src);
+ j(not_sign, on_not_smi_result);
+ LoadSmiConstant(dst, constant);
+ // Adding and subtracting the min-value gives the same result, it only
+ // differs on the overflow bit, which we don't check here.
+ addq(dst, src);
+ } else {
+ // Subtract by adding the negation.
+ LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+ addq(dst, src);
+ j(overflow, on_not_smi_result);
+ }
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiDiv(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+
+ // Check for 0 divisor (result is +/-Infinity).
+ NearLabel positive_divisor;
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ // We need to rule out dividing Smi::kMinValue by -1, since that would
+ // overflow in idiv and raise an exception.
+ // We combine this with negative zero test (negative zero only happens
+ // when dividing zero by a negative number).
+
+ // We overshoot a little and go to slow case if we divide min-value
+ // by any negative value, not just -1.
+ NearLabel safe_div;
+ testl(rax, Immediate(0x7fffffff));
+ j(not_zero, &safe_div);
+ testq(src2, src2);
+ if (src1.is(rax)) {
+ j(positive, &safe_div);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ } else {
+ j(negative, on_not_smi_result);
+ }
+ bind(&safe_div);
+
+ SmiToInteger32(src2, src2);
+ // Sign extend src1 into edx:eax.
+ cdq();
+ idivl(src2);
+ Integer32ToSmi(src2, src2);
+ // Check that the remainder is zero.
+ testl(rdx, rdx);
+ if (src1.is(rax)) {
+ NearLabel smi_result;
+ j(zero, &smi_result);
+ movq(src1, kScratchRegister);
+ jmp(on_not_smi_result);
+ bind(&smi_result);
+ } else {
+ j(not_zero, on_not_smi_result);
+ }
+ if (!dst.is(src1) && src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ Integer32ToSmi(dst, rax);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiMod(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!src2.is(rax));
+ ASSERT(!src2.is(rdx));
+ ASSERT(!src1.is(rdx));
+ ASSERT(!src1.is(src2));
+
+ testq(src2, src2);
+ j(zero, on_not_smi_result);
+
+ if (src1.is(rax)) {
+ movq(kScratchRegister, src1);
+ }
+ SmiToInteger32(rax, src1);
+ SmiToInteger32(src2, src2);
+
+ // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+ NearLabel safe_div;
+ cmpl(rax, Immediate(Smi::kMinValue));
+ j(not_equal, &safe_div);
+ cmpl(src2, Immediate(-1));
+ j(not_equal, &safe_div);
+ // Retag inputs and go slow case.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&safe_div);
+
+ // Sign extend eax into edx:eax.
+ cdq();
+ idivl(src2);
+ // Restore smi tags on inputs.
+ Integer32ToSmi(src2, src2);
+ if (src1.is(rax)) {
+ movq(src1, kScratchRegister);
+ }
+ // Check for a negative zero result. If the result is zero, and the
+ // dividend is negative, go slow to return a floating point negative zero.
+ NearLabel smi_result;
+ testl(rdx, rdx);
+ j(not_zero, &smi_result);
+ testq(src1, src1);
+ j(negative, on_not_smi_result);
+ bind(&smi_result);
+ Integer32ToSmi(dst, rdx);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRightConstant(
+ Register dst, Register src, int shift_value, LabelType* on_not_smi_result) {
+ // Logical right shift interprets its result as an *unsigned* number.
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ movq(dst, src);
+ if (shift_value == 0) {
+ testq(dst, dst);
+ j(negative, on_not_smi_result);
+ }
+ shr(dst, Immediate(shift_value + kSmiShift));
+ shl(dst, Immediate(kSmiShift));
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smi_result) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(rcx));
+ // dst and src1 can be the same, because the one case that bails out
+ // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+ NearLabel result_ok;
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (!dst.is(src1)) {
+ movq(dst, src1);
+ }
+ SmiToInteger32(rcx, src2);
+ orl(rcx, Immediate(kSmiShift));
+ shr_cl(dst); // Shift amount is (rcx & 0x1f) + 32.
+ shl(dst, Immediate(kSmiShift));
+ testq(dst, dst);
+ if (src1.is(rcx) || src2.is(rcx)) {
+ NearLabel positive_result;
+ j(positive, &positive_result);
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ jmp(on_not_smi_result);
+ bind(&positive_result);
+ } else {
+ j(negative, on_not_smi_result); // src2 was zero and src1 negative.
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::SelectNonSmi(Register dst,
+ Register src1,
+ Register src2,
+ LabelType* on_not_smis) {
+ ASSERT(!dst.is(kScratchRegister));
+ ASSERT(!src1.is(kScratchRegister));
+ ASSERT(!src2.is(kScratchRegister));
+ ASSERT(!dst.is(src1));
+ ASSERT(!dst.is(src2));
+ // Both operands must not be smis.
+#ifdef DEBUG
+ if (allow_stub_calls()) { // Check contains a stub call.
+ Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+ Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+ }
+#endif
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(0, Smi::FromInt(0));
+ movl(kScratchRegister, Immediate(kSmiTagMask));
+ and_(kScratchRegister, src1);
+ testl(kScratchRegister, src2);
+ // If non-zero then neither operand is a smi.
+ j(not_zero, on_not_smis);
+
+ // Exactly one operand is a smi.
+ ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ // kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
+ subq(kScratchRegister, Immediate(1));
+ // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
+ movq(dst, src1);
+ xor_(dst, src2);
+ and_(dst, kScratchRegister);
+ // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+ xor_(dst, src1);
+ // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmi(Register src, LabelType* on_smi) {
+ ASSERT_EQ(0, kSmiTag);
+ Condition smi = CheckSmi(src);
+ j(smi, on_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotSmi(Register src, LabelType* on_not_smi) {
+ Condition smi = CheckSmi(src);
+ j(NegateCondition(smi), on_not_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpUnlessNonNegativeSmi(
+ Register src, LabelType* on_not_smi_or_negative) {
+ Condition non_negative_smi = CheckNonNegativeSmi(src);
+ j(NegateCondition(non_negative_smi), on_not_smi_or_negative);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+ Smi* constant,
+ LabelType* on_equals) {
+ SmiCompare(src, constant);
+ j(equal, on_equals);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+ LabelType* on_invalid) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+ LabelType* on_invalid) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(NegateCondition(is_valid), on_invalid);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi) {
+ Condition both_smi = CheckBothSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
+ Register src2,
+ LabelType* on_not_both_smi) {
+ Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
+ j(NegateCondition(both_smi), on_not_both_smi);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotString(Register object,
+ Register object_map,
+ LabelType* not_string) {
+ Condition is_smi = CheckSmi(object);
+ j(is_smi, not_string);
+ CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+ j(above_equal, not_string);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first_object,
+ Register second_object,
+ Register scratch1,
+ Register scratch2,
+ LabelType* on_fail) {
+ // Check that both objects are not smis.
+ Condition either_smi = CheckEitherSmi(first_object, second_object);
+ j(either_smi, on_fail);
+
+ // Load instance type for both strings.
+ movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+ movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+ movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+ movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
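
The lea/cmpl pair above packs both masked instance-type bytes into one comparison: because kFlatAsciiStringMask does not overlap itself shifted left by 3 (the ASSERT_EQ checks exactly this), scratch1 + scratch2 * 8 produces no carries and equals scratch1 | (scratch2 << 3). A self-contained check of the arithmetic, assuming the 3.1-era mask value 0x87:

    #include <cassert>
    int main() {
      const int kFlatAsciiStringMask = 0x87;  // assumed 3.1-era value
      assert((kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)) == 0);
      int a = 0x04, b = 0x04;                 // two example masked type bytes
      assert(a + b * 8 == (a | (b << 3)));    // what the lea computes
      return 0;
    }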
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+ Register instance_type,
+ Register scratch,
+ LabelType *failure) {
+ if (!scratch.is(instance_type)) {
+ movl(scratch, instance_type);
+ }
+
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+ andl(scratch, Immediate(kFlatAsciiStringMask));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+ j(not_equal, failure);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+ Register first_object_instance_type,
+ Register second_object_instance_type,
+ Register scratch1,
+ Register scratch2,
+ LabelType* on_fail) {
+ // Load instance type for both strings.
+ movq(scratch1, first_object_instance_type);
+ movq(scratch2, second_object_instance_type);
+
+ // Check that both are flat ascii strings.
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+ andl(scratch1, Immediate(kFlatAsciiStringMask));
+ andl(scratch2, Immediate(kFlatAsciiStringMask));
+ // Interleave the bits to check both scratch1 and scratch2 in one test.
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmpl(scratch1,
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+ j(not_equal, on_fail);
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InNewSpace(Register object,
+ Register scratch,
+ Condition cc,
+ LabelType* branch) {
+ if (Serializer::enabled()) {
+ // Can't do arithmetic on external references if it might get serialized.
+ // The mask isn't really an address. We load it as an external reference in
+ // case the size of the new space is different between the snapshot maker
+ // and the running system.
+ if (scratch.is(object)) {
+ movq(kScratchRegister, ExternalReference::new_space_mask());
+ and_(scratch, kScratchRegister);
+ } else {
+ movq(scratch, ExternalReference::new_space_mask());
+ and_(scratch, object);
+ }
+ movq(kScratchRegister, ExternalReference::new_space_start());
+ cmpq(scratch, kScratchRegister);
+ j(cc, branch);
+ } else {
+ ASSERT(is_int32(static_cast<int64_t>(Heap::NewSpaceMask())));
+ intptr_t new_space_start =
+ reinterpret_cast<intptr_t>(Heap::NewSpaceStart());
+ movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+ if (scratch.is(object)) {
+ addq(scratch, kScratchRegister);
+ } else {
+ lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+ }
+ and_(scratch, Immediate(static_cast<int32_t>(Heap::NewSpaceMask())));
+ j(cc, branch);
+ }
+}
+
+
+template <typename LabelType>
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+ const ParameterCount& actual,
+ Handle<Code> code_constant,
+ Register code_register,
+ LabelType* done,
+ InvokeFlag flag,
+ PostCallGenerator* post_call_generator) {
+ bool definitely_matches = false;
+ NearLabel invoke;
+ if (expected.is_immediate()) {
+ ASSERT(actual.is_immediate());
+ if (expected.immediate() == actual.immediate()) {
+ definitely_matches = true;
+ } else {
+ Set(rax, actual.immediate());
+ if (expected.immediate() ==
+ SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+ // Don't worry about adapting arguments for built-ins that
+ // don't want that done. Skip adaptation code by making it look
+ // like we have a match between expected and actual number of
+ // arguments.
+ definitely_matches = true;
+ } else {
+ Set(rbx, expected.immediate());
+ }
+ }
+ } else {
+ if (actual.is_immediate()) {
+ // Expected is in register, actual is immediate. This is the
+ // case when we invoke function values without going through the
+ // IC mechanism.
+ cmpq(expected.reg(), Immediate(actual.immediate()));
+ j(equal, &invoke);
+ ASSERT(expected.reg().is(rbx));
+ Set(rax, actual.immediate());
+ } else if (!expected.reg().is(actual.reg())) {
+ // Both expected and actual are in (different) registers. This
+ // is the case when we invoke functions using call and apply.
+ cmpq(expected.reg(), actual.reg());
+ j(equal, &invoke);
+ ASSERT(actual.reg().is(rax));
+ ASSERT(expected.reg().is(rbx));
+ }
+ }
+
+ if (!definitely_matches) {
+ Handle<Code> adaptor =
+ Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ if (!code_constant.is_null()) {
+ movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+ addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+ } else if (!code_register.is(rdx)) {
+ movq(rdx, code_register);
+ }
+
+ if (flag == CALL_FUNCTION) {
+ Call(adaptor, RelocInfo::CODE_TARGET);
+ if (post_call_generator != NULL) post_call_generator->Generate();
+ jmp(done);
+ } else {
+ Jump(adaptor, RelocInfo::CODE_TARGET);
+ }
+ bind(&invoke);
+ }
+}
+
+
} } // namespace v8::internal
#endif // V8_X64_MACRO_ASSEMBLER_X64_H_